blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5317b35317ba1ab2da314d6bd8ad9be085d19480 | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /nn_ns/parsing/FS/readme.py | d02accd2f405f4744acde147164ec31870528870 | [] | no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | 3 froms:
FSM, [Rule], regular_expression
FSM:
TotalState : FSM
formal - FSM{initial :: TotalState,
finals :: Set TotalState,
error :: TotalState,
transition :: Map TotalState (Map Symbol TotalState)
}
informal - NFSM{initials :: Set PartialState,
finals :: Set PartialState,
error :: Set PartialState, # empty_set
transition :: Map PartialState (Map (Maybe Symbol) (Set PartialState))
}
PartialState : FA
# esp cleaned_dfa
formal - DFA {initial :: Maybe PartialState,
finals :: Set PartialState,
error :: Maybe PartialState, # nothing
transition :: Map PartialState (Map Symbol PartialState)
}
informal - NDFA {initials :: Set PartialState,
finals :: Set PartialState,
error :: Set PartialState, # empty_set
transition :: Map PartialState (Map (Maybe Symbol) (Set PartialState))
}
{initials::Set PartialState, transition::[Rule]}: # NDFA-RuleForm # a direct map into/from a NDFA
FormalNDFARule :: (PartialState, Maybe (Maybe Symbol, PartialState))
(a, Nothing) -> [a in finals]
(a, Just (maybe_symbol, b)) -> "a = maybe_symbol b"
InformalNDFARule :: (Nonterminal, [Symbol], Maybe Nonterminal)
where PartialState = (Nonterminal, Integer)
(a, ls, Nothing) -> [(a, len(ls)) in finals]
regular_expression: # RE-RuleForm # using star but without recur (even tail-recur) # DAG
BasicRe a = ReConcat [BasicRe a]
| ReUnion [BasicRe a]
| ReStar (BasicRe a)
| ReSymbol a
ExtendedRe a = BasicRe a
| ReComplement a
| ReIntersect a
| [
"[email protected]"
] | |
51a7abc5c786abeb3e55dc95ed53aef57e85b34a | 94df6050f2a262da23f62dd678ccc4366b7657fc | /temporary/bin/tqdm | 9efa8c44cdb5ce4f8dc9684a0b3710267894d570 | [] | no_license | EkenePhDAVHV/phd-autonomous-cars-frank | 29cc2fc608db53d4d060422022dc5019cf6360f0 | 1daed3425bfad99dac31543fbeb7950e25aa2878 | refs/heads/main | 2023-04-29T06:02:59.444072 | 2021-05-23T11:04:07 | 2021-05-23T11:04:07 | 357,157,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/home/ekene/PycharmProjects/phd-autonomous-cars-frank/temporary/bin/python
# -*- coding: utf-8 -*-
# Console entry point for tqdm: delegates to tqdm.cli.main.
import re
import sys
from tqdm.cli import main
if __name__ == '__main__':
    # Strip setuptools' '-script.pyw' / '.exe' suffix from argv[0] so the
    # program name reported by tqdm is clean on Windows entry points.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | ||
b3673d87cd687e139daef7c90d95e0a9126b841d | 954df5fb1ceaf64fe3004e0b072b78024065cdd0 | /virtual/Lib/site-packages/future/moves/_markupbase.py | 41c4f96c55552b677772b61ba497a16ba84b3df8 | [] | no_license | chelseyrandolph/cs440_DatabaseUI | 7dc5b4c3d0a4e72023db61f4a613fc889bc69f86 | 28355cdfe0f4732568f1f8e43e2ce7809b4fc260 | refs/heads/master | 2022-06-06T19:18:31.819483 | 2020-05-05T20:51:58 | 2020-05-05T20:51:58 | 259,436,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from __future__ import absolute_import
from future.utils import PY3
if PY3:
    # Python 3 already ships _markupbase; nothing to re-export here.
    pass
else:
    # Python 2: mark this as a future-library shim and re-export the
    # py2 stdlib 'markupbase' module under the py3 name.
    __future_module__ = True
    from markupbase import *
| [
"[email protected]"
] | |
17af078221d30f88e222eb9d6c5861dc1a20e88a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_disintegrating.py | 532e7be5907664219ed247bb6f5173a80c0ad3de | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py |
from xai.brain.wordbase.verbs._disintegrate import _DISINTEGRATE
# class header
class _DISINTEGRATING(_DISINTEGRATE, ):
    """Word entry for "disintegrating"; inherits data/behavior from the
    base verb entry _DISINTEGRATE and overrides only identifying fields."""
    def __init__(self,):
        _DISINTEGRATE.__init__(self)
        self.name = "DISINTEGRATING"      # surface form of the word
        self.specie = 'verbs'             # word category bucket
        self.basic = "disintegrate"       # lemma / base form
        self.jsondata = {}                # extra metadata, empty by default
| [
"[email protected]"
] | |
50ac6709acfc86d952d4ef089c648926671f477b | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/sacremoses/corpus.py | 721a5bbd1be20eb165825ba3c6dae6936c425b69 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d72f15b792d94c6d388af9f8c568d412c657f055c925abdae378464c275c54a4
size 5016
| [
"[email protected]"
] | |
d31f11bddf9791dee17880f0c0425c13ad495a90 | ab6c6559d9cfac36c3c4ece192fa2300767662d1 | /Python Game Development for Beginners - Working Files/Chapter 5/Increasing Challenge with Levels Part I/main.py | 38ecd797fc5c5f6c15d8d84cfd91391e4842c047 | [] | no_license | Igor-Nosatov/PythonGameDev_Trinket | 962b86572c74c64652a24768dfec2101fcae221f | e6166f69307ded6880b0aaa3299c0a151807bb9c | refs/heads/master | 2020-06-24T20:22:57.187289 | 2016-05-03T10:33:26 | 2016-05-03T10:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,111 | py | # import the turtle module so we can use all the neat code it contains
import turtle
from helpercode import BoxTurtle, printwin, checkpos, maketurtles
from time import sleep
from random import randint, choice
# Create variables to contain our BoxTurtle objects
boxturtles = maketurtles()
# Create a variable `tina` that is a Turtle() object. Set shape to 'turtle'
tina = turtle.Turtle()
tina.shape('turtle')
tina.penup()
# Create a variable `screen`, a Screen() object, that will handle key presses
screen = turtle.Screen()
# Keyboard controls
def go_left():
    # Rotate tina counter-clockwise by 11 degrees.
    tina.left(11)
def go_right():
    # Rotate tina clockwise by 11 degrees.
    tina.right(11)
# Check intersections with boxes when the turtle moves
def go_forward():
    # Step forward, score any boxes we now touch, keep tina on screen.
    tina.forward(10)
    check_intersect()
    checkpos([tina])
def go_backward():
    # Step backward, score any boxes we now touch, keep tina on screen.
    tina.backward(10)
    check_intersect()
    checkpos([tina])
# This function loops through the `boxes` list and uses each
# box's `intersect()` method to check whether it intersects
# with tina.
def check_intersect():
    """Flash and mark as hit every box that tina currently touches."""
    for box in boxturtles:
        # Already-hit boxes and boxes tina is not touching are skipped.
        if box.hit or not box.intersect(tina):
            continue
        box.hit = True
        box.flash()
# Tell the program which functions go with which keys
screen.onkey(go_left, 'Left')
screen.onkey(go_right, 'Right')
screen.onkey(go_forward, 'Up')
screen.onkey(go_backward, 'Down')
# Debugging function - press 'w' to hit all but one turtle
def win():
    # Debug helper: mark every box except the first as hit, so pressing
    # 'w' lets you finish the game with a single remaining target.
    for t in boxturtles[1:]:
        screen.tracer(0)   # suspend animation while flashing
        t.flash()
        t.hit = True
        screen.tracer(1)   # resume animation
screen.onkey(win, 'w')
# This play function will call itself every .1 seconds and return if the player loses
def play():
    """One game tick: read input, test for a win, move/awaken a box, reschedule."""
    # Tell the screen to listen for key presses
    screen.listen()
    # Check boxes' hit state
    hits = []
    for box in boxturtles:
        hits.append(box.hit)
    # If all boxes are hit, the game is over!
    if False not in hits:
        printwin(tina)
        return
    mover = choice(boxturtles)
    if not mover.hit:
        mover.move()
    # Sometimes, a hit turtle will awaken again
    else:
        if randint(0,100) < 5:
            mover.awaken()
    checkpos(boxturtles)
    # start the function over in 100 milliseconds (.1 seconds)
    screen.ontimer(play, 100)
play()
turtle.done() | [
"[email protected]"
] | |
a3f236ba9acc0a4b6555b96f6a332662b412630d | 4591b4c66f443a2a54c858a8f3b529b8f388a5e4 | /workshops/migrations/0009_auto_20141201_0016.py | 63d5f5e8ede381a4c432f6a8b3b7406a26f704cf | [
"MIT"
] | permissive | sburns/amy | 39e11b48212304c7620e56a66c2f585d3d5951ae | 7a315ba934f45e2234aaf1ea0e953b88a6239e10 | refs/heads/master | 2020-12-28T20:31:22.103801 | 2015-01-20T20:27:31 | 2015-01-20T20:27:31 | 27,539,122 | 0 | 1 | null | 2015-01-27T17:43:06 | 2014-12-04T12:18:40 | Python | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make Person.email nullable while keeping it unique (max length 100)."""
    dependencies = [
        ('workshops', '0008_person'),
    ]
    operations = [
        migrations.AlterField(
            model_name='person',
            name='email',
            field=models.CharField(max_length=100, unique=True, null=True),
            preserve_default=True,
        ),
    ]
| [
"[email protected]"
] | |
ff27d1695dcafdf6c0990e339bae4ebdc384fe83 | c5a921726a3805663d26a2dbaa47e49497931d4e | /Algorithms/challenges/lc437_path_sum_3.py | 3c2379a605bdceaccb345b85e6736d43f336db08 | [] | no_license | snowdj/cs_course | a50d07548198b4202e8abde01ec572e2cce38ab3 | fa6504cb5145d10952f4615478fa745f4b35ba13 | refs/heads/master | 2020-03-17T15:18:52.190747 | 2018-05-13T08:08:51 | 2018-05-13T08:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | """
Time: O(n)
Space: O(n)
You are given a binary tree in which each node contains an integer value.
Find the number of paths that sum to a given value.
The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).
The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.
Example:
root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
Return 3. The paths that sum to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
"""
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        # New nodes start as leaves; callers attach children afterwards.
        self.val = x
        self.left = self.right = None
# Brute-force DFS. Pre-order traversal.
# Time: O(nlg(n)), worst O(n^2) Space: O(lg(n)), worst O(n)
class Solution:
    """Brute-force pre-order DFS keeping the root-to-node value path.

    Time: O(n log n) balanced, O(n^2) worst. Space: O(height).
    """
    def pathSum(self, root, target):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        count = 0
        path = []  # values on the path from the root down to the current node

        def visit(node, running):
            nonlocal count
            if node is None:
                return
            running += node.val
            # Path starting at the root.
            if running == target:
                count += 1
            path.append(node.val)
            # Paths starting below the root: drop prefixes one value at a
            # time (excluding the current node, so the empty path is never
            # counted even when target == 0).
            partial = running
            for prefix_val in path[:-1]:
                partial -= prefix_val
                if partial == target:
                    count += 1
            visit(node.left, running)
            visit(node.right, running)
            path.pop()

        visit(root, 0)
        return count
# Pre-order DFS with 2-sum hash table
# Time: O(n) Space: O(n+lg(n))
from collections import defaultdict
class Solution2:
    """Pre-order DFS with a prefix-sum frequency table (two-sum trick).

    Time: O(n). Space: O(n + height).
    """
    def pathSum(self, root, target):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        # prefix_counts[s] = how many ancestors (including the virtual
        # empty prefix) have cumulative sum s on the current root path.
        prefix_counts = {0: 1}
        total = 0

        def walk(node, running):
            nonlocal total
            if not node:
                return
            running += node.val
            # Every ancestor prefix equal to running - target closes a path.
            total += prefix_counts.get(running - target, 0)
            # Register the current prefix AFTER counting, so a zero target
            # never matches the current node's own empty path.
            prefix_counts[running] = prefix_counts.get(running, 0) + 1
            walk(node.left, running)
            walk(node.right, running)
            prefix_counts[running] -= 1  # backtrack off this root path

        walk(root, 0)
        return total
# Same as solution 1 brute-force, but using recursion instead of nodes stack.
# Time: O(nlg(n)), worst O(n^2) Space: O(lg(n)), worst O(n)
class Solution3:
    """Double recursion: for every node, count downward paths starting there.

    Time: O(n log n) balanced, O(n^2) worst. Space: O(height).
    """
    def pathSum(self, root, target):
        """
        :type root: TreeNode
        :type sum: int
        :rtype: int
        """
        if root is None:
            return 0
        starting_here = self.sumup(root, 0, target)
        in_left = self.pathSum(root.left, target)
        in_right = self.pathSum(root.right, target)
        return starting_here + in_left + in_right

    def sumup(self, node, pre, target):
        # Count paths beginning at the fixed ancestor (carrying prefix `pre`)
        # and ending at `node` or anywhere below it.
        if node is None:
            return 0
        cur = pre + node.val
        hit = 1 if cur == target else 0
        return hit + self.sumup(node.left, cur, target) + self.sumup(node.right, cur, target)
| [
"[email protected]"
] | |
2113063d729a811629f3bc376ba4bf53a6246231 | ea2f7efb514b7e33eb205519cfffc356f58a9816 | /Clases en Python/__repr()__.py | 6f40939cad236fb3207cb6550a444771c025da4d | [] | no_license | MGijon/Learning-Python | fa79071bf53172743e96d2c614be2963a5107a9d | 728f8d7e30729a965c5a093e08005d715aa6e46b | refs/heads/master | 2021-06-28T15:44:34.082472 | 2019-02-23T17:58:06 | 2019-02-23T17:58:06 | 105,207,614 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | ''' __REPR()__: Nos devuelve una cadena de texto con la representación única de un objeto. Es útil, por ejemplo, a la hora de depurar un error.
------------
A la representación única accedemos de dos formas: con la función repr() o con las dobles comillas hacia atrás (``).
Si __repr()__ no está definido, Python en lugar de darnos un error nos generará una representación automática del objeto,
indicando el nombre de su clase y su posición en la memoria.
'''
class Triangulo(object):
    """Demo class: shows how __str__ builds a human-readable description."""

    def __init__(self, base, altura):
        self.base = base
        self.altura = altura

    def __str__(self):
        # type(self).__name__ keeps the message correct for subclasses.
        return '{0} con base {1} y altura {2}.'.format(
            type(self).__name__, self.base, self.altura)
t = Triangulo(12, 124)
print(t)
print('en este caso no hemos definido __repr()__, Python lo generará automáticamente...')
print(repr(t))
import math
class Circulo(object):
    """Circle demo contrasting __str__ (for users) with __repr__ (for
    developers; its output is eval-able back into an equivalent object)."""

    def __init__(self, radio):
        self.radio = radio

    @property
    def area(self):
        # Area of a circle is pi * r**2.
        # Bug fix: the previous formula (2 * pi * r) computed the
        # circumference, not the area.
        return math.pi * self.radio ** 2

    def __str__(self):
        clase = type(self).__name__
        mensaje = '{0} de radio {1} y área {2}'.format(clase, self.radio, self.area)
        return mensaje

    def __repr__(self):
        # Unambiguous, eval-able representation: Circulo(<radio>).
        clase = type(self).__name__
        mensaje = '{0}({1})'.format(clase, self.radio)
        return mensaje
c = Circulo(131)
print(c) # Circulo de radio 131 y área 823.0972752405258
print(repr(c)) # Circulo(131)
print(eval(repr(c))) # Circulo de radio 131 y área 823.0972752405258
##################### MORALEJA ###########################################################
# --------- #
# #
# __str__ : PARA USUARIOS #
# __repr–– : PARA DESARROLLADORES #
# #
###########################################################################################
| [
"[email protected]"
] | |
70d28bdb9d82aa11081654760958d50a0e9b5ae3 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/battle_control/controllers/consumables/__init__.py | de6cdb4912e1bd8a0b0ace2de737e8453afc24ad | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,212 | py | # 2016.11.19 19:48:19 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/battle_control/controllers/consumables/__init__.py
from gui.battle_control.controllers.consumables import ammo_ctrl
from gui.battle_control.controllers.consumables import equipment_ctrl
from gui.battle_control.controllers.consumables import opt_devices_ctrl
def createAmmoCtrl(setup):
    """Return the ammo controller matching the battle's replay mode."""
    if setup.isReplayRecording:
        ctrl = ammo_ctrl.AmmoReplayRecorder(setup.replayCtrl)
    elif setup.isReplayPlaying:
        ctrl = ammo_ctrl.AmmoReplayPlayer(setup.replayCtrl)
    else:
        ctrl = ammo_ctrl.AmmoController()
    return ctrl
def createEquipmentCtrl(setup):
    """Return the equipment controller; replays use the replay-aware player."""
    factory = (equipment_ctrl.EquipmentsReplayPlayer
               if setup.isReplayPlaying
               else equipment_ctrl.EquipmentsController)
    return factory()
def createOptDevicesCtrl():
    # Optional devices have no replay-specific variant, so one type suffices.
    return opt_devices_ctrl.OptionalDevicesController()
__all__ = ('createAmmoCtrl', 'createEquipmentCtrl', 'createOptDevicesCtrl')
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\battle_control\controllers\consumables\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:48:19 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
7d72e289cd1a204ce8d9543b02b422fad79372c9 | 9848584d5f1858692fb4cdbe793bc91ed3be920e | /coding/00239-sliding-window-max/solution.py | aef03fa50138e58d6f572230081501d70f98fcf3 | [] | no_license | misaka-10032/leetcode | 1212223585cc27d3dfc6d2ca6a27770f06e427e3 | 20580185c6f72f3c09a725168af48893156161f5 | refs/heads/master | 2020-12-12T09:45:31.491801 | 2020-09-14T00:18:19 | 2020-09-14T00:18:19 | 50,267,669 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #!/usr/bin/env python3
# encoding: utf-8
import collections
from typing import List
class DecreasingGarbageCollectionQueue:
    """Monotonic queue of (time, value) pairs.

    Values are kept strictly decreasing front-to-back, and entries older
    than `ttl` time units (relative to the latest appended time) are
    expired on each append. peek() therefore returns the window maximum.
    """

    def __init__(self, ttl: int):
        self._ttl = ttl
        self._entries = collections.deque()

    def append(self, t: int, v: int):
        entries = self._entries
        # Expire entries that fell out of the window ending at time t.
        while entries and entries[0][0] + self._ttl <= t:
            entries.popleft()
        # Pop smaller-or-equal tail values so the queue stays decreasing.
        while entries and entries[-1][1] <= v:
            entries.pop()
        entries.append((t, v))

    def peek(self) -> int:
        # Front holds the largest value currently inside the window.
        return self._entries[0][1]
class Solution:
    """Sliding-window maximum via the decreasing TTL queue above: O(n)."""

    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        window = DecreasingGarbageCollectionQueue(k)
        maxima = []
        for idx, value in enumerate(nums):
            window.append(idx, value)
            # Only emit once the first full window [0, k-1] is complete.
            if idx >= k - 1:
                maxima.append(window.peek())
        return maxima
| [
"[email protected]"
] | |
ed33f94bbd108c9000ac2d9dc0d03f9bc890dcbc | 1f689e448d8b510ea6575590cb6920048b4e9aea | /leetcode/202_happy_number.py | 238115bb7972505ac6b64021c56ccdb3faf05303 | [] | no_license | lijenpan/python | 52c6061ff90c611efd039b1858339edbefdb5ad0 | 7f67045a83bd2592ccc399420194094fb78404b8 | refs/heads/master | 2020-05-30T10:53:15.634090 | 2016-12-02T20:50:28 | 2016-12-02T20:50:28 | 7,646,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | """
Write an algorithm to determine if a number is "happy".
A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
Example: 19 is a happy number
12 + 92 = 82
82 + 22 = 68
62 + 82 = 100
12 + 02 + 02 = 1
==============================
This question shouldn't be easy. The naive approach will get you stuck in the loop.
Until you found out that (through repetitions) happy numbers contains 4, you are in for
a hell of a coding session.
"""
def isHappy(n):
    """
    Return True if *n* is a happy number.

    Repeatedly replace n with the sum of the squares of its decimal digits.
    Happy numbers reach 1; every other positive integer falls into a cycle
    that contains 4, so 4 serves as the loop sentinel.

    :type n: int
    :rtype: bool
    """
    if n <= 0:
        # Happy numbers are defined for positive integers only; also avoids
        # an infinite loop on n == 0.
        return False
    while n != 1 and n != 4:
        total = 0
        while n:
            # divmod with floor division works on both Python 2 and 3;
            # the original `n /= 10` is float division under Python 3.
            n, digit = divmod(n, 10)
            total += digit * digit
        n = total
    return n == 1
| [
"[email protected]"
] | |
34457b9f1292450d30115f4b973ae6c397ad444b | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/tools/weather/views.py | bc7bb0580ac24124ade857e06b690326ad36e083 | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 6,497 | py | # -*- coding: utf-8 -*-
__author__ = "liupeiyu"
import time
from datetime import timedelta, datetime, date
import urllib, urllib2
import os
import json
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
from django.shortcuts import render_to_response
from django.contrib.auth.models import User, Group, Permission
from django.contrib import auth
from django.db.models import Q
import httplib
from core.jsonresponse import JsonResponse, create_response, decode_json_str
from core import dateutil
from core.exceptionutil import full_stack
from tools.models import *
from watchdog.utils import watchdog_fatal
WATCHDOG_TYPE = 'WHETHER_API'
########################################################################
# get_weather_info: 获得天气信息
########################################################################
def get_weather_info(request):
    """Django view: return the cached 6-day forecast for the fixed city,
    refreshing it from the weather API when the cache has expired."""
    weathers = Weather.objects.all()
    response = create_response(200)
    city_code = "101180801"
    morning_time = 6 # hour at which daytime starts
    night_time = 18 # hour at which nighttime starts
    today_date = datetime.now()
    try:
        if weathers.count() == 0:
            # No cached row yet: fetch and store a fresh forecast.
            weather_info, weather = __get_weather_info(city_code)
        else:
            weather = weathers[0]
            if __is_out_time_span(weather.update_time, weather.update_span):
                # Cache expired: refresh the existing row in place.
                weather_info, weather = __get_weather_info(city_code, weather_id=weather.id)
            else:
                weather_info = json.loads(weather.info)
        response.data.weather_info = weather_info
        response.data.today_date = today_date.strftime("%Y年%m月%d日")
        response.data.create_time = weather.update_time.strftime("%Y年%m月%d日 %H:%M")
        # Decide day vs night from the cache timestamp's hour:
        # True means daytime, False means nighttime.
        hour = int(weather.update_time.strftime("%H"))
        if morning_time <= hour and hour < night_time:
            response.data.is_daytime = True
        else:
            response.data.is_daytime = False
        # Current (realtime) temperature.
        response.data.current_temp = __get_current_temp(city_code)
    # NOTE(review): bare except also hides programming errors, not just API
    # failures; it at least reports the traceback to the watchdog below.
    except:
        response = create_response(500)
        response.errMsg = u'获取失败'
        response.innerErrMsg = full_stack()
        watchdog_fatal(u'代码错误!%s' % response.innerErrMsg, WATCHDOG_TYPE)
    return response.get_response()
########################################################################
# __get_weather_info: 获取近6天气信息
########################################################################
def __get_weather_info(city_code, weather_id = 0):
    """Fetch the next 6 days of forecast data for *city_code* from the
    weather API and persist it; returns (weather_info, weather_row).

    When *weather_id* is non-zero the existing row is updated, otherwise a
    new row is created. If the API call fails, the previously cached info
    (if any) is returned and a watchdog alert is raised.
    """
    data_str, error_info = __get_http_response_data("m.weather.com.cn", "/data/%s.html" % city_code)
    weather_info = []
    weather = None
    if data_str:
        info_json = decode_json_str(data_str)
        weather_json = info_json['weatherinfo']
        # Work out which weekday the forecast starts on
        # (labels are Chinese weekday characters, Monday..Sunday).
        weeks = [u'一', u'二', u'三', u'四', u'五', u'六', u'日']
        week_index = __get_week_index(weeks, weather_json['week'])
        # Today's date.
        today_date = datetime.now()
        total_days, low_date, cur_date, high_date = dateutil.get_date_range(dateutil.get_today(), '6', 6)
        date_list = dateutil.get_date_range_list(datetime.date(today_date), high_date)
        for i in range(1,7):
            data = dict()
            data['date'] = date_list[i-1].strftime("%Y年%m月%d日")
            data['weather'] = weather_json['weather%d' % i]
            data['temp'] = weather_json['temp%d' % i]
            data['week'] = u'周%s' % weeks[week_index]
            # Advance the weekday index, wrapping around after Sunday.
            week_index = week_index + 1 if week_index + 1 < len(weeks) else 0
            weather_info.append(data)
        # If a row already exists, update it instead of inserting a new one.
        if weather_id:
            weather = Weather.objects.get(id=weather_id)
            weather.info = json.dumps(weather_info)
            weather.update_time = today_date
            weather.save()
        else:
            weather = Weather.objects.create(info=json.dumps(weather_info), city_code = city_code)
    else:
        # API unavailable: fall back to the cached row (when present) and
        # raise a watchdog alert either way.
        if weather_id:
            weather = Weather.objects.get(id=weather_id)
            weather_info = json.loads(weather.info)
            # print u'更新数据,天气的api不可用!'
            watchdog_fatal(u'更新数据,天气的api不可用!%s' % error_info, WATCHDOG_TYPE)
        else:
            # print u'首次获取数据,天气的api不可用!'
            watchdog_fatal(u'首次获取数据,天气的api不可用!%s' % error_info, WATCHDOG_TYPE)
    return weather_info, weather
########################################################################
# __get_current_temp: 获取当前天气温度
########################################################################
def __get_current_temp(city_code):
    """Fetch the current temperature for *city_code* from the realtime API;
    returns '' (and raises a watchdog alert) when the request fails."""
    data_str, error_info = __get_http_response_data("www.weather.com.cn", "/data/sk/%s.html" % city_code)
    temp = ''
    if data_str:
        info_json = decode_json_str(data_str)
        # Current temperature.
        temp = info_json['weatherinfo']['temp']
    else:
        # print u'获取当前天气温度,天气的api不可用!'
        watchdog_fatal(u'获取当前天气温度,发送请求失败!%s' % error_info, WATCHDOG_TYPE)
    return temp
########################################################################
# __is_out_time_span: 判断时间是否超出时间间隔
########################################################################
def __is_out_time_span(update_time, update_span):
    """
    Return True when more than *update_span* minutes have elapsed since
    *update_time* (a datetime), i.e. the cached weather data is stale.
    """
    span_ms = update_span * 60 * 1000
    # int() works on both Python 2 and 3; the original used the
    # Python-2-only long() built-in with the same truncation semantics.
    create_time = int(time.mktime(update_time.timetuple())) * 1000
    now = int(time.time()) * 1000
    return now - create_time > span_ms
########################################################################
# __get_http_response_data: 发送http请求,返回数据
########################################################################
def __get_http_response_data(domain, url, method="GET"):
error_info = None
conn = httplib.HTTPConnection(domain)
try:
conn.request(method, url)
r1 = conn.getresponse()
print r1.status
if r1.status is not 200:
error_info = r1.read()
data_str = None
else:
data_str = r1.read()
except:
data_str = None
error_info = full_stack()
finally:
conn.close()
return data_str, error_info
########################################################################
# __get_week_index: 获取周期下标
########################################################################
def __get_week_index(weeks, string):
    """Return the index in *weeks* of the last character of *string*
    (e.g. u'星期三' -> index of u'三'); None when not found."""
    last_char = string[-1:]
    for index, label in enumerate(weeks):
        if label == last_char:
            return index
"[email protected]"
] | |
8ab8b6ab34a49d1936eb7cb2cdfa1fa2034968d1 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4459225.3.spec | bae40e140f22b81e61c4e43270ce074687df979f | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,687 | spec | {
"id": "mgm4459225.3",
"metadata": {
"mgm4459225.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 193250,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 4432,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 477,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/205.screen.h_sapiens_asm.info"
},
"205.screen.h_sapiens_asm.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 210,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/205.screen.h_sapiens_asm.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1733,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 128179,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 469,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 18542,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 216473,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 142884,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 37744,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 24616,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 261896,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 13979,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 9828,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 18821,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 28290,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 1977900,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 97,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 766,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 36,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 2624,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 3693,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 1353,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 374,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22933,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 10339,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.species.stats"
}
},
"id": "mgm4459225.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4459225.3"
}
},
"raw": {
"mgm4459225.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4459225.3"
}
}
} | [
"[email protected]"
] | |
7fc1aab1de73aa78dbb82daf249adb798a862e6e | ac0e9a702e73739209b24ba3f6d9297647e06b76 | /Example Files/Intermediate/phonebook_example_unittest/test_phonebook.py | 877ed597231b563f8b52e0fd04a0c7d5d40c137e | [] | no_license | afettouhi/PyStudentManager | 9c256c38b20136f10b86fb2e2270bb5848be802d | 71343bc52e5426b2267f068bd6af2e66c0807f08 | refs/heads/master | 2020-05-14T16:53:09.501889 | 2019-06-07T14:22:44 | 2019-06-07T14:22:44 | 181,881,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py |
import unittest
from phonebook import Phonebook
class PhonebookTest(unittest.TestCase):
    """Tests for Phonebook lookup and consistency checking.

    A phonebook is *inconsistent* when two entries share a number or when
    one number is a prefix of another (the shorter number could never be
    dialled on its own); an empty phonebook is trivially consistent.
    """

    def setUp(self):
        # Every test starts from a fresh, empty phonebook.
        self.phonebook = Phonebook()

    def test_lookup_entry_by_name(self):
        self.phonebook.add("Bob", "12345")
        self.assertEqual("12345", self.phonebook.lookup("Bob"))

    def test_missing_entry_raises_KeyError(self):
        with self.assertRaises(KeyError):
            self.phonebook.lookup("missing")

    def test_empty_phonebook_is_consistent(self):
        # Fixed: the test name says "consistent" but the original asserted
        # the opposite; an empty phonebook has no conflicting numbers.
        self.assertTrue(self.phonebook.is_consistent())

    def test_phonebook_with_normal_entries_is_consistent(self):
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "012345")
        self.assertTrue(self.phonebook.is_consistent())

    def test_phonebook_with_duplicate_entries_is_inconsistent(self):
        # Fixed: duplicate numbers must make the phonebook inconsistent,
        # but the original asserted is_consistent() was True.
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "12345")
        self.assertFalse(self.phonebook.is_consistent())

    def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):
        # Fixed: "123" is a prefix of "12345", which the test name declares
        # inconsistent; the original asserted the opposite.
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "123")
        self.assertFalse(self.phonebook.is_consistent())

    def test_phonebook_adds_names_and_numbers(self):
        self.phonebook.add("Sue", "12345")
        self.assertIn("Sue", self.phonebook.get_names())
        self.assertIn("12345", self.phonebook.get_numbers())
| [
"[email protected]"
] | |
35757bf0f4d8afe1c0b99428daee2cf27e28c9fd | 97af3c1e09edbb09dfabe0dd8cb5334735d874b6 | /code/lib/python/console/clint/textui/progress.py | 960c35b9cb5d9319ca98f0dd9a3e887086ff01bf | [] | no_license | joyrexus/ldp | 31d3e155110e3249ad0f7c97f1b663120c6a125d | d0e15f051bb175fc66a4647b3001b31702aa16f3 | refs/heads/master | 2021-01-17T14:30:46.115805 | 2015-05-05T20:20:14 | 2015-05-05T20:20:14 | 11,434,923 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # -*- coding: utf-8 -*-
"""
clint.textui.progress
~~~~~~~~~~~~~~~~~
This module provides the progressbar functionality.
"""
from __future__ import absolute_import
import sys
STREAM = sys.stderr

BAR_TEMPLATE = '%s[%s%s] %i/%i\r'
BAR_EMPTY_CHAR = '-'
BAR_FILLED_CHAR = '='

DOTS_CHAR = '.'


def bar(it, label='', width=32, hide=False):
    """Progress-bar iterator.

    Wrap a *sized* iterable with it; each yielded item redraws a bar such
    as ``label[====----] 2/4`` on STREAM.  With ``hide=True`` nothing is
    written and the items are passed through unchanged.
    """
    def _show(_i):
        # Map the completed count onto `width` bar characters.
        x = int(width * _i / count)
        if not hide:
            STREAM.write(BAR_TEMPLATE % (
                label, BAR_FILLED_CHAR * x, BAR_EMPTY_CHAR * (width - x), _i, count))
            STREAM.flush()

    count = len(it)
    if count:  # skip the initial draw (and a zero division) for empty input
        _show(0)
    for i, item in enumerate(it):
        yield item
        _show(i + 1)
    if not hide:
        STREAM.write('\n')
        STREAM.flush()


def dots(it, label='', hide=False):
    """Progress iterator.  Prints one dot per item being iterated."""
    count = 0
    if not hide:
        STREAM.write(label)
    for item in it:
        if not hide:
            STREAM.write(DOTS_CHAR)
            # Fix: flush the configured STREAM instead of sys.stderr
            # directly, so reassigning STREAM keeps working consistently.
            STREAM.flush()
        count += 1
        yield item
    if not hide:
        # Fix: the trailing newline used to be written even when
        # hide=True, unlike bar(); keep hidden mode fully silent.
        STREAM.write('\n')
        STREAM.flush()
| [
"[email protected]"
] | |
f32f2075cffb1ee258d2840c969615cb58be0bbf | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /qM6zWQM7gdfPgE9Hh_10.py | ac926ffffc597d07e0e765dc6f988e28824815d1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | """
Given a _dictionary_ of some items with _star ratings_ and a _specified star
rating_ , return a new dictionary of items **which match the specified star
rating**. Return `"No results found"` if _no item_ matches the _star rating_
given.
### Examples
filter_by_rating({
"Luxury Chocolates" : "*****",
"Tasty Chocolates" : "****",
"Aunty May Chocolates" : "*****",
"Generic Chocolates" : "***"
}, "*****") ➞ {
"Luxury Chocolates" : "*****",
"Aunty May Chocolates" : "*****"
}
filter_by_rating({
"Continental Hotel" : "****",
"Big Street Hotel" : "**",
"Corner Hotel" : "**",
"Trashviews Hotel" : "*",
"Hazbins" : "*****"
}, "*") ➞ {
"Trashviews Hotel" : "*"
}
filter_by_rating({
"Solo Restaurant" : "***",
"Finest Dinings" : "*****",
"Burger Stand" : "***"
}, "****") ➞ "No results found"
### Notes
N/A
"""
def filter_by_rating(d, rating):
    """Return the entries of *d* whose value equals *rating*.

    Returns the string 'No results found' when no entry matches, matching
    the documented contract above.
    """
    # Renamed from `dict = b = {...}`: the original shadowed the builtin
    # `dict` and bound a second, unused alias `b` to the same object.
    matches = {key: value for key, value in d.items() if value == rating}
    return matches if matches else 'No results found'
| [
"[email protected]"
] | |
c93abfeac1a23ee94be4cfa675344b58b62a7439 | 42a812ac785752921dcdddd4ae56064b51452b39 | /bulletin/post/tests/test_post_view.py | cd2fe8bc9b73e4f73864e1875b41fc2744fb8149 | [] | no_license | Pre-Onboarding-Listerine/aimmo-assignment-team-1 | e4a15d3e71f1985febf911360691389f5996f0fb | d94dd7482f065ac1b020bb500984740c13af14e6 | refs/heads/main | 2023-09-02T12:23:49.693075 | 2021-11-03T00:25:18 | 2021-11-03T00:25:18 | 423,444,898 | 1 | 3 | null | 2021-11-02T16:35:38 | 2021-11-01T11:46:19 | Python | UTF-8 | Python | false | false | 4,881 | py | import json
import unittest
from datetime import datetime
from http import HTTPStatus
from unittest import mock
from unittest.mock import MagicMock
import jwt
from assertpy import assert_that
from django.conf import settings
from django.test import Client
from member.models import Member
from ..dto.deleted_post_id import DeletedPostId
from ..dto.post_changes import PostChanges
from ..dto.post_content import PostContents
from ..dto.post_details import PostDetails
from ..models.posting import Posting
from ..service import PostService
from member.service import MemberService
class PostViewTest(unittest.TestCase):
    """HTTP-level tests for the /posts endpoints.

    Service-layer calls (PostService, MemberService) are mocked out, so each
    test only checks the view's status code and that the view translated the
    JSON payload into the expected service-layer DTOs.
    """

    def setUp(self):
        # Django test client used to issue requests against the URLconf.
        self.client = Client()

    @mock.patch.object(MemberService, 'get_member')
    @mock.patch.object(PostService, 'write')
    def test_create_post_with_post_contents(self, write, get_member):
        """POST /posts returns 201 and forwards PostContents + author."""
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        # Token signed with the app's own secret so auth middleware accepts it.
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.post(
            "/posts",
            data=json.dumps({
                "title": "json title",
                "content": "json content",
                "category": "json"
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.CREATED)
        # The view must pass the parsed contents and resolved author through.
        write.assert_called_with(
            PostContents(
                title="json title",
                content="json content",
                category="json"
            ),
            Member(
                username="asd",
                password="123qwe"
            )
        )

    @mock.patch.object(PostService, 'edit')
    @mock.patch.object(MemberService, 'get_member')
    def test_update_post_with_author(self, get_member, edit):
        """PATCH /posts returns 200 and forwards PostChanges + updater."""
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.patch(
            "/posts",
            data=json.dumps({
                "id": 1,
                "title": "json title",
                "content": "json content",
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.OK)
        changes = PostChanges(
            id=1,
            title="json title",
            content="json content"
        )
        updater = Member(
            username="asd",
            password="123qwe"
        )
        edit.assert_called_with(changes, updater)

    @mock.patch.object(PostService, 'remove')
    @mock.patch.object(MemberService, 'get_member')
    def test_delete_with_author(self, get_member, remove):
        """DELETE /posts returns 204 and forwards DeletedPostId + deleter."""
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.delete(
            "/posts",
            data=json.dumps({
                "id": 1
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.NO_CONTENT)
        deleted_post_id = DeletedPostId(
            id=1
        )
        deleter = Member(
            username="asd",
            password="123qwe"
        )
        remove.assert_called_with(deleted_post_id, deleter)

    @mock.patch.object(PostService, 'details')
    def test_get_details_with_post_id(self, details):
        """GET /posts/<id> returns 200; unauthenticated reader passed as None."""
        author = Member(
            username="asd",
            password="123qwe"
        )
        details.return_value = PostDetails(
            id=1,
            author=author.username,
            title="before title",
            content="before content",
            category="before",
            created_at=datetime.utcnow().strftime("%m-%d-%Y, %H:%M:%S"),
            updated_at=datetime.utcnow().strftime("%m-%d-%Y, %H:%M:%S"),
            comments=[],
            hits=0
        )
        response = self.client.get(
            "/posts/1"
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.OK)
        # Second argument is the requesting member; anonymous here, hence None.
        details.assert_called_with(1, None)
| [
"[email protected]"
] | |
74458f6a29b52a4aba737448865b8f86ca8a360b | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/version.py | 7d7321ca5431114f3472d2997a60ebba92f03cde | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,488 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to print version information for Cloud SDK components.
"""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import config
from googlecloudsdk.core.updater import update_manager
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Version(base.Command):
  """Print version information for Cloud SDK components.

  This command prints version information for each installed Cloud SDK
  component and prints a message if updates are available.
  """

  def Run(self, args):
    """Collect component versions keyed by component name."""
    sdk_root = config.Paths().sdk_root
    if not sdk_root:
      # Not a built Cloud SDK installation, so there is no component
      # metadata to report.
      versions = {}
    else:
      info = update_manager.UpdateManager().GetCurrentVersionsInformation()
      versions = dict(info)
    versions['Google Cloud SDK'] = config.CLOUD_SDK_VERSION
    return versions

  def Format(self, args):
    """Render the mapping as flattened, space-separated key/value pairs."""
    return 'flattened[no-pad,separator=" "]'
| [
"[email protected]"
] | |
d7f2ac70b8cb10c2f05a112b0c00c9af580c876b | fcb87e969a3989f2023f3847a5f0e1289a0a8694 | /sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object_list.py | 15f0af7ef8ff43197a068b6a31c7c792caf4f15b | [
"BSD-3-Clause"
] | permissive | SKA-ScienceDataProcessor/integration-prototype | 299eb0aa41ba9c7f683f5bac101af5a19fccb171 | 5875dc0489f707232534ce75daf3707f909bcd15 | refs/heads/master | 2021-05-01T05:02:16.697902 | 2019-07-28T22:32:05 | 2019-07-28T22:32:05 | 58,473,707 | 3 | 10 | BSD-3-Clause | 2021-03-25T22:21:08 | 2016-05-10T15:41:14 | C++ | UTF-8 | Python | false | false | 3,436 | py | # -*- coding: utf-8 -*-
"""Base class for list of scheduling or processing block data objects."""
from typing import List
from ._scheduling_object import SchedulingObject
from .. import ConfigDb
from .._events.event_queue import EventQueue
from .._events.pubsub import get_subscribers, publish, subscribe
DB = ConfigDb()
class SchedulingObjectList:
    """Base class for SBI and PB data objects API."""

    def __init__(self, object_type: str):
        """Initialise the list wrapper.

        Args:
            object_type (str): Object Type
        """
        self.type = object_type

    def _list_key(self, state: str) -> str:
        """Return the database key that stores objects in *state*."""
        return '{}:{}'.format(self.type, state)

    @property
    def num_active(self) -> int:
        """Number of active scheduling objects."""
        return len(self.active)

    @property
    def num_aborted(self) -> int:
        """Number of aborted scheduling objects."""
        return len(self.aborted)

    @property
    def num_completed(self) -> int:
        """Number of completed scheduling objects."""
        return len(self.completed)

    @property
    def active(self) -> List[str]:
        """IDs of active scheduling objects."""
        return DB.get_list(self._list_key('active'))

    @property
    def aborted(self) -> List[str]:
        """IDs of aborted scheduling objects."""
        return DB.get_list(self._list_key('aborted'))

    @property
    def completed(self) -> List[str]:
        """IDs of completed scheduling objects."""
        return DB.get_list(self._list_key('completed'))

    def set_complete(self, object_id: str):
        """Move *object_id* from the active list to the completed list."""
        if object_id not in self.active:
            return
        DB.remove_from_list(self._list_key('active'), object_id)
        DB.append_to_list(self._list_key('completed'), object_id)

    ###########################################################################
    # Pub/sub events functions
    ###########################################################################

    def subscribe(self, subscriber: str) -> EventQueue:
        """Register *subscriber* for events of this object type.

        Returns:
            events.EventQueue, queue object for querying events.
        """
        return subscribe(self.type, subscriber)

    def get_subscribers(self) -> List[str]:
        """Return the names of all subscribers for this object type."""
        return get_subscribers(self.type)

    def publish(self, object_id: str, event_type: str,
                event_data: dict = None):
        """Publish an event for the scheduling object *object_id*.

        Args:
            object_id (str): ID of the scheduling object
            event_type (str): Type of event.
            event_data (dict, optional): Event data.
        """
        publish(event_type=event_type,
                event_data=event_data,
                object_type=self.type,
                object_id=object_id,
                object_key=SchedulingObject.get_key(self.type, object_id),
                origin=None)
| [
"[email protected]"
] | |
fcdbe8b38ae560684105297029179656a604f2db | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /222/222.count-complete-tree-nodes.233499249.Accepted.leetcode.py | 07968f423174e017c6f9c7febffa58330579725a | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
    """LeetCode 222: count the nodes of a complete binary tree."""

    def countNodes(self, root):
        """Return the node count of the complete tree rooted at ``root``.

        Iterative form of the classic O(log^2 n) trick: at each step the
        perfect subtree (left or right) is counted with a power of two and
        the walk descends into the other child.
        """
        total = 0
        node = root
        while node is not None:
            lh = self.left_depth(node.left)
            rh = self.left_depth(node.right)
            if lh == rh:
                # Left subtree is perfect; together with the current root
                # that contributes 2**lh nodes.
                total += 2 ** lh
                node = node.right
            else:
                # Right subtree is perfect one level shorter; with the
                # current root that contributes 2**rh nodes.
                total += 2 ** rh
                node = node.left
        return total

    def left_depth(self, node):
        """Length of the leftmost chain starting at ``node``."""
        steps = 0
        while node is not None:
            node = node.left
            steps += 1
        return steps
| [
"[email protected]"
] | |
3c83baf87db5dfcbd91d068acf92999196d079f9 | 5b221c2809d82cf13a2b24a56589943315cdb381 | /2017/2017-29.py | 55d90764145038cedbdefc1a88653ac763c185a6 | [] | no_license | Bruce-V/CS-BM25 | c2cd797e9be2fc55af9c8944882fd55109ebee61 | 2401f0ddb24c1712b13c0c96e13565f60d48705d | refs/heads/main | 2023-01-04T23:29:20.906427 | 2020-11-09T08:44:22 | 2020-11-09T08:44:22 | 259,228,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,071 | py | # Copyright 2020 zicheng Zhang([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymongo
import re
from math import log
# MongoDB handles: source collections live in the "pubmed" database,
# per-topic outputs in "cs2017_29".
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["pubmed"]
mywords = mydb["freqwords3"]  # per-article word-frequency, chemical, keyword and MeSH-heading tables for PubMed
mytopic = mydb["topics2017"]  # per-topic lists of related PubMed articles (relevance labels)
mypapers = mydb["papers"]  # PubMed article metadata
mytopicdb = myclient["cs2017_29"]
mydata = mytopicdb["cs2017_score_29"]  # second-pass ranking table, adjusted by word-list length
mycount = mytopicdb["cs2017_score_29_related"]  # scored articles joined with topic-relevance labels
def sortsecond(myfreq, mydata, yuzhi):
    """Score every PubMed record against the topic-29 query and keep the good ones.

    BM25-style scores are computed for the query terms "ampullary",
    "carcinoma" and "kras" over each record's abstract word frequencies,
    chemical-substance list, MeSH headings and keywords, plus a single
    co-occurrence bonus.  Records whose total score exceeds the threshold
    *yuzhi* ("threshold" in Chinese) are inserted into *mydata*.
    """
    k = 0  # number of documents inserted so far (progress counter)
    # BM25 tuning parameters: k1/b1 for the abstract, k2/b2 for the
    # chemical/MeSH/keyword fields.
    k1 = 1.2
    b1 = 0.75
    k2 = 1.2
    b2 = 0.75
    # Pre-computed IDF values: log10((N - df + 0.5) / (df + 0.5)) with
    # hard-coded collection sizes N and per-term document frequencies df.
    idf_ampullary = log((29138919 - 2979 + 0.5) / (2979 + 0.5), 10)
    idf_carcinoma = log((29138919 - 494907 + 0.5) / (494907 + 0.5), 10)
    idf_kras = log((29138919 - 11153 + 0.5) / (11153 + 0.5), 10)
    idf_ele_1 = log((13670358 - 4386 + 0.5) / (4386 + 0.5), 10)
    idf_ele_2 = log((13670358 - 9122 + 0.5) / (9122 + 0.5), 10)
    idf_ele_3 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
    idf_eleM_1 = log((25389659 - 7320 + 0.5) / (7320 + 0.5), 10)
    idf_eleM_2 = log((25389659 - 3644 + 0.5) / (3644 + 0.5), 10)
    idf_eleM_3 = log((25389659 - 0 + 0.5) / (0 + 0.5), 10)
    idf_eleM_4 = log((25389659 - 9122 + 0.5) / (9122 + 0.5), 10)
    idf_eleM_5 = log((25389659 - 12216 + 0.5) / (12216 + 0.5), 10)
    idf_eleM_6 = log((25389659 - 17437618 + 0.5) / (17437618 + 0.5), 10)
    idf_eleM_7 = log((25389659 - 8002162 + 0.5) / (8002162 + 0.5), 10)
    idf_eleM_8 = log((25389659 - 4029038 + 0.5) / (4029038 + 0.5), 10)
    idf_eleM_9 = log((25389659 - 2842020 + 0.5) / (2842020 + 0.5), 10)
    idf_eleM_10 = log((25389659 - 4785026 + 0.5) / (4785026 + 0.5), 10)
    idf_eleK_1 = log((5435471 - 48 + 0.5) / (48 + 0.5), 10)
    idf_eleK_2 = log((5435471 - 1503 + 0.5) / (1503 + 0.5), 10)
    for x in myfreq.find({}, {'PMID', 'wordfreq', 'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'},
                         no_cursor_timeout=True):
        ss1 = 0  # chemical-list IDF credit
        ss2 = 0  # MeSH-heading IDF credit
        ss4 = 0  # keyword IDF credit
        len_freq = 0  # abstract length (sum of term counts)
        ampullary_score = 0
        carcinoma_score = 0
        kras_score = 0
        gx = 0  # co-occurrence bonus; gx1..gx3 are reserved and stay 0
        gx1 = 0
        gx2 = 0
        gx3 = 0
        if int(x['PMID']) <= 27868941:
            # Strips every character that is not Chinese, a Latin letter or a digit.
            cop = re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9]")
            ChemicalNameList = x['ChemicalNameList']
            MeshHeadingNameList = x['MeshHeadingNameList']
            KeywordsList = x['KeywordsList']
            wordfreq = x['wordfreq']
            # NOTE(review): `'ampullary' in x` tests membership in the
            # (word, count) tuple, so it only matches tokens equal to the term
            # exactly; the comprehension variable also shadows the cursor `x`
            # (harmless in Python 3, where comprehension scope is separate).
            ampullary = [True for x in wordfreq.items() if 'ampullary' in x]
            carcinoma = [True for x in wordfreq.items() if 'carcinoma' in x]
            # --------------- abstract term-frequency statistics ---------------#
            for key in wordfreq:
                len_freq = len_freq + wordfreq[key]
            for key in wordfreq:
                # NOTE(review): the trailing space in 'ampullary ' is in the
                # original pattern -- confirm it is intentional.
                if 'ampullary ' in key:
                    ampullary_score = ampullary_score + wordfreq[key]
            for key in wordfreq:
                key1 = cop.sub('', key)
                if 'carcinoma' in key1:
                    carcinoma_score = carcinoma_score + wordfreq[key]
            for key in wordfreq:
                key1 = cop.sub('', key)
                if 'kras' in key1:
                    kras_score = kras_score + wordfreq[key]
            # --------------- co-occurrence analysis: abstract ---------------#
            # If both "ampullary" and "carcinoma" occur as abstract tokens,
            # a single bonus of idf_kras is granted when "kras" shows up in
            # any of the four fields below (gx is set, never accumulated).
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for key in wordfreq:
                    key1 = cop.sub('', key)
                    if 'kras' in key1:
                        gx = idf_kras
                        break
            # --------------- co-occurrence analysis: chemical list ---------------#
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for ele in ChemicalNameList:
                    if 'ras' in ele['NameOfSubstance']:
                        gx = idf_kras
                        break
            # --------------- co-occurrence analysis: MeSH headings ---------------#
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for eleM in MeshHeadingNameList:
                    if 'ras' in eleM['MeshHeadingName']:
                        gx = idf_kras
                        break
            # --------------- co-occurrence analysis: keywords ---------------#
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for eleK in KeywordsList:
                    if 'kras' in str(eleK).lower():
                        gx = idf_kras
                        break
            # BM25 term scores for the abstract; 83 is presumably the average
            # abstract length used for length normalisation -- TODO confirm.
            bm25_ampullary_score = (((k1 + 1) * ampullary_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + ampullary_score))
            bm25_carcinoma_score = (((k1 + 1) * carcinoma_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + carcinoma_score))
            bm25_kras_score = (((k1 + 1) * kras_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + kras_score))
            bm25_ab_score = idf_ampullary * bm25_ampullary_score + idf_carcinoma * bm25_carcinoma_score + idf_kras * bm25_kras_score
            # Raw term counts paired with their IDFs, stored for later inspection.
            idf_para = [{str(ampullary_score): idf_ampullary}, {str(carcinoma_score): idf_carcinoma}, {str(kras_score): idf_kras}]
            # ss1: chemical-substance matches (at most one credit per term).
            for ele in ChemicalNameList:
                if 'KRAS' in ele['NameOfSubstance']:
                    ss1 = ss1 + idf_ele_1
                    break
            for ele in ChemicalNameList:
                if 'Proto-Oncogene Proteins p21(ras)' in ele['NameOfSubstance']:
                    ss1 = ss1 + idf_ele_2
                    break
            for ele in ChemicalNameList:
                if 'Genes, ras' in ele['NameOfSubstance']:
                    ss1 = ss1 + idf_ele_3
                    break
            # ss2: MeSH-heading matches (at most one credit per heading).
            for eleM in MeshHeadingNameList:
                if 'Ampulla of Vater' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_1
                    break
            for eleM in MeshHeadingNameList:
                if 'Common Bile Duct Neoplasms' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_2
                    break
            for eleM in MeshHeadingNameList:
                if 'KRAS' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_3
                    break
            for eleM in MeshHeadingNameList:
                if 'Proto-Oncogene Proteins p21(ras)' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_4
                    break
            for eleM in MeshHeadingNameList:
                if 'Genes, ras' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_5
                    break
            for eleM in MeshHeadingNameList:
                if re.findall(r'(Human|Humans)', eleM['MeshHeadingName']):
                    ss2 = ss2 + idf_eleM_6
                    break
            for eleM in MeshHeadingNameList:
                if 'Male' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_7
                    break
            for eleM in MeshHeadingNameList:
                if 'Middle Aged' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_8
                    break
            for eleM in MeshHeadingNameList:
                # Equality on purpose: matches "Aged" alone, not "Middle Aged".
                if 'Aged' == eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_9
                    break
            for eleM in MeshHeadingNameList:
                if re.findall(r'(Adult|Adults)', eleM['MeshHeadingName']):
                    ss2 = ss2 + idf_eleM_10
                    break
            # ss4: author-keyword matches (at most one credit per phrase).
            for eleK in KeywordsList:
                if 'ampullary carcinoma' in str(eleK).lower():
                    ss4 = ss4 + idf_eleK_1
                    break
            for eleK in KeywordsList:
                if 'kras' in str(eleK).lower():
                    ss4 = ss4 + idf_eleK_2
                    break
            total_gx = (gx + gx1 + gx2 + gx3)  # gx1..gx3 are always 0 here
            cmk_len = len(ChemicalNameList) + len(MeshHeadingNameList) + len(KeywordsList)
            bm25_cmk_len = ss1 + ss2 + ss4
            # BM25 score of the chemical/MeSH/keyword fields; 13 is presumably
            # the average combined list length -- TODO confirm.
            bm25_cmk_score = (((k2 + 1) * bm25_cmk_len) / ((k2 * (b2 + (1 - b2) * (cmk_len / 13))) + bm25_cmk_len))
            bm25_score = bm25_ab_score + bm25_cmk_score + total_gx
            if (bm25_score > yuzhi):
                # Keep only documents above the score threshold.
                mydict = {"PMID": x['PMID'], "ab_score": bm25_ab_score, "idf_para": idf_para,
                          "cmk_len": cmk_len, "cmk_freq": bm25_cmk_len, "bm25_cmk_score": bm25_cmk_score,
                          "gx": total_gx,
                          "bm25_score": bm25_score,
                          "ChemicalNameList": x['ChemicalNameList'],
                          "MeshHeadingNameList": x['MeshHeadingNameList'], "KeywordsList": x['KeywordsList']}
                y = mydata.insert_one(mydict)
                k = k + 1
                print(str(y) + '---------' + str(k))
def count(mysort, mycount, topic):
    """Join scored documents with the relevance labels for *topic*.

    Every document in *mysort* is copied into *mycount* together with a
    "related" field: the label from the module-level `mytopic` collection
    when the PMID appears there, or -1 when the document has no label.
    """
    for x in mysort.find({},
                         {'PMID', 'ab_score', 'idf_para', 'cmk_len', 'cmk_freq', 'bm25_cmk_score', 'gx', 'bm25_score',
                          'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'}):
        # kk counts how many label rows matched this PMID.
        kk = 0
        for y in mytopic.find({"topic": topic}, {'PMID', 'relate'}):
            if x['PMID'] == y['PMID']:
                mydict = {"PMID": x['PMID'], "related": y['relate'], "ab_score": x["ab_score"],
                          "idf_para": x['idf_para'],
                          "cmk_len": x['cmk_len'], "cmk_freq": x['cmk_freq'], 'bm25_cmk_score': x['bm25_cmk_score'],
                          'gx': x['gx'],
                          "bm25_score": x['bm25_score'],
                          "ChemicalNameList": x['ChemicalNameList'], "MeshHeadingNameList": x['MeshHeadingNameList'],
                          "KeywordsList": x['KeywordsList']}
                ss = mycount.insert_one(mydict)
                print(ss)
                kk = kk + 1
        if (kk == 0):
            # No label found for this PMID: store it with related = -1.
            mydict = {"PMID": x['PMID'], "related": -1, "ab_score": x["ab_score"], "idf_para": x['idf_para'],
                      "cmk_len": x['cmk_len'], "cmk_freq": x['cmk_freq'], 'bm25_cmk_score': x['bm25_cmk_score'],
                      'gx': x['gx'],
                      "bm25_score": x['bm25_score'],
                      "ChemicalNameList": x['ChemicalNameList'], "MeshHeadingNameList": x['MeshHeadingNameList'],
                      "KeywordsList": x['KeywordsList']}
            ss = mycount.insert_one(mydict)
            print(ss)
if __name__ == '__main__':
    # Stage 1: score every document and keep those above threshold 6;
    # Stage 2: join the kept documents with the labels for topic "29".
    sortsecond(mywords, mydata, 6)
    count(mydata, mycount, "29")
| [
"[email protected]"
] | |
102cfb4a48484d5440f4765e4468f290cddc203a | ea9f2c578e479fcaebbba84d2a1fe63e96f9145d | /src/common/models/user.py | 4d4c9b4f978ae046c363d45934812a5da49ed9b4 | [] | no_license | spandey2405/onlinecoderbackend | 1a6bd278f725ae5b1ad1c57b951ac5f9f87b71eb | afffd81c027a46247dd47e2ca02ab981e124b09a | refs/heads/master | 2021-01-17T07:57:03.077054 | 2016-08-01T13:41:50 | 2016-08-01T13:41:50 | 64,668,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | from django.db import models
from src.common.libraries.constants import *
import binascii, os, uuid
class UserManager(models.Manager):
    """Manager providing helpers for generating user ids and salts."""

    def generate_userid(self):
        """Return a fresh random UUID4 string to use as the primary key."""
        return str(uuid.uuid4())

    def generate_salt(self):
        """Return a random hex salt exactly SALT_LENGTH characters long.

        Uses integer division so the byte count handed to os.urandom stays
        an int under Python 3 as well (``SALT_LENGTH / 2`` would be a float
        there, and os.urandom rejects floats). Each random byte yields two
        hex characters, hence the halving.
        """
        return binascii.hexlify(os.urandom(SALT_LENGTH // 2)).decode()
class User(models.Model):
    """Application user with a UUID primary key and a salted password hash."""
    # UUID4 string, assigned lazily in save() on first persist.
    user_id = models.CharField(max_length=UID_LENGTH, primary_key=True, editable=False)
    # NOTE(review): the display name is declared as an EmailField, which applies
    # e-mail validation to names -- confirm whether CharField was intended.
    name = models.EmailField(max_length=200)
    email = models.EmailField(max_length=MAX_EMAIL_LENGTH, unique=True)
    password_hash = models.CharField(max_length=MAX_PASSWORD_LENGTH)
    # NOTE(review): default=0 is an integer default on a CharField -- confirm
    # whether the string "0" or a blank default was intended.
    phoneno = models.CharField(max_length=10, default=0)
    created = models.DateTimeField(auto_now_add=True)  # set once at insert time
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save
    salt = models.CharField(max_length=SALT_LENGTH)  # random hex, filled in save()
    objects = UserManager()

    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True

    def save(self, *args, **kwargs):
        # Lazily generate the primary key and the salt before the first save.
        if not self.user_id:
            self.user_id = User.objects.generate_userid()
        if not self.salt:
            self.salt = User.objects.generate_salt()
        return super(User, self).save(*args, **kwargs)

    def __unicode__(self):
        # Python 2-style string representation (identifies the row by its id).
        return self.user_id

    class Meta:
        db_table = 'user'
        app_label = 'common'
"[email protected]"
] | |
e3d8cb3403b6a91ceba70ae0162d75363b5c0a9d | 01abb5fe2d6a51e8ee4330eaead043f4f9aad99d | /Repo_Files/Zips/plugin.video.streamhub/resources/lib/smodules/trailer.py | 729c4e3dd4f78c4945f1e6ce4a8b48274938d418 | [] | no_license | MrAnhell/StreamHub | 01bb97bd3ae385205f3c1ac6c0c883d70dd20b9f | e70f384abf23c83001152eae87c6897f2d3aef99 | refs/heads/master | 2021-01-18T23:25:48.119585 | 2017-09-06T12:39:41 | 2017-09-06T12:39:41 | 87,110,979 | 0 | 0 | null | 2017-04-03T19:09:49 | 2017-04-03T19:09:49 | null | UTF-8 | Python | false | false | 3,905 | py | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.smodules import client
from resources.lib.smodules import control
class trailer:
    """Locate and play a YouTube trailer for a given title inside Kodi.

    Every network/parse step is wrapped in bare ``except`` blocks on
    purpose: trailer playback is strictly best-effort and must never crash
    the add-on.
    """

    def __init__(self):
        self.base_link = 'http://www.youtube.com'
        # Obfuscated (base64-encoded) YouTube Data API keys; one is chosen at
        # random and decoded into a "&key=..." query fragment.
        self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
        self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
        self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
        self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
        self.youtube_watch = 'http://www.youtube.com/watch?v=%s'

    def play(self, name, url=None):
        """Resolve a playable stream for *name*/*url* and start playback."""
        try:
            url = self.worker(name, url)
            if url == None: return
            # Pull display metadata from the focused Kodi list item.
            title = control.infoLabel('listitem.title')
            if title == '': title = control.infoLabel('listitem.label')
            icon = control.infoLabel('listitem.icon')
            item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
            try: item.setArt({'icon': icon})
            except: pass
            item.setInfo(type='Video', infoLabels = {'title': title})
            control.player.play(url, item)
        except:
            pass

    def worker(self, name, url):
        """Turn *url* into a resolved plugin URL, else fall back to search.

        Accepts a full YouTube URL, a bare video id, or anything else
        (which triggers a search on "<name> trailer").
        """
        try:
            if url.startswith(self.base_link):
                url = self.resolve(url)
                if url == None: raise Exception()
                return url
            elif not url.startswith('http://'):
                # Treat the value as a bare YouTube video id.
                url = self.youtube_watch % url
                url = self.resolve(url)
                if url == None: raise Exception()
                return url
            else:
                raise Exception()
        except:
            # Fallback: search YouTube for "<name> trailer".
            query = name + ' trailer'
            query = self.youtube_search + query
            url = self.search(query)
            if url == None: return
            return url

    def search(self, url):
        """Run a YouTube Data API search and return the first resolvable hit."""
        try:
            query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            # NOTE(review): urllib.quote_plus is Python 2 only.
            url = self.search_link % urllib.quote_plus(query) + self.key_link
            result = client.request(url)
            items = json.loads(result)['items']
            items = [(i['id']['videoId']) for i in items]
            for url in items:
                url = self.resolve(url)
                if not url is None: return url
        except:
            return

    def resolve(self, url):
        """Map a watch URL/video id to a youtube-plugin URL if playable.

        Returns None when the video page reports the video as unavailable.
        """
        try:
            # `id` shadows the builtin; kept as-is (documentation-only pass).
            id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
            result = client.request('http://www.youtube.com/watch?v=%s' % id)
            # An unavailability message or a notification area means the
            # video cannot be played.
            message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
            message = ''.join(message)
            alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
            if len(alert) > 0: raise Exception()
            if re.search('[a-zA-Z]', message): raise Exception()
            url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
            return url
        except:
            return
| [
"[email protected]"
] | |
b843de38c9488e62441a89a633f1336a972f423a | 0d91c86aa0c70115d70f09e3e45460df73dcc652 | /alpha_a.py | d8263fba6f7a743eb66fc076ec23ea33da0d66a6 | [] | no_license | Michael-Gong/DLA_project | 589791a3ca5dba7a7d5b9a170c9e2ad712a3ae36 | 3a6211451cc404d772246f9c2b60e0c97576cfef | refs/heads/master | 2021-04-27T08:11:37.414851 | 2019-01-18T05:24:40 | 2019-01-18T05:24:40 | 122,650,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | %matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
font = {'family' : 'Carlito',
'color' : 'black',
'weight' : 'normal',
'size' : 25,
}
#plt.scatter(theta_x/np.pi*180, arg_gg, c=np.linspace(1,np.size(theta_x),np.size(theta_x))[np.newaxis,:], s=20, cmap='nipy_spectral', edgecolors='None')
#cbar=plt.colorbar(ticks=np.linspace(1, np.size(theta_x), 5), shrink=1)# orientation='horizontal', shrink=0.2)
#cbar.set_label(r'$Nth$', fontdict=font)
#plt.xlim(-45,45)
##print(theta_x)
#plt.xlabel(r'$\theta\ [degree]$',fontdict=font)
#plt.ylabel(r'$\gamma$',fontdict=font)
##plt.xticks(fontsize=30); plt.yticks(fontsize=30);
##plt.ylim(0,2000.0)
a0=np.linspace(10,210,1001)
#alpha=0.04**1.5*a0/(4.6**0.75)
alpha= (179.0**0.5*a0**2/2.3e6-9.6*a0**2/2.03e6-1.3e1/2.03e6)**0.5
#plt.plot(a0,alpha,'-k',linewidth=4)
plt.plot(a0,(a0**2-6.5)**0.5/1000.0,'-k',linewidth=4)
alpha=0.04**1.5*a0/(4.6**0.75)
#plt.plot(a0,alpha,'--b',linewidth=4)
u = 1.0/12.5
a0_1=np.array([10,25,50,75,100,125,150,200])
alpha_1=np.array([-2+2*u,-2+6*u,-2+10*u,-2+11*u,-1+1.5*u,-1+3*u,-1+4*u,-1+5*u])
plt.scatter(a0_1,10**(alpha_1-0.25*u),marker='+',s=40,color='r')
plt.xlabel(r'$a_0$',fontdict=font)
plt.ylabel(r'$\alpha$',fontdict=font)
plt.xticks(fontsize=30); plt.yticks(fontsize=30);
plt.yscale('log')
plt.ylim(10**-2,10**0)
fig = plt.gcf()
#fig.set_size_inches(30, 15)
fig.set_size_inches(8, 4)
#fig.savefig('./bunch_theta_en.png',format='png',dpi=160)
#plt.close("all")
| [
"[email protected]"
] | |
ff4ae30a5bc2aa2818fcf1314ca8b8c98913fbaf | c8be7becd7466bd6639382156e0886fce3cfb386 | /array_list_repeat.py | cd49a02546a3accdc328440da4354653614c9424 | [] | no_license | wlgud0402/pyfiles | 864db71827aba5653d53320322eb8de8b0a5fc49 | 0e8b96c4bbfb20e1b5667ce482abe75061662299 | refs/heads/master | 2021-02-28T00:42:51.321207 | 2020-03-09T10:48:52 | 2020-03-09T10:48:52 | 245,648,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #반복문을 사용한 리스트 생성
array = []
for i in range(0,20,2):
array.append(i * i)
print(array)
print()
#리스트 안에 for문 사용하기
list_a = [z * z for z in range(0, 20, 2)] #최종결과를 앞에 작성 z*z
print(list_a)
print()
#if문도 추가하기
newarray = [1,2,3,4,5,6,7,8,9]
output = [number for number in newarray if number != 3]
print(output) | [
"[email protected]"
] | |
3fabf4f4ba845759d4b8fc8576fc5bc284056ab8 | a4dfbafdb2d1cc39534a481747fe9746ebb4ef7a | /src/models/base_models/resnest_model.py | 158eb4a786862e66ce97c979d9f509c5c8e10334 | [] | no_license | huangchuanhong/dist_face_pytorch | 3f41045f662de0f9826bc5041bdd2b9abbcb9558 | dc662b713564b2c3f5a61d4ad0e8a78e4aa54a84 | refs/heads/master | 2022-12-31T23:01:26.997504 | 2020-10-26T08:29:39 | 2020-10-26T08:29:39 | 264,177,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import torch.nn as nn
from .backbones import ResNest
from ..registry import BASE_MODEL
from ..utils import constant_init, normal_init, kaiming_init
@BASE_MODEL.register_module
class ResNestModel(nn.Module):
def __init__(self,
feature_dim,
**kwargs):
super(ResNestModel, self).__init__()
self.backbone = ResNest(**kwargs)
self.gdc = nn.Conv2d(2048, 2048, groups=2048//16, kernel_size=(7, 7), stride=(1, 1), padding=(0, 0), bias=False)
self.bn = nn.BatchNorm2d(2048)
self.fc = nn.Linear(2048, feature_dim)
def init_weights(self, pretrained=None):
self.backbone.init_weights(pretrained=pretrained)
kaiming_init(self.gdc)
constant_init(self.bn, 1)
#normal_init(self.fc, std=0.01)
def forward(self, input):
output = self.backbone(input)
output = self.gdc(output)
output = self.bn(output)
output = output.view([-1, 2048])
output = self.fc(output)
return output
def train(self, mode):
self.backbone.train(mode)
self.bn.train(mode)
| [
"[email protected]"
] | |
d0303d53dd3eba23fd2b686900359aa35a47c0bb | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /python/paddle/vision/models/alexnet.py | 4239395c03319dd88ea9923153eb9cc250de73f0 | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 7,002 | py | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle.fluid.param_attr import ParamAttr
from paddle.nn import Conv2D, Dropout, Linear, MaxPool2D, ReLU
from paddle.nn.initializer import Uniform
from paddle.utils.download import get_weights_path_from_url
model_urls = {
"alexnet": (
"https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams",
"7f0f9f737132e02732d75a1459d98a43",
)
}
__all__ = []
class ConvPoolLayer(nn.Layer):
def __init__(
self,
input_channels,
output_channels,
filter_size,
stride,
padding,
stdv,
groups=1,
act=None,
):
super().__init__()
self.relu = ReLU() if act == "relu" else None
self._conv = Conv2D(
in_channels=input_channels,
out_channels=output_channels,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
def forward(self, inputs):
x = self._conv(inputs)
if self.relu is not None:
x = self.relu(x)
x = self._pool(x)
return x
class AlexNet(nn.Layer):
"""AlexNet model from
`"ImageNet Classification with Deep Convolutional Neural Networks"
<https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf>`_.
Args:
num_classes (int, optional): Output dim of last fc layer. If num_classes <= 0, last fc layer
will not be defined. Default: 1000.
Returns:
:ref:`api_paddle_nn_Layer`. An instance of AlexNet model.
Examples:
.. code-block:: python
import paddle
from paddle.vision.models import AlexNet
alexnet = AlexNet()
x = paddle.rand([1, 3, 224, 224])
out = alexnet(x)
print(out.shape)
# [1, 1000]
"""
def __init__(self, num_classes=1000):
super().__init__()
self.num_classes = num_classes
stdv = 1.0 / math.sqrt(3 * 11 * 11)
self._conv1 = ConvPoolLayer(3, 64, 11, 4, 2, stdv, act="relu")
stdv = 1.0 / math.sqrt(64 * 5 * 5)
self._conv2 = ConvPoolLayer(64, 192, 5, 1, 2, stdv, act="relu")
stdv = 1.0 / math.sqrt(192 * 3 * 3)
self._conv3 = Conv2D(
192,
384,
3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
stdv = 1.0 / math.sqrt(384 * 3 * 3)
self._conv4 = Conv2D(
384,
256,
3,
stride=1,
padding=1,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
stdv = 1.0 / math.sqrt(256 * 3 * 3)
self._conv5 = ConvPoolLayer(256, 256, 3, 1, 1, stdv, act="relu")
if self.num_classes > 0:
stdv = 1.0 / math.sqrt(256 * 6 * 6)
self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
self._fc6 = Linear(
in_features=256 * 6 * 6,
out_features=4096,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
self._fc7 = Linear(
in_features=4096,
out_features=4096,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
self._fc8 = Linear(
in_features=4096,
out_features=num_classes,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
)
def forward(self, inputs):
x = self._conv1(inputs)
x = self._conv2(x)
x = self._conv3(x)
x = F.relu(x)
x = self._conv4(x)
x = F.relu(x)
x = self._conv5(x)
if self.num_classes > 0:
x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._drop1(x)
x = self._fc6(x)
x = F.relu(x)
x = self._drop2(x)
x = self._fc7(x)
x = F.relu(x)
x = self._fc8(x)
return x
def _alexnet(arch, pretrained, **kwargs):
model = AlexNet(**kwargs)
if pretrained:
assert (
arch in model_urls
), "{} model do not have a pretrained model now, you should set pretrained=False".format(
arch
)
weight_path = get_weights_path_from_url(
model_urls[arch][0], model_urls[arch][1]
)
param = paddle.load(weight_path)
model.load_dict(param)
return model
def alexnet(pretrained=False, **kwargs):
"""AlexNet model from
`"ImageNet Classification with Deep Convolutional Neural Networks"
<https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf>`_.
Args:
pretrained (bool, optional): Whether to load pre-trained weights. If True, returns a model pre-trained
on ImageNet. Default: False.
**kwargs (optional): Additional keyword arguments. For details, please refer to :ref:`AlexNet <api_paddle_vision_AlexNet>`.
Returns:
:ref:`api_paddle_nn_Layer`. An instance of AlexNet model.
Examples:
.. code-block:: python
import paddle
from paddle.vision.models import alexnet
# build model
model = alexnet()
# build model and load imagenet pretrained weight
# model = alexnet(pretrained=True)
x = paddle.rand([1, 3, 224, 224])
out = model(x)
print(out.shape)
# [1, 1000]
"""
return _alexnet('alexnet', pretrained, **kwargs)
| [
"[email protected]"
] | |
af1b04d6cf97703519e4498002d19f6698381301 | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartB/Py判断是否为合理的括号.py | 2a3605334e6226a0c403baec737ed955220c4db7 | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py |
class Solution:
def isValid(self, s: str) -> bool:
dic = {'{': '}', '[': ']', '(': ')', '?': '?'}
stack = ['?']
for c in s:
if c in dic: stack.append(c)
elif dic[stack.pop()] != c: return False
return len(stack) == 1
if __name__ == '__main__':
s = Solution()
print(s.isValid("(){}"))
| [
"[email protected]"
] | |
01e81a1f99193030b8a12ff979b36ab877ecbdbd | 9dd14d428b2513376f0e1c3ec06a3b06fc60fc73 | /solution/operators/sdi_pandas_0.0.37/sdi_pandas_0.0.36/content/files/vflow/subengines/com/sap/python36/operators/sdi_pandas/sample/sample.py | 41fd2ff0d55d9875b9f252b305861c73eef12369 | [
"MIT"
] | permissive | thhapke/sdi_pandas | 38b1a3a688c62621fb858f03e4ac2f3bcbc20b88 | 7a9108007459260a30ea7ee404a76b42861c81c5 | refs/heads/master | 2020-07-24T10:40:05.643337 | 2020-04-08T06:59:52 | 2020-04-08T06:59:52 | 207,894,698 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = pd.DataFrame(
{'icol': [1, 2, 3, 4, 5], 'xcol2': ['A', 'A', 'B', 'B', 'C'], \
'xcol3': ['K', 'L', 'M', 'N', 'O'], 'xcol4': ['a1', 'a1', 'b1', 'b1', 'b1']})
default_msg = api.Message(attributes = {'format': 'pandas', 'name': 'test'}, body=df)
callback(default_msg)
class config:
## Meta data
config_params = dict()
version = '0.0.17'
tags = {'pandas': '','sdi_utils':''}
operator_description = "Sample from Dataframe"
operator_description_long = "Sampling over a DataFrame but keeps datasets with the same value of the \
defined column as set and not splitting them, e.g. sampling with the invariant_column='date' samples \
but ensures that all datasets of a certain date are taken or none. This leads to the fact that the \
sample_size is only a guiding target. Depending on the size of the datasets with the same value of \
the *invariant_column* compared to the *sample_size* this could deviate a lot. "
add_readme = dict()
add_readme["References"] = "[pandas doc: sample](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html)"
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
sample_size = 0.1
config_params['sample_size'] = {'title': 'Sample size', 'description': 'Sample size', 'type': 'number'}
random_state = 1
config_params['random_state'] = {'title': 'Random state', 'description': 'Random state', 'type': 'integer'}
invariant_column = ''
config_params['invariant_column'] = {'title': 'Invariant column', 'description': 'Column where all the same value records should be kept as a whole in a sample', 'type': 'string'}
def process(msg) :
att_dict = dict()
att_dict['config'] = dict()
att_dict['operator'] = 'sample'
if api.config.debug_mode == True:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='DEBUG')
else:
logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='INFO')
logger.info("Process started")
time_monitor = tp.progress()
# start custom process definition
# test if body refers to a DataFrame type
prev_att = msg.attributes
df = msg.body
if not isinstance(df, pd.DataFrame):
logger.error('Message body does not contain a pandas DataFrame')
raise TypeError('Message body does not contain a pandas DataFrame')
att_dict = dict()
att_dict['config'] = dict()
###### start calculation
sample_size = api.config.sample_size
if sample_size < 1 :
sample_size = int(sample_size * df.shape[0])
if sample_size < 1 :
sample_size = 1
logger.warning("Fraction of sample size too small. Set sample size to 1.")
elif sample_size > df.shape[0]:
logger.warning("Sample size larger than number of rows")
logger.debug("Samples_size: {}/() ({})".format(sample_size,df.shape[0],sample_size/df.shape[0]))
random_state = api.config.random_state
invariant_column = tfp.read_value(api.config.invariant_column)
if invariant_column and sample_size < df.shape[0]:
# get the average number of records for each value of invariant
sc_df = df.groupby(invariant_column)[invariant_column].count()
sample_size_invariant = int(sample_size / sc_df.mean())
sample_size_invariant = 1 if sample_size_invariant == 0 else sample_size_invariant # ensure minimum
sc_df = sc_df.sample(n=sample_size_invariant, random_state=random_state).to_frame()
sc_df.rename(columns={invariant_column: 'sum'}, inplace=True)
# sample the df by merge 2 df
df = pd.merge(df, sc_df, how='inner', right_index=True, left_on=invariant_column)
df.drop(columns=['sum'], inplace=True)
else:
df = df.sample(n=sample_size, random_state=random_state)
###### end calculation
##############################################
# final infos to attributes and info message
##############################################
if df.empty:
raise ValueError('DataFrame is empty')
logger.info('End of Process: {}'.format(time_monitor.elapsed_time()))
att_dict['memory'] = df.memory_usage(deep=True).sum() / 1024 ** 2
att_dict['columns'] = str(list(df.columns))
att_dict['shape'] = df.shape
att_dict['id'] = str(id(df))
logger.debug('Columns: {}'.format(str(df.columns)))
logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0], df.shape[1]))
logger.debug('Memory: {} kB'.format(att_dict['memory']))
example_rows = EXAMPLE_ROWS if df.shape[0] > EXAMPLE_ROWS else df.shape[0]
for i in range(0, example_rows):
att_dict['row_' + str(i)] = str([str(i)[:10].ljust(10) for i in df.iloc[i, :].tolist()])
logger.debug('Head data: {}'.format(att_dict['row_' + str(i)]))
# end custom process definition
log = log_stream.getvalue()
msg = api.Message(attributes=att_dict,body=df)
return log, msg
inports = [{'name': 'data', 'type': 'message.DataFrame',"description":"Input data"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
{'name': 'data', 'type': 'message.DataFrame',"description":"Output data"}]
def call_on_input(msg) :
log, msg = process(msg)
api.send(outports[0]['name'], log)
api.send(outports[1]['name'], msg)
api.set_port_callback([inports[0]['name']], call_on_input)
def main() :
print('Test: Default')
api.set_port_callback([inports[0]['name']], call_on_input)
| [
"[email protected]"
] | |
1131b28b0a153d0d74427cea61cfce5a5b8d28f4 | 90cc37b6cc67bce397411631587a14be72085d2e | /tests/unit/test_deployment.py | 6cda651e7bc2a1406caa35b50b68d8220d34e492 | [
"Unlicense"
] | permissive | michaeljoseph/righteous | 49d36bb895945a26d5db4b3d13a2e303aef3ef93 | ba95c574a94df85aca33397cc77b053e7f545705 | refs/heads/master | 2016-09-06T11:01:57.478168 | 2013-10-17T11:00:27 | 2013-10-17T11:00:27 | 2,584,142 | 2 | 1 | null | 2013-10-18T14:53:04 | 2011-10-16T00:07:14 | Python | UTF-8 | Python | false | false | 2,389 | py | from righteous.compat import urlencode
from .base import ApiTestCase
import righteous
class DeploymentTestCase(ApiTestCase):
def setUp(self):
self.setup_patching('righteous.api.deployment._request')
super(DeploymentTestCase, self).setUp()
def test_list_deployments(self):
righteous.init(
'user', 'pass', 'account_id', default_deployment_id='foo')
self.response.content = '{}'
righteous.list_deployments()
self.request.assert_called_once_with('/deployments.js')
def test_find_deployment_no_result(self):
self.response.content = '[]'
deployment = righteous.find_deployment('bruce')
request_url = '/deployments.js?filter=nickname=bruce'
self.request.assert_called_once_with(request_url)
assert not deployment
def test_deployment_info(self):
self.response.content = '{}'
righteous.deployment_info('/deployment/ref')
self.request.assert_called_once_with(
'/deployment/ref.js', prepend_api_base=False)
def test_create_deployment(self):
self.response.status_code = 201
self.response.headers['location'] = '/deployment/new_ref'
nickname = 'devops'
description = 'devops deployment'
create_data = {
'deployment[nickname]': nickname,
'deployment[description]': description,
}
expected = urlencode(create_data)
success, location = righteous.create_deployment(nickname, description)
self.request.assert_called_once_with(
'/deployments', method='POST', body=expected)
assert success
self.assertEqual(location, '/deployment/new_ref')
def test_delete_deployment(self):
self.response.content = '{}'
assert righteous.delete_deployment('/deployment/ref')
self.request.assert_called_once_with(
'/deployment/ref', method='DELETE', prepend_api_base=False)
def test_duplicate_deployment(self):
self.response.status_code = 201
self.response.headers['location'] = '/deployment/new_ref'
success, location = righteous.duplicate_deployment('/deployment/ref')
assert success
self.request.assert_any_call(
'/deployment/ref/duplicate', method='POST', prepend_api_base=False)
self.assertEqual(location, '/deployment/new_ref')
| [
"[email protected]"
] | |
b1e4cbf0ffddc1664eba106d1db12c5e68f7c59a | c1b901ed1eee4d5dc2ee252cd51b4e3c14f02554 | /Lime/output_extract.py | 22f7cbaed309f1eb2d7d5ed7fcd449a272591465 | [
"MIT"
] | permissive | lengjiayi/SpeakerVerifiaction-pytorch | 70a86c9c9029a214679e636917fb305a85a94182 | 99eb8de3357c85e2b7576da2a742be2ffd773ead | refs/heads/master | 2023-07-09T20:09:07.715305 | 2021-08-19T11:03:28 | 2021-08-19T11:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,168 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: [email protected]
@Software: PyCharm
@File: output_extract.py
@Time: 2020/3/21 5:57 PM
@Overview:
"""
from __future__ import print_function
import argparse
import json
import os
import pickle
import random
import time
from collections import OrderedDict
import numpy as np
import torch
import torch._utils
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms as transforms
from kaldi_io import read_mat
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
from Define_Model.SoftmaxLoss import AngleLinear, AdditiveMarginLinear
from Define_Model.model import PairwiseDistance
from Process_Data.Datasets.KaldiDataset import ScriptTrainDataset, \
ScriptTestDataset, ScriptValidDataset
from Process_Data.audio_processing import ConcateOrgInput, mvnormal, ConcateVarInput
from TrainAndTest.common_func import create_model
# Version conflict
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import warnings
warnings.filterwarnings("ignore")
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')
# Data options
parser.add_argument('--train-dir', type=str, help='path to dataset')
parser.add_argument('--test-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--train-set-name', type=str, required=True, help='path to voxceleb1 test dataset')
parser.add_argument('--test-set-name', type=str, required=True, help='path to voxceleb1 test dataset')
parser.add_argument('--sitw-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--sample-utt', type=int, default=120, metavar='SU', help='Dimensionality of the embedding')
parser.add_argument('--test-only', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--check-path', help='folder to output model checkpoints')
parser.add_argument('--extract-path', help='folder to output model grads, etc')
parser.add_argument('--start-epochs', type=int, default=36, metavar='E', help='number of epochs to train (default: 10)')
parser.add_argument('--epochs', type=int, default=36, metavar='E', help='number of epochs to train (default: 10)')
# Data options
parser.add_argument('--feat-dim', default=64, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--input-dim', default=257, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--revert', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--input-length', choices=['var', 'fix'], default='var',
help='choose the acoustic features type.')
parser.add_argument('--remove-vad', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--mvnorm', action='store_true', default=False,
help='using Cosine similarity')
# Model options
parser.add_argument('--model', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--resnet-size', default=8, type=int, metavar='RES', help='The channels of convs layers)')
parser.add_argument('--filter', type=str, default='None', help='replace batchnorm with instance norm')
parser.add_argument('--input-norm', type=str, default='Mean', help='batchnorm with instance norm')
parser.add_argument('--vad', action='store_true', default=False, help='vad layers')
parser.add_argument('--inception', action='store_true', default=False, help='multi size conv layer')
parser.add_argument('--inst-norm', action='store_true', default=False, help='batchnorm with instance norm')
parser.add_argument('--mask-layer', type=str, default='None', help='time or freq masking layers')
parser.add_argument('--mask-len', type=int, default=20, help='maximum length of time or freq masking layers')
parser.add_argument('--block-type', type=str, default='None', help='replace batchnorm with instance norm')
parser.add_argument('--relu-type', type=str, default='relu', help='replace batchnorm with instance norm')
parser.add_argument('--encoder-type', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--transform', type=str, default="None", help='add a transform layer after embedding layer')
parser.add_argument('--channels', default='64,128,256', type=str,
metavar='CHA', help='The channels of convs layers)')
parser.add_argument('--fast', action='store_true', default=False, help='max pooling for fast')
parser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE',
help='kernel size of conv filters')
parser.add_argument('--padding', default='', type=str, metavar='KE', help='padding size of conv filters')
parser.add_argument('--stride', default='2', type=str, metavar='ST', help='stride size of conv filters')
parser.add_argument('--time-dim', default=1, type=int, metavar='FEAT', help='acoustic feature dimension')
parser.add_argument('--avg-size', type=int, default=4, metavar='ES', help='Dimensionality of the embedding')
parser.add_argument('--loss-type', type=str, default='soft', help='path to voxceleb1 test dataset')
parser.add_argument('--dropout-p', type=float, default=0., metavar='BST',
help='input batch size for testing (default: 64)')
# args for additive margin-softmax
parser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN',
help='the margin value for the angualr softmax loss function (default: 3.0')
parser.add_argument('--s', type=float, default=15, metavar='S',
help='the margin value for the angualr softmax loss function (default: 3.0')
# args for a-softmax
parser.add_argument('--m', type=int, default=3, metavar='M',
help='the margin value for the angualr softmax loss function (default: 3.0')
parser.add_argument('--lambda-min', type=int, default=5, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--lambda-max', type=float, default=0.05, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--alpha', default=12, type=float,
metavar='l2 length', help='acoustic feature dimension')
parser.add_argument('--cos-sim', action='store_true', default=True, help='using Cosine similarity')
parser.add_argument('--embedding-size', type=int, metavar='ES', help='Dimensionality of the embedding')
parser.add_argument('--nj', default=12, type=int, metavar='NJOB', help='num of job')
parser.add_argument('--batch-size', type=int, default=1, metavar='BS',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='BST',
help='input batch size for testing (default: 64)')
parser.add_argument('--input-per-spks', type=int, default=192, metavar='IPFT',
help='input sample per file for testing (default: 8)')
parser.add_argument('--test-input-per-file', type=int, default=1, metavar='IPFT',
help='input sample per file for testing (default: 8)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--gpu-id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=123456, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1, metavar='LI',
help='how many batches to wait before logging training status')
parser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank',
help='choose the acoustic features type.')
parser.add_argument('--makemfb', action='store_true', default=False,
help='need to make mfb file')
parser.add_argument('--makespec', action='store_true', default=False,
help='need to make spectrograms file')
args = parser.parse_args()
# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.multiprocessing.set_sharing_strategy('file_system')
if args.cuda:
cudnn.benchmark = True
# Define visulaize SummaryWriter instance
kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}
l2_dist = nn.CosineSimilarity(dim=1, eps=1e-6) if args.cos_sim else PairwiseDistance(2)
if args.input_length == 'var':
transform = transforms.Compose([
ConcateOrgInput(remove_vad=args.remove_vad),
])
transform_T = transforms.Compose([
ConcateOrgInput(remove_vad=args.remove_vad),
])
elif args.input_length == 'fix':
transform = transforms.Compose([
ConcateVarInput(remove_vad=args.remove_vad),
])
transform_T = transforms.Compose([
ConcateVarInput(remove_vad=args.remove_vad),
])
if args.mvnorm:
transform.transforms.append(mvnormal())
transform_T.transforms.append(mvnormal())
file_loader = read_mat
train_dir = ScriptTrainDataset(dir=args.train_dir, samples_per_speaker=args.input_per_spks,
loader=file_loader, transform=transform, return_uid=True)
indices = list(range(len(train_dir)))
random.shuffle(indices)
indices = indices[:args.sample_utt]
train_part = torch.utils.data.Subset(train_dir, indices)
veri_dir = ScriptTestDataset(dir=args.train_dir, loader=file_loader, transform=transform_T, return_uid=True)
veri_dir.partition(args.sample_utt)
test_dir = ScriptTestDataset(dir=args.test_dir, loader=file_loader, transform=transform_T, return_uid=True)
test_dir.partition(args.sample_utt)
valid_dir = ScriptValidDataset(valid_set=train_dir.valid_set, spk_to_idx=train_dir.spk_to_idx,
valid_uid2feat=train_dir.valid_uid2feat, valid_utt2spk_dict=train_dir.valid_utt2spk_dict,
loader=file_loader, transform=transform, return_uid=True)
indices = list(range(len(valid_dir)))
random.shuffle(indices)
indices = indices[:args.sample_utt]
valid_part = torch.utils.data.Subset(valid_dir, indices)
def train_extract(train_loader, model, file_dir, set_name, save_per_num=2500):
# switch to evaluate mode
model.eval()
input_grads = []
inputs_uids = []
pbar = tqdm(enumerate(train_loader))
for batch_idx, (data, label, uid) in pbar:
# orig = data.detach().numpy().squeeze().astype(np.float32)
data = Variable(data.cuda(), requires_grad=True)
logit, _ = model(data)
if args.loss_type == 'asoft':
classifed, _ = logit
else:
classifed = logit
# conv1 = model.conv1(data)
# bn1 = model.bn1(conv1)
# relu1 = model.relu(bn1)
# conv1 = conv1.cpu().detach().numpy().squeeze().astype(np.float32)
# bn1 = bn1.cpu().detach().numpy().squeeze().astype(np.float32)
# relu1 = relu1.cpu().detach().numpy().squeeze().astype(np.float32)
classifed[0][label.long()].backward()
grad = data.grad.cpu().numpy().squeeze().astype(np.float32)
data = data.data.cpu().numpy().squeeze().astype(np.float32)
if args.revert:
grad = grad.transpose()
data = data.transpose()
input_grads.append([data, grad])
inputs_uids.append(uid)
model.zero_grad()
if batch_idx % args.log_interval == 0:
pbar.set_description('Saving {} : [{:8d}/{:8d} ({:3.0f}%)] '.format(
uid,
batch_idx + 1,
len(train_loader.dataset),
100. * batch_idx / len(train_loader)))
if (batch_idx + 1) % save_per_num == 0 or (batch_idx + 1) == len(train_loader.dataset):
num = batch_idx // save_per_num if batch_idx + 1 % save_per_num == 0 else batch_idx // save_per_num + 1
# checkpoint_dir / extract / < dataset > / < set >.*.bin
filename = file_dir + '/%s.%d.bin' % (set_name, num)
with open(filename, 'wb') as f:
pickle.dump(input_grads, f)
with open(file_dir + '/inputs.%s.%d.json' % (set_name, num), 'w') as f:
json.dump(inputs_uids, f)
input_grads = []
inputs_uids = []
print('Saving pairs in %s.\n' % file_dir)
torch.cuda.empty_cache()
def test_extract(test_loader, model, file_dir, set_name, save_per_num=1500):
    """Dump gradient pairs for verification trials in ``test_loader``.

    For each trial ``(data_a, data_b, label)`` the pairwise similarity
    ``l2_dist(feat_a, feat_b)`` is back-propagated so that both inputs carry
    gradients.  Tuples ``(label, grad_a, grad_b, data_a, data_b)`` are flushed
    every ``save_per_num`` trials to ``<file_dir>/<set_name>.<num>.bin`` with a
    matching uid-pair list in ``inputs.<set_name>.<num>.json``.

    Args:
        test_loader: DataLoader yielding ``(data_a, data_b, label, uid_a, uid_b)``.
        model: network returning ``(logit, feature)``; evaluated on CUDA.
        file_dir: output directory for the chunk files.
        set_name: filename prefix for the chunks.
        save_per_num: number of trials per output chunk.
    """
    # switch to evaluate mode
    model.eval()
    input_grads = []
    inputs_uids = []
    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_b, label, uid_a, uid_b) in pbar:
        data_a = Variable(data_a.cuda(), requires_grad=True)
        data_b = Variable(data_b.cuda(), requires_grad=True)
        _, feat_a = model(data_a)
        _, feat_b = model(data_b)
        # Back-propagate the pair similarity so both inputs get gradients.
        cos_sim = l2_dist(feat_a, feat_b)
        cos_sim[0].backward()
        grad_a = data_a.grad.cpu().numpy().squeeze().astype(np.float32)
        grad_b = data_b.grad.cpu().numpy().squeeze().astype(np.float32)
        data_a = data_a.data.cpu().numpy().squeeze().astype(np.float32)
        data_b = data_b.data.cpu().numpy().squeeze().astype(np.float32)
        if args.revert:
            grad_a = grad_a.transpose()
            data_a = data_a.transpose()
            grad_b = grad_b.transpose()
            data_b = data_b.transpose()
        input_grads.append((label, grad_a, grad_b, data_a, data_b))
        inputs_uids.append([uid_a, uid_b])
        model.zero_grad()
        if batch_idx % args.log_interval == 0:
            pbar.set_description('Saving pair [{:8d}/{:8d} ({:3.0f}%)] '.format(
                batch_idx + 1,
                len(test_loader),
                100. * batch_idx / len(test_loader)))
        # NOTE(review): comparison against len(dataset) only fires when
        # batch_size == 1 — confirm against the caller's DataLoader.
        if (batch_idx + 1) % save_per_num == 0 or (batch_idx + 1) == len(test_loader.dataset):
            # Fix: the original chunk index used
            #   `batch_idx + 1 % save_per_num == 0`
            # which parses as `batch_idx + (1 % save_per_num) == 0` and is never
            # true, making the ternary dead code.  Both intended branches reduce
            # to the same value, so compute it directly: chunks 1, 2, ...
            num = batch_idx // save_per_num + 1
            # checkpoint_dir / extract / < dataset > / < set >.*.bin
            filename = file_dir + '/%s.%d.bin' % (set_name, num)
            with open(filename, 'wb') as f:
                pickle.dump(input_grads, f)
            with open(file_dir + '/inputs.%s.%d.json' % (set_name, num), 'w') as f:
                json.dump(inputs_uids, f)
            input_grads = []
            inputs_uids = []
    print('Saving pairs into %s.\n' % file_dir)
    torch.cuda.empty_cache()
def main():
    """Restore each checkpoint epoch and dump input-gradient pairs.

    Builds the model from the parsed CLI arguments, then for every epoch in
    ``[args.start_epochs, args.epochs]`` loads ``checkpoint_<e>.pth`` and runs
    gradient extraction for the train/valid/veri/test loaders into
    ``args.extract_path/epoch_<e>``.
    """
    print('\nNumber of Speakers: {}.'.format(train_dir.num_spks))
    # print the experiment configuration
    print('Current time is \33[91m{}\33[0m.'.format(str(time.asctime())))
    print('Parsed options: {}'.format(vars(args)))
    # instantiate model and initialize weights
    kernel_size = args.kernel_size.split(',')
    kernel_size = [int(x) for x in kernel_size]
    if args.padding == '':
        # no explicit padding given: derive 'same'-style padding per kernel dim
        padding = [int((x - 1) / 2) for x in kernel_size]
    else:
        padding = args.padding.split(',')
        padding = [int(x) for x in padding]
    kernel_size = tuple(kernel_size)
    padding = tuple(padding)
    stride = args.stride.split(',')
    stride = [int(x) for x in stride]
    channels = args.channels.split(',')
    channels = [int(x) for x in channels]
    model_kwargs = {'input_dim': args.input_dim, 'feat_dim': args.feat_dim, 'kernel_size': kernel_size,
                    'mask': args.mask_layer, 'mask_len': args.mask_len, 'block_type': args.block_type,
                    'filter': args.filter, 'inst_norm': args.inst_norm, 'input_norm': args.input_norm,
                    'stride': stride, 'fast': args.fast, 'avg_size': args.avg_size, 'time_dim': args.time_dim,
                    'padding': padding, 'encoder_type': args.encoder_type, 'vad': args.vad,
                    'transform': args.transform, 'embedding_size': args.embedding_size, 'ince': args.inception,
                    'resnet_size': args.resnet_size, 'num_classes': train_dir.num_spks,
                    'channels': channels, 'alpha': args.alpha, 'dropout_p': args.dropout_p}
    print('Model options: {}'.format(model_kwargs))
    model = create_model(args.model, **model_kwargs)
    # Swap in the margin-based classifier head matching the training loss.
    if args.loss_type == 'asoft':
        model.classifier = AngleLinear(in_features=args.embedding_size, out_features=train_dir.num_spks, m=args.m)
    elif args.loss_type == 'amsoft' or args.loss_type == 'arcsoft':
        model.classifier = AdditiveMarginLinear(feat_dim=args.embedding_size, n_classes=train_dir.num_spks)
    train_loader = DataLoader(train_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    veri_loader = DataLoader(veri_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
    valid_loader = DataLoader(valid_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    test_loader = DataLoader(test_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
    # sitw_test_loader = DataLoader(sitw_test_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    # sitw_dev_loader = DataLoader(sitw_dev_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    resume_path = args.check_path + '/checkpoint_{}.pth'
    print('=> Saving output in {}\n'.format(args.extract_path))
    epochs = np.arange(args.start_epochs, args.epochs + 1)
    for e in epochs:
        # Load model from Checkpoint file
        if os.path.isfile(resume_path.format(e)):
            print('=> loading checkpoint {}'.format(resume_path.format(e)))
            checkpoint = torch.load(resume_path.format(e))
            checkpoint_state_dict = checkpoint['state_dict']
            if isinstance(checkpoint_state_dict, tuple):
                checkpoint_state_dict = checkpoint_state_dict[0]
            # epoch = checkpoint['epoch']
            # if e == 0:
            #     filtered = checkpoint.state_dict()
            # else:
            # Drop BatchNorm bookkeeping entries so loading cannot fail on them.
            filtered = {k: v for k, v in checkpoint_state_dict.items() if 'num_batches_tracked' not in k}
            if list(filtered.keys())[0].startswith('module'):
                # Checkpoint was saved from a DataParallel wrapper: strip the prefix.
                new_state_dict = OrderedDict()
                for k, v in filtered.items():
                    name = k[7:]  # drop the leading 'module.' (7 characters)
                    new_state_dict[name] = v  # re-key the tensor under the unwrapped name
                model.load_state_dict(new_state_dict)
            else:
                model_dict = model.state_dict()
                model_dict.update(filtered)
                model.load_state_dict(model_dict)
        else:
            print('=> no checkpoint found at %s' % resume_path.format(e))
            continue
        model.cuda()
        file_dir = args.extract_path + '/epoch_%d' % e
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        if not args.test_only:
            # if args.cuda:
            #     model_conv1 = model.conv1.weight.cpu().detach().numpy()
            #     np.save(file_dir + '/model.conv1.npy', model_conv1)
            train_extract(train_loader, model, file_dir, '%s_train' % args.train_set_name)
            train_extract(valid_loader, model, file_dir, '%s_valid' % args.train_set_name)
            test_extract(veri_loader, model, file_dir, '%s_veri' % args.train_set_name)
        # NOTE(review): indentation reconstructed — test-set extraction is assumed
        # to run even with --test-only; confirm against the original file.
        test_extract(test_loader, model, file_dir, '%s_test' % args.test_set_name)
# Script entry point: run checkpoint loading + gradient extraction.
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
0ac74b8fcc292adb87b359e8d815025625d1b6c4 | e6208febf7e34d4108422c6da54453373733a421 | /sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_workflow_create_request.py | c0a6b77db6997e8fac5c7c8803d195ea356bad80 | [
"Apache-2.0"
] | permissive | wreed4/argo | 05889e5bb7738d534660c58a7ec71c454e6ac9bb | 41f94310b0f7fee1ccd533849bb3af7f1ad4f672 | refs/heads/master | 2023-01-22T05:32:12.254485 | 2022-01-27T21:24:45 | 2022-01-27T22:02:22 | 233,143,964 | 0 | 0 | Apache-2.0 | 2023-01-17T19:04:43 | 2020-01-10T22:56:25 | Go | UTF-8 | Python | false | false | 12,633 | py | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
    """Bind the model classes into module globals on first use.

    Deferring these imports avoids circular-import problems between the
    generated model modules; callers invoke this before touching
    ``CreateOptions`` or ``IoArgoprojWorkflowV1alpha1Workflow``.
    """
    from argo_workflows.model.create_options import CreateOptions
    from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow import (
        IoArgoprojWorkflowV1alpha1Workflow,
    )
    globals().update(
        CreateOptions=CreateOptions,
        IoArgoprojWorkflowV1alpha1Workflow=IoArgoprojWorkflowV1alpha1Workflow,
    )
# Auto-generated OpenAPI model: comments below aid navigation only; regenerate
# rather than hand-editing the logic.
class IoArgoprojWorkflowV1alpha1WorkflowCreateRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes on this model.
    allowed_values = {
    }

    # No extra validation constraints on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'create_options': (CreateOptions,),  # noqa: E501
            'instance_id': (str,),  # noqa: E501
            'namespace': (str,),  # noqa: E501
            'server_dry_run': (bool,),  # noqa: E501
            'workflow': (IoArgoprojWorkflowV1alpha1Workflow,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps python attribute names to the JSON keys used on the wire.
    attribute_map = {
        'create_options': 'createOptions',  # noqa: E501
        'instance_id': 'instanceID',  # noqa: E501
        'namespace': 'namespace',  # noqa: E501
        'server_dry_run': 'serverDryRun',  # noqa: E501
        'workflow': 'workflow',  # noqa: E501
    }

    # No server-managed read-only attributes on this model.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """IoArgoprojWorkflowV1alpha1WorkflowCreateRequest - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            create_options (CreateOptions): [optional] # noqa: E501
            instance_id (str): This field is no longer used.. [optional] # noqa: E501
            namespace (str): [optional] # noqa: E501
            server_dry_run (bool): [optional] # noqa: E501
            workflow (IoArgoprojWorkflowV1alpha1Workflow): [optional] # noqa: E501
        """
        # Pop framework-control kwargs before treating the rest as attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """IoArgoprojWorkflowV1alpha1WorkflowCreateRequest - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            create_options (CreateOptions): [optional] # noqa: E501
            instance_id (str): This field is no longer used.. [optional] # noqa: E501
            namespace (str): [optional] # noqa: E501
            server_dry_run (bool): [optional] # noqa: E501
            workflow (IoArgoprojWorkflowV1alpha1Workflow): [optional] # noqa: E501
        """
        # Pop framework-control kwargs before treating the rest as attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| [
"[email protected]"
] | |
e42793c0bb18d4947a7c52488c8b146780db1a2c | 1548ce77537dcd50ab04b0eaee050b5d30553e23 | /autotabular/evaluation/abstract_evaluator.py | 383ee7a13fd7f7766c258b0df36b52ef013fbb89 | [
"Apache-2.0"
] | permissive | Shamoo100/AutoTabular | 4a20e349104246bf825ebceae33dca0a79928f2e | 7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2 | refs/heads/main | 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,259 | py | import logging
import multiprocessing
import time
import warnings
from typing import Any, Dict, List, Optional, TextIO, Tuple, Type, Union, cast
import autotabular.pipeline.classification
import autotabular.pipeline.regression
import numpy as np
from autotabular.constants import CLASSIFICATION_TASKS, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION, MULTIOUTPUT_REGRESSION, REGRESSION_TASKS
from autotabular.metrics import Scorer, calculate_loss
from autotabular.pipeline.implementations.util import convert_multioutput_multiclass_to_multilabel
from autotabular.util.backend import Backend
from autotabular.util.logging_ import PicklableClientLogger, get_named_client_logger
from ConfigSpace import Configuration
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import VotingClassifier, VotingRegressor
from smac.tae import StatusType
from threadpoolctl import threadpool_limits
__all__ = ['AbstractEvaluator']
# General TYPE definitions for numpy
TYPE_ADDITIONAL_INFO = Dict[str, Union[int, float, str, Dict, List, Tuple]]
class MyDummyClassifier(DummyClassifier):
    """Baseline classifier used when no real ``Configuration`` is supplied.

    ``config == 1`` selects sklearn's ``'uniform'`` strategy, anything else
    ``'most_frequent'``.  The feature matrix is ignored: ``fit`` and
    ``predict_proba`` replace ``X`` with a single constant column, so only the
    label distribution matters.
    """

    def __init__(
        self,
        config: Configuration,
        random_state: np.random.RandomState,
        init_params: Optional[Dict[str, Any]] = None,
        # NOTE(review): mutable default dict is shared across calls; it is only
        # stored/read here, but confirm no caller mutates it.
        dataset_properties: Dict[str, Any] = {},
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
    ):
        self.config = config
        if config == 1:
            super(MyDummyClassifier, self).__init__(strategy='uniform')
        else:
            super(MyDummyClassifier, self).__init__(strategy='most_frequent')
        # Stored verbatim (sklearn estimator convention: __init__ only assigns).
        self.random_state = random_state
        self.init_params = init_params
        self.dataset_properties = dataset_properties
        self.include = include
        self.exclude = exclude

    def pre_transform(
        self,
        X: np.ndarray,
        y: np.ndarray,
        fit_params: Optional[Dict[str, Any]] = None
    ) -> Tuple[np.ndarray, Dict[str, Any]]:  # pylint: disable=R0201
        """Return the input unchanged (no preprocessing for the dummy model)."""
        if fit_params is None:
            fit_params = {}
        return X, fit_params

    def fit(
        self,
        X: np.ndarray,
        y: np.ndarray,
        sample_weight: Optional[Union[np.ndarray, List]] = None
    ) -> DummyClassifier:
        """Fit on a constant feature column; only ``y`` influences the model."""
        return super(MyDummyClassifier, self).fit(
            np.ones((X.shape[0], 1)), y, sample_weight=sample_weight)

    def fit_estimator(
            self,
            X: np.ndarray,
            y: np.ndarray,
            fit_params: Optional[Dict[str, Any]] = None) -> DummyClassifier:
        """Pipeline-compatible alias for :meth:`fit` (``fit_params`` ignored)."""
        return self.fit(X, y)

    def predict_proba(self,
                      X: np.ndarray,
                      batch_size: int = 1000) -> np.ndarray:
        """Predict probabilities from the constant column; float32 output."""
        new_X = np.ones((X.shape[0], 1))
        probas = super(MyDummyClassifier, self).predict_proba(new_X)
        probas = convert_multioutput_multiclass_to_multilabel(probas).astype(
            np.float32)
        return probas

    def estimator_supports_iterative_fit(self) -> bool:  # pylint: disable=R0201
        # Dummy models are fit in one shot.
        return False

    def get_additional_run_info(self) -> Optional[TYPE_ADDITIONAL_INFO]:  # pylint: disable=R0201
        # No extra run information for the dummy baseline.
        return None
class MyDummyRegressor(DummyRegressor):
    """Baseline regressor used when no real ``Configuration`` is supplied.

    ``config == 1`` selects sklearn's ``'mean'`` strategy, anything else
    ``'median'``.  The feature matrix is ignored: ``fit`` and ``predict``
    replace ``X`` with a single constant column, so only the target
    distribution matters.
    """

    def __init__(
        self,
        config: Configuration,
        random_state: np.random.RandomState,
        init_params: Optional[Dict[str, Any]] = None,
        # NOTE(review): mutable default dict is shared across calls; it is only
        # stored/read here, but confirm no caller mutates it.
        dataset_properties: Dict[str, Any] = {},
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
    ):
        self.config = config
        if config == 1:
            super(MyDummyRegressor, self).__init__(strategy='mean')
        else:
            super(MyDummyRegressor, self).__init__(strategy='median')
        # Stored verbatim (sklearn estimator convention: __init__ only assigns).
        self.random_state = random_state
        self.init_params = init_params
        self.dataset_properties = dataset_properties
        self.include = include
        self.exclude = exclude

    def pre_transform(
        self,
        X: np.ndarray,
        y: np.ndarray,
        fit_params: Optional[Dict[str, Any]] = None
    ) -> Tuple[np.ndarray, Dict[str, Any]]:  # pylint: disable=R0201
        """Return the input unchanged (no preprocessing for the dummy model)."""
        if fit_params is None:
            fit_params = {}
        return X, fit_params

    def fit(
        self,
        X: np.ndarray,
        y: np.ndarray,
        sample_weight: Optional[Union[np.ndarray,
                                      List]] = None) -> DummyRegressor:
        """Fit on a constant feature column; only ``y`` influences the model."""
        return super(MyDummyRegressor, self).fit(
            np.ones((X.shape[0], 1)), y, sample_weight=sample_weight)

    def fit_estimator(
            self,
            X: np.ndarray,
            y: np.ndarray,
            fit_params: Optional[Dict[str, Any]] = None) -> DummyRegressor:
        """Pipeline-compatible alias for :meth:`fit` (``fit_params`` ignored)."""
        return self.fit(X, y)

    def predict(self, X: np.ndarray, batch_size: int = 1000) -> np.ndarray:
        """Predict from the constant column; float32 output."""
        new_X = np.ones((X.shape[0], 1))
        return super(MyDummyRegressor, self).predict(new_X).astype(np.float32)

    def estimator_supports_iterative_fit(self) -> bool:  # pylint: disable=R0201
        # Dummy models are fit in one shot.
        return False

    def get_additional_run_info(self) -> Optional[TYPE_ADDITIONAL_INFO]:  # pylint: disable=R0201
        # No extra run information for the dummy baseline.
        return None
def _fit_and_suppress_warnings(
        logger: Union[logging.Logger, PicklableClientLogger],
        model: BaseEstimator,
        X: np.ndarray,
        y: np.ndarray) -> BaseEstimator:
    """Fit ``model`` on ``(X, y)``, routing any warnings to ``logger.debug``.

    Warnings raised during fitting are redirected to the debug log instead of
    stderr; the original warning filters are restored on exit.
    """

    def _warning_to_log(
        message: Union[Warning, str],
        category: Type[Warning],
        filename: str,
        lineno: int,
        file: Optional[TextIO] = None,
        line: Optional[str] = None,
    ) -> None:
        # Same "file:line: category:message" format used elsewhere in the module.
        logger.debug('%s:%s: %s:%s' %
                     (filename, lineno, str(category), message))

    with warnings.catch_warnings():
        warnings.showwarning = _warning_to_log
        model.fit(X, y)
    return model
class AbstractEvaluator(object):
def __init__(
self,
backend: Backend,
queue: multiprocessing.Queue,
metric: Scorer,
port: Optional[int],
configuration: Optional[Union[int, Configuration]] = None,
scoring_functions: Optional[List[Scorer]] = None,
seed: int = 1,
output_y_hat_optimization: bool = True,
num_run: Optional[int] = None,
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
disable_file_output: Union[bool, List[str]] = False,
init_params: Optional[Dict[str, Any]] = None,
budget: Optional[float] = None,
budget_type: Optional[str] = None,
):
# Limit the number of threads that numpy uses
threadpool_limits(limits=1)
self.starttime = time.time()
self.configuration = configuration
self.backend = backend
self.port = port
self.queue = queue
self.datamanager = self.backend.load_datamanager()
self.include = include
self.exclude = exclude
self.X_valid = self.datamanager.data.get('X_valid')
self.y_valid = self.datamanager.data.get('Y_valid')
self.X_test = self.datamanager.data.get('X_test')
self.y_test = self.datamanager.data.get('Y_test')
self.metric = metric
self.task_type = self.datamanager.info['task']
self.seed = seed
self.output_y_hat_optimization = output_y_hat_optimization
self.scoring_functions = scoring_functions
if isinstance(disable_file_output, (bool, list)):
self.disable_file_output: Union[bool,
List[str]] = disable_file_output
else:
raise ValueError(
'disable_file_output should be either a bool or a list')
if self.task_type in REGRESSION_TASKS:
if not isinstance(self.configuration, Configuration):
self.model_class = MyDummyRegressor
else:
self.model_class = \
autotabular.pipeline.regression.SimpleRegressionPipeline
self.predict_function = self._predict_regression
else:
if not isinstance(self.configuration, Configuration):
self.model_class = MyDummyClassifier
else:
self.model_class = autotabular.pipeline.classification.SimpleClassificationPipeline
self.predict_function = self._predict_proba
self._init_params = {
'data_preprocessing:feat_type': self.datamanager.feat_type
}
if init_params is not None:
self._init_params.update(init_params)
if num_run is None:
num_run = 0
self.num_run = num_run
logger_name = '%s(%d):%s' % (self.__class__.__name__.split('.')[-1],
self.seed, self.datamanager.name)
if self.port is None:
self.logger = logging.getLogger(__name__)
else:
self.logger = get_named_client_logger(
name=logger_name,
port=self.port,
)
self.Y_optimization: Optional[Union[List, np.ndarray]] = None
self.Y_actual_train = None
self.budget = budget
self.budget_type = budget_type
# Please mypy to prevent not defined attr
self.model = self._get_model()
def _get_model(self) -> BaseEstimator:
if not isinstance(self.configuration, Configuration):
model = self.model_class(
config=self.configuration,
random_state=self.seed,
init_params=self._init_params)
else:
if self.task_type in REGRESSION_TASKS:
dataset_properties = {
'task': self.task_type,
'sparse': self.datamanager.info['is_sparse'] == 1,
'multioutput': self.task_type == MULTIOUTPUT_REGRESSION,
}
else:
dataset_properties = {
'task': self.task_type,
'sparse': self.datamanager.info['is_sparse'] == 1,
'multilabel': self.task_type == MULTILABEL_CLASSIFICATION,
'multiclass': self.task_type == MULTICLASS_CLASSIFICATION,
}
model = self.model_class(
config=self.configuration,
dataset_properties=dataset_properties,
random_state=self.seed,
include=self.include,
exclude=self.exclude,
init_params=self._init_params)
return model
def _loss(
self,
y_true: np.ndarray,
y_hat: np.ndarray,
scoring_functions: Optional[List[Scorer]] = None
) -> Union[float, Dict[str, float]]:
"""Auto-tabular follows a minimization goal. The calculate_loss
internally translate a score function to a minimization problem.
For a dummy prediction, the worst result is assumed.
Parameters
----------
y_true
"""
scoring_functions = (
self.scoring_functions
if scoring_functions is None else scoring_functions)
if not isinstance(self.configuration, Configuration):
if scoring_functions:
return {self.metric.name: self.metric._worst_possible_result}
else:
return self.metric._worst_possible_result
return calculate_loss(
y_true,
y_hat,
self.task_type,
self.metric,
scoring_functions=scoring_functions)
def finish_up(
self,
loss: Union[Dict[str, float], float],
train_loss: Optional[Union[float, Dict[str, float]]],
opt_pred: np.ndarray,
valid_pred: np.ndarray,
test_pred: np.ndarray,
additional_run_info: Optional[TYPE_ADDITIONAL_INFO],
file_output: bool,
final_call: bool,
status: StatusType,
) -> Tuple[float, Union[float, Dict[str, float]], int, Dict[str, Union[
str, int, float, Dict, List, Tuple]]]:
"""This function does everything necessary after the fitting is done:
* predicting
* saving the files for the ensembles_statistics
* generate output for SMAC
We use it as the signal handler so we can recycle the code for the
normal usecase and when the runsolver kills us here :)
"""
self.duration = time.time() - self.starttime
if file_output:
file_out_loss, additional_run_info_ = self.file_output(
opt_pred,
valid_pred,
test_pred,
)
else:
file_out_loss = None
additional_run_info_ = {}
validation_loss, test_loss = self.calculate_auxiliary_losses(
valid_pred,
test_pred,
)
if file_out_loss is not None:
return self.duration, file_out_loss, self.seed, additional_run_info_
if isinstance(loss, dict):
loss_ = loss
loss = loss_[self.metric.name]
else:
loss_ = {}
additional_run_info = ({} if additional_run_info is None else
additional_run_info)
for metric_name, value in loss_.items():
additional_run_info[metric_name] = value
additional_run_info['duration'] = self.duration
additional_run_info['num_run'] = self.num_run
if train_loss is not None:
additional_run_info['train_loss'] = train_loss
if validation_loss is not None:
additional_run_info['validation_loss'] = validation_loss
if test_loss is not None:
additional_run_info['test_loss'] = test_loss
rval_dict = {
'loss': loss,
'additional_run_info': additional_run_info,
'status': status
}
if final_call:
rval_dict['final_queue_element'] = True
self.queue.put(rval_dict)
return self.duration, loss_, self.seed, additional_run_info_
def calculate_auxiliary_losses(
self,
Y_valid_pred: np.ndarray,
Y_test_pred: np.ndarray,
) -> Tuple[Optional[float], Optional[float]]:
if Y_valid_pred is not None:
if self.y_valid is not None:
validation_loss: Optional[Union[float,
Dict[str,
float]]] = self._loss(
self.y_valid,
Y_valid_pred)
if isinstance(validation_loss, dict):
validation_loss = validation_loss[self.metric.name]
else:
validation_loss = None
else:
validation_loss = None
if Y_test_pred is not None:
if self.y_test is not None:
test_loss: Optional[Union[float,
Dict[str, float]]] = self._loss(
self.y_test, Y_test_pred)
if isinstance(test_loss, dict):
test_loss = test_loss[self.metric.name]
else:
test_loss = None
else:
test_loss = None
return validation_loss, test_loss
def file_output(
self,
Y_optimization_pred: np.ndarray,
Y_valid_pred: np.ndarray,
Y_test_pred: np.ndarray,
) -> Tuple[Optional[float], Dict[str, Union[str, int, float, List, Dict,
Tuple]]]:
# Abort if self.Y_optimization is None
# self.Y_optimization can be None if we use partial-cv, then,
# obviously no output should be saved.
if self.Y_optimization is None:
return None, {}
# Abort in case of shape misalignment
if np.shape(self.Y_optimization)[0] != Y_optimization_pred.shape[0]:
return (
1.0,
{
'error':
"Targets %s and prediction %s don't have "
"the same length. Probably training didn't "
'finish' %
(np.shape(self.Y_optimization), Y_optimization_pred.shape)
},
)
# Abort if predictions contain NaNs
for y, s in [
# Y_train_pred deleted here. Fix unittest accordingly.
[Y_optimization_pred, 'optimization'],
[Y_valid_pred, 'validation'],
[Y_test_pred, 'test']
]:
if y is not None and not np.all(np.isfinite(y)):
return (
1.0,
{
'error':
'Model predictions for %s set contains NaNs.' % s
},
)
# Abort if we don't want to output anything.
# Since disable_file_output can also be a list, we have to explicitly
# compare it with True.
if self.disable_file_output is True:
return None, {}
# Notice that disable_file_output==False and disable_file_output==[]
# means the same thing here.
if self.disable_file_output is False:
self.disable_file_output = []
# Here onwards, the self.disable_file_output can be treated as a list
self.disable_file_output = cast(List, self.disable_file_output)
# This file can be written independently of the others down bellow
if ('y_optimization' not in self.disable_file_output):
if self.output_y_hat_optimization:
self.backend.save_targets_ensemble(self.Y_optimization)
models: Optional[BaseEstimator] = None
if hasattr(self, 'models'):
if len(self.models) > 0 and self.models[
0] is not None: # type: ignore[attr-defined]
if ('models' not in self.disable_file_output):
if self.task_type in CLASSIFICATION_TASKS:
models = VotingClassifier(
estimators=None,
voting='soft',
)
else:
models = VotingRegressor(estimators=None)
# Mypy cannot understand hasattr yet
models.estimators_ = self.models # type: ignore[attr-defined]
self.backend.save_numrun_to_dir(
seed=self.seed,
idx=self.num_run,
budget=self.budget,
model=self.model
if 'model' not in self.disable_file_output else None,
cv_model=models
if 'cv_model' not in self.disable_file_output else None,
ensemble_predictions=(Y_optimization_pred if 'y_optimization'
not in self.disable_file_output else None),
valid_predictions=(Y_valid_pred if 'y_valid'
not in self.disable_file_output else None),
test_predictions=(Y_test_pred if 'y_test'
not in self.disable_file_output else None),
)
return None, {}
def _predict_proba(
self,
X: np.ndarray,
model: BaseEstimator,
task_type: int,
Y_train: Optional[np.ndarray] = None,
) -> np.ndarray:
def send_warnings_to_log(
message: Union[Warning, str],
category: Type[Warning],
filename: str,
lineno: int,
file: Optional[TextIO] = None,
line: Optional[str] = None,
) -> None:
self.logger.debug('%s:%s: %s:%s' %
(filename, lineno, str(category), message))
return
with warnings.catch_warnings():
warnings.showwarning = send_warnings_to_log
Y_pred = model.predict_proba(X, batch_size=1000)
if Y_train is None:
raise ValueError('Y_train is required for classification problems')
Y_pred = self._ensure_prediction_array_sizes(Y_pred, Y_train)
return Y_pred
def _predict_regression(
self,
X: np.ndarray,
model: BaseEstimator,
task_type: int,
Y_train: Optional[np.ndarray] = None) -> np.ndarray:
def send_warnings_to_log(
message: Union[Warning, str],
category: Type[Warning],
filename: str,
lineno: int,
file: Optional[TextIO] = None,
line: Optional[str] = None,
) -> None:
self.logger.debug('%s:%s: %s:%s' %
(filename, lineno, str(category), message))
return
with warnings.catch_warnings():
warnings.showwarning = send_warnings_to_log
Y_pred = model.predict(X)
if len(Y_pred.shape) == 1:
Y_pred = Y_pred.reshape((-1, 1))
return Y_pred
    def _ensure_prediction_array_sizes(self, prediction: np.ndarray,
                                       Y_train: np.ndarray) -> np.ndarray:
        """Pad ``prediction`` with zero columns for classes missing from the
        training fold so its width equals the dataset's ``label_num``.

        Only multiclass tasks that produced too few columns are adjusted;
        everything else is returned unchanged.
        """
        num_classes = self.datamanager.info['label_num']

        if self.task_type == MULTICLASS_CLASSIFICATION and \
                prediction.shape[1] < num_classes:
            if Y_train is None:
                raise ValueError('Y_train must not be None!')
            # Classes present in this fold, sorted (np.unique sorts), i.e.
            # the column order of ``prediction``.
            classes = list(np.unique(Y_train))

            # column index in ``prediction`` -> global class number
            mapping = dict()
            for class_number in range(num_classes):
                if class_number in classes:
                    index = classes.index(class_number)
                    mapping[index] = class_number
            new_predictions = np.zeros((prediction.shape[0], num_classes),
                                       dtype=np.float32)

            # Scatter the known columns into their global positions; the
            # remaining columns stay zero probability.
            for index in mapping:
                class_index = mapping[index]
                new_predictions[:, class_index] = prediction[:, index]

            return new_predictions

        return prediction
| [
"[email protected]"
] | |
da6990b212765548549d6a7ed409b29dfd3ff68a | 758ca5e2bf50016fbac7022ac5f9036aa8aa099b | /LeetCodeWeb.py | 3359b7e6c9cd5b4f92bd6298419aa98886ca70f5 | [] | no_license | zhantong/leetcode-web | 04f17901e4bf5a6065e35dd126dd7bbcc8b1128f | 3f79f5463e77ed7eab8b808a7004eea8c29fc35e | refs/heads/master | 2021-01-02T22:54:54.797228 | 2017-10-19T02:00:48 | 2017-10-19T02:00:48 | 99,420,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import g
import os.path
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import sqlite3
app = Flask(__name__)
# Directory containing this file; description/submission paths hang off it.
ROOT = os.path.realpath(os.path.dirname(__file__))
# SQLite database with the problem index (relative to the working dir).
DATABASE = 'leetcode.db'
def get_db():
    """Return the request-scoped SQLite connection, opening it lazily."""
    connection = getattr(g, '_database', None)
    if connection is None:
        connection = sqlite3.connect(DATABASE)
        g._database = connection
    return connection
@app.route('/')
def hello_world():
    # The site has no landing page of its own; go straight to the list.
    return redirect('/problems')
@app.route('/problems')
def show_problem_list():
    """Render the summary page listing every problem."""
    return render_template('problems_summary.html',
                           problem_list=get_problem_list())
@app.route('/problems/<slug>')
def show_problem(slug):
    """Render one problem page: description HTML plus highlighted solutions.

    PJAX requests receive only the content fragment; normal requests get
    the full page including the sidebar problem list.
    """
    c = get_db().cursor()
    c.execute('SELECT id,title FROM problem WHERE slug=?', (slug,))
    id, title = c.fetchone()
    # Description files are named like "001. Two Sum.html".
    description_file_name = str(id).zfill(3) + '. ' + title + '.html'
    file_path = os.path.join(ROOT, 'descriptions', description_file_name)
    if os.path.exists(file_path):
        with open(file_path, 'r', encoding='utf-8') as f:
            description = f.read()
    else:
        # Paid-only problems have no saved description file.
        description = '收费题目'
    codes = get_codes(('python', 'java', 'c++'), id, title)
    title = str(id) + '. ' + title
    if 'X-PJAX' in request.headers:
        # Partial render for PJAX navigation (no sidebar).
        return render_template('problem_description.html', description=description, codes=codes, title=title,
                               id=id)
    return render_template('problem.html', description=description, codes=codes,
                           problem_list=get_problem_list(), title=title, id=id)
@app.teardown_appcontext
def close_connection(exception):
    """Close the request's SQLite connection when the app context ends."""
    connection = getattr(g, '_database', None)
    if connection is None:
        return
    connection.close()
def get_codes(code_types, id, title):
    """Return ``(language label, highlighted HTML)`` pairs for the saved
    solutions of problem *id*/*title*, skipping languages with no file."""
    # pygments lexer key -> (display label / directory name, file extension)
    code_infos = {
        'java': ('Java', 'java'),
        'python': ('Python', 'py'),
        'c++': ('C++', 'cpp')
    }
    codes = []
    for code_type in code_types:
        code_info = code_infos[code_type]
        # e.g. submissions/001. Two Sum/Python/Solution.py
        file_path = os.path.join(ROOT, 'submissions', str(id).zfill(3) + '. ' + title, code_info[0],
                                 'Solution.' + code_info[1])
        if not os.path.exists(file_path):
            continue
        with open(file_path, 'r', encoding='utf-8') as f:
            code = highlight(f.read(), get_lexer_by_name(code_type), HtmlFormatter())
        codes.append((code_info[0], code))
    return codes
def get_problem_list():
    """Load id/url/name dicts for all problems, ordered by id."""
    cursor = get_db().cursor()
    rows = cursor.execute('SELECT id,title,slug FROM problem ORDER BY id')
    return [{'id': pid,
             'url': '/problems/' + slug,
             'name': str(pid).zfill(3) + '. ' + title}
            for pid, title, slug in rows]
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| [
"[email protected]"
] | |
f41bb92d7a8588b556a3187e89551e60d327b03e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /gPJTSqmJ4qQPxRg5a_21.py | 9fc9af14fff56aa6101c0d3a246a2a1ebb7d3158 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py |
def func(num):
    """Sum of (digit - number_of_digits) over the decimal digits of *num*."""
    digits = str(num)
    penalty = len(digits)
    total = 0
    for ch in digits:
        total += int(ch) - penalty
    return total
| [
"[email protected]"
] | |
330058010818406687c80f7723e26b445b282e69 | 5be2fc94724cc05d2dc449e0f5b40d9fb07edd51 | /tests/test_biosample.py | 4e618528f17766b407935e78b014a86d7a17a3b8 | [
"MIT"
] | permissive | LucaCappelletti94/encodeproject | b84614683c8652f812f2c01b0002903d849a080b | a2bcae8cfbb505a978ecea95c3a007f65625c57a | refs/heads/master | 2022-05-07T13:16:58.774258 | 2022-04-27T07:51:22 | 2022-04-27T07:51:22 | 216,822,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from encodeproject import biosample, biosamples
def test_biosample():
    # Smoke test: fetch one ENCODE biosample by accession.  The second call
    # flips the optional flag (NOTE(review): its meaning is not visible
    # here -- check encodeproject.biosample's signature).
    biosample("ENCSR000EDP")
    biosample("ENCSR000EDP", False)
def test_biosamples():
    # Batch retrieval of several accessions, again with the flag flipped.
    biosamples(["ENCFF454HMH", "ENCFF663AYS"])
    biosamples(["ENCSR000EDP"], False)
"[email protected]"
] | |
14cc45de89528b42640f58cba86eb2f58860bbcc | 1879e4df9cff25bc0c32ff63aedc859301062f9d | /0x05-personal_data/encrypt_password.py | 088ba68a96806e4bfba46db604229b5f920df220 | [] | no_license | rakiasomai/holbertonschool-web_back_end | 0f9d36160c9762df0826adcac66b009d1076043b | f5aeeda56def93fe13d901dd52217b0dbd4124e9 | refs/heads/master | 2023-02-28T10:02:54.929275 | 2021-02-06T22:17:04 | 2021-02-06T22:17:04 | 305,420,230 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | #!/usr/bin/env python3
''' Personal data '''
import bcrypt
def hash_password(password: str) -> bytes:
    ''' Hash *password* with bcrypt using a freshly generated salt. '''
    var = password.encode('utf-8')
    return bcrypt.hashpw(var, bcrypt.gensalt())
def is_valid(hashed_password: bytes, password: str) -> bool:
    ''' Return True when *password* matches the bcrypt *hashed_password*. '''
    var = password.encode('utf-8')
    return bcrypt.checkpw(var, hashed_password)
| [
"[email protected]"
] | |
5f62efd77cda877b0f315654e66fcb575dcf38a5 | b21180985c994c19e850ef51d5d87c6bf595dc21 | /wechat/queryexp.py | efc683b5018ed5bac565cde68dd6455b49f93e69 | [] | no_license | hldai/labelwc | c74d3af98576acd514f9136db663ca4cbd95708f | 38c969c61f240e49d5475be716c6b159b57220cd | refs/heads/master | 2020-12-02T22:18:06.991302 | 2017-08-13T13:04:44 | 2017-08-13T13:04:44 | 96,111,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,942 | py | from utils import load_names_file
def load_acronym_to_name(acronym_name_file, exclude_strs):
    """Read ``acronym<TAB>name<TAB>count`` lines and return {acronym: name},
    skipping acronyms listed in *exclude_strs*.

    Python 2 style: lines are byte strings decoded to unicode.  A later
    duplicate of an acronym overwrites the earlier entry.
    """
    acr_name_dict = dict()
    f = open(acronym_name_file, 'r')
    for line in f:
        line = line.strip().decode('utf-8')
        acr, name, _ = line.split('\t')
        if exclude_strs and acr in exclude_strs:
            continue
        acr_name_dict[acr] = name
        # print acr, name_max
    f.close()
    return acr_name_dict
def load_name_to_acronym(acronym_name_file, abbrev_exclude_strs):
    """Inverse mapping: for each full name keep the acronym with the
    highest count, excluding names in *abbrev_exclude_strs* (Python 2)."""
    name_acr_cnt_dict = dict()
    f = open(acronym_name_file, 'r')
    for line in f:
        line = line.strip().decode('utf-8')
        acr, name, cnt = line.split('\t')
        if name in abbrev_exclude_strs:
            continue
        cnt = int(cnt)
        tup = name_acr_cnt_dict.get(name, None)
        # Keep the acronym observed most often for this name.
        if not tup or tup[1] < cnt:
            name_acr_cnt_dict[name] = (acr, cnt)
        # print acr, name_max
    f.close()

    # Drop the counts, keeping only name -> best acronym.
    name_acr_dict = dict()
    for name, (acr, cnt) in name_acr_cnt_dict.iteritems():
        name_acr_dict[name] = acr
    return name_acr_dict
def expand_word(word, acr_name_dict):
    """Greedy longest-match expansion: scan *word* left to right, replacing
    the longest substring found in *acr_name_dict* by its full name;
    characters with no match are copied through unchanged."""
    pieces = []
    start = 0
    size = len(word)
    while start < size:
        match = ''
        end = size
        # Try the longest candidate first and shrink until a hit.
        while end > start:
            match = acr_name_dict.get(word[start:end], None)
            if match:
                break
            end -= 1
        if end > start:
            pieces.append(match)
            start = end
        else:
            # No dictionary entry covers this position; keep the character.
            pieces.append(word[start])
            start += 1
    return ''.join(pieces)
class QueryExpansion:
    """Expand or abbreviate Chinese entity names using acronym tables.

    Wraps two dictionaries (acronym -> full name, full name -> acronym)
    plus a Chinese word segmenter; offers both directions of rewriting.
    Python 2 code (print statement, byte-string decode).
    """

    def __init__(self, acronym_name_file, extra_acronym_name_file, expand_exclude_strs_file,
                 abbrev_exclude_strs_file, cn_seg_app):
        # Strings that must never be expanded / abbreviated.
        self.expand_exclude_strs = load_names_file(expand_exclude_strs_file)
        self.acr_name_dict = load_acronym_to_name(acronym_name_file, self.expand_exclude_strs)
        self.abbrev_exclude_strs = load_names_file(abbrev_exclude_strs_file)
        self.name_acr_dict = load_name_to_acronym(acronym_name_file, self.abbrev_exclude_strs)
        self.__load_extra_acronym_name_file(extra_acronym_name_file)
        self.seg_app = cn_seg_app

    def __load_extra_acronym_name_file(self, filename):
        """Merge hand-curated acronym/name pairs into both dictionaries."""
        f = open(filename)
        for line in f:
            acr, name = line.strip().decode('utf-8').split('\t')
            self.acr_name_dict[acr] = name
            self.name_acr_dict[name] = acr
        f.close()

    def __expand_name_words_ob(self, name_words):
        """Obsolete variant of __expand_name_words (kept for reference);
        only consults the exclude list before falling back to per-word
        expansion, and still carries a Python 2 debug print."""
        name_exp = ''
        lw = len(name_words)
        l = 0
        while l < lw:
            r = lw
            cur_str = ''
            # Longest run of words that forms an excluded (verbatim) string.
            while r > l:
                cur_str = ''.join(name_words[l:r])
                if cur_str in self.expand_exclude_strs:
                    break
                r -= 1
            if r > l:
                name_exp += cur_str
                l = r
            else:
                name_exp += expand_word(name_words[l], self.acr_name_dict)
                print name_words[l], name_exp
                l += 1
        return name_exp

    def __expand_name_words(self, name_words):
        """Expand a list of segmented words into a full name: greedy
        longest match against the exclude list and the acronym table,
        falling back to character-level expand_word."""
        name_exp = ''
        lw = len(name_words)
        l = 0
        while l < lw:
            r = lw
            flg = True
            while r > l:
                cur_str = ''.join(name_words[l:r])
                if cur_str in self.expand_exclude_strs:
                    # Excluded strings are copied through verbatim.
                    name_exp += cur_str
                    l = r
                    flg = False
                    break
                str_exp = self.acr_name_dict.get(cur_str, '')
                if str_exp:
                    name_exp += str_exp
                    l = r
                    flg = False
                    break
                r -= 1
            if flg:
                # No multi-word match: expand the single word itself.
                name_exp += expand_word(name_words[l], self.acr_name_dict)
                # print name_words[l], name_exp
                l += 1
        return name_exp

    def __abbrev_name_words(self, name_words):
        """Abbreviate a list of segmented words: greedy longest match
        against the name -> acronym table, copying unmatched words."""
        new_name = ''
        wlen = len(name_words)
        l = 0
        while l < wlen:
            r = wlen
            flg = False
            while r > l:
                cur_str = ''.join(name_words[l:r])
                str_acr = self.name_acr_dict.get(cur_str, '')
                if str_acr:
                    new_name += str_acr
                    l = r
                    flg = True
                    break
                r -= 1
            if not flg:
                new_name += name_words[l]
                l += 1
        return new_name

    def query_expansion_words(self, name_words):
        """Return the (possibly empty) expansion and abbreviation of the
        already-segmented *name_words*."""
        name_expand = self.__expand_name_words(name_words)
        name_abbrev = self.__abbrev_name_words(name_words)
        exp_names = []
        if name_expand:
            exp_names.append(name_expand)
        if name_abbrev:
            exp_names.append(name_abbrev)
        return exp_names

    def query_expansion(self, name_str):
        """Segment *name_str* and return expansion/abbreviation candidates
        that actually differ from the input (spaces ignored for length)."""
        name_words = self.seg_app.segment(name_str).split(' ')
        name_expand = self.__expand_name_words(name_words)
        name_abbrev = self.__abbrev_name_words(name_words)
        exp_cands = [name_expand, name_abbrev]
        exp_names = list()
        for name in exp_cands:
            # Same length as the de-spaced input means nothing was rewritten.
            if len(name) == len(name_str) - name_str.count(' '):
                continue
            if name != name_str:
                exp_names.append(name)
        return exp_names

    def expand_name(self, name_str):
        """Expanded form of *name_str*, or '' if expansion changes nothing."""
        words = self.seg_app.segment(name_str).split(' ')
        new_name = self.__expand_name_words(words)
        if new_name != name_str:
            return new_name
        return ''

    def abbrev_name(self, name_str):
        """Abbreviated form of *name_str*, or '' when unchanged or when the
        only difference is a single removed space."""
        words = self.seg_app.segment(name_str).split(' ')
        new_name = self.__abbrev_name_words(words)
        if len(new_name) == len(name_str) - 1 and ' ' in name_str:
            return ''
        if new_name != name_str:
            return new_name
        return ''
| [
"[email protected]"
] | |
b7d65448e1c658d3cc0b42437060aee5c8c46e72 | ca002961fa07883ff79ea67713bbc79e0ac79d28 | /plugins/brains/BBWander.py | 5c642987580df24602062aadb1efb8cb65ea2809 | [] | no_license | mindgitrwx/pyrobot3 | e51f8f1bac01a2509f2d89668102770053c16f56 | 45216c0c11f5efaaa4042916b2fe8eaac00fc4a7 | refs/heads/master | 2020-03-23T19:28:44.395949 | 2018-10-03T22:06:42 | 2018-10-03T22:06:42 | 141,980,775 | 0 | 3 | null | 2018-09-14T11:20:00 | 2018-07-23T07:53:27 | Python | UTF-8 | Python | false | false | 2,185 | py | # A Behavior-based control system
from pyrobot.brain.fuzzy import *
from pyrobot.brain.behaviors import *
import math, time
class Avoid (Behavior):
    """Fuzzy obstacle-avoidance behavior: slow down and steer away from
    the closest front range reading."""
    def setup(self): # called when created
        """Initialise the tick counter and timestamp bookkeeping."""
        self.lasttime = time.time()
        self.count = 0
    def direction(self, dir):
        """ computes opposite direction given an angle"""
        if dir < 0.0:
            return 0.9
        else:
            return -0.9
    def update(self):
        # Reset the counter every 50 ticks (timing bookkeeping only; the
        # recorded times are not otherwise used in this method).
        if self.count == 50:
            currtime = time.time()
            self.count = 0
            self.lasttime = time.time()
        else:
            self.count += 1
        # Closest front-range reading and its bearing (radians).
        close_dist, angle = min( [(s.distance(), s.angle(unit="radians")) for s in self.robot.range["front-all"]])
        max_sensitive = self.robot.range.getMaxvalue() * 0.8
        # Fuzzy rules: halt and turn away when close, cruise straight when clear.
        self.IF(Fuzzy(0.1, max_sensitive) << close_dist, 'translate', 0.0, "TooClose")
        self.IF(Fuzzy(0.1, max_sensitive) >> close_dist, 'translate', 0.3, "Ok")
        self.IF(Fuzzy(0.1, max_sensitive) << close_dist, 'rotate', self.direction(angle), "TooClose")
        self.IF(Fuzzy(0.1, max_sensitive) >> close_dist, 'rotate', 0.0, "Ok")
class TurnAround(State):
    """Spin in place while the front is blocked (< 1.0), then return to
    the wandering state."""
    def update(self):
        if min([s.distance() for s in self.robot.range["front-all"]]) < 1.0:
            self.move(0, .2)
        else:
            self.goto("state1")
class state1 (State):
    """Wandering state: drives with the Avoid behavior until an obstacle
    gets close, then hands control to TurnAround."""
    def setup(self):
        # Activate obstacle avoidance with its effector weights.
        self.add(Avoid(1, {'translate': .3, 'rotate': .3}))
        print(("initialized state", self.name))
    def update(self):
        # Switch states when anything in front is closer than 1 unit.
        if min([s.distance() for s in self.robot.range["front-all"]]) < 1:
            self.goto("TurnAround")
def INIT(engine): # passes in robot, if you need it
    """Pyrobot entry point: build a behavior-based brain wired to the
    robot's effectors, register both states and start in state1."""
    brain = BehaviorBasedBrain({'translate' : engine.robot.translate, \
                                'rotate' : engine.robot.rotate, \
                                'update' : engine.robot.update }, engine)
    # add a few states:
    brain.add(state1()) # non active
    brain.add(TurnAround()) # non active
    # activate a state:
    brain.activate('state1') # could have made it active in constructor
    return brain
| [
"[email protected]"
] | |
ed69f7188cb410e8984e1694b21b711cb0364bab | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/r_e.py | 94874cb9d8cd478b4704aa826a5d3460c87597a5 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # *lineRemaining* is a list of whitespace-split tokens.  Output happens
    # only when the payload is wrapped in standalone '"' tokens; an empty
    # quoted payload prints a blank line.  (Python 2 print statements.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret *fileName* line by line: only lines whose first token is
    # 'r_E' are valid print commands; anything else aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'r_E':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
b0eba99c0ca25ed04ea431a7bee9a18f308d4931 | 646cadb1c72ef4a060343baf2fcbe271958b6878 | /tigerjython/TJExamples/10-Ef/Eff4d.py | a11bfecbf166ccc406e98f9264dc1a5edaf3fec4 | [] | no_license | tigerjython/tjinstall | bd75cf8e4ae27b639a13865ef1ec5710391a2938 | aab61519b5299c2ab4f423c6fc5d8ea7c7860a99 | refs/heads/master | 2021-01-17T08:53:50.386905 | 2018-01-12T06:56:28 | 2018-01-12T06:56:28 | 40,659,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,151 | py | from gamegrid import *
# Grid coordinates of each station on the game grid.
locations = {
    'Althaus':Location(2, 0),
    'Bellevue':Location(0, 1),
    'City':Location(1, 3),
    'Dom':Location(4, 2),
    'Enge':Location(5, 0),
    'Friedhof':Location(3, 4)}

# Adjacency list of the (undirected) station graph.
neighbours = {
    'Althaus':['Bellevue', 'Dom', 'Enge'],
    'Bellevue':['Althaus', 'City', 'Dom'],
    'City':['Bellevue', 'Dom', 'Friedhof'],
    'Dom':['Althaus', 'Bellevue', 'City', 'Enge', 'Friedhof'],
    'Enge':['Althaus', 'Dom'],
    'Friedhof':['Althaus', 'City', 'Dom']}

# Edge lengths, keyed by the alphabetically ordered station pair.
distances = {('Althaus', 'Bellevue'):5, ('Althaus', 'Dom'):9,
             ('Althaus', 'Enge'):6, ('Althaus', 'Friedhof'):15,
             ('Bellevue', 'City'):3, ('Bellevue', 'Dom'):13,
             ('City', 'Dom'):4, ('City', 'Friedhof'):3,
             ('Dom', 'Enge'):2, ('Dom', 'Friedhof'):12}
def getNeighbourDistance(station1, station2):
    """Edge length between two adjacent stations (order-insensitive:
    the distances table is keyed by the alphabetically ordered pair)."""
    key = (station1, station2) if station1 < station2 else (station2, station1)
    return distances[key]
def totalDistance(li):
    """Total length of the path *li* (a list of consecutive stations)."""
    return sum(getNeighbourDistance(a, b) for a, b in zip(li, li[1:]))
def drawGraph():
    """Draw the station graph: blue labelled nodes, all edges, and each
    edge's distance rendered at its midpoint."""
    getBg().clear()
    getBg().setPaintColor(Color.blue)
    for station in locations:
        location = locations[station]
        getBg().fillCircle(toPoint(location), 10)
        startPoint = toPoint(location)
        getBg().drawText(station, startPoint)
        for s in neighbours[station]:
            drawConnection(station, s)
            # distances keys are the alphabetically ordered pair.
            if s < station:
                distance = distances[(s, station)]
            else:
                distance = distances[(station, s)]
            endPoint = toPoint(locations[s])
            getBg().drawText(str(distance),
                getDividingPoint(startPoint, endPoint, 0.5))
    refresh()
def drawConnection(startStation, endStation):
    # Draw a straight edge between the two stations' grid points.
    startPoint = toPoint(locations[startStation])
    endPoint = toPoint(locations[endStation])
    getBg().drawLine(startPoint, endPoint)
def search(station):
    """Depth-first search over all simple paths from *station* to the
    global targetStation, recording the shortest path found so far in the
    globals trackToTarget / trackLength (visited is the current path)."""
    global trackToTarget, trackLength
    visited.append(station) # station marked as visited
    # Check for solution
    if station == targetStation:
        currentDistance = totalDistance(visited)
        if currentDistance < trackLength:
            trackLength = currentDistance
            trackToTarget = visited[:]

    for s in neighbours[station]:
        if s not in visited: # if all are visited, recursion returns
            search(s) # recursive call
    visited.pop() # station may be visited by another path
def getStation(location):
    """Reverse lookup: name of the station at *location*, or None."""
    matches = (name for name, loc in locations.items() if loc == location)
    return next(matches, None)
def init():
    # Reset the path-search state and redraw a fresh graph.
    global visited, trackToTarget, trackLength
    visited = []
    trackToTarget = []
    trackLength = 1000  # sentinel; assumed larger than any real path length
    drawGraph()
def pressEvent(e):
    """Mouse handler: the first click picks the start station (red), the
    second picks the target (green); the search then runs and the shortest
    path is highlighted and reported in the status bar."""
    global isStart, startStation, targetStation
    mouseLoc = toLocationInGrid(e.getX(), e.getY())
    mouseStation = getStation(mouseLoc)
    if mouseStation == None:
        return  # click was not on a station
    if isStart:
        isStart = False
        init()
        setTitle("Klicke auf Zielstation")
        startStation = mouseStation
        getBg().setPaintColor(Color.red)
        getBg().fillCircle(toPoint(mouseLoc), 10)
    else:
        isStart = True
        setTitle("Noch einmal? Klicke auf Startstation")
        targetStation = mouseStation
        getBg().setPaintColor(Color.green)
        getBg().fillCircle(toPoint(mouseLoc), 10)
        search(startStation)
        setStatusText("Kürzester Weg von " + startStation + " nach "
            + targetStation + ": " + str(trackToTarget) + " Länge = "
            + str(trackLength))
        # Highlight each edge of the best path with a thick black line.
        for i in range(len(trackToTarget) - 1):
            s1 = trackToTarget[i]
            s2 = trackToTarget[i + 1]
            getBg().setPaintColor(Color.black)
            getBg().setLineWidth(3)
            drawConnection(s1, s2)
            getBg().setLineWidth(1)
    refresh()
isStart = True  # next click selects the start station
makeGameGrid(7, 5, 100, None, "sprites/city.png", False,
             mousePressed = pressEvent)
setTitle("City Guide. Klicke auf Startstation")
addStatusBar(30)
show()
init()
| [
"[email protected]"
] | |
773fa456f16adc76fdbca0568bf8feb723dfad1b | 2d4af29250dca8c72b74e190e74d92f1467120a0 | /TaobaoSdk/Request/TaohuaChildcatesGetRequest.py | a624463f6f4d5bb1765b77cb318501d6f0daeeac | [] | no_license | maimiaolmc/TaobaoOpenPythonSDK | 2c671be93c40cf487c0d7d644479ba7e1043004c | d349aa8ed6229ce6d76a09f279a0896a0f8075b3 | refs/heads/master | 2020-04-06T03:52:46.585927 | 2014-06-09T08:58:27 | 2014-06-09T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 通过类目ID获取它的类目列表
# @author [email protected]
# @date 2012-07-03 10:25:14
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
    # Directory containing this file.
    return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))

__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
# Make the SDK package importable regardless of the working directory.
if __modulePath not in sys.path:
    sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">通过类目ID获取它的类目列表</SPAN>
# <UL>
# </UL>
class TaohuaChildcatesGetRequest(object):
    """Request object for the TOP API ``taobao.taohua.childcates.get``:
    fetch the child-category list of a Taohua category by its ID."""

    def __init__(self):
        # Name the class explicitly: the original used
        # ``super(self.__class__, self)``, which recurses infinitely as
        # soon as this class is subclassed.
        super(TaohuaChildcatesGetRequest, self).__init__()

        # API method name sent with the request.
        self.method = "taobao.taohua.childcates.get"

        # Request timestamp (epoch seconds); defaults to creation time.
        self.timestamp = int(time.time())

        # ID of the category whose children are requested (optional).
        self.cate_id = None
| [
"[email protected]"
] | |
44745815bf70dfefbc566356404d6d02776e8a77 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03254/s956844324.py | 07d913466375174a9e3f7d1410eaa5709318f863 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | N,x = map(int, input().split())
A = sorted(map(int, input().split()))  # candy demands, cheapest first
s = 0  # number of children who receive exactly their demand
for i in range(N):
    x -= A[i]
    if x<0:
        break
    else:
        s += 1
# If candies remain after everyone is satisfied, the last child must
# absorb the surplus, so one child fewer gets exactly the demanded amount.
print(s if x<=0 else s-1)
"[email protected]"
] | |
21bd5066ba2a212591f1557923296b35eda07ae0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_072/ch25_2019_08_21_19_49_43_725038.py | 4ad02d8f6560a27c1b43320c99a7c2c44a6ef538 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | a=float(input('Qual será a distância percorrida ? '))
def preco_passagem(a):
if a<=200:
return a*0.5
else:
return 100+(a-100)*0.45
print('{0:.2f}'.format(preco_passagem(a))) | [
"[email protected]"
] | |
43c46f3842293ca95fcc91f1dcb7bdd6100621cd | f0937d9fb9108cdd69c5c477a782965bb1f25da5 | /first/settings.py | 5922c4ca8b47a4245264bfa0f0f1e6fe1814266e | [] | no_license | SimeonYS/first | 64218a5c2113cebfc1e1aec3f2808dcefcc30342 | 986e7bbbe5635685ce6795ee9f1459ce5d5a8ef5 | refs/heads/main | 2023-03-29T17:29:57.300975 | 2021-03-29T07:58:32 | 2021-03-29T07:58:32 | 352,561,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | BOT_NAME = 'first'
SPIDER_MODULES = ['first.spiders']
NEWSPIDER_MODULE = 'first.spiders'
# Export feeds as UTF-8 so non-ASCII article text survives.
FEED_EXPORT_ENCODING = 'utf-8'
# Show only errors to keep crawl output quiet.
LOG_LEVEL = 'ERROR'
DOWNLOAD_DELAY = 0
# Present a desktop Chrome user agent.
USER_AGENT="Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36"
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {
'first.pipelines.FirstPipeline': 300,
} | [
"[email protected]"
] | |
c5cf3f5dddb8cb510c9b6acf954b3ddde35e9e2e | 8506f0a22ef4edf03627951ced530b921ff4d383 | /tools/sumolib/output/convert/gpx.py | 0ec2127c1bf7a9f35b0a8fba39d2c071c8999ca0 | [] | no_license | deepak728/Traffic-Optimization- | fb0ac074fa601e524eb0d79defc7e8b84ab03138 | 85bc54de2e318f36bdcc5bb6f05badde0fb35ffe | refs/heads/master | 2020-03-29T23:29:36.740048 | 2018-11-12T09:19:17 | 2018-11-12T09:19:17 | 150,475,374 | 1 | 1 | null | 2018-11-12T09:19:19 | 2018-09-26T18:57:35 | Java | UTF-8 | Python | false | false | 1,366 | py | """
@file gpx.py
@author Jakob Erdmann
@author Laura Bieker
@date 2014-02-13
@version $Id: gpx.py 18096 2015-03-17 09:50:59Z behrisch $
This module includes functions for converting SUMO's fcd-output into
GPX format (http://en.wikipedia.org/wiki/GPS_eXchange_Format)
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2014 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from collections import defaultdict
def fcd2gpx(inpFCD, outSTRM, ignored):
    """Write SUMO fcd-output *inpFCD* to *outSTRM* in GPX format.

    One <trk> per vehicle id; every timestep contributes one trackpoint.
    *ignored* is unused (kept for the converter interface).  Python 2
    code (``dict.iteritems``).
    """
    tracks = defaultdict(list)
    for timestep in inpFCD:
        for v in timestep.vehicle:
            tracks[v.id].append((timestep.time, v.x, v.y))

    outSTRM.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    outSTRM.write('<gpx version="1.0">\n')
    # NOTE(review): x/y are emitted as lon/lat -- assumes the fcd output
    # was generated with geo coordinates; confirm upstream.
    for vehicle, trackpoints in tracks.iteritems():
        outSTRM.write(" <trk><name>%s</name><trkseg>\n" % vehicle)
        for timestamp, lon, lat in trackpoints:
            outSTRM.write('   <trkpt lon="%s" lat="%s"><time>%s</time></trkpt>\n' % (
                lon, lat, timestamp))
        outSTRM.write(" </trkseg></trk>\n")
    outSTRM.write('</gpx>\n')
| [
"[email protected]"
] | |
7e0f20a3411dc570ed92600197a47eda29d7e3fc | b5ffa0109ee980406550b7f9a4f5c7587f10a759 | /sklearn库.py | c597a056daae863e773ae3d33e4f1db9b08556b2 | [] | no_license | SuneastChen/np_pd_sklearn | 07fd99f383cfaf117e6dff7beb12b240957cbbe0 | 2ff777772c5a0db1e21635796351919c049dc680 | refs/heads/master | 2020-03-07T22:38:27.311708 | 2018-04-02T13:24:03 | 2018-04-02T13:24:03 | 127,759,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,832 | py | # _*_ coding:utf-8 _*_
# !/usr/bin/python
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
iris = datasets.load_iris()  # load the iris dataset: dict-like, keys 'data' and 'target'
iris_X = iris.data  # feature matrix (2-D array)
iris_y = iris.target  # label vector (1-D array)
print(iris_X[:3, :])  # peek at the first three rows
print(iris_y)  # inspect the label set

# Split the data into training and test sets (30% held out)
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
print(y_train)  # the split shuffles the training set automatically

# k-nearest-neighbours classifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)  # train
print(knn.predict(X_test))  # predict on the test set
print(y_test)  # ground-truth labels of the test set, for comparison
from sklearn.linear_model import LinearRegression
# 通用的学习模式
loaded_data = datasets.load_boston() # 加载房价的数据库
data_X = loaded_data.data
data_y = loaded_data.target
model = LinearRegression() # 调用线性回归模式
model.fit(data_X, data_y) # 训练
print(model.predict(data_X[:4, :])) # 测试
print(data_y[:4])
print(model.coef_) # 斜率,即输入特征的各比重
print(model.intercept_) # 截距
print(model.get_params()) # 返回model定义时的参数
# {'copy_X': True, 'fit_intercept': True, 'n_jobs': 1, 'normalize': False}
print(model.score(data_X, data_y)) # 将数据及结果传入,给线性模型打分,准确度
import matplotlib.pyplot as plt
# 生成数据集X,对应的线性结果集y
X, y = datasets.make_regression(n_samples=100, n_features=1, n_targets=1, noise=10)
print(X[:5, :])
plt.scatter(X, y)
plt.show()
from sklearn import preprocessing
a = np.array([[10, 2.7, 3.6],
[-100, 5, -2],
[120, 20, 40]])
print(a)
print(preprocessing.scale(a)) # 将各系列的值范围整体缩小
from sklearn.datasets.samples_generator import make_classification
from sklearn.svm import SVC
X, y = make_classification(n_samples=300, n_features=2, n_redundant=0, n_informative=2,
random_state=22, n_clusters_per_class=1, scale=100) # 生成数据
# redundant adj.多余的,冗余的 informative adj.提供有用信息的
X = preprocessing.scale(X) # 坐标轴整体浓缩
# plt.scatter(X[:, 0], X[:, 1], c=y)
# plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
model = SVC() # 加入正则防止过拟合的SVC算法
model.fit(X_train, y_train)
print(model.score(X_test, y_test)) # 浓缩之后得分较高94.4 ,故系列的大小范围直接影响准确度
# 分成好几组的训练集和测试集
from sklearn.model_selection import cross_val_score
iris = datasets.load_iris() # 加载指定数据库
iris_X = iris.data # 特征数据表
iris_y = iris.target # 结果标签表
# 将数据集分成训练集,测试集
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
knn = KNeighborsClassifier(n_neighbors=5) # 用邻近算法,加入参数取邻近的5个点
# 只测试一组
# knn.fit(X_train, y_train) # 开始训练
# print(knn.score(X_test, y_test)) # 只测试一组的结果得分
scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy') # 分成5组训练集,测试集,分别做测试
print(scores) # 得到一个一维数组
print(scores.mean())
# 选择最优的参数,即参数取邻近的几个点准确率最高的
k_range = range(1, 31) # 参数列表
k_scores = []
for k in k_range: # 也可以把不同的学习model加入测试
knn = KNeighborsClassifier(n_neighbors=k) # 加入循环的k参数
# scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy') # for classfification(分类问题)
loss = -cross_val_score(knn, X, y, cv=10, scoring='neg_mean_squared_error') # for regression(线性回归问题),加负号
k_scores.append(loss.mean()) # 每进行一组测试,产生一个一维数组loss
# print(k_scores)
plt.plot(k_range, k_scores)
plt.xlabel('n_neighbors=k')
plt.ylabel('accuracy')
plt.show()
# 得出参数n_neighbors=10时最优,大于时就会产生过度拟合(over fitting)
# 怎么样看过度拟合
'''
from sklearn.model_selection import learning_curve
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
train_sizes, train_loss, test_loss = learning_curve(
SVC(gamma=0.001), X, y, cv=5, scoring='neg_mean_squared_error', train_sizes=[i/10 for i in range(1, 11)]
) # 多组测试的方法,传入训练数量的百分比点
# print(train_sizes) # 得到每个时间段训练的数量,组成的一维数组
# print(train_loss) # 得到相应的二维数组,列数=分组数,行数=时间段的个数
# print(test_loss) # 得到相应的二维数组,列数=分组数,行数=时间段的个数
train_loss_mean = -np.mean(train_loss, axis=1) # 在表格右侧求平均,增加列,行不变,即axis=1
test_loss_mean = -np.mean(test_loss, axis=1)
plt.plot(train_sizes, train_loss_mean, 'o-', color='r', label='Training')
plt.plot(train_sizes, test_loss_mean, 'o-', color='g', label='Testing')
plt.xlabel('train_sizes')
plt.ylabel('loss')
plt.show() # 若将SVC模型的gamma参数改为0.01,便会产生过拟合
'''
# 如何测试模型中的最优参数
'''
from sklearn.model_selection import validation_curve
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
param_range = np.logspace(-6, -2.3, 5) # 新参数
train_loss, test_loss = validation_curve(
SVC(), X, y, param_name='gamma', param_range=param_range,
cv=10, scoring='neg_mean_squared_error') # 返回值无train_sizes,参数无train_sizes,新增了gamma参数
train_loss_mean = -np.mean(train_loss, axis=1) # 在表格右侧求平均,增加列,行不变,即axis=1
test_loss_mean = -np.mean(test_loss, axis=1)
plt.plot(param_range, train_loss_mean, 'o-', color='r', label='Training')
plt.plot(param_range, test_loss_mean, 'o-', color='g', label='Testing')
plt.xlabel('gamma')
plt.ylabel('loss')
plt.show() # 根据图像可直观地看出,最优参数gamma=0.0005左右
'''
# 将训练好的模型,导出导入
from sklearn import svm
iris = datasets.load_iris()
X, y = iris.data, iris.target
model = SVC()
model.fit(X,y)
#方法1:用pickle模块导出导入
import pickle
with open('model.pkl', 'wb')as f:
pickle.dump(model, f)
with open('model.pkl', 'rb')as f:
model2 = pickle.load(f)
print(model2.predict(X[0:3])) # 把前3行数据做测试
#方法2:用joblib模块,性能更高效
from sklearn.externals import joblib
joblib.dump(model, 'model_joblib.pkl') # 保存模型
model3 = joblib.load('model_joblib.pkl')
print(model3.predict(X[0:6]))
| [
"[email protected]"
] | |
727b5f688d0d70414334ccda20dfd1f147a25259 | b604d6e2b1f206e6df660da2be2add78ec22941a | /resources/ros_kinetic/src/ros/rosbuild/bin/rosgcov_summarize | 9c19df610036bbb08442ed9a949fe1a44b505a54 | [] | no_license | fqez/common | 7b521773d81e2e687f6ae482f595ca3d19515e39 | f423fec07f39da9cb38f91dc4f3f1cd51c1a3130 | refs/heads/master | 2020-05-21T23:59:17.035384 | 2017-03-14T11:46:57 | 2017-03-14T11:46:57 | 62,873,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,635 | #!/usr/bin/env python3
import sys
import fileinput
import re
import os.path
# Parse and summarize gcov line/branch coverage results for one ROS package.
USAGE = "USAGE: rosgcov_summarize <package_dir> <rosgcov_file>"
if len(sys.argv) != 3:
    print(USAGE)
    sys.exit(-1)
pkg = sys.argv[1]  # package directory; used to shorten displayed file names
fname = sys.argv[2]  # file listing "<dir> <source>" pairs that have coverage data
if not os.path.exists(fname):
    # No coverage data was produced at all: report 0% and exit successfully.
    print('[rosgcov] %s : %.2f%% (no coverage results)' % (os.path.split(pkg)[1],0.0))
    sys.exit(0)
# gcov annotated-source line formats:
#   "   <count>: ..." -> line executed <count> times (a hit)
#   "   #####: ..."   -> line never executed (a miss)
#   "branch N taken X" / "branch N never executed" -> branch hit / miss
re_hit = re.compile('^ *[0-9]*:.*')
re_miss = re.compile('^ *#####:.*')
re_branch_hit = re.compile('^branch *[0-9] *taken [0-9]*.*')
re_branch_miss = re.compile('^branch *[0-9] *never executed.*')
files = []
finput = fileinput.input(fname)
for l in finput:
    ls = l.strip().split(' ')
    # Each entry is "<directory> <path>"; keep the directory plus the basename.
    f = os.path.join(ls[0],os.path.split(ls[1])[1])
    files.append(f.strip())
# Package-wide accumulators.
total = 0
hits = 0
misses = 0
branch_total = 0
branch_hits = 0
branch_misses = 0
print('-------------------------------------------------------')
print('Coverage summary: ')
print('-------------------------------------------------------')
for f in files:
    # Strip the common package prefix for a shorter display name.
    prefix = os.path.commonprefix([pkg, f])
    display_name = f[len(prefix):]
    if display_name[0] == '/':
        display_name = display_name[1:]
    print(' ' + display_name + ': ')
    gcov_fname = f + '.gcov'
    if not os.path.exists(gcov_fname):
        print('WARNING: no coverage results for %s' % (display_name))
        continue
    # NOTE(review): repeated fileinput.input() calls appear safe only because
    # every loop below runs to exhaustion before the next call -- confirm if
    # this is ever changed to break out of a loop early.
    gcovf = fileinput.input(gcov_fname)
    # Per-file counters.
    local_total = 0
    local_hits = 0
    local_misses = 0
    local_branch_total = 0
    local_branch_hits = 0
    local_branch_misses = 0
    for s in gcovf:
        if re_hit.match(s):
            local_hits += 1
            local_total += 1
        elif re_miss.match(s):
            local_misses += 1
            local_total += 1
        if re_branch_hit.match(s):
            local_branch_hits += 1
            local_branch_total += 1
        elif re_branch_miss.match(s):
            local_branch_misses += 1
            local_branch_total += 1
    # max(..., 1) guards against division by zero for empty files.
    print(' line: %.2f%% (%d / %d)' % ((100.0 * local_hits / max(local_total,1)), local_hits, local_total))
    hits += local_hits
    misses += local_misses
    total += local_total
    print(' branch: %.2f%% (%d / %d)' % ((100.0 * local_branch_hits / max(local_branch_total,1)), local_branch_hits, local_branch_total))
    branch_hits += local_branch_hits
    branch_misses += local_branch_misses
    branch_total += local_branch_total
print('-------------------------------------------------------')
print('[rosgcov] %s : %.2f%% (%d / %d)' % (os.path.split(pkg)[1],(100.0 * hits / max(total,1)), hits, total))
print('[rosgcov] %s : branch %.2f%% (%d / %d)' % (os.path.split(pkg)[1],(100.0 * branch_hits / max(branch_total,1)), branch_hits, branch_total))
print('-------------------------------------------------------')
| [
"[email protected]"
] | ||
fa4e4448ac09a8ca4502f4e8591d83ef40112882 | fc2447b91cbee82e74e939092ec1903678f3217a | /PythonPractice/算法图解/Dijkstra's algorithm.py | 0cd528a8c91f11657af1906538a31b531f16e4a9 | [] | no_license | yglj/learngit | 0eac654e7c49f2ede064b720e6ee621a702193b4 | 74fb4b93d5726c735b64829cafc99878d8082121 | refs/heads/master | 2022-12-24T10:01:56.705046 | 2019-05-27T21:04:08 | 2019-05-27T21:04:08 | 146,157,116 | 0 | 1 | null | 2022-12-12T07:01:25 | 2018-08-26T06:28:20 | HTML | UTF-8 | Python | false | false | 1,771 | py | """
Dijkstra's algorithm.
The number attached to each edge is called its weight.
A graph whose edges carry weights is called a weighted graph.
Finds the shortest path through a weighted graph.
Only applicable to directed acyclic graphs.
"""
graph = {}  # weighted graph: node -> {neighbor: edge weight}
costs = {}  # cheapest known cost from the start node to each node
parents = {}  # predecessor of each node on the cheapest known path
# Neighbors of every vertex and the weights of the connecting edges.
graph['start'] = {}
graph['start']['a'] = 6
graph['start']['b'] = 2
# print(graph['start'].keys())
graph['a'] = {}
graph['a']['fin'] = 1
graph['b'] = {}
graph['b']['a'] = 3
graph['b']['fin'] = 5
graph['fin'] = {}
infinity = float('inf')  # positive infinity
costs['a'] = 6
costs['b'] = 2
costs['fin'] = infinity
parents['a'] = 'start'
parents['b'] = 'start'
parents['fin'] = None  # no path to 'fin' is known at the start
processed = []  # nodes whose cheapest cost has been finalized
# Algorithm outline (English translation of the note below):
# 1. While there are still unprocessed nodes:
# 2.   take the unprocessed node closest to the start,
# 3.   update the costs of its neighbors,
# 4.   if a neighbor's cost was updated, update its parent as well,
# 5.   then mark the node as processed.
"""
1.只要还有要处理的节点
2.获取离起点最近的节点
3.更新其邻居的开销
4.如果有邻居的开销被更新,同时更新其父节点
5.将该节点标记为处理过
"""
def find_lowest_cost_node(costs):
    """Return the unprocessed node with the smallest known cost, or None."""
    best_node = None
    best_cost = float('inf')
    for candidate, candidate_cost in costs.items():
        if candidate in processed:
            continue  # this node's cost is already final
        if candidate_cost < best_cost:
            best_cost = candidate_cost
            best_node = candidate
    return best_node
def main():
    """Relax edges Dijkstra-style until every reachable node is processed."""
    current = find_lowest_cost_node(costs)
    while current is not None:
        base_cost = costs[current]
        for neighbor, weight in graph[current].items():
            candidate_cost = base_cost + weight
            if candidate_cost < costs[neighbor]:
                # Found a cheaper route to this neighbor via `current`.
                costs[neighbor] = candidate_cost
                parents[neighbor] = current
        processed.append(current)
        current = find_lowest_cost_node(costs)
if __name__ == '__main__':
    main()
    # print(parents)
    # print(costs)
    # print(graph)
    # Prepend the start node so the printed path is complete.
    # NOTE: these three statements used to sit at module level outside the
    # guard, so they ran (and printed) on *import* too; they belong here.
    processed.insert(0, 'start')
    path = '->'.join(processed)
    print(path)
| [
"[email protected]"
] | |
4aa3c05bab82dea4ae678dfc7c1ea442168008e2 | 414a58c691ff7b434034086433644870f8ac5c99 | /tests/test_geom.py | b1de7a128110d8a3d34fee1bc3c1dbf3d7148c62 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | WZBSocialScienceCenter/pdftabextract | 08328197681ca03b764ea2df410851c06e0a92b7 | 7b86a9098b5d397f984b1cbc6716d55860e34ef8 | refs/heads/master | 2022-08-02T16:43:42.187628 | 2022-06-24T09:51:22 | 2022-06-24T09:51:22 | 62,884,666 | 2,239 | 401 | Apache-2.0 | 2022-06-24T09:51:23 | 2016-07-08T11:44:46 | Python | UTF-8 | Python | false | false | 7,946 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 13 09:50:51 2017
@author: mkonrad
"""
import math
import pytest
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from pdftabextract.geom import (pt, ptdist, vecangle, vecrotate, overlap, lineintersect,
rect, rectcenter, rectarea, rectintersect,
normalize_angle, normalize_angle_halfcircle,
project_polarcoord_lines)
# Bounds used to keep hypothesis-generated floats inside the float32 range.
FMIN = np.finfo(np.float32).min
FMAX = np.finfo(np.float32).max
def test_pt():
    """pt() builds a 2-element ndarray honoring the requested dtype."""
    x = 0
    y = 1
    pt0 = pt(x, y)
    assert type(pt0) is np.ndarray
    # NOTE: np.float/np.int were deprecated in NumPy 1.20 and removed in 1.24;
    # compare against the builtin scalar types instead (float -> float64).
    assert pt0.dtype == float
    assert pt0[0] == x
    assert pt0[1] == y
    pt1 = pt(x, y, int)
    assert pt1.dtype == int
    assert pt1[0] == x
    assert pt1[1] == y
def test_ptdist():
    """Euclidean distance between 2D points."""
    origin = pt(0, 0)
    unit_x = pt(1, 0)
    diag = pt(1, 1)
    assert ptdist(origin, origin) == 0
    assert ptdist(origin, unit_x) == 1
    assert ptdist(unit_x, origin) == ptdist(origin, unit_x)  # symmetric
    assert ptdist(origin, diag) == math.sqrt(2)
def test_vecangle():
    """vecangle() returns the smaller angle, and NaN for zero vectors."""
    unit_x = pt(1, 0)
    double_x = pt(2, 0)
    diag = pt(1, 1)
    unit_y = pt(0, 1)
    neg_y = pt(0, -1)
    assert np.isnan(vecangle(pt(0, 0), unit_x))  # zero vector has no direction
    assert vecangle(unit_x, double_x) == 0
    assert round(vecangle(unit_x, diag), 4) == round(math.radians(45), 4)
    assert vecangle(double_x, unit_y) == vecangle(unit_x, unit_y) == math.radians(90)
    assert vecangle(double_x, neg_y) == math.radians(90)  # always the smaller angle
@given(st.floats(min_value=FMIN, max_value=FMAX),
       st.floats(min_value=FMIN, max_value=FMAX),
       st.floats(min_value=FMIN, max_value=FMAX),
       st.floats(min_value=FMIN, max_value=FMAX))
def test_vecangle_2(x1, y1, x2, y2):
    """Property test: vecangle is NaN for a zero vector, else within [0, pi]."""
    v0 = pt(0, 0)
    v1 = pt(x1, y1)
    v2 = pt(x2, y2)
    try:
        alpha = vecangle(v1, v2)
    except ValueError:  # math domain error in some edge cases?
        return
    if np.allclose(v1, v0) or np.allclose(v2, v0):
        assert np.isnan(alpha)
    else:
        assert 0 <= alpha <= np.pi
def test_vecrotate():
    """Rotation by an angle, optionally about a point other than the origin."""
    quarter_turn = math.radians(90)
    assert np.array_equal(vecrotate(pt(0, 0), 0.123), pt(0, 0))
    assert np.allclose(vecrotate(pt(1, 0), quarter_turn), pt(0, 1))
    assert np.allclose(vecrotate(pt(1, 0), quarter_turn, about=pt(1, 1)), pt(2, 1))
def test_overlap():
    """1D interval overlap checks."""
    assert overlap(0, 1, 0, 1) is True
    assert overlap(0, 10, 5, 15) is True
    assert overlap(-10, 10, -20, -10) is True
    assert overlap(0, 0, 1, 1) is False
    assert overlap(-9, 10, -20, -10) is False
def test_lineintersect():
    """lineintersect() handles coincident, parallel and crossing segments.

    With check_in_segm=True (default) the intersection must lie inside both
    segments; with False the infinite lines are intersected instead.
    Coincident lines yield a point of NaNs; no intersection yields None.
    """
    # first with check_in_segm = True
    X = lineintersect(pt(0, 0), pt(0, 0), pt(0, 0), pt(0, 0))  # coincident I
    assert sum(np.isnan(X)) == len(X)
    X = lineintersect(pt(0, 0), pt(0, 1), pt(0, 0), pt(0, 1))  # coincident II
    assert sum(np.isnan(X)) == len(X)
    assert lineintersect(pt(0, 0), pt(0, 1), pt(1, 0), pt(1, 1)) is None   # parallel, non coincident
    assert lineintersect(pt(0, 0), pt(0, 1), pt(1, 1), pt(2, 2)) is None   # non-parellel, no intersection
    assert lineintersect(pt(0, 0), pt(2, 2), pt(0, 5), pt(5, 0)) is None   # non-parellel, no intersection II
    assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(0, 1), pt(2, 2)), pt(0, 1))  # intersection - touch
    assert np.array_equal(lineintersect(pt(0, 0), pt(2, 2), pt(0, 2), pt(2, 0)), pt(1, 1))  # intersection
    # now with check_in_segm = False
    X = lineintersect(pt(0, 0), pt(0, 0), pt(0, 0), pt(0, 0), False)  # coincident I
    assert sum(np.isnan(X)) == len(X)
    X = lineintersect(pt(0, 0), pt(0, 1), pt(0, 0), pt(0, 1), False)  # coincident II
    assert sum(np.isnan(X)) == len(X)
    X = lineintersect(pt(0, 0), pt(1, 1), pt(2, 2), pt(3, 3), False)  # coincident III
    assert sum(np.isnan(X)) == len(X)
    assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(1, 1), pt(2, 2), False), pt(0, 0))  # intersection (out of segments)
    assert np.array_equal(lineintersect(pt(0, 0), pt(0, 1), pt(0, 1), pt(2, 2), False), pt(0, 1))  # intersection - touch
    assert np.array_equal(lineintersect(pt(0, 0), pt(2, 2), pt(0, 2), pt(2, 0), False), pt(1, 1))  # intersection
def test_rect():
    """rect() validates matching dtypes and that the corners span a rectangle."""
    # NOTE: np.int was removed in NumPy 1.24; use the builtin int instead.
    with pytest.raises(ValueError):
        rect(pt(0, 0), pt(1, 1, dtype=int))  # dtypes do not match
    # Degenerate corner pairs must be rejected.
    for bad_a, bad_b in [(pt(0, 0), pt(0, 0)),
                         (pt(1, 1), pt(0, 0)),
                         (pt(0, 0), pt(1, 0))]:
        with pytest.raises(ValueError):
            rect(bad_a, bad_b)  # doesn't form rect
    a = pt(0, 0)
    b = pt(1, 1)
    r = rect(a, b)
    assert r.dtype == a.dtype == b.dtype
    assert np.array_equal(r[0], a)
    assert np.array_equal(r[1], b)
    a = pt(-3, -1)
    b = pt(8, 1.2)
    r = rect(a, b)
    assert r.dtype == a.dtype == b.dtype
    assert np.array_equal(r[0], a)
    assert np.array_equal(r[1], b)
def test_rectcenter():
    """Center of a rectangle is the midpoint of its two corners."""
    center = rectcenter(rect(pt(0, 0), pt(1, 1)))
    assert type(center) is np.ndarray
    assert np.array_equal(center, pt(0.5, 0.5))
    assert np.array_equal(rectcenter(rect(pt(-3, -1), pt(2, 5))), pt(-0.5, 2))
def test_rectarea():
    """Area is width times height."""
    assert rectarea(rect(pt(0, 0), pt(1, 1))) == 1
    assert rectarea(rect(pt(-3, -1), pt(2, 5))) == 30
def test_rectintersect():
    """rectintersect() returns the intersection area (optionally normalized).

    Covers: self-intersection, complete containment, partial overlap,
    touching rectangles (area 0) and disjoint rectangles (None).
    """
    a = rect(pt(0, 0), pt(1, 1))
    b = rect(pt(-3, -1), pt(2, 5))
    assert rectintersect(a, a) == rectarea(a)
    assert rectintersect(b, b) == rectarea(b)
    assert rectintersect(a, a, norm_intersect_area='a') == 1
    assert rectintersect(a, a, norm_intersect_area='b') == 1
    with pytest.raises(ValueError):
        rectintersect(a, a, norm_intersect_area='c')
    # complete intersect
    assert rectintersect(a, b) == rectarea(a)
    assert rectintersect(b, a) == rectarea(a)
    assert rectintersect(a, b, norm_intersect_area='a') == 1
    assert rectintersect(b, a, norm_intersect_area='b') == 1
    assert rectintersect(b, a, norm_intersect_area='a') < 1
    assert rectintersect(a, b, norm_intersect_area='b') < 1
    # partial intersect
    a = rect(pt(0, 0), pt(1, 1))
    b = rect(pt(0.5, 0.5), pt(1.5, 1.5))
    assert rectintersect(a, b) == 0.25
    assert rectintersect(a, b, norm_intersect_area='a') == 0.25
    assert rectintersect(a, b, norm_intersect_area='b') == 0.25
    b = rect(pt(0.75, 0.5), pt(1.5, 1.5))
    assert rectintersect(a, b) == 0.125
    # touch
    a = rect(pt(0, 0), pt(1, 1))
    b = rect(pt(1, 1), pt(1.5, 1.5))
    assert rectintersect(a, b) == 0
    # no intersection
    a = rect(pt(0, 0), pt(1, 1))
    b = rect(pt(1.1, 1.1), pt(1.5, 1.5))
    assert rectintersect(a, b) is None
def test_normalize_angle():
    """Multiples of pi normalize into [0, 2*pi) preserving parity."""
    for multiple in range(-10, 10):
        normalized = normalize_angle(multiple * np.pi)
        assert 0 <= normalized < 2 * np.pi
        assert normalized / np.pi == multiple % 2
def test_normalize_angle_halfcircle():
    """Multiples of pi/2 normalize into [0, pi) preserving parity."""
    for multiple in range(-10, 10):
        normalized = normalize_angle_halfcircle(0.5 * multiple * np.pi)
        assert 0 <= normalized < np.pi
        assert normalized / np.pi * 2 == multiple % 2
@given(
    st.lists(st.lists(st.floats(allow_nan=False, allow_infinity=False), min_size=2, max_size=2)),
    st.integers(),
    st.integers()
)
def test_project_polarcoord_lines(hough_lines, img_w, img_h):
    """Property test: non-positive image sizes raise; otherwise each input
    line projects to a pair of 2-element ndarray endpoints."""
    if img_w <= 0 or img_h <= 0:
        with pytest.raises(ValueError):
            project_polarcoord_lines(hough_lines, img_w, img_h)
        return
    else:
        res = project_polarcoord_lines(hough_lines, img_w, img_h)
    assert type(res) is list
    assert len(res) == len(hough_lines)
    for pts in res:
        assert len(pts) == 2
        assert type(pts[0]) == type(pts[1]) == np.ndarray
        assert len(pts[0]) == len(pts[1]) == 2
| [
"[email protected]"
] | |
4b3ea08a26e0a92132a0a700b7e8ff04bd9e13fb | 0420b28aa59330fb0d9548f636b1460668163887 | /accounts/migrations/0005_alter_userprofile_profile_picture.py | 591939011f4877d881bd9c3396ddd91668e6bf0a | [] | no_license | akhmadakhmedov/modamag | 30cc3ea335b7fe8fbc234149b11d2df11b627281 | 0459f27230027fab51cbaae2a594ffde52a64d04 | refs/heads/main | 2023-08-11T01:48:58.979894 | 2021-10-12T11:18:08 | 2021-10-12T11:18:08 | 391,133,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 3.2.5 on 2021-08-16 09:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make ``UserProfile.profile_picture`` optional."""
    dependencies = [
        ('accounts', '0004_userprofile'),
    ]
    operations = [
        # Allow the image field to be blank; uploads go under images/users/.
        migrations.AlterField(
            model_name='userprofile',
            name='profile_picture',
            field=models.ImageField(blank=True, upload_to='images/users/'),
        ),
    ]
| [
"[email protected]"
] | |
2206b4bfd3f1f3c2510c27d0f3cce62a12de5313 | 3a6b2c80f948a7918d54b71866d94476d17f19ef | /docs/_build/html/_downloads/06b1aa4ac217e5bc4f81274b1df76753/demo3.py | f37533217c6ef48d7c40a3069c064bf780f9459f | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | beckermr/GalSim | c306c3e5c00a78c15a9d9f20d2bfa489239fb946 | 96c80bde3184f84e450c2dc441ee8fe03b5197f2 | refs/heads/releases/2.3 | 2022-12-04T20:45:35.964787 | 2022-07-01T06:23:22 | 2022-07-01T06:24:49 | 288,194,984 | 0 | 0 | NOASSERTION | 2020-08-17T13:59:25 | 2020-08-17T13:59:25 | null | UTF-8 | Python | false | false | 15,150 | py | # Copyright (c) 2012-2021 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
"""
Demo #3
The third script in our tutorial about using GalSim in python scripts: examples/demo*.py.
(This file is designed to be viewed in a window 100 characters wide.)
This script gets reasonably close to including all the principal features of an image
from a ground-based telescope. The galaxy is represented as the sum of a bulge and a disk,
where each component is represented by a sheared Sersic profile (with different Sersic
indices). The PSF has both atmospheric and optical components. The atmospheric
component is a Kolmogorov turbulent spectrum. The optical component includes defocus,
coma and astigmatism, as well as obscuration from a secondary mirror. The noise model
includes both a gain and read noise. And finally, we include the effect of a slight
telescope distortion.
New features introduced in this demo:
- obj = galsim.Sersic(n, flux, half_light_radius)
- obj = galsim.Sersic(n, flux, scale_radius)
- obj = galsim.Kolmogorov(fwhm)
- obj = galsim.OpticalPSF(lam_over_diam, defocus, coma1, coma2, astig1, astig2, obscuration)
- obj = obj.shear(e, beta) -- including how to specify an angle in GalSim
- shear = galsim.Shear(q, beta)
- obj = obj.shear(shear)
- obj3 = x1 * obj1 + x2 * obj2
- obj = obj.withFlux(flux)
- image = galsim.ImageF(image_size, image_size)
- image = obj.drawImage(image, wcs)
- image = obj.drawImage(method='sb')
- world_profile = wcs.toWorld(profile)
- shear3 = shear1 + shear2
- noise = galsim.CCDNoise(rng, sky_level, gain, read_noise)
"""
import sys
import os
import math
import logging
import galsim
def main(argv):
    """
    Getting reasonably close to including all the principle features of an image from a
    ground-based telescope:
      - Use a bulge plus disk model for the galaxy
      - Both galaxy components are Sersic profiles (n=3.5 and n=1.5 respectively)
      - Let the PSF have both atmospheric and optical components.
      - The atmospheric component is a Kolmogorov spectrum.
      - The optical component has some defocus, coma, and astigmatism.
      - Add both Poisson noise to the image and Gaussian read noise.
      - Let the pixels be slightly distorted relative to the sky.
    Note: ``argv`` is accepted for a uniform script entry point but is unused.
    """
    # We do some fancier logging for demo3, just to demonstrate that we can:
    # - we log to both stdout and to a log file
    # - the log file has a lot more (mostly redundant) information
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    if not os.path.isdir('output'):
        os.mkdir('output')
    logFile = logging.FileHandler(os.path.join("output", "script3.log"))
    logFile.setFormatter(logging.Formatter("%(name)s[%(levelname)s] %(asctime)s: %(message)s"))
    logging.getLogger("demo3").addHandler(logFile)
    logger = logging.getLogger("demo3")
    gal_flux = 1.e6        # ADU ("Analog-to-digital units", the units of the numbers on a CCD)
    bulge_n = 3.5          #
    bulge_re = 2.3         # arcsec
    disk_n = 1.5           #
    disk_r0 = 0.85         # arcsec (corresponds to half_light_radius of ~3.7 arcsec)
    bulge_frac = 0.3       #
    gal_q = 0.73           # (axis ratio 0 < q < 1)
    gal_beta = 23          # degrees (position angle on the sky)
    atmos_fwhm=2.1         # arcsec
    atmos_e = 0.13         #
    atmos_beta = 0.81      # radians
    opt_defocus=0.53       # wavelengths
    opt_a1=-0.29           # wavelengths
    opt_a2=0.12            # wavelengths
    opt_c1=0.64            # wavelengths
    opt_c2=-0.33           # wavelengths
    opt_obscuration=0.3    # linear scale size of secondary mirror obscuration
    lam = 800              # nm    NB: don't use lambda - that's a reserved word.
    tel_diam = 4.          # meters
    pixel_scale = 0.23     # arcsec / pixel
    image_size = 64        # n x n pixels
    wcs_g1 = -0.02         #
    wcs_g2 = 0.01          #
    sky_level = 2.5e4      # ADU / arcsec^2
    gain = 1.7             # e- / ADU
                           # Note: here we assume 1 photon -> 1 e-, ignoring QE.  If you wanted,
                           # you could include the QE factor as part of the gain.
    read_noise = 0.3       # e- / pixel
    random_seed = 1314662
    logger.info('Starting demo script 3 using:')
    logger.info('    - Galaxy is bulge plus disk, flux = %.1e',gal_flux)
    logger.info('       - Bulge is Sersic (n = %.1f, re = %.2f), frac = %.1f',
                bulge_n,bulge_re,bulge_frac)
    logger.info('       - Disk is Sersic (n = %.1f, r0 = %.2f), frac = %.1f',
                disk_n,disk_r0,1-bulge_frac)
    logger.info('       - Shape is q,beta (%.2f,%.2f deg)', gal_q, gal_beta)
    logger.info('    - Atmospheric PSF is Kolmogorov with fwhm = %.2f',atmos_fwhm)
    logger.info('       - Shape is e,beta (%.2f,%.2f rad)', atmos_e, atmos_beta)
    logger.info('    - Optical PSF has defocus = %.2f, astigmatism = (%.2f,%.2f),',
                opt_defocus, opt_a1, opt_a2)
    logger.info('      coma = (%.2f,%.2f), lambda = %.0f nm, D = %.1f m',
                opt_c1, opt_c2, lam, tel_diam)
    logger.info('      obscuration linear size = %.1f',opt_obscuration)
    logger.info('    - pixel scale = %.2f,',pixel_scale)
    logger.info('    - WCS distortion = (%.2f,%.2f),',wcs_g1,wcs_g2)
    logger.info('    - Poisson noise (sky level = %.1e, gain = %.1f).',sky_level, gain)
    logger.info('    - Gaussian read noise (sigma = %.2f).',read_noise)
    # Initialize the (pseudo-)random number generator that we will be using below.
    rng = galsim.BaseDeviate(random_seed+1)
    # Define the galaxy profile.
    # Normally Sersic profiles are specified by half-light radius, the radius that
    # encloses half of the total flux.  However, for some purposes, it can be
    # preferable to instead specify the scale radius, where the surface brightness
    # drops to 1/e of the central peak value.
    bulge = galsim.Sersic(bulge_n, half_light_radius=bulge_re)
    disk = galsim.Sersic(disk_n, scale_radius=disk_r0)
    # Objects may be multiplied by a scalar (which means scaling the flux) and also
    # added to each other.
    gal = bulge_frac * bulge + (1-bulge_frac) * disk
    # Could also have written the following, which does the same thing:
    #   gal = galsim.Add([ bulge.withFlux(bulge_frac) , disk.withFlux(1-bulge_frac) ])
    # Both syntaxes work with more than two summands as well.
    # Set the overall flux of the combined object.
    gal = gal.withFlux(gal_flux)
    # Since the total flux of the components was 1, we could also have written:
    #   gal *= gal_flux
    # The withFlux method will always set the flux to the given value, while `gal *= flux`
    # will multiply whatever the current flux is by the given factor.
    # Set the shape of the galaxy according to axis ratio and position angle
    # Note: All angles in GalSim must have explicit units.  Options are:
    #       galsim.radians
    #       galsim.degrees
    #       galsim.arcmin
    #       galsim.arcsec
    #       galsim.hours
    gal_shape = galsim.Shear(q=gal_q, beta=gal_beta*galsim.degrees)
    gal = gal.shear(gal_shape)
    logger.debug('Made galaxy profile')
    # Define the atmospheric part of the PSF.
    # Note: the flux here is the default flux=1.
    atmos = galsim.Kolmogorov(fwhm=atmos_fwhm)
    # For the PSF shape here, we use ellipticity rather than axis ratio.
    # And the position angle can be either degrees or radians.  Here we chose radians.
    atmos = atmos.shear(e=atmos_e, beta=atmos_beta*galsim.radians)
    logger.debug('Made atmospheric PSF profile')
    # Define the optical part of the PSF:
    # The first argument of OpticalPSF below is lambda/diam (wavelength of light / telescope
    # diameter), which needs to be in the same units used to specify the image scale.  We are using
    # arcsec for that, so we have to self-consistently use arcsec here, using the following
    # calculation:
    lam_over_diam = lam * 1.e-9 / tel_diam # radians
    lam_over_diam *= 206265  # arcsec
    # Note that we could also have made GalSim do the conversion for us if we did not know the right
    # factor:
    # lam_over_diam = lam * 1.e-9 / tel_diam * galsim.radians
    # lam_over_diam = lam_over_diam / galsim.arcsec
    logger.debug('Calculated lambda over diam = %f arcsec', lam_over_diam)
    # The rest of the values should be given in units of the wavelength of the incident light.
    optics = galsim.OpticalPSF(lam_over_diam,
                               defocus = opt_defocus,
                               coma1 = opt_c1, coma2 = opt_c2,
                               astig1 = opt_a1, astig2 = opt_a2,
                               obscuration = opt_obscuration)
    logger.debug('Made optical PSF profile')
    # So far, our coordinate transformation between image and sky coordinates has been just a
    # scaling of the units between pixels and arcsec, which we have defined as the "pixel scale".
    # This is fine for many purposes, so we have made it easy to treat the coordinate systems
    # this way via the `scale` parameter to commands like drawImage.  However, in general, the
    # transformation between the two coordinate systems can be more complicated than that,
    # including distortions, rotations, variation in pixel size, and so forth.  GalSim can
    # model a number of different "World Coordinate System" (WCS) transformations.  See the
    # docstring for BaseWCS for more information.
    # In this case, we use a WCS that includes a distortion (specified as g1,g2 in this case),
    # which we call a ShearWCS.
    wcs = galsim.ShearWCS(scale=pixel_scale, shear=galsim.Shear(g1=wcs_g1, g2=wcs_g2))
    logger.debug('Made the WCS')
    # Next we will convolve the components in world coordinates.
    psf = galsim.Convolve([atmos, optics])
    final = galsim.Convolve([psf, gal])
    logger.debug('Convolved components into final profile')
    # This time we specify a particular size for the image rather than let GalSim
    # choose the size automatically.  GalSim has several kinds of images that it can use:
    #   ImageF uses 32-bit floats    (like a C float, aka numpy.float32)
    #   ImageD uses 64-bit floats    (like a C double, aka numpy.float64)
    #   ImageS uses 16-bit integers  (usually like a C short, aka numpy.int16)
    #   ImageI uses 32-bit integers  (usually like a C int, aka numpy.int32)
    # If you let the GalSim drawImage command create the image for you, it will create an ImageF.
    # However, you can make a different type if you prefer.  In this case, we still use
    # ImageF, since 32-bit floats are fine.  We just want to set the size explicitly.
    image = galsim.ImageF(image_size, image_size)
    # Draw the image with the given WCS.  Note that we use wcs rather than scale when the
    # WCS is more complicated than just a pixel scale.
    final.drawImage(image=image, wcs=wcs)
    # Also draw the effective PSF by itself and the optical PSF component alone.
    image_epsf = galsim.ImageF(image_size, image_size)
    psf.drawImage(image_epsf, wcs=wcs)
    # We also draw the optical part of the PSF at its own Nyquist-sampled pixel size
    # in order to better see the features of the (highly structured) profile.
    # In this case, we draw a "surface brightness image" using method='sb'.  Rather than
    # integrate the flux over the area of each pixel, this method just samples the surface
    # brightness value at the locations of the pixel centers.  We will encounter a few other
    # drawing methods as we go through this sequence of demos.  cf. demos 7, 8, 10, and 11.
    image_opticalpsf = optics.drawImage(method='sb')
    logger.debug('Made image of the profile')
    # This time, we use CCDNoise to model the real noise in a CCD image.  It takes a sky level,
    # gain, and read noise, so it can be a bit more realistic than the simpler GaussianNoise
    # or PoissonNoise that we used in demos 1 and 2.
    # The sky level for CCDNoise is the level per pixel that contributed to the noise.
    sky_level_pixel = sky_level * pixel_scale**2
    # The gain is in units of e-/ADU.  Technically, one should also account for quantum efficiency
    # (QE) of the detector. An ideal CCD has one electron per incident photon, but real CCDs have
    # QE less than 1, so not every photon triggers an electron. We are essentially folding
    # the quantum efficiency (and filter transmission and anything else like that) into the gain.
    # The read_noise value is given as e-/pixel.  This is modeled as a pure Gaussian noise
    # added to the image after applying the pure Poisson noise.
    noise = galsim.CCDNoise(rng, gain=gain, read_noise=read_noise, sky_level=sky_level_pixel)
    image.addNoise(noise)
    logger.debug('Added Gaussian and Poisson noise')
    # Write the images to files.
    file_name = os.path.join('output', 'demo3.fits')
    file_name_epsf = os.path.join('output','demo3_epsf.fits')
    file_name_opticalpsf = os.path.join('output','demo3_opticalpsf.fits')
    image.write(file_name)
    image_epsf.write(file_name_epsf)
    image_opticalpsf.write(file_name_opticalpsf)
    logger.info('Wrote image to %r', file_name)
    logger.info('Wrote effective PSF image to %r', file_name_epsf)
    logger.info('Wrote optics-only PSF image (Nyquist sampled) to %r', file_name_opticalpsf)
    # Check that the HSM package, which is bundled with GalSim, finds a good estimate
    # of the shear.
    results = galsim.hsm.EstimateShear(image, image_epsf)
    logger.info('HSM reports that the image has observed shape and size:')
    logger.info('    e1 = %.3f, e2 = %.3f, sigma = %.3f (pixels)', results.observed_shape.e1,
                results.observed_shape.e2, results.moments_sigma)
    logger.info('When carrying out Regaussianization PSF correction, HSM reports')
    logger.info('    e1, e2 = %.3f, %.3f',
                results.corrected_e1, results.corrected_e2)
    logger.info('Expected values in the limit that noise and non-Gaussianity are negligible:')
    # Convention for shear addition is to apply the second term initially followed by the first.
    # So this needs to be the WCS shear + the galaxy shape in that order.
    total_shape = galsim.Shear(g1=wcs_g1, g2=wcs_g2) + gal_shape
    logger.info('    e1, e2 = %.3f, %.3f', total_shape.e1, total_shape.e2)
# Script entry point: forward the command-line arguments to main().
if __name__ == "__main__":
    main(sys.argv)
| [
"[email protected]"
] | |
d7260d07b1153616bbc341e18e6edb40759ede60 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02631/s086085697.py | fe20e86568153f3f73efd0b1df7a952b71276135 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | n=int(input())
a=list(map(int,input().split()))
c=a[0]
for i in range(1,n):
c=c^a[i]
for i in range(n):
print(c^a[i]) | [
"[email protected]"
] | |
1c41b31c2095067d219200c34429fe81d65f2c1a | 96c1f13473cf224113185902edd4c9c01091e106 | /tests/torchlie_tests/functional/test_se3.py | c3af91c3b45ba611167ac0d61031d6cf9bfbf0f1 | [
"MIT"
] | permissive | facebookresearch/theseus | f1e488eb5a25f5ba74a6995911bee958b5da4cf3 | 240e1206329d42fedd40399684d6e17e455c6645 | refs/heads/main | 2023-08-11T07:33:12.328520 | 2023-08-02T12:58:01 | 2023-08-02T12:58:01 | 429,570,359 | 1,410 | 105 | MIT | 2023-08-01T14:30:01 | 2021-11-18T20:28:27 | Python | UTF-8 | Python | false | false | 3,067 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence, Union
import pytest
import torch
import torchlie.functional.se3_impl as se3_impl
from torchlie.functional import SE3
from .common import (
BATCH_SIZES_TO_TEST,
TEST_EPS,
check_binary_op_broadcasting,
check_left_project_broadcasting,
check_lie_group_function,
check_jacrev_binary,
check_jacrev_unary,
run_test_op,
)
@pytest.mark.parametrize(
    "op_name",
    [
        "exp",
        "log",
        "adjoint",
        "inverse",
        "hat",
        "compose",
        "transform",
        "untransform",
        "lift",
        "project",
        "left_act",
        "left_project",
        "normalize",
    ],
)
@pytest.mark.parametrize("batch_size", BATCH_SIZES_TO_TEST)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_op(op_name, batch_size, dtype):
    """Each SE(3) op passes the shared forward/backward checks."""
    # Generator.manual_seed returns the generator, so this can be chained.
    generator = torch.Generator().manual_seed(0)
    run_test_op(op_name, batch_size, dtype, generator, 6, (3, 4), se3_impl)
@pytest.mark.parametrize("batch_size", BATCH_SIZES_TO_TEST)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
def test_vee(batch_size: Union[int, Sequence[int]], dtype: torch.dtype):
    """vee() has a correct analytic backward and is the inverse of hat()."""
    shape = (batch_size,) if isinstance(batch_size, int) else batch_size
    generator = torch.Generator().manual_seed(0)
    tangent = torch.rand(*shape, 6, dtype=dtype, generator=generator)
    hat_matrix = se3_impl._hat_autograd_fn(tangent)
    # Analytic backward of the vee operator.
    check_lie_group_function(se3_impl, "vee", TEST_EPS, (hat_matrix,))
    # vee must recover the tangent vector that hat was built from.
    recovered = se3_impl._vee_autograd_fn(hat_matrix)
    torch.testing.assert_close(recovered, tangent, atol=TEST_EPS, rtol=TEST_EPS)
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("name", ["exp", "inv"])
def test_jacrev_unary(batch_size, name):
    """Unary-op jacobians computed via jacrev match the analytic ones."""
    # jacrev-based checks need vmap support; mirror the guard used by
    # test_jacrev_binary below so old torch versions skip instead of failing.
    if not hasattr(torch, "vmap"):
        return
    check_jacrev_unary(SE3, 6, batch_size, name)
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("name", ["compose", "transform", "untransform"])
def test_jacrev_binary(batch_size, name):
    """Binary-op jacobians computed via jacrev match the analytic ones."""
    # jacrev-based checks need vmap support; skip on old torch versions.
    if hasattr(torch, "vmap"):
        check_jacrev_binary(SE3, batch_size, name)
@pytest.mark.parametrize("name", ["compose", "transform", "untransform"])
def test_binary_op_broadcasting(name):
    """Binary ops broadcast correctly across mixed batch shapes."""
    generator = torch.Generator()
    generator.manual_seed(0)
    shapes = [(1,), (2,), (1, 2), (2, 1), (2, 2), (2, 2, 2), tuple()]
    for lhs_shape in shapes:
        for rhs_shape in shapes:
            check_binary_op_broadcasting(
                SE3, name, (3, 4), lhs_shape, rhs_shape, torch.float64, generator
            )
def test_left_project_broadcasting():
    """left_project broadcasts across a range of batch shapes."""
    generator = torch.Generator().manual_seed(0)
    shapes = [tuple(), (1, 2), (1, 1, 2), (2, 1), (2, 2), (2, 2, 2)]
    check_left_project_broadcasting(SE3, shapes, [0, 1, 2], (3, 4), generator)
| [
"[email protected]"
] | |
908780fe69c1ca758295ca0f25b531c70571438f | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/lab/lab09/tests/substitute.py | c599160851d680f435682a58dd191c6b5377599d | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 1,174 | py | test = {
"name": "substitute",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
scm> (substitute "(c a b) "b 'l)
(c a l)
scm> (substitute "(f e a r s) "f 'b)
(b e a r s)
scm> (substitute "(g (o) o (o)) "o 'r)
(g (r) r (r))
""",
"hidden": False,
"locked": False
},
{
"code": r"""
scm> (substitute '((lead guitar) (bass guitar) (rhythm guitar) drums)
.... "guitar "axe)
((lead axe) (bass axe) (rhythm axe) drums)
scm> (substitute "(romeo romeo wherefore art thou romeo) "romeo 'paris)
(paris paris wherefore art thou paris)
scm> (substitute "((to be) or not (to (be))) "be 'eat)
((to eat) or not (to (eat)))
scm> (substitute "(a b (c) d e) "foo 'bar)
(a b (c) d e)
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
scm> (load 'lab09)
scm> (load 'lab09_extra)
""",
"teardown": "",
"type": "scheme"
}
]
}
| [
"[email protected]"
] | |
400c7d1dfbd9b32067d5a361e8a800aaea5f8be9 | 771c1e2011a85a287c766b1a3d299ced2e6f799f | /src/electionguard/ballot_compact.py | 96e4d2be29a39ca2eb31f736305027dd3da57e10 | [
"MIT"
] | permissive | microsoft/electionguard-python | f50f64a473a8d77984a2faf4aa8db40cebb5c201 | b3ddc2a732f6c5f078a3afbe05b00d632a2ff5e0 | refs/heads/main | 2023-08-03T12:44:35.322716 | 2022-10-28T12:47:18 | 2022-10-28T12:47:18 | 246,392,956 | 143 | 117 | MIT | 2023-08-02T00:24:32 | 2020-03-10T19:46:06 | Python | UTF-8 | Python | false | false | 5,601 | py | from dataclasses import dataclass
from typing import Dict, List
from .ballot import (
CiphertextBallot,
SubmittedBallot,
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
make_ciphertext_submitted_ballot,
)
from .ballot_box import BallotBoxState
from .election import CiphertextElectionContext
from .election_object_base import sequence_order_sort
from .encrypt import encrypt_ballot_contests
from .group import ElementModQ
from .manifest import (
ContestDescriptionWithPlaceholders,
InternalManifest,
)
from .utils import get_optional
YES_VOTE = 1
NO_VOTE = 0
@dataclass
class CompactPlaintextBallot:
"""A compact plaintext representation of ballot minimized for data size"""
object_id: str
style_id: str
selections: List[bool]
write_ins: Dict[int, str]
@dataclass
class CompactSubmittedBallot:
"""A compact submitted ballot minimized for data size"""
compact_plaintext_ballot: CompactPlaintextBallot
timestamp: int
ballot_nonce: ElementModQ
code_seed: ElementModQ
code: ElementModQ
ballot_box_state: BallotBoxState
def compress_plaintext_ballot(ballot: PlaintextBallot) -> CompactPlaintextBallot:
"""Compress a plaintext ballot into a compact plaintext ballot"""
selections = _get_compact_selections(ballot)
extended_data = _get_compact_write_ins(ballot)
return CompactPlaintextBallot(
ballot.object_id, ballot.style_id, selections, extended_data
)
def compress_submitted_ballot(
ballot: SubmittedBallot,
plaintext_ballot: PlaintextBallot,
ballot_nonce: ElementModQ,
) -> CompactSubmittedBallot:
"""Compress a submitted ballot into a compact submitted ballot"""
return CompactSubmittedBallot(
compress_plaintext_ballot(plaintext_ballot),
ballot.timestamp,
ballot_nonce,
ballot.code_seed,
ballot.code,
ballot.state,
)
def expand_compact_submitted_ballot(
compact_ballot: CompactSubmittedBallot,
internal_manifest: InternalManifest,
context: CiphertextElectionContext,
) -> SubmittedBallot:
"""
Expand a compact submitted ballot using context and
the election manifest into a submitted ballot
"""
# Expand ballot and encrypt & hash contests
plaintext_ballot = expand_compact_plaintext_ballot(
compact_ballot.compact_plaintext_ballot, internal_manifest
)
nonce_seed = CiphertextBallot.nonce_seed(
internal_manifest.manifest_hash,
compact_ballot.compact_plaintext_ballot.object_id,
compact_ballot.ballot_nonce,
)
contests = get_optional(
encrypt_ballot_contests(
plaintext_ballot, internal_manifest, context, nonce_seed
)
)
return make_ciphertext_submitted_ballot(
plaintext_ballot.object_id,
plaintext_ballot.style_id,
internal_manifest.manifest_hash,
compact_ballot.code_seed,
contests,
compact_ballot.code,
compact_ballot.timestamp,
compact_ballot.ballot_box_state,
)
def expand_compact_plaintext_ballot(
compact_ballot: CompactPlaintextBallot, internal_manifest: InternalManifest
) -> PlaintextBallot:
"""Expand a compact plaintext ballot into the original plaintext ballot"""
return PlaintextBallot(
compact_ballot.object_id,
compact_ballot.style_id,
_get_plaintext_contests(compact_ballot, internal_manifest),
)
def _get_compact_selections(ballot: PlaintextBallot) -> List[bool]:
selections = []
for contest in ballot.contests:
for selection in contest.ballot_selections:
selections.append(selection.vote == YES_VOTE)
return selections
def _get_compact_write_ins(ballot: PlaintextBallot) -> Dict[int, str]:
write_ins = {}
index = 0
for contest in ballot.contests:
for selection in contest.ballot_selections:
index += 1
if selection.write_in:
write_ins[index] = selection.write_in
return write_ins
def _get_plaintext_contests(
compact_ballot: CompactPlaintextBallot, internal_manifest: InternalManifest
) -> List[PlaintextBallotContest]:
"""Get ballot contests from compact plaintext ballot"""
index = 0
ballot_style_contests = _get_ballot_style_contests(
compact_ballot.style_id, internal_manifest
)
contests: List[PlaintextBallotContest] = []
for manifest_contest in sequence_order_sort(internal_manifest.contests):
contest_in_style = (
ballot_style_contests.get(manifest_contest.object_id) is not None
)
# Iterate through selections. If contest not in style, mark placeholder
selections: List[PlaintextBallotSelection] = []
for selection in sequence_order_sort(manifest_contest.ballot_selections):
selections.append(
PlaintextBallotSelection(
selection.object_id,
YES_VOTE if compact_ballot.selections[index] else NO_VOTE,
not contest_in_style,
compact_ballot.write_ins.get(index),
)
)
index += 1
contests.append(PlaintextBallotContest(manifest_contest.object_id, selections))
return contests
def _get_ballot_style_contests(
ballot_style_id: str, internal_manifest: InternalManifest
) -> Dict[str, ContestDescriptionWithPlaceholders]:
ballot_style_contests = internal_manifest.get_contests_for(ballot_style_id)
return {contest.object_id: contest for contest in ballot_style_contests}
| [
"[email protected]"
] | |
3dc2bb12966bffd471380690c04c8efd0a9a13b7 | caedff6019e47035eadaaad5a588022e05d92104 | /Christmas2016/question/migrations/0001_initial.py | 763d1090fa8d71921230ce550cd9738236392d82 | [] | no_license | JMorris1575/christmas16 | ff767add9321bfe82ee70477f75a957504dc5288 | 1b06bf8febb94a699226b0b9d951cb14bbe59d50 | refs/heads/master | 2021-01-13T09:33:57.721350 | 2016-12-28T13:12:44 | 2016-12-28T13:12:44 | 72,059,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-09 02:39
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import model_mixins
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField()),
],
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='question.Question')),
],
bases=(models.Model, model_mixins.AuthorMixin),
),
]
| [
"[email protected]"
] | |
628d46dc69e58fab2b00e0b3f44ef0d2fcd88ea1 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /interview_bits/level_1/01_mathematics/05_number_encoding/01_rearrange-array.py | f60f9290190be8d9160ecf9353f276b41e9c32b3 | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | # https://www.interviewbit.com/problems/rearrange-array/
def rearrange(arr):
n = len(arr)
for i in xrange(n):
arr[i] += n * (arr[arr[i]] % n)
for i in xrange(n):
arr[i] /= n
return arr
| [
"[email protected]"
] | |
077e06806c57829b1f5cc54d139833314ac0bffe | 308953409e1a3b828ac49b7301c1e751cbf762cf | /suite_EETc 12/tst_Open_Change_Values_Import_No/test.py | fec88bb5a37e70b505750a61bac908c5b0993dd9 | [] | no_license | asthagaur1/danfoss-automation | 4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e | 213a99d3375889cd0e0c801421a50e9fe6085879 | refs/heads/main | 2023-03-31T23:26:56.956107 | 2021-04-01T08:52:37 | 2021-04-01T08:52:37 | 353,627,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | def main():
excel = r"C:\gitworkspace\KoolProg-TestAutomation\Master_Functions\Test_Automation\SourceCode\suite_EETc 12\shared\testdata\Open_Change_Values_Import_No.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
# source(findFile("scripts", "object_id.py"))
keyAction(excel)
| [
"[email protected]"
] | |
9e78bb7a62c7ff5743be037816b12a9c2316c086 | 82fdb2f3baeb4f08799d93c4be8d8c829f092415 | /tests/test_policies.py | 52b3f8e8e75389b6127427521a6f3c7145b58814 | [
"Apache-2.0"
] | permissive | velamurip/rasa_core | 915f815772e2b596f837f0e1af511e829cc28e3e | f3dbb70d0bb748628ab238eded17a8f5e09279e2 | refs/heads/master | 2021-05-16T04:22:04.310610 | 2017-10-05T09:53:22 | 2017-10-05T09:53:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,838 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pytest
from rasa_core.channels import UserMessage
from rasa_core.domain import TemplateDomain
from rasa_core.featurizers import BinaryFeaturizer
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.policies.scoring_policy import ScoringPolicy
from rasa_core.trackers import DialogueStateTracker
from rasa_core.training_utils import extract_training_data_from_file, \
extract_stories_from_file
def train_data(max_history, domain):
return extract_training_data_from_file(
"data/dsl_stories/stories_defaultdomain.md",
domain=domain, max_history=max_history, remove_duplicates=True,
featurizer=BinaryFeaturizer())
# We are going to use class style testing here since unfortunately pytest
# doesn't support using fixtures as arguments to its own parameterize yet
# (hence, we can't train a policy, declare it as a fixture and use the different
# fixtures of the different policies for the functional tests). Therefore, we
# are going to reverse this and train the policy within a class and collect the
# tests in a base class.
class PolicyTestCollection(object):
"""Tests every policy needs to fulfill.
Each policy can declare further tests on its own."""
max_history = 3 # this is the amount of history we test on
def create_policy(self):
raise NotImplementedError
@pytest.fixture(scope="module")
def trained_policy(self):
default_domain = TemplateDomain.load("examples/default_domain.yml")
policy = self.create_policy()
X, y = train_data(self.max_history, default_domain)
policy.max_history = self.max_history
policy.featurizer = BinaryFeaturizer()
policy.train(X, y, default_domain)
return policy
def test_persist_and_load(self, trained_policy, default_domain, tmpdir):
trained_policy.persist(tmpdir.strpath)
loaded = trained_policy.__class__.load(tmpdir.strpath,
trained_policy.featurizer,
trained_policy.max_history)
stories = extract_stories_from_file(
"data/dsl_stories/stories_defaultdomain.md", default_domain)
for story in stories:
tracker = DialogueStateTracker("default", default_domain.slots)
dialogue = story.as_dialogue("default", default_domain)
tracker.update_from_dialogue(dialogue)
predicted_probabilities = loaded.predict_action_probabilities(
tracker, default_domain)
actual_probabilities = trained_policy.predict_action_probabilities(
tracker, default_domain)
assert predicted_probabilities == actual_probabilities
def test_prediction_on_empty_tracker(self, trained_policy, default_domain):
tracker = DialogueStateTracker(UserMessage.DEFAULT_SENDER,
default_domain.slots,
default_domain.topics,
default_domain.default_topic)
probabilities = trained_policy.predict_action_probabilities(
tracker, default_domain)
assert len(probabilities) == default_domain.num_actions
assert max(probabilities) <= 1.0
assert min(probabilities) >= 0.0
def test_persist_and_load_empty_policy(self, tmpdir):
empty_policy = self.create_policy()
empty_policy.persist(tmpdir.strpath)
loaded = empty_policy.__class__.load(tmpdir.strpath, BinaryFeaturizer(),
empty_policy.max_history)
assert loaded is not None
class TestKerasPolicy(PolicyTestCollection):
@pytest.fixture(scope="module")
def create_policy(self):
p = KerasPolicy()
return p
class TestScoringPolicy(PolicyTestCollection):
@pytest.fixture(scope="module")
def create_policy(self):
p = ScoringPolicy()
return p
class TestMemoizationPolicy(PolicyTestCollection):
@pytest.fixture(scope="module")
def create_policy(self):
p = MemoizationPolicy()
return p
def test_memorise(self, trained_policy, default_domain):
X, y = train_data(self.max_history, default_domain)
trained_policy.train(X, y, default_domain)
for ii in range(X.shape[0]):
assert trained_policy.recall(X[ii, :, :], default_domain) == y[ii]
random_feature = np.random.randn(default_domain.num_features)
assert trained_policy.recall(random_feature, default_domain) is None
| [
"[email protected]"
] | |
9db26fb7dad810ee471a57378bf7b950550c9a78 | e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6 | /venv/Lib/site-packages/pybrain3/rl/environments/ode/instances/ccrl.py | f868c93f0e79f79e82bdefa752c7d5da13efb64f | [
"MIT"
] | permissive | ishatserka/MachineLearningAndDataAnalysisCoursera | cdf0f23a58617e17d6b938e3a9df17daae8585e4 | e82e772df2f4aec162cb34ac6127df10d14a625a | refs/heads/master | 2021-09-11T01:39:26.228392 | 2018-04-05T14:33:39 | 2018-04-05T14:33:39 | 117,153,454 | 0 | 0 | MIT | 2018-03-27T05:20:37 | 2018-01-11T21:05:33 | Python | UTF-8 | Python | false | false | 6,146 | py | __author__ = 'Frank Sehnke, [email protected]'
from pybrain3.rl.environments.ode import ODEEnvironment, sensors, actuators
import imp
import xode #@UnresolvedImport
import ode #@UnresolvedImport
import sys
from scipy import array, asarray
class CCRLEnvironment(ODEEnvironment):
def __init__(self, xodeFile="ccrlGlas.xode", renderer=True, realtime=False, ip="127.0.0.1", port="21590", buf='16384'):
ODEEnvironment.__init__(self, renderer, realtime, ip, port, buf)
# load model file
self.pert = asarray([1.0, 0.0, 0.0])
self.loadXODE(imp.find_module('pybrain')[1] + "/rl/environments/ode/models/" + xodeFile)
# standard sensors and actuators
self.addSensor(sensors.JointSensor())
self.addSensor(sensors.JointVelocitySensor())
self.addActuator(actuators.JointActuator())
#set act- and obsLength, the min/max angles and the relative max touques of the joints
self.actLen = self.indim
self.obsLen = len(self.getSensors())
#ArmLeft, ArmRight, Hip, PevelLeft, PevelRight, TibiaLeft, TibiaRight, KneeLeft, KneeRight, FootLeft, FootRight
self.tourqueList = array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.8, 0.8, 0.8, 0.5, 0.5, 0.1],)
#self.tourqueList=array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],)
self.cHighList = array([0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.9],)
self.cLowList = array([-1.0, -1.0, -1.0, -1.5, -1.0, -1.0, -1.0, -0.7, -1.0, 0.0, -1.0, -1.5, -1.0, -1.0, -1.0, 0.0],)
self.stepsPerAction = 1
def step(self):
# Detect collisions and create contact joints
self.tableSum = 0
self.glasSum = 0
ODEEnvironment.step(self)
def _near_callback(self, args, geom1, geom2):
"""Callback function for the collide() method.
This function checks if the given geoms do collide and
creates contact joints if they do."""
# only check parse list, if objects have name
if geom1.name != None and geom2.name != None:
# Preliminary checking, only collide with certain objects
for p in self.passpairs:
g1 = False
g2 = False
for x in p:
g1 = g1 or (geom1.name.find(x) != -1)
g2 = g2 or (geom2.name.find(x) != -1)
if g1 and g2:
return()
# Check if the objects do collide
contacts = ode.collide(geom1, geom2)
tmpStr = geom2.name[:-2]
handStr = geom1.name[:-1]
if geom1.name == 'plate' and tmpStr != 'objectP':
self.tableSum += len(contacts)
if tmpStr == 'objectP' and handStr == 'pressLeft':
if len(contacts) > 0: self.glasSum += 1
tmpStr = geom1.name[:-2]
handStr = geom2.name[:-1]
if geom2.name == 'plate' and tmpStr != 'objectP':
self.tableSum += len(contacts)
if tmpStr == 'objectP' and handStr == 'pressLeft':
if len(contacts) > 0: self.glasSum += 1
# Create contact joints
world, contactgroup = args
for c in contacts:
p = c.getContactGeomParams()
# parameters from Niko Wolf
c.setBounce(0.2)
c.setBounceVel(0.05) #Set the minimum incoming velocity necessary for bounce
c.setSoftERP(0.6) #Set the contact normal "softness" parameter
c.setSoftCFM(0.00005) #Set the contact normal "softness" parameter
c.setSlip1(0.02) #Set the coefficient of force-dependent-slip (FDS) for friction direction 1
c.setSlip2(0.02) #Set the coefficient of force-dependent-slip (FDS) for friction direction 2
c.setMu(self.FricMu) #Set the Coulomb friction coefficient
j = ode.ContactJoint(world, contactgroup, c)
j.name = None
j.attach(geom1.getBody(), geom2.getBody())
def loadXODE(self, filename, reload=False):
""" loads an XODE file (xml format) and parses it. """
f = file(filename)
self._currentXODEfile = filename
p = xode.parser.Parser()
self.root = p.parseFile(f)
f.close()
try:
# filter all xode "world" objects from root, take only the first one
world = [x for x in self.root.getChildren() if isinstance(x, xode.parser.World)][0]
except IndexError:
# malicious format, no world tag found
print("no <world> tag found in " + filename + ". quitting.")
sys.exit()
self.world = world.getODEObject()
self._setWorldParameters()
try:
# filter all xode "space" objects from world, take only the first one
space = [x for x in world.getChildren() if isinstance(x, xode.parser.Space)][0]
except IndexError:
# malicious format, no space tag found
print("no <space> tag found in " + filename + ". quitting.")
sys.exit()
self.space = space.getODEObject()
# load bodies and geoms for painting
self.body_geom = []
self._parseBodies(self.root)
for (body, _) in self.body_geom:
if hasattr(body, 'name'):
tmpStr = body.name[:-2]
if tmpStr == "objectP":
body.setPosition(body.getPosition() + self.pert)
if self.verbosity > 0:
print("-------[body/mass list]-----")
for (body, _) in self.body_geom:
try:
print(body.name, body.getMass())
except AttributeError:
print("<Nobody>")
# now parse the additional parameters at the end of the xode file
self.loadConfig(filename, reload)
def reset(self):
ODEEnvironment.reset(self)
self.pert = asarray([1.0, 0.0, 0.0])
if __name__ == '__main__' :
w = CCRLEnvironment()
while True:
w.step()
if w.stepCounter == 1000: w.reset()
| [
"[email protected]"
] | |
25ec5c3a23fdcbb3fe68b62fb26e6466e9c81f4a | 94c7440e7f1d2fdbe4a1e26b9c75a94e49c14eb4 | /leetcode/371.py | 9db89c099bace2c01ca91a5174d2047ab78a610c | [
"Apache-2.0"
] | permissive | windniw/just-for-fun | 7ddea4f75cf3466a400b46efe36e57f6f7847c48 | 44e1ff60f8cfaf47e4d88988ee67808f0ecfe828 | refs/heads/master | 2022-08-18T09:29:57.944846 | 2022-07-25T16:04:47 | 2022-07-25T16:04:47 | 204,949,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | """
link: https://leetcode.com/problems/sum-of-two-integers
problem: 不用 + / - 号,求 integer 类型的 a + b
solution: 由于python没有左移整形溢出这道题难度直线上升。
a + b
== 不进位 (a + b) + 进位 (a + b) << 1
== a ^ b + (a & b) << 1
持续迭代到 (a & b) << 1 为0,即不进位时, 结果为当时的 a ^ b
"""
class Solution:
def getSum(self, a: int, b: int) -> int:
max_uint = 0xffffffff
max_int = 0x7fffffff - 1
while a:
add = (a & b) << 1
b = a ^ b
a = add
add &= max_uint
b &= max_uint
return b if b <= max_int else ~(b ^ max_uint)
| [
"[email protected]"
] | |
55af1807d0651e3ce77d75f84c95801118d2aacc | d8c2cf1249c58b5f843031450db2f0f8733f85e8 | /todo/urls.py | 1a013e167082d76a6e64cdea74f08df97dddf656 | [
"MIT"
] | permissive | guluzadef/Instaexample | 80849e8a98f6e75b256e8e1d409793a490ea1a53 | 9c74a3e3ac8b523bbccd0e2e6c769c40bf6b3406 | refs/heads/master | 2020-07-01T20:12:30.928779 | 2019-09-11T16:59:45 | 2019-09-11T16:59:45 | 201,284,824 | 2 | 0 | MIT | 2019-09-11T16:54:56 | 2019-08-08T15:21:17 | Python | UTF-8 | Python | false | false | 972 | py | """todo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("todo_app.urls")),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
50f8e6b88bff07c4af0e52bfa551b372a8c93bc8 | a35b2842ff707c9adf70e178ba8cb7a128e6f0fa | /brl_gym/scripts/crosswalk_vel/generate_initial_conditions.py | a9c7a1bb7d4cb52e8276db48814c90777f6661e9 | [
"BSD-3-Clause"
] | permissive | gilwoolee/brl_gym | 7717366a09c7ff96a8fbc02688febe6d559e333a | 9c0784e9928f12d2ee0528c79a533202d3afb640 | refs/heads/master | 2022-11-26T15:08:56.730225 | 2020-08-02T05:08:28 | 2020-08-02T05:08:28 | 198,884,614 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | from brl_gym.envs.crosswalk_vel import CrossWalkVelEnv
import numpy as np
env = CrossWalkVelEnv()
env.reset()
goals = env.goals
peds = env.pedestrians
pose = env.pose
ped_speeds = env.pedestrian_speeds
print("Car 37, 38, 35")
print("Peds :\n", np.around(peds,1))
print("Ped speeds:\n", np.around(ped_speeds,2))
print("Goals :\n", np.around(goals,1))
print("Pose :\n", np.around(pose,1))
print("Angle :\n", np.around(np.rad2deg(pose[2]),2))
for ps, goal in zip(ped_speeds, goals):
if goal[0] == 3.5:
goal[0] = 3.2
if goal[0] == 0.0:
goal[0] = 0.3
print("roslaunch mushr_control runner_script.launch car_name:=$CAR_NAME wait_for_signal:=false desired_speed:={:.2f} desired_x:={:.2f} desired_y:={:.2f} local:=false".format(ps, goal[0], goal[1]))
| [
"[email protected]"
] | |
d0a92881174f016830e5b146ca97ba5a68b65627 | 2aa4c7c94866e7a958e4787dd4487aa7c1eb8d61 | /applications/MappingApplication/tests/test_mapper_mpi_tests.py | 17fa528cbfa65adc8d0f6521adde262131b8852b | [
"BSD-3-Clause"
] | permissive | PFEM/Kratos | b48df91e6ef5a00edf125e6f5aa398505c9c2b96 | 796c8572e9fe3875562d77370fc60beeacca0eeb | refs/heads/master | 2021-10-16T04:33:47.591467 | 2019-02-04T14:22:06 | 2019-02-04T14:22:06 | 106,919,267 | 1 | 0 | null | 2017-10-14T10:34:43 | 2017-10-14T10:34:43 | null | UTF-8 | Python | false | false | 2,141 | py | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import KratosMultiphysics
from KratosMultiphysics.mpi import mpi
import KratosMultiphysics.MetisApplication
import KratosMultiphysics.TrilinosApplication
import KratosMultiphysics.MappingApplication as KratosMapping
import KratosMultiphysics.KratosUnittest as KratosUnittest
from base_mapper_tests import BaseMapperTests
from trilinos_import_model_part_utility import TrilinosImportModelPartUtility
class MapperMPITests(BaseMapperTests, KratosUnittest.TestCase):
@classmethod
def _ImportModelPart(cls):
cls.model_part_origin.AddNodalSolutionStepVariable(
KratosMultiphysics.PARTITION_INDEX)
cls.model_part_destination.AddNodalSolutionStepVariable(
KratosMultiphysics.PARTITION_INDEX)
origin_settings = KratosMultiphysics.Parameters("""{
"model_import_settings": {
"input_type": "mdpa",
"input_filename": \"""" + cls.input_file_origin + """\",
"partition_in_memory" : true
},
"echo_level" : 0
}""")
destination_settings = origin_settings.Clone()
destination_settings["model_import_settings"]["input_filename"].SetString(
cls.input_file_destination)
model_part_import_util_origin = TrilinosImportModelPartUtility(
cls.model_part_origin, origin_settings)
model_part_import_util_destination = TrilinosImportModelPartUtility(
cls.model_part_destination, destination_settings)
model_part_import_util_origin.ImportModelPart()
model_part_import_util_destination.ImportModelPart()
model_part_import_util_origin.CreateCommunicators()
model_part_import_util_destination.CreateCommunicators()
def _CreateMapper(self, mapper_settings):
return KratosMapping.MapperFactory.CreateMPIMapper(
self.model_part_origin,
self.model_part_destination,
mapper_settings)
if __name__ == '__main__':
KratosUnittest.main()
| [
"[email protected]"
] | |
224d192a356f25f72640dd130596fa1cc7f853c8 | fb1fd30098fd4dd7f11e614fbcd19bda5e0414bd | /randNum.py | 32dc0504c2cbabfba7c0c7b3ba6838a1d01a160a | [] | no_license | kali-lg/python | 6ceb452ae7fd611bb6b6b99a4be4404f3fd6b2de | 0363dba3e224ee2044dbe3216289c0245df9c5c0 | refs/heads/master | 2021-01-10T09:37:58.103674 | 2016-03-07T13:09:57 | 2016-03-07T13:09:57 | 53,310,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | #!/usr/bin/python
import random
num = random.randint(0, 100)
while True:
try:
guess = int(raw_input("Please Enter number 1~100:\n"))
except ValueError, e:
print "Please Enter correct number, your number is wrong type."
continue
if guess > num:
print "Guess Bigger:", guess
elif guess < num:
print "Gusee Smaller:", guess
else:
print "Guess OK, Game Over:"
break
print "\n"
| [
"[email protected]"
] | |
805e4b0e4a22e909185e96d5788bd12061f9e16a | 4df948c31bde1b49c110820ecf8a38f949a78f62 | /vta/tests/python/integration/test_benchmark_gemm.py | da867c9b827007e5e3c94b18238fb448793bd154 | [
"Apache-2.0"
] | permissive | jroesch/tvm | 40b4b8707177e3354c264ce31092721930ced376 | c2b36154778503a509a70a3b5309b201969eccab | refs/heads/master | 2021-12-19T03:38:13.732405 | 2018-10-22T16:31:59 | 2018-10-22T16:31:59 | 135,759,537 | 4 | 7 | Apache-2.0 | 2021-06-17T07:22:42 | 2018-06-01T20:15:33 | C++ | UTF-8 | Python | false | false | 11,731 | py | import tvm
import numpy as np
from tvm.contrib import util
import vta.testing
def test_gemm():
def run_gemm_packed(env, remote, batch_size, channel, block):
data_shape = (batch_size // env.BATCH,
channel // env.BLOCK_IN,
env.BATCH,
env.BLOCK_IN)
weight_shape = (channel // env.BLOCK_OUT,
channel // env.BLOCK_IN,
env.BLOCK_OUT,
env.BLOCK_IN)
res_shape = (batch_size // env.BATCH,
channel // env.BLOCK_OUT,
env.BATCH,
env.BLOCK_OUT)
# To compute number of ops, use a x2 factor for FMA
num_ops = 2 * channel * channel * batch_size
ko = tvm.reduce_axis((0, channel // env.BLOCK_IN), name='ko')
ki = tvm.reduce_axis((0, env.BLOCK_IN), name='ki')
data = tvm.placeholder(data_shape,
name="data",
dtype=env.inp_dtype)
weight = tvm.placeholder(weight_shape,
name="weight",
dtype=env.wgt_dtype)
data_buf = tvm.compute(data_shape,
lambda *i: data(*i),
"data_buf")
weight_buf = tvm.compute(weight_shape,
lambda *i: weight(*i),
"weight_buf")
res_gem = tvm.compute(res_shape,
lambda bo, co, bi, ci: tvm.sum(
data_buf[bo, ko, bi, ki].astype(env.acc_dtype) *
weight_buf[co, ko, ci, ki].astype(env.acc_dtype),
axis=[ko, ki]),
name="res_gem")
res_shf = tvm.compute(res_shape,
lambda *i: res_gem(*i)>>8,
name="res_shf")
res_max = tvm.compute(res_shape,
lambda *i: tvm.max(res_shf(*i), 0),
"res_max") #relu
res_min = tvm.compute(res_shape,
lambda *i: tvm.min(res_max(*i), (1<<(env.INP_WIDTH-1))-1),
"res_min") #relu
res = tvm.compute(res_shape,
lambda *i: res_min(*i).astype(env.inp_dtype),
name="res")
def verify(s, check_correctness=True):
mod = vta.build(s, [data, weight, res],
"ext_dev", env.target_host, name="gemm")
temp = util.tempdir()
mod.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
# verify
ctx = remote.ext_dev(0)
# Data in original format
data_orig = np.random.randint(
-128, 128, size=(batch_size, channel)).astype(data.dtype)
weight_orig = np.random.randint(
-128, 128, size=(channel, channel)).astype(weight.dtype)
data_packed = data_orig.reshape(
batch_size // env.BATCH, env.BATCH,
channel // env.BLOCK_IN, env.BLOCK_IN).transpose((0, 2, 1, 3))
weight_packed = weight_orig.reshape(
channel // env.BLOCK_OUT, env.BLOCK_OUT,
channel // env.BLOCK_IN, env.BLOCK_IN).transpose((0, 2, 1, 3))
res_np = np.zeros(res_shape).astype(res.dtype)
data_arr = tvm.nd.array(data_packed, ctx)
weight_arr = tvm.nd.array(weight_packed, ctx)
res_arr = tvm.nd.array(res_np, ctx)
res_ref = np.zeros(res_shape).astype(env.acc_dtype)
for b in range(batch_size // env.BATCH):
for i in range(channel // env.BLOCK_OUT):
for j in range(channel // env.BLOCK_IN):
res_ref[b,i,:] += np.dot(data_packed[b,j,:].astype(env.acc_dtype),
weight_packed[i,j].T.astype(env.acc_dtype))
res_ref = np.right_shift(res_ref, 8)
res_ref = np.clip(res_ref, 0, (1<<(env.INP_WIDTH-1))-1).astype(res.dtype)
time_f = f.time_evaluator("gemm", ctx, number=20)
cost = time_f(data_arr, weight_arr, res_arr)
res_unpack = res_arr.asnumpy().reshape(batch_size // env.BATCH,
channel // env.BLOCK_OUT,
env.BATCH,
env.BLOCK_OUT)
if check_correctness:
tvm.testing.assert_allclose(res_unpack, res_ref)
return cost
def run_schedule(load_inp,
load_wgt,
gemm,
alu,
store_out,
print_ir,
check_correctness):
s = tvm.create_schedule(res.op)
s[data_buf].set_scope(env.inp_scope)
s[weight_buf].set_scope(env.wgt_scope)
s[res_gem].set_scope(env.acc_scope)
s[res_shf].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
if block:
bblock = block // env.BATCH
iblock = block // env.BLOCK_IN
oblock = block // env.BLOCK_OUT
xbo, xco, xbi, xci = s[res].op.axis
xb1, xco1, xb2, xco2 = s[res].tile(xbo, xco, bblock, oblock)
store_pt = xb2
s[res_gem].compute_at(s[res], xco1)
s[res_shf].compute_at(s[res], xco1)
s[res_min].compute_at(s[res], xco1)
s[res_max].compute_at(s[res], xco1)
xbo, xco, xbi, xci = s[res_gem].op.axis
# Compute one line at a time
ko1, ko2 = s[res_gem].split(ko, iblock)
s[res_gem].reorder(ko1, ko2, xbo, xco, xbi, xci, ki)
s[data_buf].compute_at(s[res_gem], ko1)
s[weight_buf].compute_at(s[res_gem], ko1)
# Use VTA instructions
s[data_buf].pragma(s[data_buf].op.axis[0], load_inp)
s[weight_buf].pragma(s[weight_buf].op.axis[0], load_wgt)
s[res_gem].tensorize(xbi, gemm)
s[res_shf].pragma(s[res_shf].op.axis[0], alu)
s[res_min].pragma(s[res_min].op.axis[0], alu)
s[res_max].pragma(s[res_max].op.axis[0], alu)
s[res].pragma(store_pt, store_out)
else:
xbo, xco, xbi, xci = s[res_gem].op.axis
s[res_gem].reorder(ko, xbo, xco, xbi, xci, ki)
# Use VTA instructions
s[data_buf].pragma(s[data_buf].op.axis[0], load_inp)
s[weight_buf].pragma(s[weight_buf].op.axis[0], load_wgt)
s[res_gem].tensorize(xbi, gemm)
s[res_shf].pragma(s[res_shf].op.axis[0], alu)
s[res_min].pragma(s[res_min].op.axis[0], alu)
s[res_max].pragma(s[res_max].op.axis[0], alu)
s[res].pragma(s[res].op.axis[0], store_out)
if print_ir:
print(tvm.lower(s, [data, weight, res], simple_mode=True))
return verify(s, check_correctness)
def gemm_normal(print_ir):
mock = env.mock
print("----- GEMM GOPS End-to-End Test-------")
def run_test(header, print_ir, check_correctness):
cost = run_schedule(
env.dma_copy, env.dma_copy, env.gemm, env.alu, env.dma_copy,
print_ir, check_correctness)
gops = (num_ops / cost.mean) / float(10 ** 9)
print(header)
print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))
with vta.build_config():
run_test("NORMAL", print_ir, True)
def gemm_unittest(print_ir):
mock = env.mock
print("----- GEMM Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
mock.dma_copy, mock.dma_copy, env.gemm, mock.alu, mock.dma_copy,
print_ir, False)
gops = (num_ops / cost.mean) / float(10 ** 9)
print(header)
print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))
with vta.build_config():
run_test("NORMAL", print_ir)
def alu_unittest(print_ir):
mock = env.mock
print("----- ALU Unit Test-------")
def run_test(header, print_ir):
cost = run_schedule(
mock.dma_copy, mock.dma_copy, mock.gemm, env.alu, mock.dma_copy,
print_ir, False)
gops = (num_ops / cost.mean) / float(10 ** 9)
print(header)
print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))
with vta.build_config():
run_test("NORMAL", print_ir)
print("")
def load_inp_unittest(print_ir):
    """Time the input-load DMA in isolation and report achieved bandwidth."""
    mock = env.mock
    print("----- LoadInp Unit Test-------")

    def run_test(header, print_ir):
        # Only the first (input-load) DMA stage is real; everything else mocked.
        cost = run_schedule(
            env.dma_copy, mock.dma_copy, mock.gemm, mock.alu, mock.dma_copy, print_ir, False)
        gops = (num_ops / cost.mean) / float(10 ** 9)
        # Bits of input data moved per second, reported in Gbit/s.
        bandwith = (batch_size * channel * env.INP_WIDTH / cost.mean) / float(10 ** 9)
        print(header)
        print("\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits" % (
            cost.mean, gops, bandwith))

    with vta.build_config():
        run_test("NORMAL", print_ir)
    print("")
def load_wgt_unittest(print_ir):
    """Time the weight-load DMA in isolation and report achieved bandwidth."""
    mock = env.mock
    print("----- LoadWgt Unit Test-------")

    def run_test(header, print_ir):
        # Only the second (weight-load) DMA stage is real; everything else mocked.
        cost = run_schedule(
            mock.dma_copy, env.dma_copy, mock.gemm, mock.alu, mock.dma_copy, print_ir, False)
        gops = (num_ops / cost.mean) / float(10 ** 9)
        # Bits of weight data moved per second, reported in Gbit/s.
        bandwith = (channel * channel * env.WGT_WIDTH / cost.mean) / float(10 ** 9)
        print(header)
        print("\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits" % (
            cost.mean, gops, bandwith))

    with vta.build_config():
        run_test("NORMAL", print_ir)
    print("")
def store_out_unittest(print_ir):
    """Time the output-store DMA in isolation and report achieved bandwidth."""
    mock = env.mock
    print("----- StoreOut Unit Test-------")

    def run_test(header, print_ir):
        # Only the last (output-store) DMA stage is real; everything else mocked.
        cost = run_schedule(
            mock.dma_copy, mock.dma_copy, mock.gemm, mock.alu, env.dma_copy,
            print_ir, False)
        gops = (num_ops / cost.mean) / float(10 ** 9)
        # Bits of output data moved per second, reported in Gbit/s.
        bandwith = (batch_size * channel * env.OUT_WIDTH / cost.mean) / float(10 ** 9)
        print(header)
        print("\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits" % (
            cost.mean, gops, bandwith))

    with vta.build_config():
        run_test("NORMAL", print_ir)
    print("")
gemm_normal(False)
gemm_unittest(False)
alu_unittest(False)
def _run(env, remote):
    # Entry point handed to vta.testing.run below: exercises a 128x128x128
    # packed GEMM on the given VTA environment / remote device.
    print("========GEMM 128=========")
    run_gemm_packed(env, remote, 128, 128, 128)
vta.testing.run(_run)
if __name__ == "__main__":
test_gemm()
| [
"[email protected]"
] | |
bf799d87050ee17a2efe9205421a451ddbc5bbb3 | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /components/google-cloud/google_cloud_pipeline_components/container/v1/bigquery/ml_reconstruction_loss/launcher.py | b0671efb65d1838f7599a10b484a9e7483666bb0 | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 3,115 | py | # Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCP launcher for Bigquery jobs based on the AI Platform SDK."""
import argparse
import logging
import sys
from google_cloud_pipeline_components.container.v1.bigquery.ml_reconstruction_loss import remote_runner
from google_cloud_pipeline_components.container.v1.gcp_launcher.utils import parser_util
def _parse_args(args):
  """Parse command line arguments.

  Args:
    args: The raw command line argument list (typically sys.argv[1:]).

  Returns:
    A dict mapping argument names to parsed values. Arguments left at
    argparse.SUPPRESS defaults are absent from the dict when not supplied.
  """
  # Start from the shared default arguments (project, region, type, ...).
  parser, parsed_args = parser_util.parse_default_args(args)
  # Parse the conditionally required arguments
  parser.add_argument(
      '--executor_input',
      dest='executor_input',
      type=str,
      # executor_input is only needed for components that emit output artifacts.
      required=True,
      default=argparse.SUPPRESS,
  )
  parser.add_argument(
      '--job_configuration_query_override',
      dest='job_configuration_query_override',
      type=str,
      required=True,
      default=argparse.SUPPRESS,
  )
  parser.add_argument(
      '--model_name',
      dest='model_name',
      type=str,
      required=True,
      default=argparse.SUPPRESS,
  )
  parser.add_argument(
      '--table_name',
      dest='table_name',
      type=str,
      # table_name is only needed for BigQuery tvf model job component.
      required=False,
      default=argparse.SUPPRESS,
  )
  parser.add_argument(
      '--query_statement',
      dest='query_statement',
      type=str,
      # query_statement is only needed for BigQuery predict model job component.
      required=False,
      default=argparse.SUPPRESS,
  )
  # Re-parse with the extra arguments; unknown flags are tolerated.
  parsed_args, _ = parser.parse_known_args(args)
  return vars(parsed_args)
def main(argv):
  """Main entry.

  Expected input args are as follows:
    Project - Required. The project of which the resource will be launched.
    Region - Required. The region of which the resource will be launched.
    Type - Required. GCP launcher is a single container. This Enum will
      specify which resource to be launched.
    Request payload - Required. The full serialized json of the resource spec.
      Note this can contain the Pipeline Placeholders.
    gcp_resources - placeholder output for returning job_id.

  Args:
    argv: A list of system arguments.

  Raises:
    ValueError: If the parsed job type is not the reconstruction-loss job.
  """
  arguments = _parse_args(argv)
  requested_type = arguments['type']
  # Only the reconstruction-loss job type may be launched through this entry.
  if requested_type == 'BigqueryMLReconstructionLossJob':
    logging.info('Job started for type: ' + requested_type)
    remote_runner.bigquery_ml_reconstruction_loss_job(**arguments)
  else:
    raise ValueError('Incorrect job type: ' + requested_type)
if __name__ == '__main__':
main(sys.argv[1:])
| [
"[email protected]"
] | |
3ec2e2dd3b709a107fda00833615406e4642a963 | 1bb42bac177fb4e979faa441363c27cb636a43aa | /dual_encoder/model_utils.py | 691253213276f6be9ac1bd05a51079a61df3c007 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | google-research/federated | a6040e80fa0fbf533e0d665c66a9bc549d208b3d | 329e60fa56b87f691303638ceb9dfa1fc5083953 | refs/heads/master | 2023-08-28T13:10:10.885505 | 2023-08-22T23:06:08 | 2023-08-22T23:06:40 | 295,559,343 | 595 | 187 | Apache-2.0 | 2022-05-12T08:42:53 | 2020-09-14T23:09:07 | Python | UTF-8 | Python | false | false | 5,775 | py | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dual encoder model."""
from typing import Callable, Optional
import tensorflow as tf
NormalizationFnType = Optional[Callable[[tf.Tensor], tf.Tensor]]
l2_normalize_fn = lambda x: tf.math.l2_normalize(x, axis=-1)
@tf.function
def get_predicted_embeddings(y_pred, y_true, normalization_fn=l2_normalize_fn):
  """Split model output into (optionally normalized) context/label embeddings.

  Args:
    y_pred: Dual encoder model output, the concatenation of the per-example
      context embeddings and the full-vocabulary label embeddings, shaped
      [batch_size + label_embedding_vocab_size, final_embedding_dim].
    y_true: True labels with shape [batch_size, 1]; only its leading
      dimension is used, to locate the split point.
    normalization_fn: Optional normalization applied to both embedding
      halves (e.g. L2-normalization, turning later dot products into cosine
      similarity). Pass None to skip normalization.

  Returns:
    A (context_embedding, label_embedding) pair.
  """
  num_contexts = tf.shape(y_true)[0]
  context = y_pred[:num_contexts]
  labels = y_pred[num_contexts:]
  if normalization_fn is None:
    return context, labels
  # Normalizing both sides makes downstream dot products cosine similarities.
  return normalization_fn(context), normalization_fn(labels)
@tf.function
def get_embeddings_and_similarities(y_pred,
                                    y_true,
                                    expect_embeddings=True,
                                    normalization_fn=l2_normalize_fn):
  """Retrieving the context and label embeddings and the similarities between them.

  Args:
    y_pred: Dual encoder model output. When expect_embeddings is true, `y_pred`
      is concatenate(context_embedding, full vocab label embeddings) with shape
      [batch_size + label_embedding_vocab_size, final_embedding_dim]. When
      `expect_embeddings` is False, `y_pred` is the similarity matrix with shape
      [batch_size, label_embedding_vocab_size] between context and full vocab
      label embeddings.
    y_true: The true labels with shape [batch_size, 1].
    expect_embeddings: If `expect_embeddings` is True, `y_pred` is the context
      and label embeddings. Otherwise, the y_pred is the batch or global
      similarities.
    normalization_fn: The normalization function to be applied to both context
      and label embeddings.

  Returns:
    The optionally normalized context and label embeddings as well as the
    similarities between them. The context and label embeddings are `None` if
    `expect_embeddings` is False.
  """
  if expect_embeddings:
    context_embedding, label_embedding = (
        get_predicted_embeddings(y_pred, y_true, normalization_fn))
    # similarities[i][j] is the dot product of the ith context embedding and
    # the jth label embedding in a batch.
    similarities = tf.matmul(
        context_embedding, label_embedding, transpose_b=True)
  else:
    # Similarities were precomputed by the model; embeddings are unavailable.
    context_embedding = label_embedding = None
    similarities = y_pred
  return context_embedding, label_embedding, similarities
class Similarities(tf.keras.layers.Layer):
  """Keras layer for computing similarities over context/label embeddings.

  Takes in context embeddings within a batch and label embeddings and computes
  a similarities matrix where similarities[i][j] is the dot product similarity
  between context embedding i and label embedding j.

  If label embeddings are those within the same batch, this layer computes
  the batch similarity.

  If label embeddings are those for the full vocabulary, this layer computes
  the global similarity.

  Optionally apply normalization to the embeddings, computing cosine similarity
  instead of dot product.
  """

  def __init__(self,
               normalization_fn: NormalizationFnType = l2_normalize_fn,
               **kwargs):
    super().__init__(**kwargs)
    self.normalization_fn = normalization_fn

  def call(self, inputs):
    # Expect exactly (context_embedding, label_embedding).
    if len(inputs) != 2:
      raise ValueError(
          'Exactly two inputs must be provided, context embeddings and label '
          'embeddings, but %d inputs were provided.' % len(inputs))
    context_embedding, label_embedding = inputs
    # Optionally apply normalization to both context and label embeddings,
    # computing the cosine similarity rather than the dot product.
    if self.normalization_fn is not None:
      context_embedding = self.normalization_fn(context_embedding)
      label_embedding = self.normalization_fn(label_embedding)
    # similarities[i][j] is the dot product of the ith context embedding and
    # the jth label embedding in a batch.
    similarities = tf.matmul(
        context_embedding, label_embedding, transpose_b=True)
    return similarities

  def get_config(self):
    """Return the layer config for serialization/cloning."""
    config = super().get_config()
    # NOTE(review): `normalization_fn` is a raw callable, so this config is
    # not JSON-serializable; confirm the layer is only cloned in-process.
    config.update({
        'normalization_fn': self.normalization_fn,
    })
    return config
NORMALIZATION_FN_MAP = {
'none': None,
'l2_normalize': l2_normalize_fn,
}
| [
"[email protected]"
] | |
445617559274b877d9caaaded1e30307947b51ec | ac9e79b04eadb95497b99c30444d952e6068f18f | /dockermap/map/policy/__init__.py | 45f1833b73ce0c9c2dd07d072c74315426a84c27 | [
"MIT"
] | permissive | vijayshan/docker-map | ff58f5c8aba15b8d157478a6614c6d6681de1e61 | a222c92947cbc22aef727c12f39fb93b0b192bc7 | refs/heads/master | 2021-01-17T03:16:31.693681 | 2015-09-14T08:20:55 | 2015-09-14T08:20:55 | 42,375,505 | 1 | 0 | null | 2015-09-12T22:31:07 | 2015-09-12T22:31:07 | null | UTF-8 | Python | false | false | 204 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .actions import ACTION_ATTACHED_FLAG, ACTION_DEPENDENCY_FLAG
from .simple import SimplePolicy
from .resume import ResumeUpdatePolicy
| [
"[email protected]"
] | |
65891c8750b9d10b031594b8b35080a55aaa6663 | 36409b78394002e5d6e9228ca256fd4654b38f80 | /random walk/src/BD_LERW.py | 225bf177733ba635a79943248f53c2381ba65975 | [] | no_license | xiaogang00/Mining-in-Social-Networks | fa383494fd56124096c60317af2b30373c0d4aac | 87ab6f29ae148170d03470987299c7ea812d1dab | refs/heads/master | 2020-12-02T16:22:59.938930 | 2017-08-30T01:58:33 | 2017-08-30T01:58:33 | 96,543,382 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,050 | py | #!/usr/bin/python
#
# An implementation of a Bidirectional Loop Erased Random Walk (LERW)
# from a cylinder with reflecting boundaries on the left
# and open boundaries on the right.
# PNG output of a single trajectory.
# Habib Rehmann and Gunnar Pruessner
#
import random
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
seed = 10 # random seed
Length = 200 # length of the cyclinder
Circ = 200 # circumference of cyclinder
x = 0 # x coordinate of starting location
# y coordinate of starting location. Origin is at centre of square
y = Circ / 2
#在这里一开始的时候,x是在原点,而y是在中间的
s = 0 # Step number.
realizations = 8
trajectory = [] # List of the x coordinates of all points visited.
# (Length x Circ) 2D array of zeros
lattice = np.zeros((Length, Circ), dtype=int)
random.seed(seed)
# Plot config
dpi = 300
fig, ax = plt.subplots()
fig.set_size_inches(3, Circ * 3. / Length)
ax.set_xlim(0, Length - 1)
ax.set_ylim(0, Circ - 1)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def plot(LERW, c='g', Length=Length, Circ=Circ):
    """Draw a trajectory, blanking out points on the lattice boundary.

    Points on the left/right edges or on the periodic seam are replaced with
    (nan, nan) *in place*, so matplotlib breaks the line there instead of
    drawing spurious segments across the boundary.

    Args:
        LERW: List of (x, y) lattice points; modified in place.
        c: Matplotlib color for the trajectory.
        Length: Cylinder length (defaults to the module-level value).
        Circ: Cylinder circumference (defaults to the module-level value).
    """
    for pos, (x, y) in enumerate(LERW):
        # Do not draw on the edges or corners of the cylinder.
        # (The original condition duplicated the `y == Circ` test and bumped
        # the loop variable to no effect; both removed.)
        if x in (0, Length) or y in (0, Circ):
            LERW[pos] = (np.nan, np.nan)
    plt.plot(*zip(*LERW), color=c, linewidth=0.2)
# Generate a randomwalk
for i in range(realizations):
s = 0
x = 0 # x coordinate of starting location
y = Circ / 2 # y coordinate of starting location
#lattice在这里是格子的线
lattice = np.zeros((Length, Circ), dtype=int)
trajectory = []
while True:
s += 1
#下面相当于在x,y的方向上产生随机数
if (bool(random.getrandbits(1))):
if (bool(random.getrandbits(1))):
x += 1
else:
x -= 1
else:
if (bool(random.getrandbits(1))):
y += 1
else:
y -= 1
if (x >= Length):
break
elif (x < 0):
x = 0
if (y >= Circ):
y -= Circ
elif (y < 0):
y += Circ
lattice[x][y] += 1
trajectory.append((x, y))
x0, y0, pos = None, None, 0
# Loop erasure
LERW_LeftRight = deepcopy(trajectory)
lcpy = deepcopy(lattice)
x0, y0 = None, None
pos = 0
while pos < len(LERW_LeftRight):
x, y = LERW_LeftRight[pos]
if lcpy[x][y] > 1 and (not x0):
x0, y0 = x, y
pos0 = pos
elif (x == x0) and (y == y0) and (lcpy[x][y] == 1):
del LERW_LeftRight[pos0:pos]
x0, y0 = None, None
pos = pos0
lcpy[x][y] -= 1
pos += 1
plot(LERW_LeftRight)
# Loop erasure (tranversal from right to left)
LERW_RightLeft = deepcopy(trajectory[::-1])
lcpy = deepcopy(lattice)
x0, y0 = None, None
pos = 0
while pos < len(LERW_RightLeft):
x, y = LERW_RightLeft[pos]
if lcpy[x][y] > 1 and (not x0):
x0, y0 = x, y
pos0 = pos
elif (x == x0) and (y == y0) and (lcpy[x][y] == 1):
del LERW_RightLeft[pos0:pos]
x0, y0 = None, None
pos = pos0
lcpy[x][y] -= 1
pos += 1
plot(LERW_RightLeft, 'r')
# Plot random walk
plt.savefig(__file__[:-3]+".png", bbox_inches="tight", dpi=dpi)
| [
"[email protected]"
] | |
579d13c29895c97ff77f3425bac31cb6d6070857 | 1e6e3bb707920fdb01ebca23eaf81097c558d918 | /tests/system/action/test_internal_actions.py | cc7ffd01313c71744500855191c73bb153e2160b | [
"MIT"
] | permissive | OpenSlides/openslides-backend | cbd24589f82a6f29bde02611610511870bb6abbf | d8511f5138db4cc5fe4fa35e2a0200f766bd49c5 | refs/heads/main | 2023-08-23T11:54:25.064070 | 2023-08-22T11:15:45 | 2023-08-22T11:15:45 | 231,757,840 | 6 | 22 | MIT | 2023-09-14T16:23:41 | 2020-01-04T12:17:38 | Python | UTF-8 | Python | false | false | 7,783 | py | from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional
from openslides_backend.http.views.action_view import (
INTERNAL_AUTHORIZATION_HEADER,
ActionView,
)
from openslides_backend.http.views.base_view import RouteFunction
from openslides_backend.shared.env import DEV_PASSWORD
from openslides_backend.shared.util import ONE_ORGANIZATION_FQID
from tests.system.util import disable_dev_mode, get_route_path
from tests.util import Response
from .base import BaseActionTestCase
from .util import get_internal_auth_header
class BaseInternalRequestTest(BaseActionTestCase):
    """
    Provides the ability to use the anonymous client to call an internal route.
    """

    # The internal route under test; set by subclasses.
    route: RouteFunction

    def call_internal_route(
        self,
        payload: Any,
        internal_auth_password: Optional[str] = DEV_PASSWORD,
    ) -> Response:
        """POST `payload` to `self.route` via the anonymous client.

        `internal_auth_password` is sent in the internal authorization header;
        pass None to omit the header entirely (simulating an unauthorized call).
        """
        if internal_auth_password is None:
            headers = {}
        else:
            headers = get_internal_auth_header(internal_auth_password)
        return self.anon_client.post(
            get_route_path(self.route),
            json=payload,
            headers=headers,
        )
class BaseInternalPasswordTest(BaseInternalRequestTest):
    """
    Sets up a server-side password for internal requests.
    """

    # Plain-text secret written to a temp file the backend reads as the
    # internal auth password.
    internal_auth_password: str = "Q2^$2J9QXimW6lDPoGj4"

    def setUp(self) -> None:
        super().setUp()
        # Write the secret to a named temp file and point the app at it via
        # INTERNAL_AUTH_PASSWORD_FILE. seek(0) flushes the buffered write so
        # the file can be read back by name.
        self.secret_file = NamedTemporaryFile()
        self.secret_file.write(self.internal_auth_password.encode("ascii"))
        self.secret_file.seek(0)
        self.app.env.vars["INTERNAL_AUTH_PASSWORD_FILE"] = self.secret_file.name

    def tearDown(self) -> None:
        super().tearDown()
        # Unset the password file and delete the temp file (close removes it).
        self.app.env.vars["INTERNAL_AUTH_PASSWORD_FILE"] = ""
        self.secret_file.close()
class BaseInternalActionTest(BaseInternalRequestTest):
    """
    Provides a helper to send a single action request to the internal action
    route via the anonymous client.
    """

    route: RouteFunction = ActionView.internal_action_route

    def internal_request(
        self,
        action: str,
        data: Dict[str, Any],
        internal_auth_password: Optional[str] = DEV_PASSWORD,
    ) -> Response:
        # Wrap the single action/data pair in the payload format the action
        # route expects: a list of {action, data: [...]} items.
        return super().call_internal_route(
            [{"action": action, "data": [data]}], internal_auth_password
        )
class TestInternalActionsDev(BaseInternalActionTest):
    """
    Uses the anonymous client to call the internal action route. This should
    skip all permission checks, so the requests still succeed.

    Just rudimentary tests that the actions generally succeed since if that's
    the case, everything should be handled analogously to the external case,
    which is already tested sufficiently in the special test cases for the
    actions.

    Hint: This test assumes that OPENSLIDES_DEVELOPMENT is truthy.
    """

    def test_internal_user_create(self) -> None:
        response = self.internal_request("user.create", {"username": "test"})
        self.assert_status_code(response, 200)
        self.assert_model_exists("user/2", {"username": "test"})

    def test_internal_user_update(self) -> None:
        response = self.internal_request("user.update", {"id": 1, "username": "test"})
        self.assert_status_code(response, 200)
        self.assert_model_exists("user/1", {"username": "test"})

    def test_internal_user_delete(self) -> None:
        response = self.internal_request("user.delete", {"id": 1})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("user/1")

    def test_internal_user_set_password(self) -> None:
        response = self.internal_request(
            "user.set_password", {"id": 1, "password": "new_password"}
        )
        self.assert_status_code(response, 200)
        model = self.get_model("user/1")
        # The stored hash must verify against the plain-text password.
        assert self.auth.is_equals("new_password", model["password"])

    def test_internal_organization_initial_import(self) -> None:
        # Stack-internal action: only reachable through the internal route.
        self.datastore.truncate_db()
        response = self.internal_request("organization.initial_import", {"data": {}})
        self.assert_status_code(response, 200)
        self.assert_model_exists(ONE_ORGANIZATION_FQID)
        self.assert_model_exists("user/1", {"username": "superadmin"})

    def test_internal_mismatching_passwords(self) -> None:
        response = self.internal_request(
            "user.create", {"username": "test"}, "wrong_pw"
        )
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_no_password_in_request(self) -> None:
        # Omitting the auth header entirely must be rejected.
        response = self.internal_request("user.create", {"username": "test"}, None)
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_wrong_password_in_request(self) -> None:
        response = self.internal_request("user.create", {"username": "test"}, "wrong")
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_execute_stack_internal_via_public_route(self) -> None:
        # Stack-internal actions must not be callable via the public route.
        self.datastore.truncate_db()
        response = self.request(
            "organization.initial_import", {"data": {}}, internal=False
        )
        self.assert_status_code(response, 400)
        self.assertEqual(
            response.json.get("message"),
            "Action organization.initial_import does not exist.",
        )
        self.assert_model_not_exists("organization/1")

    def test_internal_wrongly_encoded_password(self) -> None:
        # A raw (not properly encoded) header value must be rejected with 400.
        response = self.anon_client.post(
            get_route_path(self.route),
            json=[{"action": "user.create", "data": [{"username": "test"}]}],
            headers={INTERNAL_AUTHORIZATION_HEADER: "openslides"},
        )
        self.assert_status_code(response, 400)
        self.assert_model_not_exists("user/2")
@disable_dev_mode
class TestInternalActionsProd(BaseInternalActionTest):
    """
    The same as the TestInternalActionsDev class but in prod mode.
    """

    def test_internal_no_password_on_server(self) -> None:
        # In prod mode without a configured password file, internal requests
        # must fail with a server error regardless of the supplied password.
        response = self.internal_request(
            "user.create", {"username": "test"}, "some password"
        )
        self.assert_status_code(response, 500)
        self.assert_model_not_exists("user/2")
@disable_dev_mode
class TestInternalActionsProdWithPasswordFile(
    BaseInternalActionTest, BaseInternalPasswordTest
):
    """
    Same as TestInternalActionsProd but with a server-side password set.
    """

    def test_internal_wrong_password(self) -> None:
        response = self.internal_request("user.create", {"username": "test"}, "wrong")
        self.assert_status_code(response, 401)
        self.assert_model_not_exists("user/2")

    def test_internal_execute_public_action(self) -> None:
        # With the correct server-side password, public actions succeed.
        response = self.internal_request(
            "user.create", {"username": "test"}, self.internal_auth_password
        )
        self.assert_status_code(response, 200)
        self.assert_model_exists("user/2")

    def test_internal_execute_stack_internal_action(self) -> None:
        # Stack-internal actions are reachable through the internal route.
        self.datastore.truncate_db()
        response = self.internal_request(
            "organization.initial_import", {"data": {}}, self.internal_auth_password
        )
        self.assert_status_code(response, 200)
        self.assert_model_exists(ONE_ORGANIZATION_FQID)

    def test_internal_execute_backend_internal_action(self) -> None:
        # Backend-internal actions stay hidden even on the internal route.
        response = self.internal_request(
            "option.create",
            {"meeting_id": 1, "text": "test"},
            self.internal_auth_password,
        )
        self.assert_status_code(response, 400)
        self.assertEqual(
            response.json.get("message"), "Action option.create does not exist."
        )
        self.assert_model_not_exists("option/1")
| [
"[email protected]"
] | |
8c5e18b01ac0690c7c7bb348b0e0d6c88a0697eb | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4084/codes/1578_1331.py | 4c3e362ab144d591bf2ec7a790a959912f033491 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | g = 3
m = 50
conta = m // g
i = m%g
print(conta)
print(i)
| [
"[email protected]"
] | |
c9cf7689a50286a5c1017bfd446fa36d67ab48be | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02663/s577432279.py | fb0a161b678de985a05e44a3e1290554f0f0f831 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | import sys
input=sys.stdin.readline
h1,m1,h2,m2,k=map(int,input().split())
h1=h1*60
h2=h2*60
m1+=h1
m2+=h2
print(m2-m1-k) | [
"[email protected]"
] | |
e0401fc292b6f962226021e0e3f636419bf5068e | 958b6de6be5fb8bce876373cec29677259c6ceb3 | /hypergan/train_hooks/experimental/weight_penalty_train_hook.py | 7a672346f3853a2b0d4a457e359240de7e35efd9 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | RandomStudio/HyperGAN | 712679b9121ad414d2f91205a82370d54a930120 | 586cefe69805f5ffa8dcb11aaf346f6b3dcf3ac9 | refs/heads/master | 2020-06-22T22:43:04.884557 | 2019-07-23T12:17:58 | 2019-07-23T12:17:58 | 198,420,256 | 0 | 0 | null | 2019-07-23T11:52:00 | 2019-07-23T11:51:59 | null | UTF-8 | Python | false | false | 2,923 | py | #From https://gist.github.com/EndingCredits/b5f35e84df10d46cfa716178d9c862a3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
import tensorflow as tf
import hyperchamber as hc
import numpy as np
import inspect
from operator import itemgetter
from hypergan.train_hooks.base_train_hook import BaseTrainHook
class WeightPenaltyTrainHook(BaseTrainHook):
  """Train hook adding weight-matrix penalties (L2NN / orthogonality) to the loss."""

  def __init__(self, gan=None, config=None, trainer=None, name="WeightPenaltyTrainHook", memory_size=2, top_k=1):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    d_losses = []
    weights = self.gan.weights()
    if config.only_d:
      # NOTE(review): `self.discriminator` is not defined on this hook; this
      # branch presumably intends `self.gan.discriminator` — confirm.
      weights = self.discriminator.weights()
    if config.l2nn_penalty:
      l2nn_penalties = []
      if len(weights) > 0:
        for w in weights:
          # Flatten every weight tensor to a 2-D matrix [-1, out_channels].
          w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
          wt = tf.transpose(w)
          wtw = tf.matmul(wt,w)
          wwt = tf.matmul(w,wt)
          def _l(m):
            # Penalize column-wise absolute sums of the Gram matrix that
            # exceed 1; take the worst (largest) column per matrix.
            m = tf.abs(m)
            # NOTE(review): `keep_dims` is the TF1 spelling (TF2: `keepdims`).
            m = tf.reduce_sum(m, axis=0,keep_dims=True)
            m = tf.maximum(m-1, 0)
            m = tf.reduce_max(m, axis=1,keep_dims=True)
            return m
          # Use whichever of W^T W / W W^T yields the smaller penalty.
          l2nn_penalties.append(tf.minimum(_l(wtw), _l(wwt)))
        print('l2nn_penalty', self.config.l2nn_penalty, l2nn_penalties)
        l2nn_penalty = self.config.l2nn_penalty * tf.add_n(l2nn_penalties)
        self.add_metric('l2nn_penalty', self.gan.ops.squash(l2nn_penalty))
        d_losses.append(l2nn_penalty)
    if config.ortho_penalty:
      penalties = []
      for w in self.gan.weights():
        print("PENALTY", w)
        w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
        wt = tf.transpose(w)
        wtw = tf.matmul(wt,w)
        wwt = tf.matmul(w,wt)
        mwtw = tf.matmul(w, wtw)
        mwwt = tf.matmul(wt, wwt)
        def _l(w,m):
          # Mean absolute deviation between W and W * (Gram matrix); small
          # when the weight matrix is (near-)orthogonal.
          l = tf.reduce_mean(tf.abs(w - m))
          l = self.ops.squash(l)
          return l
        penalties.append(tf.minimum(_l(w, mwtw), _l(wt, mwwt)))
      penalty = self.config.ortho_penalty * tf.add_n(penalties)
      self.add_metric('ortho_penalty', self.gan.ops.squash(penalty))
      print("PENALTY", penalty)
      # Broadcast the scalar penalty to a per-example [batch_size, 1] tensor.
      penalty = tf.reshape(penalty, [1,1])
      penalty = tf.tile(penalty, [self.gan.batch_size(), 1])
      d_losses.append(penalty)
    self.loss = self.ops.squash(d_losses)

  def losses(self):
    # Same penalty is applied to both generator and discriminator losses.
    return [self.loss, self.loss]

  def after_step(self, step, feed_dict):
    # No per-step work; penalty is baked into the loss graph at init time.
    pass

  def before_step(self, step, feed_dict):
    # No per-step work; penalty is baked into the loss graph at init time.
    pass
| [
"[email protected]"
] | |
bb7645b996dd70bb11bceb7fa31190757f205a92 | 141d1fb160fcfb4294d4b0572216033218da702d | /exec -l /bin/zsh/google-cloud-sdk/lib/surface/composer/environments/run.py | b81165e938f3ff95fea3676709e9be6e342bacc4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sudocams/tech-club | 1f2d74c4aedde18853c2b4b729ff3ca5908e76a5 | c8540954b11a6fd838427e959e38965a084b2a4c | refs/heads/master | 2021-07-15T03:04:40.397799 | 2020-12-01T20:05:55 | 2020-12-01T20:05:55 | 245,985,795 | 0 | 1 | null | 2021-04-30T21:04:39 | 2020-03-09T08:51:41 | Python | UTF-8 | Python | false | false | 7,255 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to run an Airflow CLI sub-command in an environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from googlecloudsdk.api_lib.composer import environments_util as environments_api_util
from googlecloudsdk.api_lib.composer import util as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.composer import resource_args
from googlecloudsdk.command_lib.composer import util as command_util
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
WORKER_POD_SUBSTR = 'worker'
WORKER_CONTAINER = 'airflow-worker'
DEPRECATION_WARNING = ('Because Cloud Composer manages the Airflow metadata '
'database for your environment, support for the Airflow '
'`{}` subcommand is being deprecated. '
'To avoid issues related to Airflow metadata, we '
'recommend that you do not use this subcommand unless '
'you understand the outcome.')
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Run(base.Command):
  """Run an Airflow sub-command remotely in a Cloud Composer environment.

  Executes an Airflow CLI sub-command remotely in an environment. If the
  sub-command takes flags, separate the environment name from the sub-command
  and its flags with ``--''. This command waits for the sub-command to
  complete; its exit code will match the sub-command's exit code.

  ## EXAMPLES

    The following command:

    {command} myenv trigger_dag -- some_dag --run_id=foo

    is equivalent to running the following command from a shell inside the
    *my-environment* environment:

    airflow trigger_dag some_dag --run_id=foo
  """

  @staticmethod
  def Args(parser):
    """Register the positional arguments for this command."""
    resource_args.AddEnvironmentResourceArg(
        parser, 'in which to run an Airflow command')
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=command_util.SUBCOMMAND_WHITELIST,
        help=('The Airflow CLI subcommand to run. Available subcommands '
              'include: {} (see https://airflow.apache.org/cli.html for more '
              'info). Note that delete_dag is available from Airflow 1.10.1, '
              'and list_dag_runs, next_execution are available from Airflow '
              '1.10.2.').format(', '.join(command_util.SUBCOMMAND_WHITELIST)))
    parser.add_argument(
        'cmd_args',
        metavar='CMD_ARGS',
        # REMAINDER collects everything after `--` for the Airflow subcommand.
        nargs=argparse.REMAINDER,
        help='Command line arguments to the subcommand.',
        example='{command} myenv trigger_dag -- some_dag --run_id=foo')

  def BypassConfirmationPrompt(self, args):
    """Bypasses confirmations with "yes" responses.

    Prevents certain Airflow CLI subcommands from presenting a confirmation
    prompting (which can hang the gcloud CLI). When necessary, bypass
    confirmations with a "yes" response.

    Args:
      args: argparse.Namespace, An object that contains the values for the
        arguments specified in the .Args() method.
    """
    prompting_subcommands = ['delete_dag']
    # Append --yes only if the user did not already pass -y/--yes.
    if args.subcommand in prompting_subcommands and set(
        args.cmd_args).isdisjoint({'-y', '--yes'}):
      args.cmd_args.append('--yes')

  def DeprecationWarningPrompt(self, args):
    """Prompt the user before running a deprecated subcommand.

    Returns True if execution may continue; with cancel_on_no=True the prompt
    aborts the command when the user declines.
    """
    response = True
    if args.subcommand in command_util.SUBCOMMAND_DEPRECATION:
      response = console_io.PromptContinue(
          message=DEPRECATION_WARNING.format(args.subcommand),
          default=False, cancel_on_no=True)
    return response

  def ConvertKubectlError(self, error, env_obj):
    """Hook for release tracks to rewrite kubectl errors; GA returns as-is."""
    del env_obj  # Unused argument.
    return error

  def Run(self, args):
    """Execute the Airflow subcommand in the environment's worker pod."""
    self.DeprecationWarningPrompt(args)
    running_state = (
        api_util.GetMessagesModule(release_track=self.ReleaseTrack())
        .Environment.StateValueValuesEnum.RUNNING)
    env_ref = args.CONCEPTS.environment.Parse()
    env_obj = environments_api_util.Get(
        env_ref, release_track=self.ReleaseTrack())
    # Commands can only run against environments that are fully up.
    if env_obj.state != running_state:
      raise command_util.Error(
          'Cannot execute subcommand for environment in state {}. '
          'Must be RUNNING.'.format(env_obj.state))
    cluster_id = env_obj.config.gkeCluster
    cluster_location_id = command_util.ExtractGkeClusterLocationId(env_obj)
    # Use a throwaway kubeconfig so the user's default config is untouched.
    with command_util.TemporaryKubeconfig(cluster_location_id, cluster_id):
      try:
        kubectl_ns = command_util.FetchKubectlNamespace(
            env_obj.config.softwareConfig.imageVersion)
        pod = command_util.GetGkePod(
            pod_substr=WORKER_POD_SUBSTR, kubectl_namespace=kubectl_ns)
        log.status.Print(
            'Executing within the following kubectl namespace: {}'.format(
                kubectl_ns))
        self.BypassConfirmationPrompt(args)
        # kubectl exec flags: -t (tty), -i (stdin), -c (container name).
        kubectl_args = [
            'exec', pod, '-tic', WORKER_CONTAINER, 'airflow', args.subcommand
        ]
        if args.cmd_args:
          # Add '--' to the argument list so kubectl won't eat the command args.
          kubectl_args.extend(['--'] + args.cmd_args)
        command_util.RunKubectlCommand(
            command_util.AddKubectlNamespace(kubectl_ns, kubectl_args),
            out_func=log.status.Print)
      except command_util.KubectlError as e:
        # Let release-track subclasses attach environment-specific hints.
        raise self.ConvertKubectlError(e, env_obj)
raise self.ConvertKubectlError(e, env_obj)
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class RunBeta(Run):
  """Run an Airflow sub-command remotely in a Cloud Composer environment.
  Executes an Airflow CLI sub-command remotely in an environment. If the
  sub-command takes flags, separate the environment name from the sub-command
  and its flags with ``--''. This command waits for the sub-command to
  complete; its exit code will match the sub-command's exit code.
  ## EXAMPLES
  The following command:
  {command} myenv trigger_dag -- some_dag --run_id=foo
  is equivalent to running the following command from a shell inside the
  *my-environment* environment:
  airflow trigger_dag some_dag --run_id=foo
  """
  def ConvertKubectlError(self, error, env_obj):
    """Adds private-IP connectivity guidance to kubectl failures.

    For private environments, kubectl failures are commonly caused by the
    caller's machine lacking network access to the cluster, so the error
    message is extended with a pointer to the relevant documentation.

    Args:
      error: the exception raised while invoking kubectl.
      env_obj: the environment resource, used to detect a private environment.

    Returns:
      A command_util.Error with extra guidance for private environments,
      otherwise the original error unchanged.
    """
    is_private = (
        env_obj.config.privateEnvironmentConfig and
        env_obj.config.privateEnvironmentConfig.enablePrivateEnvironment)
    if is_private:
      return command_util.Error(
          str(error) +
          ' Make sure you have followed https://cloud.google.com/composer/docs/how-to/accessing/airflow-cli#running_commands_on_a_private_ip_environment '
          'to enable access to your private Cloud Composer environment from '
          'your machine.')
    return error
| [
"[email protected]"
] | |
1750d92d1dc355447d3f4c59c6a8905eb0f2bb15 | 23a1faa037ddaf34a7b5db8ae10ff8fa1bb79b94 | /TCS_Practice/TCS_CodeVita_Problems/Constellation.py | e77bcc038db671a3100235e6c5e1bd94fd310097 | [] | no_license | Pyk017/Competetive-Programming | e57d2fe1e26eeeca49777d79ad0cbac3ab22fe63 | aaa689f9e208bc80e05a24b31aa652048858de22 | refs/heads/master | 2023-04-27T09:37:16.432258 | 2023-04-22T08:01:18 | 2023-04-22T08:01:18 | 231,229,696 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | '''
Three characters { #, *, . } represents a constellation of stars and galaxies in space. Each galaxy is demarcated by # characters. There can be one or many stars in a given galaxy. Stars can only be in shape of vowels { A, E, I, O, U } . A collection of * in the shape of the vowels is a star. A star is contained in a 3x3 block. Stars cannot be overlapping. The dot(.) character denotes empty space.
Given 3xN matrix comprising of { #, *, . } character, find the galaxy and stars within them.
Note: Please pay attention to how vowel A is denoted in a 3x3 block in the examples section below.
Constraints
3 <= N <= 10^5
Input
Input consists of single integer N denoting number of columns.
Output
Output contains vowels (stars) in order of their occurrence within the given galaxy. Galaxy itself is represented by # character.
Example 1
Input
18
* . * # * * * # * * * # * * * . * .
* . * # * . * # . * . # * * * * * *
* * * # * * * # * * * # * * * * . *
Output
U#O#I#EA
Explanation
As it can be seen that the stars make the image of the alphabets U, O, I, E and A respectively.
Example 2
Input
12
* . * # . * * * # . * .
* . * # . . * . # * * *
* * * # . * * * # * . *
Output
U#I#A
Explanation
As it can be seen that the stars make the image of the alphabet U, I and A.
Possible solution:
Input:
12
* . * # . * * * # . * .
* . * # . . * . # * * *
* * * # . * * * # * . *
'''
n = int(input())
galaxy = [list(map(int, input().split())) for _ in range(3)]
for i in range(n):
if galaxy[0][i] == '#' and galaxy[1][j] == '#' and galaxy[2][i] == '#':
print('#', end='')
elif galaxy[0][i] == '.' and galaxy[1][j] == '.' and galaxy[2][i] == '.':
pass
else:
x = i
a, b, c, a1, b1, c1, a2, b2, c2 = galaxy[0][x], galaxy[0][x+1], galaxy[0][x+2], galaxy[1][x], galaxy[1][x+1], galaxy[1][x+2], galaxy[2][x], galaxy[2][x+1], galaxy[2][x+2]
if a == '.' and b == '*' and c == '.' and a1=='*' and b1 == '*' and c1 == '*' and a2=='*' and b2 == '.' and c2 == '*':
print("A", end='')
i = i + 2
if a == '*' and b == '*' and c == '*' and a1 == '*' and b1 == '*' and c1 == '*' and a2 == '*' and b2 == '*' and c2 == '*':
print("E", end='')
i = i + 2
if a == '*' and b == '*' and c == '*' and a1 == '.' and b1 == '*' and c1 == '.' and a2 == '*' and b2 == '*' and c2 == '*':
print("I", end='')
i = i + 2
if a == '*' and b == '*' and c == '*' and a1 == '*' and b1 == '.' and c1 == '*' and a2 == '*' and b2 == '*' and c2 == '*':
print("O", end='')
i = i + 2
if a == '*' and b == '.' and c == '*' and a1 == '*' and b1 == '.' and c1 == '*' and a2 == '*' and b2 == '*' and c2 =='*':
print("U", end='')
i = i + 2
| [
"[email protected]"
] | |
8177573260147c1b6a0fc39e0e1977682266b7b6 | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/130_Fluent_Python/fp2-utf8/freeinteractive/freeinteractive 103.py | fedc3048dc3417682a8981be5ae2035e8e4eed63 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24 | py | >>> b = 6
>>> f1(3)
3
6
| [
"[email protected]"
] | |
9d9c75dc71a08948292a19969a209d9e9e35aaba | 04f194dfd80367756cc3971b57b48065b2edbfb3 | /topics/number_line.py | 37386cf3172b78370d0618372e8485ba120c6294 | [] | no_license | arthurtcl/manim | de2bfcf495981fb332e036b63e7c074e0db50624 | ad05030641483b7f99b382cf6492bebcd4aa6d18 | refs/heads/master | 2021-01-17T11:44:48.968626 | 2017-03-04T01:34:05 | 2017-03-04T01:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,977 | py | from helpers import *
from mobject import Mobject1D
from mobject.vectorized_mobject import VMobject, VGroup
from mobject.tex_mobject import TexMobject
from topics.geometry import Line, Arrow
from scene import Scene
class NumberLine(VMobject):
    """A horizontal number line with tick marks and optional number labels.

    The line covers [x_min, x_max] in "number" space; ``space_unit_to_num``
    scales it into scene units, and the whole mobject is shifted so that
    ``number_at_center`` sits at the origin.
    """
    CONFIG = {
        "color" : BLUE,
        "x_min" : -SPACE_WIDTH,
        "x_max" : SPACE_WIDTH,
        "space_unit_to_num" : 1,
        "tick_size" : 0.1,
        "tick_frequency" : 1,
        "leftmost_tick" : None, #Defaults to ceil(x_min)
        "numbers_with_elongated_ticks" : [0],
        "numbers_to_show" : None,
        "longer_tick_multiple" : 2,
        "number_at_center" : 0,
        "propogate_style_to_family" : True
    }
    def __init__(self, **kwargs):
        # digest_config copies CONFIG entries (merged with kwargs) onto self.
        digest_config(self, kwargs)
        if self.leftmost_tick is None:
            self.leftmost_tick = np.ceil(self.x_min)
        VMobject.__init__(self, **kwargs)
    def generate_points(self):
        """Builds the main line and tick marks, then scales and recenters."""
        self.main_line = Line(self.x_min*RIGHT, self.x_max*RIGHT)
        self.tick_marks = VMobject()
        self.add(self.main_line, self.tick_marks)
        for x in self.get_tick_numbers():
            self.add_tick(x, self.tick_size)
        for x in self.numbers_with_elongated_ticks:
            # Emphasized numbers get a second, longer tick drawn on top.
            self.add_tick(x, self.longer_tick_multiple*self.tick_size)
        self.stretch(self.space_unit_to_num, 0)
        self.shift(-self.number_to_point(self.number_at_center))
    def add_tick(self, x, size):
        """Adds a vertical tick of half-height ``size`` at number ``x``."""
        self.tick_marks.add(Line(
            x*RIGHT+size*DOWN,
            x*RIGHT+size*UP,
        ))
        return self
    def get_tick_marks(self):
        # The VMobject holding all tick Lines.
        return self.tick_marks
    def get_tick_numbers(self):
        """Numbers at which regular ticks are drawn (x_max inclusive)."""
        return np.arange(
            self.leftmost_tick, self.x_max + self.tick_frequency,
            self.tick_frequency
        )
    def number_to_point(self, number):
        """Maps a number on the line to its scene-space point."""
        alpha = float(number-self.x_min)/(self.x_max - self.x_min)
        return interpolate(
            self.main_line.get_start(),
            self.main_line.get_end(),
            alpha
        )
    def point_to_number(self, point):
        """Inverse of number_to_point: projects ``point`` onto the line."""
        left_point, right_point = self.main_line.get_start_and_end()
        full_vect = right_point-left_point
        def distance_from_left(p):
            # Signed length of p's projection onto the line's direction.
            return np.dot(p-left_point, full_vect)/np.linalg.norm(full_vect)
        return interpolate(
            self.x_min, self.x_max,
            distance_from_left(point)/distance_from_left(right_point)
        )
    def default_numbers_to_display(self):
        # Explicit numbers_to_show wins; otherwise label each integer tick.
        if self.numbers_to_show is not None:
            return self.numbers_to_show
        return np.arange(self.leftmost_tick, self.x_max, 1)
    def get_vertical_number_offset(self, direction = DOWN):
        # Labels sit a few tick-heights away from the line.
        return 4*direction*self.tick_size
    def get_number_mobjects(self, *numbers, **kwargs):
        """Builds TexMobject labels positioned relative to their ticks."""
        #TODO, handle decimals
        if len(numbers) == 0:
            numbers = self.default_numbers_to_display()
        result = VGroup()
        for number in numbers:
            mob = TexMobject(str(int(number)))
            mob.scale_to_fit_height(3*self.tick_size)
            mob.shift(
                self.number_to_point(number),
                self.get_vertical_number_offset(**kwargs)
            )
            result.add(mob)
        return result
    def add_numbers(self, *numbers, **kwargs):
        """Creates number labels, adds them as submobjects; returns self."""
        self.numbers = self.get_number_mobjects(
            *numbers, **kwargs
        )
        self.add(*self.numbers)
        return self
class UnitInterval(NumberLine):
    """A NumberLine specialized to [0, 1], with ticks every 0.1."""
    CONFIG = {
        "x_min" : 0,
        "x_max" : 1,
        "space_unit_to_num" : 6,
        "tick_frequency" : 0.1,
        "numbers_with_elongated_ticks" : [0, 1],
        "number_at_center" : 0.5,
    }
class Axes(VGroup):
    """A pair of perpendicular NumberLines: x axis plus a 90-degree-rotated
    copy serving as the y axis.  kwargs configure both axes identically."""
    CONFIG = {
        "propogate_style_to_family" : True
    }
    def __init__(self, **kwargs):
        VGroup.__init__(self)
        # Both axes share the same NumberLine configuration.
        self.x_axis = NumberLine(**kwargs)
        self.y_axis = NumberLine(**kwargs).rotate(np.pi/2)
        self.add(self.x_axis, self.y_axis)
class NumberPlane(VMobject):
    """A 2-D coordinate grid: axes, main grid lines and fainter secondary
    lines, with optional coordinate labels and axis labels."""
    CONFIG = {
        "color" : BLUE_D,
        "secondary_color" : BLUE_E,
        "axes_color" : WHITE,
        "secondary_stroke_width" : 1,
        "x_radius": SPACE_WIDTH,
        "y_radius": SPACE_HEIGHT,
        "space_unit_to_x_unit" : 1,
        "space_unit_to_y_unit" : 1,
        "x_line_frequency" : 1,
        "y_line_frequency" : 1,
        "secondary_line_ratio" : 1,
        "written_coordinate_height" : 0.2,
        "written_coordinate_nudge" : 0.1*(DOWN+RIGHT),
        "num_pair_at_center" : (0, 0),
        "propogate_style_to_family" : False,
    }
    def generate_points(self):
        """Builds axes, main lines and secondary lines for both directions."""
        self.axes = VMobject()
        self.main_lines = VMobject()
        self.secondary_lines = VMobject()
        # One tuple per direction: (extent, spacing, line start, line end,
        # unit vector along which the family of lines is offset).
        tuples = [
            (
                self.x_radius,
                self.x_line_frequency,
                self.y_radius*DOWN,
                self.y_radius*UP,
                RIGHT
            ),
            (
                self.y_radius,
                self.y_line_frequency,
                self.x_radius*LEFT,
                self.x_radius*RIGHT,
                UP,
            ),
        ]
        for radius, freq, start, end, unit in tuples:
            main_range = np.arange(0, radius, freq)
            # Secondary lines subdivide each main-line interval.
            step = freq/float(freq + self.secondary_line_ratio)
            for v in np.arange(0, radius, step):
                # Mirrored pair of lines at +v and -v from the axis.
                line1 = Line(start+v*unit, end+v*unit)
                line2 = Line(start-v*unit, end-v*unit)
                if v == 0:
                    self.axes.add(line1)
                elif v in main_range:
                    self.main_lines.add(line1, line2)
                else:
                    self.secondary_lines.add(line1, line2)
        self.add(self.secondary_lines, self.main_lines, self.axes)
        self.stretch(self.space_unit_to_x_unit, 0)
        self.stretch(self.space_unit_to_y_unit, 1)
        #Put x_axis before y_axis
        y_axis, x_axis = self.axes.split()
        self.axes = VMobject(x_axis, y_axis)
    def init_colors(self):
        """Applies the configured stroke colors/widths to each line family."""
        VMobject.init_colors(self)
        self.axes.set_stroke(self.axes_color, self.stroke_width)
        self.main_lines.set_stroke(self.color, self.stroke_width)
        self.secondary_lines.set_stroke(
            self.secondary_color, self.secondary_stroke_width
        )
        return self
    def get_center_point(self):
        # Scene point of the configured center coordinate pair.
        return self.num_pair_to_point(self.num_pair_at_center)
    def num_pair_to_point(self, pair):
        """Maps an (x, y) coordinate pair to a scene-space point."""
        pair = np.array(pair) + self.num_pair_at_center
        result = self.axes.get_center()
        result[0] += pair[0]*self.space_unit_to_x_unit
        result[1] += pair[1]*self.space_unit_to_y_unit
        return result
    def point_to_num_pair(self, point):
        """Maps a scene-space point back to an (x, y) coordinate pair."""
        # NOTE(review): new_point is computed but the formulas below use the
        # raw `point`, so the plane's own translation is ignored here --
        # looks like a latent bug; confirm against callers before changing.
        new_point = point-self.get_center()
        center_x, center_y = self.num_pair_at_center
        x = center_x + point[0]/self.space_unit_to_x_unit
        y = center_y + point[1]/self.space_unit_to_y_unit
        return x, y
    def get_coordinate_labels(self, x_vals = None, y_vals = None):
        """Builds TexMobject labels for the given (or all integer) coords."""
        result = []
        if x_vals == None and y_vals == None:
            x_vals = range(-int(self.x_radius), int(self.x_radius))
            y_vals = range(-int(self.y_radius), int(self.y_radius))
        for index, vals in enumerate([x_vals, y_vals]):
            # index 0 labels along the x axis, index 1 along the y axis.
            num_pair = [0, 0]
            for val in vals:
                num_pair[index] = val
                point = self.num_pair_to_point(num_pair)
                num = TexMobject(str(val))
                num.scale_to_fit_height(
                    self.written_coordinate_height
                )
                num.shift(
                    point-num.get_corner(UP+LEFT),
                    self.written_coordinate_nudge
                )
                result.append(num)
        return result
    def get_axes(self):
        return self.axes
    def get_axis_labels(self, x_label = "x", y_label = "y"):
        """Places axis name labels at the right (x) and top (y) edges."""
        x_axis, y_axis = self.get_axes().split()
        x_label_mob = TexMobject(x_label)
        y_label_mob = TexMobject(y_label)
        x_label_mob.next_to(x_axis, DOWN)
        x_label_mob.to_edge(RIGHT)
        y_label_mob.next_to(y_axis, RIGHT)
        y_label_mob.to_edge(UP)
        return VMobject(x_label_mob, y_label_mob)
    def add_coordinates(self, x_vals = None, y_vals = None):
        """Adds coordinate labels as submobjects; returns self."""
        self.add(*self.get_coordinate_labels(x_vals, y_vals))
        return self
    def get_vector(self, coords, **kwargs):
        """Returns an Arrow from the origin to ``coords``."""
        # NOTE(review): `point` is computed but the Arrow is built from the
        # raw `coords` -- presumably equivalent for 2-vectors; confirm.
        point = coords[0]*RIGHT + coords[1]*UP
        arrow = Arrow(ORIGIN, coords, **kwargs)
        return arrow
    def prepare_for_nonlinear_transform(self, num_inserted_anchor_points = 50):
        """Densifies and smooths every line so nonlinear maps render curved."""
        for mob in self.family_members_with_points():
            num_anchors = mob.get_num_anchor_points()
            if num_inserted_anchor_points > num_anchors:
                mob.insert_n_anchor_points(num_inserted_anchor_points-num_anchors)
            mob.make_smooth()
        return self
| [
"[email protected]"
] | |
f85f6f39aa12d9bd44917d0f830d724ec3d6f956 | c42f7f7a8421103cc3ca8ee44673704f7eea22b1 | /src/utils/routes.py | 01fe170c5eaa9ad4e3f0cf77beb0b1f34279b976 | [
"MIT"
] | permissive | styleolder/fp-server | fe585fe73014eb0421b25d5579191d24276df250 | ae405e7c37a919bd73be567e3e098e7fe5524097 | refs/heads/master | 2020-03-21T12:05:36.250998 | 2018-06-24T13:07:51 | 2018-06-24T13:07:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # -*- coding:utf-8 -*-
"""
http uri 路由装饰器
"""
from utils import log as logger
class route(object):
    """Class decorator that registers HTTP request handlers for URI routing.

    Usage::

        @route('/some/path')
        class SomeRequestHandler(RequestHandler):
            pass

        @route('/some/path', name='other')
        class SomeOtherRequestHandler(RequestHandler):
            pass

        my_routes = route.make_routes(['api'])
    """

    # Shared registry of {'uri': ..., 'name': ..., 'handler': ...} records,
    # populated as handler modules are imported and decorated.
    _routes = []

    def __init__(self, uri, name=None):
        """Creates the decorator.

        @param uri  the URI to register; URI regular expressions are supported
        @param name optional alias for the URI; defaults to the URI with '/'
                    separators replaced by '-'
        """
        self.uri = uri
        if not name:
            name = '-'.join(uri.split('/'))
        self.name = name

    def __call__(self, _handler):
        """Called at class-decoration time: records the handler, unchanged."""
        for item in self._routes:
            # Duplicates are reported but still registered, preserving the
            # original permissive behavior.
            if item.get('uri') == self.uri:
                logger.error('uri aleady exists! uri:', self.uri, 'name:', self.name, 'handler:', _handler, caller=self)
            if item.get('name') == self.name:
                logger.warn('name aleady exists! uri:', self.uri, 'name:', self.name, 'handler:', _handler, caller=self)
        self._routes.append({'uri': self.uri, 'name': self.name, 'handler': _handler})
        return _handler

    @classmethod
    def make_routes(cls, dirs):
        """Imports handler packages and returns all registered routes.

        @param dirs list of importable module/package names; importing them
                    triggers the @route decorators they contain
        @return list of (uri, handler) tuples for the HTTP server
        """
        import importlib
        for module_name in dirs:
            # importlib.import_module replaces the original
            # exec('import %s' % dir): same import side effect, but without
            # executing a constructed source string or shadowing builtin dir.
            importlib.import_module(module_name)
        routes = []
        for handler_dic in cls._routes:
            logger.info('register uri:', handler_dic['uri'], 'handler:', handler_dic.get('handler'), caller=cls)
            routes.append((handler_dic.get('uri'), handler_dic.get('handler')))
        return routes
| [
"[email protected]"
] | |
82d322d9d2a24a4f17977671c69823b4c05dcae3 | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /207_3.py | a0c0add9e9275d90f0930004027fe8138ec29417 | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
graph={}
indegree=[0]*numCourses
for i in range(numCourses):
graph[i]=[]
for pair in prerequisites:
graph[pair[1]].append(pair[0])
indegree[pair[0]]+=1
res=[]
while True:
flag=0
for node in range(len(indegree)):
if indegree[node]==0:
indegree[node]=float("inf")
res.append(node)
for n in graph[node]:
indegree[n]-=1
del graph[node]
flag=1
break
if flag==0:
break
return len(res)==numCourses
a=Solution()
presp=[[1,0]]
num=2
print(a.canFinish(num,presp))
nums=2
psp=[[1,0],[0,1]]
print(a.canFinish(num,psp)) | [
"[email protected]"
] | |
3e67c476deabc53331ccd7582f0feff94455d632 | 31741f4807f857675f9304088b689af9b043e7b1 | /chp10/django_ecommerce/contact/views.py | 8160a48a0c26f4c0a9a9857aa9771927481e3ab1 | [] | no_license | ujrc/Realpython_book3 | c487ff0569f90b0e21c2c51cf951d6aad4755541 | aaff8db074b8dd33d6c7305ac0a94c2ef161c847 | refs/heads/master | 2021-01-10T02:02:11.247279 | 2016-01-11T17:06:59 | 2016-01-11T17:06:59 | 48,060,189 | 0 | 0 | null | 2015-12-31T16:48:52 | 2015-12-15T18:03:47 | Python | UTF-8 | Python | false | false | 755 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from .forms import ContactView
from django.contrib import messages
def contact(request):
    """Render the contact form and persist valid submissions.

    POST with a valid form: save the model instance, flash a confirmation
    message and redirect to the site root.  POST with an invalid form falls
    through and re-renders the bound form (with errors).  Any other method
    renders an empty form.
    """
    if request.method == 'POST':
        form = ContactView(request.POST)
        if form.is_valid():
            # NOTE(review): save(commit=False) followed immediately by save()
            # adds nothing unless fields are modified in between; it is kept
            # as-is here and only documented.
            our_form = form.save(commit=False)
            our_form.save()
            messages.add_message(
                request, messages.INFO, 'Your message has been sent. Thank you.'
            )
            return HttpResponseRedirect('/')
    else:
        form = ContactView()
    t = loader.get_template('contact/contact.html')
    c = RequestContext(request, {'form': form, })
    return HttpResponse(t.render(c))
| [
"[email protected]"
] | |
337d900284082e21087ff98ddb9d2bb64e6b8248 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_banked.py | 08d044b1996c25d485b9094eb3beb1112231d788 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _BANKED():
def __init__(self,):
self.name = "BANKED"
self.definitions = bank
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bank']
| [
"[email protected]"
] | |
54d72878eac09a4ed9b40f9ef8fdc315b10a7f4d | 99259216f11b15ec60446b4a141b3592a35560ce | /wex-python-api/test/test_json_node.py | c75004f2b70287a8c291d81ea8751f09dcf73ca6 | [] | no_license | adam725417/Walsin | 296ba868f0837077abff93e4f236c6ee50917c06 | 7fbefb9bb5064dabccf4a7e2bf49d2a43e0f66e9 | refs/heads/master | 2020-04-12T14:14:07.607675 | 2019-03-05T01:54:03 | 2019-03-05T01:54:03 | 162,546,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # coding: utf-8
"""
WEX REST APIs
Authentication methods - Basic Auth - JSON Web Token - [POST /api/v1/usermgmt/login](#!/User/signinUser) - [POST /api/v1/usermgmt/logout](#!/User/doLogout) - Python client sample [Download](/docs/wex-python-api.zip)
OpenAPI spec version: 12.0.2.417
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ibmwex
from ibmwex.rest import ApiException
from ibmwex.models.json_node import JsonNode
class TestJsonNode(unittest.TestCase):
    """Unit-test stubs for the generated JsonNode model."""

    def setUp(self):
        """No fixtures are needed by this stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testJsonNode(self):
        """Placeholder for a JsonNode construction test.

        FIXME: construct the object with its mandatory attributes set to
        example values, e.g.
        ``model = ibmwex.models.json_node.JsonNode()``.
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
67ceb865d11bf7d82086694f8879b057f68bf848 | 864285315c3a154639355f14ab1ff14633576405 | /mapclientplugins/segmentationstep/tools/handlers/abstractselection.py | 4315d4f55f509d3c4b410e9a7a07ad7b29f48cb1 | [] | no_license | hsorby/segmentationstep | 774dc537967c9643bd0094dc4e64eefa472588b0 | 321505374f9434ac0ae832b0b00398c2d4ac1fbe | refs/heads/main | 2021-09-28T09:06:07.197158 | 2015-08-14T07:59:55 | 2015-08-14T07:59:55 | 21,375,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,527 | py | '''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>..
'''
from PySide import QtCore
from mapclientplugins.segmentationstep.tools.handlers.abstracthandler import AbstractHandler
from mapclientplugins.segmentationstep.zincutils import setGlyphSize, setGlyphOffset, COORDINATE_SYSTEM_LOCAL, \
createSelectionBox
from mapclientplugins.segmentationstep.undoredo import CommandSelection
from mapclientplugins.segmentationstep.definitions import SELECTION_BOX_3D_GRAPHIC_NAME
class SelectionMode(object):
    """Constants describing the current selection interaction state."""
    NONE = -1      # no selection gesture in progress
    EXCULSIVE = 0  # (sic) replace the current selection with the pick
    ADDITIVE = 1   # toggle picked nodes in/out of the current selection
class AbstractSelection(AbstractHandler):
    """Mouse-tool base adding rubber-band and single-node selection.

    Shift + left button starts an exclusive selection gesture; also holding
    Alt makes it additive (toggling).  Every completed gesture is recorded on
    the undo/redo stack as a CommandSelection.
    """
    def __init__(self, plane, undo_redo_stack):
        super(AbstractSelection, self).__init__(plane, undo_redo_stack)
        # Graphic used to visualise the drag rectangle in the scene.
        self._selection_box = createSelectionBox(plane.getRegion(), SELECTION_BOX_3D_GRAPHIC_NAME)
        self._selection_mode = SelectionMode.NONE
        self._selection_position_start = None
    def mousePressEvent(self, event):
        """Starts a selection gesture on Shift+LMB; otherwise defers to base."""
        self._selection_mode = SelectionMode.NONE
        if event.modifiers() & QtCore.Qt.SHIFT and event.button() == QtCore.Qt.LeftButton:
            self._selection_position_start = [event.x(), event.y()]
            self._selection_mode = SelectionMode.EXCULSIVE
            if event.modifiers() & QtCore.Qt.ALT:
                self._selection_mode = SelectionMode.ADDITIVE
            # Snapshot the selection so undo/redo can restore it on release.
            self._start_selection = self._model.getCurrentSelection()
        else:
            super(AbstractSelection, self).mousePressEvent(event)
    def mouseMoveEvent(self, event):
        """Resizes the on-screen selection box while a gesture is active."""
        if self._selection_mode != SelectionMode.NONE:
            x = event.x()
            y = event.y()
            xdiff = float(x - self._selection_position_start[0])
            ydiff = float(y - self._selection_position_start[1])
            # Guard against division by ~zero for degenerate drags.
            if abs(xdiff) < 0.0001:
                xdiff = 1
            if abs(ydiff) < 0.0001:
                ydiff = 1
            xoff = float(self._selection_position_start[0]) / xdiff + 0.5
            yoff = float(self._selection_position_start[1]) / ydiff + 0.5
            scene = self._selection_box.getScene()
            scene.beginChange()
            # Size/offset make the box glyph span from the gesture's start
            # position to the current cursor position.
            setGlyphSize(self._selection_box, [xdiff, -ydiff, 0.999])
            setGlyphOffset(self._selection_box, [xoff, yoff, 0])
            self._selection_box.setVisibilityFlag(True)
            scene.endChange()
        else:
            super(AbstractSelection, self).mouseMoveEvent(event)
    def mouseReleaseEvent(self, event):
        """Completes the gesture: picks nodes, updates the selection group,
        and pushes a CommandSelection onto the undo/redo stack."""
        if self._selection_mode != SelectionMode.NONE:
            x = event.x()
            y = event.y()
            # Construct a small frustrum to look for nodes in.
            region = self._model.getRegion()
            region.beginHierarchicalChange()
            self._selection_box.setVisibilityFlag(False)
            selection_group = self._model.getSelectionGroupField()
            if (x != self._selection_position_start[0] and y != self._selection_position_start[1]):
                # Rubber-band pick over the dragged rectangle.
                left = min(x, self._selection_position_start[0])
                right = max(x, self._selection_position_start[0])
                bottom = min(y, self._selection_position_start[1])
                top = max(y, self._selection_position_start[1])
                self._zinc_view.setPickingRectangle(COORDINATE_SYSTEM_LOCAL, left, bottom, right, top)
                if self._selection_mode == SelectionMode.EXCULSIVE:
                    selection_group.clear()
                self._zinc_view.addPickedNodesToFieldGroup(selection_group)
            else:
                # Degenerate drag: treat as a single-node pick at the cursor.
                node = self._zinc_view.getNearestNode(x, y)
                if self._selection_mode == SelectionMode.EXCULSIVE and not node.isValid():
                    selection_group.clear()
                if node.isValid():
                    group = self._model.getSelectionGroup()
                    if self._selection_mode == SelectionMode.EXCULSIVE:
                        # Re-picking the only selected node deselects it.
                        remove_current = group.getSize() == 1 and group.containsNode(node)
                        selection_group.clear()
                        if not remove_current:
                            group.addNode(node)
                    elif self._selection_mode == SelectionMode.ADDITIVE:
                        # Toggle the node's membership in the selection.
                        if group.containsNode(node):
                            group.removeNode(node)
                        else:
                            group.addNode(node)
            # Record the before/after selection for undo/redo.
            end_selection = self._model.getCurrentSelection()
            c = CommandSelection(self._model, self._start_selection, end_selection)
            self._undo_redo_stack.push(c)
            region.endHierarchicalChange()
            self._selection_mode = SelectionMode.NONE
        else:
            super(AbstractSelection, self).mouseReleaseEvent(event)
| [
"[email protected]"
] | |
6fbd126342d2762103a2aff7486d0ce1305afb29 | 28297b7172bad2e427db185d449056340be2a429 | /src/join_pairs.py | 3bca92e18b11ac0a0c113c6e7492e6e049cf7c5b | [] | no_license | audy/cd-hit-that | 6a3480c01c7930751325acbd716202ad514562da | 27922835ebace8bcdcf8d7118ec2e05e11e5e9fa | refs/heads/master | 2021-01-01T15:31:01.604403 | 2011-08-02T20:33:47 | 2011-08-02T20:33:47 | 1,357,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | #!/usr/bin/env python
# outputs a FASTQ file but with its filename in the header (sorta)
# Also puts paired reads together with their 5' ends touching
# This is for clustering
# Takes input from STDIN
# NOTE(review): Python 2 only -- relies on string.maketrans for str, the
# iterator .next() method and (in the final summary line) `print >> sys.stderr`.
import sys
import os
from itertools import cycle
import string
# Reverse-complement table; R<->Y keeps IUPAC purine/pyrimidine ambiguity
# codes consistent after complementing.
_complement = string.maketrans('GATCRYgatcry','CTAGYRctagyr')
c = cycle([0, 1])  # alternates 0/1: which mate of the pair we are reading
seq = { 0: '', 1: ''}
i = 0  # running pair counter, hex-encoded into the output header
infile = sys.argv[1]
minimum_read_length = int(sys.argv[2])
# Numeric suffix of the input filename, embedded in every output header.
f_num = int(infile.split('_')[-1].split('.')[0])
kept, skipped = 0, 0
with open(infile) as handle:
    for line in handle:
        if line.startswith('>'):
            n = c.next()
            i += 1
            if n == 1:
                header = '>%s:%s' % (f_num, hex(i)[2:])
        else:
            seq[n] += line.strip()
            # NOTE(review): this branch runs for every sequence line while
            # n == 1, so the emit-and-reset logic assumes one sequence line
            # per FASTA record; multi-line records would be emitted more
            # than once.  Confirm the upstream format before reuse.
            if n == 1:
                # Reverse-complement 3' pair
                seq[1] = seq[1].translate(_complement)[::-1]
                # Make sure reads are minimum length
                if (len(seq[0]) >= minimum_read_length) \
                and (len(seq[1]) >= minimum_read_length):
                    print header
                    # 3' mate first so the two 5' ends meet in the middle.
                    print '%s%s' % (seq[1], seq[0])
                    kept +=1
                else:
                    skipped +=1
                seq = { 0: '', 1: ''}
print >> sys.stderr, "kept: %.2f percent of pairs (%s : %s)" % (float(kept)/(skipped + kept), skipped, kept) | [
"[email protected]"
] | |
ac4d6e76ee26b19ee2ff04a77b386ed4cf0059c9 | f7c1282dd377b95621436587fd2a6cb28a455d74 | /om_hr_payroll/__manifest__.py | 2c39fcc961672ef5c2c5369414d6b86b5a869f74 | [] | no_license | odoomates/odooapps | a22fa15346694563733008c42549ebc0da7fc9f6 | 459f3b25d31da24043523e72f8be09af9a1e67e9 | refs/heads/master | 2023-08-11T15:25:28.508718 | 2022-10-14T07:58:36 | 2022-10-14T07:58:36 | 173,598,986 | 182 | 306 | null | 2023-08-10T17:58:46 | 2019-03-03T16:20:23 | Python | UTF-8 | Python | false | false | 1,550 | py | # -*- coding:utf-8 -*-
# Odoo module manifest: a single dict literal read at module discovery time.
{
    'name': 'Odoo 16 HR Payroll',
    'category': 'Generic Modules/Human Resources',
    'version': '16.0.1.0.0',
    'sequence': 1,
    'author': 'Odoo Mates, Odoo SA',
    'summary': 'Payroll For Odoo 16 Community Edition',
    'live_test_url': 'https://www.youtube.com/watch?v=0kaHMTtn7oY',
    'description': "Odoo 16 Payroll, Payroll Odoo 16, Odoo Community Payroll",
    'website': 'https://www.odoomates.tech',
    'license': 'LGPL-3',
    # Modules that must be installed before this one.
    'depends': [
        'mail',
        'hr_contract',
        'hr_holidays',
    ],
    # Data files loaded in order on install/update; security definitions come
    # first so the views and wizards loaded later can rely on them.
    'data': [
        'security/hr_payroll_security.xml',
        'security/ir.model.access.csv',
        'data/hr_payroll_sequence.xml',
        'data/hr_payroll_category.xml',
        'data/hr_payroll_data.xml',
        'wizard/hr_payroll_payslips_by_employees_views.xml',
        'views/hr_contract_type_views.xml',
        'views/hr_contract_views.xml',
        'views/hr_salary_rule_views.xml',
        'views/hr_payslip_views.xml',
        'views/hr_employee_views.xml',
        'views/hr_payroll_report.xml',
        'wizard/hr_payroll_contribution_register_report_views.xml',
        'views/res_config_settings_views.xml',
        'views/report_contribution_register_templates.xml',
        'views/report_payslip_templates.xml',
        'views/report_payslip_details_templates.xml',
        'views/hr_contract_history_views.xml',
        'views/hr_leave_type_view.xml',
        'data/mail_template.xml',
    ],
    'images': ['static/description/banner.png'],
    'application': True,
}
| [
"[email protected]"
] | |
44f758bb7c8d4183146ac4198ba226b5ea1ab1a6 | ea515ab67b832dad3a9b69bef723bd9d918395e7 | /03_Implementacao/DataBase/true_or_false_question_while_and_for_cicles/question/version_2/answers_program.py | bce77d50a8726979edc4b446b00a9c0313e7c11d | [] | no_license | projeto-exercicios/Exercicios-Python-de-correccao-automatica | b52be3211e75d97cb55b6cdccdaa1d9f9d84f65b | a7c80ea2bec33296a3c2fbe4901ca509df4b1be6 | refs/heads/master | 2022-12-13T15:53:59.283232 | 2020-09-20T21:25:57 | 2020-09-20T21:25:57 | 295,470,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | answer_1_true = while_cicle(48)
# NOTE(review): `while_cicle`, `p` and `print_indexes` (and `answer_1_true`,
# assigned just above) are not defined in this file; they presumably come
# from the exercise module this answer sheet is run against -- confirm
# before executing stand-alone.
answer_2_true = p
answer_3_true = print_indexes(69)
print(answer_1_true)
print(answer_2_true)
print(answer_3_true)
| [
"[email protected]"
] | |
42a1f97987615325f30edc75f358e38ff7f7ba56 | 450916eee7580beb928ed8f387db4f0a8c1aa508 | /src/amuse/community/petar/__init__.py | 4b06f767ad173c110590200c195514cd334c5292 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | amusecode/amuse | 42095545893f5a86ea79c2a52ce54d3ce8eb204f | b57c1e2fda1457d5025307be105c2aa59b19b574 | refs/heads/main | 2023-08-31T04:50:48.880044 | 2023-08-30T12:00:20 | 2023-08-30T12:00:20 | 18,516,331 | 158 | 118 | Apache-2.0 | 2023-08-30T12:00:22 | 2014-04-07T12:35:07 | AMPL | UTF-8 | Python | false | false | 29 | py | from .interface import Petar
| [
"[email protected]"
] | |
f0284d22965f628a9a0b899b316fe6e649b59ee5 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /same_place/own_number/bad_way_or_year/different_week_or_able_work.py | 66bfb32cdb2de9f544d9b8a983695a71eb049913 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#! /usr/bin/env python
def place(str_arg):
find_world_by_case(str_arg)
print('point_and_little_problem')
def find_world_by_case(str_arg):
print(str_arg)
if __name__ == '__main__':
place('last_day')
| [
"[email protected]"
] | |
1ae237d50f3b39abb4962276db742147c966c2c6 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/l3ext/domdef.py | d549d045a6b6b4cb4b650cffb5b307b2cc6edf07 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,636 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DomDef(Mo):
"""
This is generated and used only by internal processes.
"""
meta = ClassMeta("cobra.model.l3ext.DomDef")
meta.moClassName = "l3extDomDef"
meta.rnFormat = "l3dom-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Outside L3 Domain"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x80384001000601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.infra.RtDomAtt")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.extnw.RtL3InstPToDomP")
meta.childClasses.add("cobra.model.infra.RsVlanNs")
meta.childClasses.add("cobra.model.extnw.RtL3DomAtt")
meta.childClasses.add("cobra.model.infra.RtDomRef")
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.extnw.LblCont")
meta.childClasses.add("cobra.model.infra.RtLDevDomP")
meta.childClasses.add("cobra.model.infra.RtDomP")
meta.childClasses.add("cobra.model.infra.RsVipAddrNs")
meta.childClasses.add("cobra.model.infra.RtDynPathAtt")
meta.childClasses.add("cobra.model.extnw.RsOut")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.infra.RsDomVxlanNsDef")
meta.childClasses.add("cobra.model.infra.RsVlanNsDef")
meta.childClasses.add("cobra.model.infra.RtExtDevDomP")
meta.childClasses.add("cobra.model.infra.RtNicProfToDomP")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.infra.RtDomDef")
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtNicProfToDomP", "rtextdevNicProfToDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.RtL3InstPToDomP", "rtl3extL3InstPToDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDynPathAtt", "rtl3extDynPathAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.RtL3DomAtt", "rtl3extL3DomAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtExtDevDomP", "rtedmExtDevDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsDomVxlanNsDef", "rsdomVxlanNsDef"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomDef", "rtextdevDomDef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtLDevDomP", "rtvnsLDevDomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomRef", "rtedmDomRef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomAtt", "rtfvDomAtt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsVipAddrNs", "rsvipAddrNs"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsVlanNsDef", "rsvlanNsDef"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RsVlanNs", "rsvlanNs"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.LblCont", "lblcont"))
meta.childNamesAndRnPrefix.append(("cobra.model.infra.RtDomP", "rtdomP-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.extnw.RsOut", "rsout-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fv.RtdEpP")
meta.superClasses.add("cobra.model.infra.ADomP")
meta.superClasses.add("cobra.model.infra.DomP")
meta.superClasses.add("cobra.model.l3ext.ADomP")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Dom")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.fv.ADomP")
meta.superClasses.add("cobra.model.pol.Cont")
meta.superClasses.add("cobra.model.extnw.DomP")
meta.rnPrefixes = [
('l3dom-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "configIssues", "configIssues", 4941, PropCategory.REGULAR)
prop.label = "Configuration Issues"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("cdp-lldp-collision", "both-cdp-policy-and-lldp-policy-are-configured-for-attach-entity-profile", 16)
prop._addConstant("enhanced-lacp-lag-creation-skipped", "enhanced-lacp-lag-policy-creation-skipped,-dvs-has-lacp-v1-enabled", 4096)
prop._addConstant("invalid-mcast-addr", "missing-multicast-address-for-vxlan-mode", 512)
prop._addConstant("invalid-port", "invalid-port-for-fabric-interface", 1024)
prop._addConstant("invalid-vxlan-ns-range", "vxlan-range-below-0x800000-is-not-valid-for-n1kv-ns-mode", 128)
prop._addConstant("missing-assoc-attEntP", "domain-is-missing-association-from-attach-entity-profile", 8)
prop._addConstant("missing-encap", "invalid-or-missing-encapsulation", 1)
prop._addConstant("missing-encapblk", "invalid-or-missing-encapsulation-blocks", 4)
prop._addConstant("missing-epg", "association-to-end-point-group-not-specified", 2)
prop._addConstant("missing-internal-vlan-blk", "missing-internal-vlan-encapsulation-blocks", 2048)
prop._addConstant("missing-ns-assoc", "invalid-or-missing-association-to-vlan-or-vxlan-namespace", 256)
prop._addConstant("multiple-cdp", "more-than-one-cdp-policy-found-for-attach-entity-profile", 64)
prop._addConstant("multiple-lldp", "more-than-one-lldp-policy-found-for-attach-entity-profile", 32)
prop._addConstant("none", "n/a", 0)
meta.props.add("configIssues", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14212, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 6853, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15232, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15233, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "targetDscp", "targetDscp", 1625, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.defaultValue = 64
prop.defaultValueStr = "unspecified"
prop._addConstant("AF11", "af11-low-drop", 10)
prop._addConstant("AF12", "af12-medium-drop", 12)
prop._addConstant("AF13", "af13-high-drop", 14)
prop._addConstant("AF21", "af21-low-drop", 18)
prop._addConstant("AF22", "af22-medium-drop", 20)
prop._addConstant("AF23", "af23-high-drop", 22)
prop._addConstant("AF31", "af31-low-drop", 26)
prop._addConstant("AF32", "af32-medium-drop", 28)
prop._addConstant("AF33", "af33-high-drop", 30)
prop._addConstant("AF41", "af41-low-drop", 34)
prop._addConstant("AF42", "af42-medium-drop", 36)
prop._addConstant("AF43", "af43-high-drop", 38)
prop._addConstant("CS0", "cs0", 0)
prop._addConstant("CS1", "cs1", 8)
prop._addConstant("CS2", "cs2", 16)
prop._addConstant("CS3", "cs3", 24)
prop._addConstant("CS4", "cs4", 32)
prop._addConstant("CS5", "cs5", 40)
prop._addConstant("CS6", "cs6", 48)
prop._addConstant("CS7", "cs7", 56)
prop._addConstant("EF", "expedited-forwarding", 46)
prop._addConstant("VA", "voice-admit", 44)
prop._addConstant("unspecified", "unspecified", 64)
meta.props.add("targetDscp", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Path"
meta.deploymentQueryPaths.append(DeploymentPathMeta("ADomPToEthIf", "Interface", "cobra.model.l1.EthIf"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
d22510d282ed3e0b33f8d3e501117b4b8527cca0 | 91438802ee114b2fb945aae4105a17993dd6953d | /build/learning_ros_noetic/Part_5/ur10_robot/ur_traj_client/catkin_generated/pkg.installspace.context.pc.py | 4807c4137df7658f74a42609d61315e95299f603 | [] | no_license | AlexLam616/Baxter-robot | 3a4cef31fe46da0fdb23c0e3b5808d84b412d037 | d10fdcd35f29427ca14bb75f14fa9c64af3b028c | refs/heads/master | 2023-05-12T01:25:56.454549 | 2021-05-25T02:02:09 | 2021-05-25T02:02:09 | 367,070,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;actionlib;trajectory_msgs;control_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur_traj_client"
PROJECT_SPACE_DIR = "/home/alex/workspace/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
fef8619855d686a10de3b4cc6d72b631190df666 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2282.py | f467a7480d13917624dc75ae91326fb1c6115b5b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | def rec_stall(n):
res = []
if n == 1: return stalls[1]
if n == 2: return stalls[2]
if n == 3: return stalls[3]
if n%2 == 0:
a = rec_stall(n/2)
b = rec_stall(n/2-1)
res.extend([[n/2-1,n/2]])
c = [list(x) for x in zip(a,b)]
c = [val for sublist in c for val in sublist]
res.extend(c)
res.extend([[0,0]])
return res
else:
a = rec_stall(n/2)
res.extend([[n/2,n/2]])
c = [list(x) for x in zip(a,a)]
c = [val for sublist in c for val in sublist]
res.extend(c)
res.extend([[0,0]])
return res
stalls = [0,0,0,0]
stalls[1] = [[0,0]]
stalls[2] = [[0,1],[0,0]]
stalls[3] = [[1,1],[0,0],[0,0]]
#stalls[4] = [[1,2],[0,1],[0,0],[0,0]]
#stalls[5] = [[2,2],[0,1],[0,1],[0,0],[0,0]]
#stalls[6] = [[2,3],[1,1],[0,1],[0,0],[0,0],[0,0]]
#print 1,rec_stall(1)
#print 2,rec_stall(2)
#print 3,rec_stall(3)
#print 4,rec_stall(4)
#print 5,rec_stall(5)
#print 6,rec_stall(6)
#print 7,rec_stall(7)
#print 8,rec_stall(8)
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
n, m = [int(s) for s in raw_input().split(" ")] # read a list of integers, 2 in this case
if n == m:
print "Case #{}: {} {}".format(i, 0, 0)
continue
s = rec_stall(n)
#print "Case #{}: {} {}", i, s, n, m, max(s[m-1]), min(s[m-1])
print "Case #{}: {} {}".format(i, max(s[m-1]), min(s[m-1]))
| [
"[email protected]"
] | |
2f819d9b7131ebb5ab3ba5de2b16433c41ef6657 | da7a893f0dc9c130b5f8c29d4875e7c5d98ac64f | /code-slides/0019-fib-more-fast-examples.py | 8dbdcdff07628e4544477b3860838a7d9f952cf8 | [] | no_license | py-yyc/decorators | a489d89869582a9127a5272e9342b8131ad91fe3 | bd7c65b78b3f00cf8da216eab945f3ef26c1b2a8 | refs/heads/master | 2020-06-20T18:29:59.884497 | 2016-02-23T21:48:09 | 2016-02-23T21:48:09 | 52,392,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | from __future__ import print_function # noslide
## <h1>how decorators work</h1>
from time import time # noslide
from contextlib import contextmanager # noslide
@contextmanager # noslide
def timer(): # noslide
s = time() # noslide
yield # noslide
print("took {:.6f}s".format(time() - s)) # noslide
def memoize(fn): # noslide
cache = {} # noslide
def wrapper(*args): # noslide
try: # noslide
return cache[args] # noslide
except KeyError: # noslide
r = fn(*args) # noslide
cache[args] = r # noslide
return r # noslide
return wrapper # noslide
@memoize # noslide
def fib(x): # noslide
if x in [1, 2]: # noslide
return 1 # noslide
return fib(x - 1) + fib(x - 2) # noslide
with timer():
print("fib(100) =", fib(100))
with timer():
print("fib(200) =", fib(200))
## show-output
| [
"[email protected]"
] | |
5d7458f2c2d6962f5be50183283d1437f8dc2908 | 68f757e7be32235c73e316888ee65a41c48ecd4e | /python_book(이것이 코딩테스트다)/03 그리디/3-2 큰수의 법칙.py | b1f160381acf71c965174db1df86f096e154ed49 | [] | no_license | leejongcheal/algorithm_python | b346fcdbe9b1fdee33f689477f983a63cf1557dc | f5d9bc468cab8de07b9853c97c3db983e6965d8f | refs/heads/master | 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | n, m, k = map(int,input().split())
L = list(map(int,input().split()))
f = max(L)
L.remove(f)
s = max(L)
flag = 0
result = 0
for i in range(m):
flag += 1
if flag >= k:
result += s
flag = 0
else:
result += f
print(result)
| [
"[email protected]"
] | |
fd13a713a5caf9c48c60ad83f504415838e20c7c | 710d2f31b6808187c4895a618101c25b36d25b3c | /backend/home/migrations/0002_customtext_homepage_message.py | 306e32a28d93fb23006c97f446b674b7a449a077 | [] | no_license | crowdbotics-apps/haplen-28237 | 920231c921a3aa490acc97a7debacc6858c520de | 938cef97a8534941cb5f4de9684c95361ae27ef3 | refs/heads/master | 2023-05-31T20:26:37.979544 | 2021-06-25T20:12:49 | 2021-06-25T20:12:49 | 380,344,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | # Generated by Django 2.2.20 on 2021-06-25 20:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('dating', '0001_initial'),
('home', '0001_load_initial_data'),
]
operations = [
migrations.CreateModel(
name='CustomText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='HomePage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('created', models.DateTimeField(auto_now_add=True)),
('inbox', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_inbox', to='dating.Inbox')),
('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_match', to='dating.Match')),
],
),
]
| [
"[email protected]"
] | |
9df13b60fe7e01a4f1d644f01e37df3490769058 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp156_6000.py | d472674e752a50fc2c150d7c07922e1993e4e721 | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,771 | py | ITEM: TIMESTEP
6000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-2.7215234007832265e+00 4.9921523400776607e+01
-2.7215234007832265e+00 4.9921523400776607e+01
-2.7215234007832265e+00 4.9921523400776607e+01
ITEM: ATOMS id type xs ys zs
1622 1 0.483866 0.6696 0.0502502
1525 1 0.0892975 0.141091 0.0758595
815 1 0.205158 0.0643884 0.178165
585 1 0.0492387 0.136937 0.19374
18 1 0.106243 0.0579593 0.0985426
155 1 0.0507332 0.042377 0.152979
1496 1 0.417022 0.200629 0.381304
1871 1 0.0493648 0.066403 0.0397444
1238 1 0.344257 0.110556 0.0896849
1357 1 0.192934 0.0796272 0.0895359
852 1 0.0236587 0.513427 0.154936
1254 1 0.298657 0.980446 0.0572751
1641 1 0.161095 0.159221 0.0760137
1748 1 0.357619 0.0908315 0.174926
586 1 0.268144 0.119384 0.0504549
549 1 0.343336 0.16553 0.156985
1322 1 0.246871 0.0327068 0.120771
1352 1 0.479376 0.59961 0.260596
1359 1 0.451406 0.0179069 0.0734731
420 1 0.272947 0.131492 0.136173
1334 1 0.331776 0.24152 0.113246
1420 1 0.387949 0.0423638 0.0589836
1293 1 0.192485 0.681525 0.0235019
1759 1 0.302003 0.0578136 0.0901349
804 1 0.16988 0.31171 0.13637
1423 1 0.01381 0.332269 0.178131
1268 1 0.471821 0.118791 0.462719
671 1 0.0260388 0.349169 0.430271
1620 1 0.109044 0.333926 0.0059734
733 1 0.226852 0.140259 0.22027
2027 1 0.124392 0.183455 0.15116
1670 1 0.410592 0.651008 0.0647709
896 1 0.357546 0.692507 0.0203623
1932 1 0.224796 0.0761756 0.243876
1073 1 0.302701 0.177569 0.080904
1771 1 0.191654 0.169475 0.146355
1085 1 0.192225 0.276039 0.0551283
2018 1 0.270152 0.203908 0.154698
1106 1 0.229948 0.265733 0.126304
1222 1 0.247279 0.213697 0.0558782
560 1 0.0308248 0.277388 0.271511
443 1 0.446264 0.203666 0.00346344
1973 1 0.306554 0.249728 0.0489461
697 1 0.374909 0.173534 0.0854787
1734 1 0.441042 0.11816 0.0688306
1382 1 0.386391 0.00553615 0.122214
1261 1 0.0157243 0.572904 0.275918
726 1 0.483122 0.188514 0.0808325
708 1 0.138855 0.376148 0.058544
745 1 0.137014 0.382484 0.13446
857 1 0.317975 0.727283 0.476744
1912 1 0.00382869 0.388565 0.13246
1168 1 0.132705 0.369722 0.252162
299 1 0.122185 0.310537 0.0816508
493 1 0.138899 0.8265 0.472695
636 1 0.0725038 0.41664 0.0768961
1830 1 0.4183 0.815832 0.0205347
1471 1 0.240489 0.376823 0.110267
314 1 0.364801 0.314022 0.0687149
77 1 0.276422 0.309631 0.0836881
61 1 0.213686 0.365702 0.0458131
1257 1 0.327797 0.373794 0.12158
1651 1 0.282014 0.357026 0.0261893
1782 1 0.49329 0.255314 0.041978
1544 1 0.0399534 0.265682 0.199401
399 1 0.201333 0.900543 0.0454325
21 1 0.46716 0.338451 0.0475941
1964 1 0.458008 0.459357 0.331051
1686 1 0.305292 0.408094 0.191945
790 1 0.318827 0.561141 0.00697293
1249 1 0.489129 0.585931 0.445332
639 1 0.378009 0.477792 0.0241742
1637 1 0.073647 0.467397 0.133855
1189 1 0.00585068 0.199621 0.255641
554 1 0.140448 0.445631 0.0874117
634 1 0.149596 0.456036 0.15993
798 1 0.218015 0.462438 0.0703786
1456 1 0.0244078 0.968952 0.197645
1450 1 0.0710672 0.585753 0.0546782
1048 1 0.416368 0.0729733 0.349807
1172 1 0.219372 0.595219 0.0315721
1020 1 0.0102313 0.853479 0.0509291
521 1 0.290558 0.446694 0.0664316
2014 1 0.213763 0.503303 0.15461
410 1 0.362256 0.545075 0.0657383
1319 1 0.00204304 0.322992 0.328492
1064 1 0.386025 0.475469 0.0918908
1897 1 0.204548 0.571401 0.173788
767 1 0.0194324 0.117569 0.292348
1107 1 0.12076 0.872165 0.32345
461 1 0.384477 0.529569 0.146594
552 1 0.0415801 0.440072 0.0137688
13 1 0.373904 0.871252 0.487999
407 1 0.40633 0.650536 0.41665
1183 1 0.410172 0.405893 0.0323969
1376 1 0.133309 0.611886 0.16266
1980 1 0.164674 0.68791 0.0877057
1439 1 0.356244 0.78798 0.427554
1709 1 0.0935858 0.475377 0.212098
1522 1 0.132965 0.548713 0.20603
1981 1 0.0519103 0.673502 0.140625
680 1 0.0539551 0.582624 0.127162
1494 1 0.0621276 0.505935 0.0748198
494 1 0.194497 0.586566 0.256727
1452 1 0.193198 0.529091 0.077242
1958 1 0.284743 0.601646 0.211071
1607 1 0.238582 0.718498 0.115319
1719 1 0.343217 0.642765 0.178728
26 1 0.263245 0.655388 0.0723762
1069 1 0.275149 0.660345 0.154563
806 1 0.195298 0.661058 0.147522
1557 1 0.25855 0.712974 0.201067
821 1 0.339161 0.582091 0.275418
487 1 0.340012 0.614336 0.0578631
517 1 0.421659 0.671662 0.154329
1788 1 0.39392 0.591654 0.109754
1722 1 0.352657 0.682255 0.108319
1684 1 0.440434 0.720711 0.0905676
995 1 0.388555 0.411849 0.362093
371 1 0.442397 0.590743 0.0396707
470 1 0.017124 0.2041 0.114881
653 1 0.487129 0.20578 0.469929
1530 1 0.110026 0.843105 0.205851
682 1 0.104818 0.751129 0.0990502
1061 1 0.0871148 0.85978 0.0427872
982 1 0.174039 0.749127 0.142528
367 1 0.166628 0.895638 0.112145
1821 1 0.196858 0.438413 0.464433
381 1 0.10792 0.772422 0.18576
642 1 0.0818784 0.818484 0.132096
510 1 0.469564 0.872451 0.36641
1921 1 0.152271 0.822168 0.143109
1634 1 0.222516 0.747744 0.0550109
1837 1 0.193264 0.778491 0.212812
880 1 0.221796 0.799142 0.142191
954 1 0.247516 0.290947 0.00564548
246 1 0.221438 0.822577 0.0767767
266 1 0.156852 0.787589 0.0513247
970 1 0.303509 0.726039 0.0659358
1702 1 0.337957 0.81078 0.197879
637 1 0.330978 0.73895 0.150336
66 1 0.446756 0.808833 0.119088
488 1 0.37115 0.75709 0.0557082
503 1 0.411781 0.747182 0.155946
1742 1 0.285631 0.853902 0.0713214
1218 1 0.0659605 0.242343 0.0538262
404 1 0.086972 0.895618 0.120177
1169 1 0.181257 0.0411055 0.0173737
483 1 0.0751966 0.912805 0.188378
1438 1 0.204136 0.981767 0.145377
193 1 0.146352 -0.00176401 0.0738577
673 1 0.269764 0.679185 0.430396
1895 1 0.469652 0.369803 0.349189
1834 1 0.332902 0.413315 0.0236871
610 1 0.0673337 0.98407 0.0512626
1807 1 0.153785 0.0312405 0.496127
1281 1 0.227514 0.982361 0.0670085
408 1 0.260453 0.0496816 0.0373235
1878 1 0.301151 0.166323 0.003567
357 1 0.31351 0.997439 0.135443
1515 1 0.273072 0.940571 0.181624
1078 1 0.235561 0.912142 0.125067
184 1 0.376028 0.952468 0.0190259
164 1 0.453359 0.290961 0.272484
48 1 0.29736 0.793669 0.118187
1392 1 0.415483 0.881458 0.0997325
1385 1 0.363477 0.826667 0.127686
486 1 0.354476 0.934201 0.108967
1694 1 0.322314 0.880581 0.159394
428 1 0.389438 0.327637 0.483379
1448 1 0.0983204 0.904983 0.263716
472 1 0.466976 0.945951 0.327448
1049 1 0.185899 0.277118 0.478588
57 1 0.13024 0.0301861 0.2766
1528 1 0.0919083 0.00390295 0.200257
530 1 0.161331 0.121979 0.195784
927 1 0.102341 0.0848775 0.227258
2045 1 0.03147 0.182296 0.336931
1988 1 0.0688094 0.00742275 0.298411
1735 1 0.245548 0.00274071 0.202496
578 1 0.273665 0.0502451 0.296492
1132 1 0.201507 0.0554274 0.335264
360 1 0.3725 0.147153 0.258049
529 1 0.33227 0.0795435 0.248999
1671 1 0.372516 0.0227447 0.29961
182 1 0.473602 0.240855 0.13003
1030 1 0.432889 0.123131 0.199093
1408 1 0.369683 0.228965 0.242896
1425 1 0.486934 0.0308626 0.146846
1615 1 0.45293 0.186878 0.241639
1273 1 0.450049 0.158815 0.132616
356 1 0.41751 0.070802 0.12054
689 1 0.223078 0.667828 0.490259
364 1 0.269796 0.1868 0.426349
103 1 0.0796575 0.242 0.132427
786 1 0.15976 0.101113 0.289875
1789 1 0.256899 0.514566 0.47629
1011 1 0.106414 0.183485 0.233839
67 1 0.0568578 0.199758 0.429103
865 1 0.0883007 0.290357 0.335037
365 1 0.312385 0.136311 0.21799
280 1 0.400452 0.221167 0.140076
105 1 0.33618 0.245459 0.18476
794 1 0.196124 0.171117 0.279133
770 1 0.186666 0.244595 0.24936
388 1 0.133146 0.116073 0.132097
1 1 0.282572 0.0660715 0.180107
1822 1 0.295918 0.209139 0.230935
1766 1 0.00825946 0.517181 0.0255241
971 1 0.275141 0.555078 0.418052
323 1 0.303919 0.28077 0.263427
261 1 0.376522 0.296108 0.286098
808 1 0.0613197 0.722629 0.047541
684 1 0.378485 0.286979 0.138742
267 1 0.432296 0.253296 0.329693
1876 1 0.295073 0.173664 0.291593
353 1 0.111802 0.283701 0.256865
776 1 0.162063 0.23368 0.118168
1050 1 0.403528 0.163103 0.469499
877 1 0.239027 0.437142 0.188904
80 1 0.414975 0.0371346 0.456889
1311 1 0.0693176 0.340679 0.268739
202 1 0.0855497 0.305023 0.420641
135 1 0.0880526 0.33027 0.203097
24 1 0.260733 0.274514 0.199533
1656 1 0.30762 0.298064 0.144493
490 1 0.20643 0.373654 0.281525
1117 1 0.180265 0.318149 0.220319
717 1 0.237498 0.30777 0.265178
1582 1 0.199886 0.346939 0.372689
1144 1 0.163035 0.471136 0.373726
1936 1 0.272606 0.29563 0.332409
1609 1 0.0196685 0.865747 0.394211
1956 1 0.419358 0.253246 0.0781785
277 1 0.252112 0.352262 0.178309
406 1 0.48257 0.393363 0.243109
2024 1 0.350552 0.383975 0.243983
415 1 0.374929 0.357321 0.173934
1412 1 0.0636634 0.50132 0.307699
163 1 0.0805611 0.418162 0.177358
844 1 0.189989 0.578451 0.331068
1457 1 0.0638928 0.432863 0.268574
1097 1 0.0229876 0.395102 0.317612
901 1 0.155974 0.434545 0.269738
87 1 0.0137158 0.438444 0.199665
151 1 0.105212 0.566751 0.2702
1486 1 0.136315 0.50199 0.288798
1320 1 0.119193 0.536058 0.367558
1799 1 0.063662 0.563952 0.208734
1619 1 0.313825 0.494178 0.154774
259 1 0.267249 0.520202 0.0786244
1563 1 0.261941 0.460638 0.342903
1403 1 0.336031 0.556785 0.198538
348 1 0.393982 0.506189 0.216301
899 1 0.285221 0.411328 0.29788
555 1 0.192393 0.476946 0.226631
1484 1 0.27637 0.674597 0.324974
1725 1 0.280293 0.514613 0.293397
337 1 0.437746 0.464927 0.163205
633 1 0.427287 0.3784 0.103285
1712 1 0.497984 0.461422 0.216186
30 1 0.392226 0.59192 0.202698
1849 1 0.321297 0.322707 0.214857
333 1 0.319829 0.472595 0.229327
438 1 0.458304 0.465596 0.0795514
1159 1 0.360877 0.463093 0.304899
690 1 0.316546 0.505645 0.360378
1015 1 0.371848 0.435252 0.184217
859 1 0.114754 0.699297 0.157781
1858 1 0.433299 0.724626 0.0211737
1344 1 0.0741369 0.626388 0.259654
720 1 0.0465314 0.36264 0.00957701
1193 1 0.0946984 0.679171 0.311312
874 1 0.355518 0.538354 0.496216
459 1 0.0518772 0.808554 0.256665
1816 1 0.219152 0.646332 0.215644
625 1 0.250032 0.522304 0.2241
1997 1 0.230028 0.73539 0.324439
1942 1 0.316065 0.665182 0.249742
455 1 0.416574 0.561549 0.380235
1632 1 0.448023 0.65706 0.2263
1187 1 0.209239 0.717585 0.259522
1179 1 0.207797 0.491855 0.313868
663 1 0.418938 0.601873 0.306238
1340 1 0.492443 0.699405 0.266133
1432 1 0.467476 0.539973 0.203499
1591 1 0.48647 0.838833 0.4252
645 1 0.465595 0.538927 0.33025
1026 1 0.4934 0.0665068 0.215854
715 1 0.347835 0.68023 0.31582
79 1 0.12714 0.729434 0.249218
890 1 0.0361423 0.825665 0.187425
1487 1 0.440551 0.675556 0.320933
706 1 0.431773 0.806493 0.461608
676 1 0.0435212 0.868871 0.309296
16 1 0.0573563 0.705808 0.227714
1082 1 0.491147 0.408213 0.0631251
1355 1 0.000170218 0.762564 0.229993
1490 1 0.233553 0.82703 0.293448
1724 1 0.263044 0.785277 0.23061
1551 1 0.243406 0.794281 0.370577
336 1 0.395853 0.752776 0.302724
147 1 0.287298 0.856622 0.271983
1115 1 0.175623 0.878746 0.270136
949 1 0.138321 0.806145 0.27056
572 1 0.255018 0.857497 0.178192
1191 1 0.372347 0.694199 0.212989
1024 1 0.424453 0.738516 0.236994
1451 1 0.413233 0.272503 0.203584
801 1 0.467627 0.79047 0.306412
614 1 0.416212 0.831717 0.178498
1501 1 0.391186 0.803131 0.247048
1314 1 0.362936 0.871671 0.228518
1437 1 0.460545 0.840452 0.24104
1654 1 0.146174 0.939278 0.195385
190 1 0.290181 0.918048 0.45829
1643 1 0.137073 0.956314 0.294743
1499 1 0.0364567 0.248716 0.377285
658 1 0.00978924 0.870885 0.246424
528 1 0.160445 0.00391666 0.2229
1659 1 0.141856 0.0272609 0.153267
1695 1 0.21224 0.942026 0.251342
173 1 0.371639 0.95877 0.252425
522 1 0.181683 0.857515 0.198777
864 1 0.283511 0.926278 0.322867
883 1 0.196712 0.593424 0.107512
1272 1 0.339968 0.0232865 0.205614
1847 1 0.404741 0.0513271 0.200091
923 1 0.353904 0.948866 0.175064
473 1 0.44891 0.892519 0.18458
1160 1 0.445592 0.00502982 0.255303
1776 1 0.438402 0.915861 0.263569
254 1 0.416442 0.962502 0.195676
1296 1 0.408076 0.984168 0.38453
354 1 0.242677 0.596189 0.497795
1827 1 0.126272 0.0226259 0.424922
1290 1 0.139119 0.0225616 0.353219
1868 1 0.461507 0.875403 0.0456829
604 1 0.311028 0.142538 0.486167
700 1 0.0516796 0.100697 0.378399
1745 1 0.083648 0.152546 0.297683
820 1 0.104464 0.154379 0.363447
1935 1 0.404016 0.673321 0.497112
627 1 0.0889709 0.122427 0.450871
1721 1 0.24151 0.124222 0.331183
655 1 0.290997 0.0555414 0.359757
1947 1 0.281085 0.118303 0.390833
1090 1 0.147537 0.607474 0.0443984
1952 1 0.245839 0.268451 0.437798
1285 1 0.353083 0.0338139 0.389395
583 1 0.178828 0.149436 0.376397
1813 1 0.0141431 0.285117 0.0405492
1618 1 0.423469 0.140407 0.320155
730 1 0.41881 0.0699972 0.267941
275 1 0.358172 0.115637 0.426322
557 1 0.340065 0.101599 0.332243
431 1 0.450052 0.96445 0.457728
95 1 0.393491 0.0977628 0.498911
1308 1 0.0600522 0.295303 0.490045
1963 1 0.15547 0.280764 0.395975
1440 1 0.103846 0.548181 0.459239
1116 1 0.105347 0.22381 0.298049
40 1 0.108507 0.241701 0.452794
1297 1 0.1312 0.177581 0.431332
650 1 0.298875 0.589237 0.107551
775 1 0.00104041 0.687027 0.490531
372 1 0.185311 0.287282 0.316868
28 1 0.168887 0.221672 0.337655
1714 1 0.462985 0.939345 0.120867
520 1 0.111067 0.520879 0.00220536
1446 1 0.371153 0.269543 0.426384
1031 1 0.243672 0.200756 0.356863
15 1 0.447362 0.896409 0.484672
436 1 0.327255 0.33223 0.396132
574 1 0.424488 0.454628 0.42575
1835 1 0.430224 0.323777 0.41521
759 1 0.471464 0.242673 0.39558
931 1 0.250666 0.235194 0.287485
659 1 0.359139 0.192311 0.316029
1431 1 0.231336 0.0658019 0.412045
1806 1 0.122195 0.348194 0.470685
65 1 0.173349 0.469172 0.0157279
355 1 0.0749504 0.405072 0.447611
1832 1 0.0254639 0.750262 0.165262
54 1 0.178829 0.389772 0.189895
736 1 0.128583 0.378105 0.329141
1101 1 0.0885554 0.448163 0.3602
362 1 0.134221 0.956268 0.49018
1841 1 0.123335 0.373244 0.396999
1505 1 0.353344 0.266758 0.348012
1100 1 0.225363 0.362841 0.452397
217 1 0.016221 0.381166 0.243564
904 1 0.00511055 0.815427 0.345264
838 1 0.0627194 0.355863 0.365529
1001 1 0.0322636 0.113256 0.127858
1361 1 0.33287 0.439004 0.383384
216 1 0.368143 0.384978 0.435699
1679 1 0.273144 0.410283 0.492311
1948 1 0.407726 0.27798 0.0021874
255 1 0.0134614 0.868112 0.49063
4 1 0.410383 0.403516 0.282654
1419 1 0.118138 0.257301 0.00729662
1688 1 0.461865 0.510344 0.261998
753 1 0.480085 0.0118728 0.405836
1657 1 0.195462 0.210821 0.418839
1287 1 0.0263044 0.655621 0.310871
766 1 0.0489084 0.339393 0.0813783
845 1 0.0865658 0.598615 0.33017
143 1 0.0458714 0.527319 0.377897
1531 1 0.100724 0.474697 0.455698
929 1 0.272076 0.610742 0.362205
342 1 0.224096 0.527131 0.377091
802 1 0.177726 0.507478 0.437163
1310 1 0.266633 0.610117 0.282684
1594 1 0.330331 0.367906 0.334387
1017 1 0.2711 0.446664 0.426292
305 1 0.271253 0.373927 0.387492
433 1 0.488483 0.53072 0.0479932
1309 1 0.162045 0.0986956 0.446668
468 1 0.361057 0.53196 0.418337
805 1 0.0408654 0.918252 0.0540287
1255 1 0.417479 0.575946 0.454536
1903 1 0.354902 0.465694 0.462919
325 1 0.376711 0.528512 0.303694
430 1 0.473082 0.732072 0.355079
1028 1 0.481493 0.0804316 0.374043
1203 1 0.137391 0.658987 0.223337
789 1 0.0292921 0.71 0.377006
335 1 0.0325517 0.605758 0.400729
1143 1 0.336264 0.0879007 0.00226226
935 1 0.0526789 0.740118 0.303207
957 1 0.0773359 0.703428 0.457273
313 1 0.16255 0.602734 0.403152
977 1 0.028823 0.635161 0.0125259
1236 1 0.350049 0.576905 0.354664
712 1 0.324138 0.741947 0.258241
1330 1 0.260218 0.755776 0.430959
681 1 0.184467 0.650923 0.313929
1601 1 0.177283 0.741389 0.419479
702 1 0.200871 0.680202 0.391092
508 1 0.311117 0.739682 0.372914
1114 1 0.36286 0.705079 0.426753
1756 1 0.350911 0.619786 0.445609
1393 1 0.338465 0.653499 0.377612
791 1 0.466627 0.620871 0.377423
1715 1 0.405601 0.718645 0.37242
2011 1 0.484328 0.782177 0.185464
489 1 0.444142 0.719898 0.4381
1033 1 0.0281741 0.940163 0.27692
543 1 0.112204 0.728992 0.375062
110 1 0.0868614 0.862715 0.417482
1395 1 0.067843 0.788667 0.473825
181 1 0.150701 0.903532 0.432849
1521 1 0.491567 0.106483 0.288504
2028 1 0.164334 0.76546 0.328433
919 1 0.460051 0.012582 0.336795
243 1 0.0652428 0.775973 0.409043
224 1 0.322105 0.904488 0.0175257
239 1 0.0841322 0.657781 0.378436
1391 1 0.0860221 0.801942 0.338185
1703 1 0.207544 0.808647 0.430789
1443 1 0.451889 0.348204 0.193855
1321 1 0.226926 0.0196864 0.478939
380 1 0.165415 0.833896 0.372247
1044 1 0.00336234 0.462896 0.327407
126 1 0.281575 0.837507 0.444873
1896 1 0.393055 0.838601 0.389467
90 1 0.05844 0.0559116 0.433632
611 1 0.110573 0.0511445 0.00930101
1930 1 0.318955 0.817964 0.373009
497 1 0.376667 0.82316 0.317589
1235 1 0.467055 0.528362 0.131685
1055 1 0.443529 0.7878 0.390057
1517 1 0.0448693 0.497302 0.499286
117 1 0.147858 0.958323 0.387289
1416 1 0.20976 0.948369 0.455624
58 1 0.00514178 0.968392 0.34249
448 1 0.48006 0.152968 0.38164
376 1 0.0146178 0.544914 0.435955
1463 1 0.0771431 0.989677 0.383702
1665 1 0.213272 0.911066 0.33852
1614 1 0.0792928 0.938073 0.332106
304 1 0.278491 0.881577 0.384099
2009 1 0.201221 0.999414 0.297067
1944 1 0.297713 0.968939 0.244439
1979 1 0.425501 0.11568 0.410394
1663 1 0.232754 -0.000394719 0.37927
869 1 0.340784 0.861609 0.427989
1037 1 0.0646118 0.623152 0.476947
233 1 0.42434 0.907492 0.41095
242 1 0.351686 0.970066 0.476978
46 1 0.340475 0.952163 0.376473
1353 1 0.468843 0.60253 0.146325
1794 1 0.0402093 0.935966 0.417051
1991 1 0.3069 0.797297 0.0283287
912 1 0.493699 0.318651 0.116809
1198 1 0.224186 0.139943 0.45792
2013 1 0.0884616 0.969026 0.128729
727 1 0.146861 0.609987 0.481613
722 1 0.0370234 0.182379 0.0124987
1548 1 0.494411 0.286665 0.19264
12 1 0.146207 0.720129 0.480599
515 1 0.00101851 0.138638 0.472105
509 1 0.232898 0.206259 0.484237
1786 1 0.000852815 0.00507774 0.402765
1480 1 0.38115 0.755308 0.485879
1341 1 0.484753 0.49736 0.463241
351 1 0.0179296 0.955009 0.120167
1389 1 0.0793425 0.000357977 0.499293
1587 1 0.00695859 0.155114 0.39981
1010 1 0.00568505 0.0181926 0.480703
130 1 0.480496 0.405007 0.175293
1497 1 0.161177 0.1553 0.496648
121 1 0.219824 0.803055 0.0118939
1959 1 0.127824 0.187657 0.00131914
1488 1 0.324933 0.299305 0.00456966
941 1 0.306949 0.632706 -0.0018836
573 1 0.134894 0.0089005 0.560542
1145 1 0.16607 0.109391 0.582571
161 1 0.105167 0.0848857 0.521968
863 1 0.403596 0.950022 0.856108
201 1 0.0221128 0.265854 0.69201
872 1 0.117593 0.142174 0.684392
1583 1 0.191948 0.0248687 0.611599
840 1 0.0824378 0.994927 0.617334
1626 1 0.445028 0.595475 0.544084
1156 1 0.210242 0.104255 0.515104
1882 1 0.460185 0.376177 0.862688
115 1 0.231458 0.457217 0.535354
564 1 0.0340691 0.656274 0.664785
1843 1 0.226719 0.0924276 0.611971
1844 1 0.257814 0.155176 0.527621
146 1 0.287983 0.0503006 0.580887
1373 1 0.282363 0.0432722 0.649603
331 1 0.490505 0.0334389 0.58649
1675 1 0.382059 0.960718 0.668176
707 1 0.332518 0.0966365 0.634551
1113 1 0.377633 0.899777 0.70738
1855 1 0.401843 0.079959 0.574284
1652 1 0.478267 0.212696 0.659899
512 1 0.476083 0.0793353 0.714954
373 1 0.0625567 0.103007 0.639269
339 1 0.388354 0.143914 0.992333
1993 1 0.173223 0.227091 0.526414
2030 1 0.0703815 0.0478873 0.686097
1052 1 0.12126 0.172447 0.597224
1639 1 0.201168 0.29366 0.569621
1250 1 0.124604 0.268555 0.598651
525 1 0.0162282 0.0037413 0.55142
1210 1 0.218269 0.27297 0.635463
1032 1 0.0130824 0.39494 0.761133
1838 1 0.173366 0.219441 0.63118
444 1 0.255448 0.22552 0.562719
142 1 0.336256 0.29084 0.632238
1814 1 0.395312 0.217039 0.597288
84 1 0.327323 0.216095 0.618066
1586 1 0.295345 0.180323 0.678149
908 1 0.476029 0.837887 0.662956
1312 1 0.442603 0.284048 0.73225
1242 1 0.418511 0.298401 0.58959
843 1 0.47795 0.347498 0.604622
175 1 0.384186 0.157821 0.538036
534 1 0.360648 0.162271 0.666955
366 1 0.448374 0.125393 0.536653
191 1 0.450073 0.199764 0.543308
1904 1 0.357113 0.240518 0.687116
1951 1 0.281229 0.0752945 0.511133
1277 1 0.126062 0.310525 0.532391
1216 1 0.00559104 0.426649 0.526566
813 1 0.111438 0.368483 0.580097
345 1 0.109766 0.424114 0.640199
10 1 0.0657443 0.307155 0.579215
1737 1 0.0725302 0.282502 0.73653
185 1 0.0750452 0.561851 0.887564
1178 1 0.0119703 0.847522 0.899566
1056 1 0.186688 0.343486 0.698186
972 1 0.392561 0.351197 0.987966
457 1 0.241512 0.308117 0.741386
1914 1 0.173356 0.39149 0.530361
329 1 0.174282 0.436675 0.586208
384 1 0.240188 0.349648 0.537545
784 1 0.256485 0.32561 0.665121
696 1 0.258759 0.40214 0.59626
683 1 0.17944 0.369351 0.605606
1274 1 0.0224418 0.285379 0.843627
114 1 0.36756 0.35264 0.578042
992 1 0.382182 0.480031 0.633756
1568 1 0.276127 0.517806 0.565181
139 1 0.459801 0.438144 0.586226
1130 1 0.115051 0.340212 0.658341
1240 1 0.0436686 0.570394 0.641579
1978 1 0.0135796 0.568375 0.965195
1065 1 0.0393035 0.549738 0.736094
499 1 0.0559012 0.476068 0.598704
1940 1 0.034149 0.361176 0.936158
1109 1 0.0726951 0.0585557 0.579751
1110 1 0.096885 0.588525 0.526968
51 1 0.482619 0.062366 0.839054
640 1 0.0371764 0.44297 0.670507
1662 1 0.00834321 0.422114 0.891934
441 1 0.458232 0.713705 0.519516
1518 1 0.157187 0.43388 0.718948
1677 1 0.22266 0.508343 0.610112
1898 1 0.325661 0.459092 0.562487
569 1 0.209332 0.4012 0.666071
111 1 0.180136 0.496297 0.505344
1574 1 0.426293 0.894972 0.660061
1387 1 0.0273104 0.620232 0.756227
361 1 0.37804 0.532305 0.566633
514 1 0.47326 0.51333 0.577358
1237 1 0.447237 0.492518 0.661973
1472 1 0.388957 0.423594 0.577192
1325 1 0.273933 0.282223 0.507892
1589 1 0.332288 0.112508 0.561541
617 1 0.0956559 0.621217 0.639386
1529 1 0.0502032 0.360446 0.697556
917 1 0.0990268 0.582771 0.701939
1267 1 0.110851 0.413734 0.986975
860 1 0.0708845 0.220171 0.956022
1900 1 0.474857 0.30855 0.82204
900 1 0.179251 0.604515 0.622502
668 1 0.259007 0.607082 0.595062
1757 1 0.27168 0.821947 0.583045
1504 1 0.336275 0.6625 0.57333
1728 1 0.233126 0.665051 0.581469
149 1 0.0225556 0.495082 0.8809
144 1 0.178806 0.718609 0.537489
1765 1 0.324533 0.525905 0.634444
1215 1 0.29434 0.978134 0.919914
1464 1 0.365298 0.588281 0.668712
1260 1 0.284974 0.999907 0.504904
1181 1 0.421806 0.897148 0.929849
1511 1 0.351962 0.555244 0.723417
519 1 0.447741 0.387675 0.549021
709 1 0.451563 0.350914 0.749934
1552 1 0.369881 0.607305 0.528191
605 1 0.421354 0.992133 0.971725
1447 1 0.0811391 0.238054 0.523334
758 1 0.0820748 0.73987 0.626731
1916 1 0.0484605 0.669097 0.586285
1950 1 0.10986 0.718043 0.548835
544 1 0.496314 0.25288 0.78856
834 1 0.0944433 0.78552 0.555833
854 1 0.0514924 0.375223 0.504185
1875 1 0.165402 0.790322 0.528175
1259 1 0.158 0.745019 0.60187
228 1 0.239617 0.757196 0.512326
2007 1 0.11875 0.692718 0.67714
3 1 0.230864 0.73975 0.610163
797 1 0.292296 0.707001 0.626536
748 1 0.26333 0.913116 0.597443
1442 1 0.283224 0.692818 0.529004
1520 1 0.461276 0.916321 0.879024
612 1 0.422118 0.656283 0.575064
1687 1 0.0877832 0.616064 0.839112
1564 1 0.314447 0.862137 0.523911
1629 1 0.475705 0.131859 0.929639
1228 1 0.365236 0.817027 0.547106
635 1 0.383102 0.722197 0.55707
1469 1 0.448159 0.716338 0.592737
293 1 0.435179 0.790667 0.553547
607 1 0.184194 0.94366 0.82674
386 1 0.0291018 0.828644 0.553886
1262 1 0.286267 0.904085 0.926109
1375 1 0.0172612 0.958663 0.605968
322 1 0.134407 0.902486 0.590198
710 1 0.488797 0.517955 0.724674
1041 1 0.104132 0.923535 0.659677
209 1 0.450649 0.168045 0.845151
1793 1 0.239033 0.0148604 0.971099
1747 1 0.252908 0.988575 0.580247
1045 1 0.173436 0.811516 0.667134
1339 1 0.246497 0.928647 0.518888
1080 1 0.296461 0.793045 0.517788
213 1 0.221515 0.0455195 0.550626
1483 1 0.326187 0.947508 0.553957
588 1 0.307636 0.964176 0.633396
1655 1 0.0624602 0.928313 0.551133
1103 1 0.249699 0.978838 0.668016
1743 1 0.498332 0.140201 0.677097
1578 1 0.46958 0.289859 0.662636
397 1 0.396705 0.883603 0.558244
1508 1 0.383899 0.950398 0.589135
1137 1 0.343753 0.794378 0.668654
945 1 0.421973 0.021179 0.602602
1887 1 0.0206426 0.124538 0.58046
1127 1 0.0532541 0.0655504 0.862622
1470 1 0.046056 0.1149 0.734752
449 1 0.0412015 0.197397 0.717328
1661 1 0.141528 0.988068 0.656398
52 1 0.221268 0.160244 0.657097
1027 1 0.058445 0.0440336 0.780357
132 1 0.227949 0.104054 0.732792
672 1 0.131162 0.101053 0.742198
464 1 0.208813 0.167776 0.583145
198 1 0.122715 0.0602165 0.630202
903 1 0.331477 0.980437 0.832628
1740 1 0.335567 0.0566208 0.70203
743 1 0.21595 0.20738 0.856298
888 1 0.257243 0.0966712 0.798989
1025 1 0.421977 0.117239 0.801374
502 1 0.280441 0.101517 0.691162
1571 1 0.401905 0.803927 0.9528
231 1 0.346928 0.0114972 0.604775
1459 1 0.431311 0.0144729 0.719384
298 1 0.428671 0.997475 0.79856
738 1 0.0109694 0.478774 0.954045
1972 1 0.362358 0.213762 0.96868
571 1 0.419865 0.0880845 0.673719
1095 1 0.450022 0.145338 0.724257
1256 1 0.381047 0.0528845 0.775786
946 1 0.16906 0.145032 0.797143
906 1 0.14502 0.220514 0.842955
8 1 0.0889684 0.439501 0.537041
1194 1 0.0886096 0.12828 0.78879
1323 1 0.141213 0.940698 0.895572
669 1 0.0984773 0.240496 0.680316
1965 1 0.17447 0.160123 0.722611
1083 1 0.157424 0.280509 0.6926
1939 1 0.214195 0.224173 0.701465
989 1 0.256433 0.174356 0.7413
582 1 0.213853 0.233057 0.786747
2033 1 0.28421 0.267896 0.698976
1374 1 0.282435 0.155489 0.599293
1553 1 0.334229 0.136336 0.901018
1949 1 0.301087 0.237661 0.768395
425 1 0.441186 0.201401 0.769903
1624 1 0.0119858 0.984882 0.759957
1182 1 0.443897 0.146381 0.622079
1859 1 0.407046 0.133428 0.891518
1569 1 0.421873 0.211243 0.697974
1549 1 0.392541 0.246698 0.813862
1305 1 0.37003 0.303908 0.757989
312 1 0.0646594 0.23197 0.591409
1066 1 0.488752 0.64342 0.503176
1962 1 0.0930805 0.298978 0.892061
432 1 0.0385307 0.378528 0.628778
703 1 0.125714 0.337848 0.834753
1131 1 0.107814 0.271767 0.800236
1184 1 0.106899 0.355543 0.760907
1541 1 0.351595 0.0443236 0.509018
1863 1 0.0447407 0.338281 0.795208
1356 1 0.236632 0.268591 0.885547
1244 1 0.229875 0.456742 0.759064
480 1 0.25529 0.29251 0.813413
1918 1 0.173131 0.381817 0.785895
256 1 0.387541 0.310256 0.688421
1826 1 0.307881 0.34135 0.716138
1060 1 0.172151 0.306931 0.769841
116 1 0.35538 0.166756 0.818736
1479 1 0.00185292 0.767316 0.920853
1908 1 0.444395 0.401242 0.683702
975 1 0.401031 0.360114 0.636562
741 1 0.440725 0.447209 0.735924
317 1 0.388807 0.378034 0.725592
959 1 0.431693 0.30536 0.915668
445 1 0.266911 0.329797 0.875053
2020 1 0.20226 0.554022 0.556074
1396 1 0.405691 0.31998 0.840665
1913 1 0.0822874 0.869412 0.89189
44 1 0.145209 0.484947 0.631781
566 1 0.0836773 0.431286 0.732467
1711 1 0.265821 0.630442 0.778806
546 1 0.116217 0.404248 0.815895
222 1 0.0173396 0.635406 0.84551
769 1 0.0382843 0.558164 0.807433
1219 1 0.158668 0.476398 0.772932
1263 1 0.043821 0.142684 0.935863
987 1 0.46635 0.434106 0.901579
746 1 0.148855 0.529654 0.718215
1872 1 0.250276 0.370579 0.806176
1378 1 0.0894812 0.495518 0.680843
1138 1 0.311403 0.430697 0.793916
1150 1 0.215461 0.547995 0.769035
504 1 0.292273 0.502846 0.754726
169 1 0.333056 0.593711 0.929206
427 1 0.402794 0.414289 0.812683
200 1 0.322656 0.410519 0.702139
930 1 0.312437 0.373865 0.636197
1739 1 0.459782 0.583055 0.678812
751 1 0.126266 0.653836 0.974456
817 1 0.298968 0.493998 0.827548
32 1 0.432189 0.509393 0.879002
1124 1 0.0225088 0.755287 0.518716
593 1 0.0920712 0.670018 0.782959
1369 1 0.185441 0.663688 0.688869
1223 1 0.11733 0.645533 0.722024
516 1 0.175465 0.645038 0.774373
454 1 0.0669979 0.750947 0.885996
1275 1 0.253389 0.476956 0.687364
799 1 0.141578 0.680726 0.610632
1795 1 0.201035 0.595551 0.721991
814 1 0.25436 0.672989 0.711923
835 1 0.23852 0.559079 0.667592
1288 1 0.309351 0.631462 0.664717
1575 1 0.216757 0.592578 0.870881
1850 1 0.158512 0.595261 0.821783
1911 1 0.322488 0.712551 0.69093
1560 1 0.237883 0.726622 0.840816
692 1 0.472074 0.68238 0.66643
1148 1 0.460731 0.632321 0.782014
1195 1 0.254255 0.945341 0.978903
1660 1 0.47182 0.681027 0.884228
1752 1 0.277119 0.592533 0.714793
1039 1 0.484994 0.533863 0.928687
1229 1 0.412875 0.566476 0.776349
327 1 0.425169 0.677464 0.817122
1919 1 0.038848 0.776082 0.679074
150 1 0.226524 0.72862 0.686592
1969 1 0.0521996 0.922409 0.773907
674 1 0.0585728 0.802791 0.747506
34 1 0.115336 0.725124 0.745567
1938 1 0.432409 0.525864 0.501987
781 1 0.134593 0.847404 0.727828
414 1 0.119881 0.828631 0.817443
341 1 0.121153 0.768629 0.68324
394 1 0.263564 0.828079 0.7063
1335 1 0.192936 0.762635 0.751648
1770 1 0.344408 0.595665 0.603085
1818 1 0.323263 0.761332 0.586411
250 1 0.223665 0.826471 0.909274
387 1 0.399037 0.827869 0.610093
724 1 0.290443 0.769556 0.724864
1466 1 0.298049 0.876882 0.767516
188 1 0.40218 0.725359 0.755587
1270 1 0.260931 0.942345 0.845825
31 1 0.424037 0.559081 0.615287
284 1 0.468336 0.75393 0.707374
1856 1 0.43035 0.778102 0.651376
72 1 0.482013 0.844181 0.864851
1954 1 0.383574 0.658647 0.635809
1797 1 0.48906 0.694394 0.745417
1510 1 0.482068 0.734008 0.820743
1680 1 0.091537 0.122483 0.990789
68 1 0.340044 0.777404 0.903817
7 1 0.0739206 0.976477 0.812332
754 1 0.0572822 0.890518 0.708992
916 1 0.442052 0.529318 0.990759
762 1 0.0865504 0.983859 0.71468
279 1 0.128577 0.0254425 0.868839
2006 1 0.142873 0.993579 0.795807
965 1 0.218614 0.891677 0.702147
601 1 0.187035 0.868146 0.794532
1606 1 0.0485838 0.877102 0.609543
1749 1 0.27895 0.772481 0.645495
589 1 0.264111 0.863313 0.831444
1866 1 0.195138 0.95374 0.733841
1732 1 0.131526 0.928074 0.755731
1699 1 0.291231 0.928662 0.701584
876 1 0.077516 0.822102 0.654701
1590 1 0.354716 0.793334 0.743023
1970 1 0.349602 0.898774 0.62875
292 1 0.47 0.930507 0.813363
718 1 0.350421 0.91788 0.776148
1295 1 0.430839 0.94559 0.744062
167 1 0.414897 0.87798 0.769931
1604 1 0.350257 0.981783 0.742349
1081 1 0.334078 0.826378 0.808221
1996 1 0.322077 0.905863 0.848239
324 1 0.0998495 0.559353 0.774087
850 1 0.377198 0.724078 0.629671
330 1 0.117025 0.169368 0.924071
1362 1 0.135955 0.0866231 0.910786
1093 1 0.0902266 0.864522 0.517625
1303 1 0.110147 0.217224 0.746422
505 1 0.148143 0.0834991 0.826703
1763 1 0.481333 0.363815 0.936994
1108 1 0.0273244 4.89503e-05 0.900455
1902 1 0.220512 0.0286767 0.769928
1350 1 0.418428 0.438197 0.507012
27 1 0.00352152 0.189889 0.622906
393 1 0.198417 0.0619229 0.691152
91 1 0.0187193 0.170804 0.786216
1123 1 0.222159 0.119862 0.869606
83 1 0.355045 0.131589 0.745776
1226 1 0.202903 0.0568832 0.913007
596 1 0.0321865 0.646561 0.918303
603 1 0.270654 0.0138739 0.853939
1067 1 0.46075 0.308042 0.519077
1774 1 0.338661 0.00705315 0.982428
158 1 0.0100853 0.342791 0.559002
398 1 0.40268 0.0486161 0.84615
889 1 0.473733 0.00272913 0.888392
244 1 0.440502 0.0575033 0.938558
1338 1 0.278428 0.0770884 0.900016
809 1 0.387043 0.00269398 0.903999
961 1 0.20714 0.0335447 0.84076
1208 1 0.153697 0.589564 0.904377
699 1 0.0868788 0.160036 0.527971
1540 1 0.0943037 0.147874 0.857056
460 1 0.196719 0.900328 0.957873
964 1 0.179922 0.194051 0.920128
823 1 0.0796814 0.290858 0.95872
1716 1 0.13793 0.740601 0.999063
1220 1 0.081322 0.989568 0.940053
511 1 0.166043 0.96441 0.979982
1201 1 0.0530798 0.196928 0.873328
881 1 0.308414 0.210714 0.910331
947 1 0.309478 0.250836 0.843906
1852 1 0.0239391 0.059129 0.969101
1227 1 0.272859 0.166327 0.847777
1023 1 0.484748 0.630885 0.611782
33 1 0.174441 0.799079 0.956218
666 1 0.260573 0.946448 0.773965
679 1 0.385037 0.227946 0.883067
194 1 0.290652 0.23002 0.979101
195 1 0.45033 0.246338 0.857229
1126 1 0.105066 0.545322 0.603314
1192 1 0.463368 0.228477 0.926891
891 1 0.479865 0.297936 0.972818
870 1 0.34738 0.404389 0.506888
137 1 0.171951 0.299219 0.880767
1810 1 0.276144 0.0920075 0.973176
661 1 0.477991 0.959838 0.525763
456 1 0.0576468 0.379888 0.862342
153 1 0.0884414 0.464294 0.861323
828 1 0.180242 0.355348 0.982154
1336 1 0.1373 0.253603 0.925946
1367 1 0.222539 0.392698 0.881756
1380 1 0.250417 0.34685 0.953825
1243 1 0.17454 0.87531 0.53048
1475 1 0.255651 0.151814 0.92768
1014 1 0.316477 0.332083 0.79681
221 1 0.323236 0.361397 0.95687
839 1 0.190042 0.278531 0.965083
570 1 0.363908 0.0820627 0.93658
218 1 0.395927 0.388547 0.902284
894 1 0.296004 0.295975 0.932115
382 1 0.150353 0.0165243 0.719225
1453 1 0.0743097 0.944697 0.873737
1099 1 0.0855641 0.591239 0.985335
422 1 0.176285 0.131869 0.979553
1961 1 0.0622254 0.428873 0.93571
133 1 0.493302 0.964936 0.695223
1012 1 0.217245 0.514556 0.845401
1394 1 0.0668785 0.493403 0.765955
1089 1 0.40995 0.966711 0.522905
265 1 0.0446524 0.437205 0.805088
1306 1 0.232973 0.834003 0.51662
539 1 0.0816222 0.505479 0.938799
531 1 0.264341 0.562118 0.939888
43 1 0.163839 0.451631 0.856222
1125 1 0.0949857 0.916481 0.938354
214 1 0.382584 0.851209 0.854141
1204 1 0.350928 0.434949 0.941961
619 1 0.277776 0.561306 0.849576
1042 1 0.180186 0.428542 0.939957
1870 1 0.239476 0.481641 0.917156
760 1 0.310969 0.404149 0.862835
478 1 0.271316 0.416234 0.938077
1343 1 0.431146 0.8166 0.726185
1351 1 0.355466 0.520947 0.962148
1945 1 0.454121 0.59083 0.840704
429 1 0.367001 0.483916 0.850858
779 1 0.318388 0.505576 0.903097
1674 1 0.438397 0.502074 0.799509
723 1 0.372854 0.554182 0.850628
347 1 0.427765 0.588307 0.917996
276 1 0.361621 0.932604 0.938671
1034 1 0.175191 0.663642 0.857174
507 1 0.149169 0.798128 0.881004
1386 1 0.150499 0.0132099 0.932053
311 1 0.21082 0.618768 0.941357
506 1 0.140948 0.528058 0.860234
624 1 0.0970886 0.69769 0.842728
1397 1 0.305854 0.467704 0.994105
122 1 0.261967 0.653531 0.853819
1751 1 0.293348 0.651707 0.93068
11 1 0.324284 0.568047 0.791845
1975 1 0.331969 0.697151 0.78882
1071 1 0.231702 0.183657 0.988908
1347 1 0.156118 0.533555 0.945814
933 1 0.00292613 0.0901342 0.801229
1730 1 0.213251 0.811343 0.834701
990 1 0.345309 0.698264 0.876397
1280 1 0.104893 0.364269 0.930077
1957 1 0.343879 0.855515 0.921803
1664 1 0.480532 0.659608 0.977355
830 1 0.339778 0.24935 0.558013
638 1 0.382149 0.631608 0.858149
1445 1 0.409872 0.659839 0.993571
1209 1 0.362762 0.850523 0.991983
647 1 0.401658 0.735097 0.944659
942 1 0.418331 0.456127 0.958802
70 1 0.00127611 0.0480131 0.659315
1474 1 0.0940743 0.67563 0.918396
219 1 0.162068 0.732752 0.822775
1831 1 0.468677 0.0511005 0.652282
1676 1 0.118761 0.910091 0.826393
675 1 0.0538817 0.805277 0.843953
1008 1 0.147345 0.870429 0.912045
1365 1 0.14308 0.71967 0.893057
561 1 0.483167 0.797432 0.98827
2047 1 0.0209879 0.987276 0.98051
1153 1 0.279378 0.800342 0.86667
2004 1 0.250792 0.693262 0.969033
303 1 0.102155 0.786703 0.950008
1755 1 0.220837 0.71935 0.910175
166 1 0.277817 0.797151 0.95199
950 1 0.00732535 0.289474 0.615408
165 1 0.020672 0.934921 0.925325
701 1 0.161017 0.634779 0.55395
969 1 0.359216 0.75653 0.807935
1434 1 0.423983 0.654322 0.714596
446 1 0.203201 0.891659 0.875154
1689 1 0.218528 0.961164 0.906743
119 1 0.447794 0.801634 0.804676
136 1 0.41081 0.791913 0.880389
725 1 0.47813 0.880958 0.550788
1072 1 0.281143 0.292052 0.580448
829 1 0.494397 0.582658 0.748774
523 1 0.0279319 0.0790824 0.51976
421 1 0.455599 0.0306483 0.514776
562 1 0.0338663 0.760407 0.987716
1966 1 0.0253152 0.544554 0.57126
1258 1 0.474852 0.419941 0.983764
1573 1 0.00488556 0.740531 0.62483
999 1 0.476343 0.958165 0.603453
1252 1 0.231561 0.514315 0.991878
1532 1 0.388331 0.5865 0.988966
334 1 0.483559 0.78183 0.912325
551 1 0.47494 0.936746 0.992037
479 1 0.0237903 0.266018 0.921912
1792 1 0.135094 0.868554 0.993489
1907 1 0.330634 0.330117 0.518504
264 1 0.00756798 0.214536 0.53382
1697 1 0.00258787 0.429883 0.598327
498 1 0.411479 0.0760592 0.997771
1726 1 0.248934 0.866667 0.994222
56 1 0.387806 0.245609 0.502007
1825 1 0.226668 0.417514 0.994108
340 1 0.682326 0.74564 0.0586216
1139 1 0.594403 0.982338 0.038838
1649 1 0.641239 0.115147 0.0722476
1364 1 0.710462 0.00878996 0.147516
471 1 0.521987 0.0879638 0.0301279
118 1 0.74815 0.566248 0.085912
290 1 0.984522 0.0354067 0.296112
810 1 0.997558 0.609772 0.479076
2032 1 0.702749 0.0713283 0.0963703
785 1 0.774631 0.980079 0.164739
235 1 0.868282 0.978463 0.165478
212 1 0.954531 0.179685 0.0422658
439 1 0.637237 0.0544936 0.144182
1348 1 0.655876 0.00141903 0.0783334
1925 1 0.908299 0.0883887 0.043643
1485 1 0.840405 0.112634 0.0972968
1566 1 0.538092 0.340129 0.187635
99 1 0.784942 0.154471 0.0543351
1853 1 0.954011 0.317319 0.0736562
882 1 0.98778 0.0988658 0.0451183
1750 1 0.98498 0.477037 0.424783
125 1 0.978171 0.040493 0.132374
1000 1 0.932695 0.729253 0.468615
1435 1 0.826307 0.982603 0.0875245
1016 1 0.804803 0.0569065 0.16037
1302 1 0.611673 0.126091 0.145574
1328 1 0.532825 0.131958 0.086664
75 1 0.683127 0.253363 0.154597
1282 1 0.575831 0.184548 0.121639
237 1 0.667196 0.863943 0.0109084
1923 1 0.694783 0.197274 0.0726649
1823 1 0.586501 0.0467832 0.414843
1690 1 0.884263 0.944164 0.0282134
1086 1 0.766629 0.0964487 0.115805
1576 1 0.727672 0.0482489 0.00974281
735 1 0.685511 0.265664 0.0736713
1370 1 0.760404 0.167067 0.120784
1910 1 0.731447 0.230201 0.12234
1291 1 0.572069 0.348407 0.120992
1068 1 0.821056 0.290829 0.0139513
922 1 0.899453 0.688209 0.0821724
1976 1 0.897016 0.0753125 0.132158
192 1 0.882665 0.307877 0.244079
1327 1 0.87361 0.169176 0.0679967
1022 1 0.904566 0.166647 0.144192
1035 1 0.952615 0.218093 0.158007
565 1 0.920903 0.229995 0.077192
392 1 0.888912 0.332156 0.180493
807 1 0.670359 0.352563 0.0534775
291 1 0.572873 0.415176 0.0424826
878 1 0.643807 0.461611 0.247384
241 1 0.567464 0.965582 0.314202
465 1 0.53994 0.231411 0.163405
1669 1 0.52897 0.576488 0.36556
1543 1 0.615734 0.272658 0.0450359
210 1 0.63439 0.237155 0.443088
600 1 0.730059 0.315032 0.105806
861 1 0.595355 0.340217 0.057609
1824 1 0.830537 0.317045 0.123597
648 1 0.524236 0.367907 0.00697098
1142 1 0.82732 0.448635 0.0612608
632 1 0.770404 0.278428 0.160673
1761 1 0.715531 0.390705 0.111761
729 1 0.963771 0.0353024 0.014337
1733 1 0.690558 0.537187 0.0262612
1877 1 0.773239 0.364045 0.0545135
1524 1 0.857349 0.405378 0.12245
1248 1 0.83043 0.329127 0.283954
2040 1 0.93979 0.350754 0.128969
1644 1 0.718762 0.113231 0.0423735
513 1 0.850557 0.658135 0.0266162
978 1 0.881895 0.299765 0.0713397
1580 1 0.630791 0.515671 0.0485753
772 1 0.797754 0.440227 0.155868
1723 1 0.970028 0.388789 0.0560693
145 1 0.985231 0.313171 0.490157
225 1 0.543986 0.570555 0.185679
1112 1 0.745066 0.500146 0.00579013
154 1 0.807002 0.239936 0.107485
1247 1 0.625439 0.497321 0.168719
2037 1 0.701943 0.467416 0.0996669
1611 1 0.551331 0.502864 0.223575
1986 1 0.626695 0.398286 0.0963942
1738 1 0.560717 0.491903 0.0651688
251 1 0.95893 0.651023 0.0446542
778 1 0.554215 0.426783 0.123309
1696 1 0.622861 0.423092 0.171133
1441 1 0.575827 0.167644 0.201607
613 1 0.850563 0.499047 0.113398
948 1 0.628342 0.46615 0.102372
897 1 0.763229 0.49601 0.0780003
128 1 0.769494 0.575514 0.210957
765 1 0.788202 0.498901 0.217878
1053 1 0.985789 0.0847825 0.414729
691 1 0.950953 0.52826 0.0803678
1533 1 0.904495 0.455568 0.0918857
761 1 0.795347 0.536675 0.284919
868 1 0.95034 0.439954 0.233641
1681 1 0.952253 0.498422 0.187388
375 1 0.941239 0.434176 0.159275
1744 1 0.872173 0.463314 0.177921
1315 1 0.995715 0.789071 0.432317
848 1 0.67045 0.645391 0.155195
591 1 0.536765 0.557568 0.106875
924 1 0.613125 0.551838 0.110225
993 1 0.946777 0.582667 0.425039
657 1 0.543974 0.628596 0.0714586
2038 1 0.512452 0.736365 0.0399855
1476 1 0.506623 0.915514 0.400308
199 1 0.583479 0.620105 0.142104
96 1 0.960885 0.94268 0.411335
667 1 0.730079 0.911622 0.00249097
368 1 0.802548 0.552282 0.128892
1854 1 0.737415 0.675055 0.115595
898 1 0.602601 0.672597 0.10481
134 1 0.732333 0.517257 0.153605
1539 1 0.734291 0.627519 0.0592689
1783 1 0.776215 0.0498246 0.0600073
1329 1 0.681093 0.571207 0.104945
1424 1 0.658338 0.66835 0.0610585
1006 1 0.945158 0.368947 0.48939
928 1 0.897269 0.613512 0.0791149
873 1 0.603789 0.0606637 0.484038
628 1 0.526816 0.70946 0.198053
1076 1 0.913395 0.693791 0.219602
1535 1 0.86112 0.0355351 0.0255271
1239 1 0.580239 0.707541 0.431088
63 1 0.820816 0.668303 0.0978028
998 1 0.538202 0.101349 0.427178
1968 1 0.620695 0.81094 0.12383
568 1 0.880023 0.505331 0.0449374
1804 1 0.949009 0.388021 0.319089
403 1 0.671882 0.869882 0.140345
734 1 0.59021 0.749578 0.153147
670 1 0.552289 0.214461 0.0446989
795 1 0.600915 0.82793 0.0598759
1523 1 0.601299 0.751351 0.0670226
1038 1 0.810102 0.0923385 0.428805
229 1 0.942217 0.100467 0.279071
1767 1 0.782624 0.683081 0.0443539
2002 1 0.752931 0.845809 0.0340247
1773 1 0.656183 0.737484 0.125031
739 1 0.674914 0.82298 0.0766476
608 1 0.753411 0.772395 0.0827946
69 1 0.525092 0.985348 0.0671102
1120 1 0.827033 0.83608 0.0509281
1449 1 0.815568 0.855588 0.475942
1556 1 0.838972 0.888981 0.126301
1140 1 0.535653 0.848654 0.359345
98 1 0.81994 0.803972 0.123318
1205 1 0.983021 0.274304 0.433211
450 1 0.976719 0.587424 0.341419
1246 1 0.870645 0.718164 0.142308
983 1 0.921708 0.756892 0.0242346
1704 1 0.942986 0.74235 0.168782
768 1 0.541129 0.917775 0.0531518
1136 1 0.535195 0.929267 0.132217
1481 1 0.501878 0.200356 0.316053
1812 1 0.746034 0.84916 0.118448
412 1 0.650006 0.94079 0.0219171
17 1 0.703695 0.909355 0.0663481
955 1 0.978294 0.810015 0.277134
1171 1 0.811494 0.603511 0.0580804
1934 1 0.755729 0.962141 0.0676218
559 1 0.928146 0.112233 0.466963
1133 1 0.597841 0.920299 0.102749
485 1 0.818024 0.918703 0.188185
1881 1 0.52515 0.812456 0.0501152
282 1 0.813045 0.910912 0.0626169
1717 1 0.986054 0.40396 0.395875
1404 1 0.563657 0.898924 0.308835
936 1 0.940846 0.88015 0.0163042
1865 1 0.953837 0.951181 0.0460492
631 1 0.512148 0.265571 0.324168
1094 1 0.964907 0.822295 0.195903
606 1 0.885543 0.936601 0.107019
1989 1 0.904017 0.836441 0.0553881
749 1 0.940268 0.968676 0.146061
1388 1 0.503497 0.947456 0.267845
793 1 0.75137 0.432614 0.0509985
1602 1 0.586944 0.99596 0.11405
884 1 0.650361 0.996718 0.210372
1915 1 0.646177 0.439074 0.0352638
2008 1 0.617837 0.174186 0.0316194
652 1 0.514673 0.0164095 0.277377
1283 1 0.747393 0.104682 0.388927
1354 1 0.565478 0.0965916 0.190698
1390 1 0.796838 0.974869 0.246298
1102 1 0.692333 0.151144 0.143183
1864 1 0.702533 0.107919 0.224095
102 1 0.634582 0.130229 0.28616
656 1 0.732181 0.0525691 0.204993
287 1 0.91238 0.0247893 0.255198
1004 1 0.808244 0.0952474 0.350743
1613 1 0.687301 0.0737666 0.301208
1196 1 0.707942 0.952113 0.230505
440 1 0.932378 0.977099 0.214095
1413 1 0.972291 0.0511849 0.216548
1926 1 0.856617 0.138599 0.247327
1077 1 0.845162 0.0355057 0.218337
475 1 0.863156 0.939621 0.24407
1018 1 0.932992 0.0394054 0.442575
518 1 0.937338 0.051457 0.36022
892 1 0.962837 0.95393 0.278662
1682 1 0.542028 0.915239 0.464828
851 1 0.643284 0.141841 0.207739
1245 1 1.0013 0.718813 0.0920475
1170 1 0.649741 0.324019 0.139141
260 1 0.654617 0.205758 0.286775
1406 1 0.568918 0.216701 0.324535
187 1 0.628201 0.0759613 0.235693
1318 1 0.695842 0.327361 0.237867
138 1 0.518513 0.227548 0.235083
1289 1 0.683162 0.0810207 0.412496
2042 1 0.739693 0.360129 0.188501
100 1 0.840497 0.266874 0.188151
2034 1 0.824069 0.254055 0.281811
1982 1 0.805681 0.186519 0.177124
538 1 0.726785 0.19502 0.189397
756 1 0.693696 0.243462 0.237364
980 1 0.79069 0.179426 0.25278
537 1 0.734756 0.265571 0.362868
1104 1 0.961648 0.115067 0.356882
907 1 0.817727 0.0848231 0.265477
1762 1 0.84621 0.125056 0.176631
2025 1 0.872303 0.195591 0.292293
1206 1 0.913468 0.208579 0.237071
1058 1 0.872241 0.00323329 0.467489
29 1 0.533925 0.34398 0.400893
55 1 0.622188 0.287453 0.219149
1009 1 0.607321 0.256155 0.282896
45 1 0.984329 0.710067 0.016564
1428 1 0.600256 0.892353 0.0230876
236 1 0.615929 0.177734 0.469739
5 1 0.596819 0.27715 0.150709
25 1 0.560365 0.416418 0.208345
1333 1 0.604867 0.352841 0.201055
2010 1 0.612608 0.454607 0.425428
1869 1 0.818566 0.377245 0.375154
1265 1 0.757028 0.437148 0.224132
1514 1 0.772949 0.273629 0.234479
630 1 0.697406 0.445822 0.176607
694 1 0.940214 0.172085 0.308383
622 1 0.877502 0.28647 0.349396
85 1 0.749436 0.237095 0.293222
270 1 0.751956 0.313884 0.28987
1928 1 0.816117 0.331949 0.195525
1070 1 0.945044 0.291492 0.370267
1829 1 0.878029 0.581191 0.00297087
1493 1 0.883389 0.356176 0.330916
1999 1 0.552294 0.503004 0.144371
1512 1 0.648742 0.427581 0.354433
1307 1 0.61505 0.544443 0.236254
1646 1 0.636572 0.575793 0.170849
1161 1 0.560188 0.322177 0.309733
405 1 0.519503 0.641448 0.31482
390 1 0.536344 0.547269 0.289223
1129 1 0.783887 0.368612 0.247405
1779 1 0.70335 0.524409 0.22136
532 1 0.691173 0.399485 0.224347
249 1 0.809175 0.636469 0.236681
307 1 0.848765 0.440618 0.263772
1955 1 0.845313 0.397017 0.207877
1313 1 0.836615 0.582726 0.398992
89 1 0.758976 0.546355 0.35776
592 1 0.910691 0.566963 0.355917
796 1 0.661212 0.524612 0.294395
721 1 0.676932 0.577134 0.415375
1207 1 0.954177 0.365462 0.196904
1500 1 0.820693 0.594285 0.33003
1943 1 0.973087 0.516148 0.358953
1920 1 0.842589 0.581137 0.184363
1491 1 0.796349 0.405199 0.315738
418 1 0.977775 0.328415 0.256582
157 1 0.899731 0.478549 0.295702
885 1 0.899901 0.623728 0.320145
952 1 0.832363 0.305214 0.398556
1658 1 0.534924 0.643611 0.236823
932 1 0.640858 0.761196 0.207043
1819 1 0.600266 0.615079 0.221918
168 1 0.666944 0.597503 0.246611
822 1 0.678565 0.648005 0.318543
1781 1 0.604751 0.697405 0.205613
1316 1 0.594424 0.582384 0.0473753
914 1 0.539914 0.687711 0.362432
1901 1 0.909355 0.0171614 0.0898782
1746 1 0.728432 0.564766 0.285667
925 1 0.57058 0.97215 0.400099
1234 1 0.785735 0.725589 0.135026
1754 1 0.722957 0.736368 0.180518
395 1 0.690341 0.732255 0.351357
905 1 0.673452 0.677267 0.227473
887 1 0.758116 0.634276 0.344821
1692 1 0.765199 0.616846 0.13852
81 1 0.764533 0.673063 0.190845
1567 1 0.628619 0.541413 0.466163
232 1 0.938297 0.547569 0.291412
1616 1 0.593913 0.976199 0.488332
401 1 0.900966 0.762714 0.325502
867 1 0.918374 0.646906 0.158729
524 1 0.814599 0.676384 0.301512
649 1 0.89713 0.584137 0.236603
106 1 0.995059 0.582277 0.0837205
991 1 0.925381 0.760266 0.244473
744 1 0.779699 0.707859 0.375876
580 1 0.989668 0.940493 0.477071
1666 1 0.751517 0.922911 0.366933
824 1 0.717033 0.787562 0.236245
1005 1 0.536793 0.753557 0.290289
662 1 0.66127 0.714665 0.288243
1848 1 0.738517 0.863524 0.28077
1241 1 0.589 0.686085 0.292445
1384 1 0.698269 0.797137 0.141085
547 1 0.837777 0.783294 0.202324
389 1 0.77467 0.740808 0.240521
1931 1 0.772238 0.794317 0.171455
263 1 0.778135 0.86028 0.195726
247 1 0.745342 0.706082 0.30725
757 1 0.819522 0.937624 0.421365
1197 1 0.802474 0.752299 0.325745
1062 1 0.599652 0.73968 0.350897
1880 1 0.827806 0.703564 0.202128
750 1 0.619347 0.781671 0.30079
1635 1 0.846359 0.747429 0.273677
1888 1 0.963176 0.626376 0.262399
1059 1 0.882389 0.889068 0.387322
1174 1 0.559171 0.671446 0.0218448
1889 1 0.965412 0.340663 0.00685101
474 1 0.578328 0.850767 0.176161
140 1 0.56681 0.0154981 0.18701
374 1 0.604601 0.947902 0.170643
2046 1 0.593406 0.000714651 0.250527
1118 1 0.632026 0.930898 0.258422
1842 1 0.718288 0.854116 0.352749
728 1 0.641099 0.830473 0.218352
1673 1 0.627371 0.970062 0.358578
1299 1 0.694893 0.939295 0.154106
156 1 0.774177 0.12109 0.189527
909 1 0.804674 0.906321 0.252754
1088 1 0.813925 0.815345 0.272689
1760 1 0.704729 0.879094 0.208795
774 1 0.732411 0.938325 0.296759
620 1 0.868327 0.881479 0.30263
1713 1 0.80095 0.879307 0.331577
286 1 0.923472 0.974165 0.331669
1427 1 0.880286 0.24381 0.0235873
1780 1 0.834303 0.949461 0.334782
771 1 0.909559 0.913469 0.175802
1720 1 0.864145 0.861452 0.222566
576 1 0.91975 0.89631 0.24951
283 1 0.562387 0.151224 0.282512
1883 1 0.907585 0.846958 0.147922
1953 1 0.657269 0.0324644 0.366986
159 1 0.620573 0.0489418 0.303924
1398 1 0.508591 0.977478 0.193126
2005 1 0.499677 0.430318 0.416141
285 1 0.620524 0.0968515 0.3636
2000 1 0.564081 0.0791672 0.289837
1796 1 0.543518 0.115921 0.35936
352 1 0.896557 0.935447 0.454568
616 1 0.640287 0.00464305 0.445238
996 1 0.967777 0.780515 0.0698204
997 1 0.811328 0.32216 0.497073
1603 1 0.676347 0.981243 0.297892
651 1 0.729002 0.10706 0.463768
1784 1 0.763208 0.528485 0.42561
1617 1 0.877527 0.0129303 0.394098
1538 1 0.828262 0.157923 0.403975
577 1 0.773618 0.199137 0.440439
252 1 0.908226 0.159887 0.405379
495 1 0.557718 0.039023 0.352398
97 1 0.752491 0.972518 0.414666
1430 1 0.869966 0.0420185 0.322592
1595 1 0.895408 0.377365 0.0512997
71 1 0.54921 0.520193 0.429514
332 1 0.654975 0.115555 0.484344
1513 1 0.966051 0.211895 0.385742
501 1 0.995297 0.453306 0.082934
370 1 0.603223 0.0647696 0.0126999
476 1 0.501773 0.4173 0.494394
186 1 0.988993 0.830096 0.126108
1995 1 0.603756 0.124252 0.427003
35 1 0.534291 0.193715 0.388298
545 1 0.647158 0.234244 0.352324
385 1 0.611266 0.184644 0.381581
1650 1 0.974262 0.145264 0.111268
1633 1 0.56652 0.842866 0.430395
230 1 0.573722 0.311271 0.454863
893 1 0.80473 0.366272 0.443102
910 1 0.677911 0.14286 0.350259
1547 1 0.844078 0.113177 0.0194386
1455 1 0.577902 0.400733 0.490549
915 1 0.856874 0.201038 0.469404
172 1 0.742976 0.135575 0.284858
1874 1 0.723253 0.380813 0.400833
1346 1 0.823151 0.263531 0.453518
1668 1 0.723925 0.19332 0.350302
248 1 0.96511 0.242343 0.297597
41 1 0.796927 0.986815 0.00656881
434 1 0.797562 0.170475 0.321788
1482 1 0.53883 0.875594 0.233961
109 1 0.563948 0.0707829 0.126452
74 1 0.923908 0.451033 0.469587
484 1 0.635095 0.924053 0.463409
1693 1 0.522392 0.763345 0.107414
1269 1 0.576413 0.252292 0.399106
686 1 0.602974 0.376761 0.410253
1884 1 0.661045 0.856816 0.27406
1630 1 0.664449 0.3118 0.305656
1163 1 0.730392 0.488244 0.379987
1345 1 0.7376 0.363262 0.476223
1758 1 0.997316 0.684007 0.187486
1861 1 0.60929 0.844449 0.346419
1468 1 0.876338 0.214333 0.356576
1933 1 0.807314 0.241853 0.360614
926 1 0.767495 0.327218 0.384781
226 1 0.712685 0.412798 0.300195
1119 1 0.77817 0.442156 0.406876
1960 1 0.69612 0.910224 0.42641
1135 1 0.814185 0.499522 0.375737
1414 1 0.917378 0.366268 0.383474
599 1 0.900373 0.31689 0.439646
1599 1 0.879245 0.11557 0.315455
1368 1 0.94627 0.293336 0.176497
800 1 0.998391 0.287025 0.121033
1332 1 0.905844 0.244488 0.410551
78 1 0.938015 0.201511 0.46004
711 1 0.973478 0.900569 0.338589
1165 1 0.800134 0.0241246 0.331478
1046 1 0.535302 0.0331911 0.466339
223 1 0.887217 0.0952702 0.385979
1585 1 0.572296 0.46894 0.334188
2029 1 0.972756 0.570296 0.198886
1698 1 0.534516 0.401764 0.347602
1202 1 0.6067 0.536782 0.390256
400 1 0.532253 0.656783 0.43826
1791 1 0.510375 0.149105 0.226629
644 1 0.565967 0.478697 0.501911
587 1 0.86063 0.429954 0.405985
1899 1 0.745309 0.485481 0.289184
826 1 0.67143 0.447229 0.467449
383 1 0.703424 0.524274 0.479846
1833 1 0.867667 0.514162 0.460469
832 1 0.701861 0.601328 0.484491
206 1 0.770447 0.661206 0.483317
1266 1 0.976805 0.749192 0.322108
1405 1 0.911215 0.384077 0.260376
238 1 0.864691 0.677052 0.358687
1461 1 0.889198 0.390494 0.454389
1775 1 0.884663 0.423721 0.342574
553 1 0.574031 0.242543 0.486648
871 1 0.876715 0.594368 0.468915
1401 1 0.921823 0.710018 0.39319
641 1 0.600427 0.618975 0.301944
1477 1 0.98524 0.696589 0.267917
316 1 0.522964 0.475678 0.282664
976 1 0.635021 0.631161 0.445087
974 1 0.601341 0.626623 0.373051
678 1 0.758234 0.0547881 0.277159
1111 1 0.809504 0.667726 0.423974
296 1 0.77626 0.734947 0.475908
160 1 0.646926 0.694722 0.403121
50 1 0.996388 0.496482 0.247958
1537 1 0.634162 0.769135 0.444742
308 1 0.767911 0.601244 0.409169
1941 1 0.744631 0.811125 0.486139
124 1 0.717299 0.656302 0.409533
2048 1 0.666985 0.576177 0.338954
1885 1 0.515101 0.857617 0.122545
1561 1 0.764775 0.577428 0.48607
1230 1 0.900704 0.488743 0.379727
2015 1 0.852512 0.735029 0.474829
731 1 0.9603 0.670139 0.344615
862 1 0.914683 0.783636 0.412974
1421 1 0.977245 0.655362 0.416506
1987 1 0.90084 0.657881 0.430386
811 1 0.549109 0.782688 0.393185
170 1 0.647782 0.791367 0.376796
1349 1 0.636214 0.857096 0.426062
253 1 0.699277 0.973654 0.369448
1221 1 0.576504 0.785246 0.481514
1710 1 0.591304 0.903259 0.383876
492 1 0.655952 0.907635 0.334882
53 1 0.901319 0.696754 0.289637
1177 1 0.737889 0.786862 0.316206
1967 1 0.980593 0.140799 0.212605
1122 1 0.930843 0.848878 0.344422
1946 1 0.715887 0.737256 0.414002
1707 1 0.772269 0.776338 0.38802
812 1 0.574882 0.589355 0.442715
1409 1 0.709794 0.813327 0.425913
1600 1 0.857778 0.822437 0.336861
527 1 0.55047 0.281436 0.0796719
1994 1 0.771069 0.877796 0.424695
1579 1 0.845818 0.74795 0.392719
1507 1 0.826157 0.793783 0.440505
271 1 0.942513 0.87121 0.441801
747 1 0.728326 0.300417 0.0246706
853 1 0.682311 0.160871 0.422032
1360 1 0.534202 0.689095 0.115973
911 1 0.890983 0.8311 0.474844
409 1 0.898105 0.32669 0.00169017
1892 1 0.951995 0.80479 0.494119
714 1 0.629328 0.62477 0.00197386
1805 1 0.990108 0.821297 0.797203
742 1 0.823401 0.740516 0.996709
836 1 0.988338 0.882317 0.826252
1092 1 0.786281 0.931071 0.532153
1199 1 0.669474 0.136275 0.587714
1768 1 0.53834 0.0294966 0.645072
875 1 0.622198 0.996864 0.644693
626 1 0.559932 0.963966 0.688709
825 1 0.986454 0.0657598 0.578777
1802 1 0.551254 0.995812 0.547657
918 1 0.505297 0.849673 0.772922
1924 1 0.701713 0.0647875 0.524477
1154 1 0.807998 0.0593889 0.610289
2035 1 0.905053 0.186346 0.526695
1358 1 0.786077 0.194447 0.695667
1839 1 0.893136 0.858866 0.968724
1492 1 0.746925 0.170756 0.562224
1815 1 0.740181 0.102655 0.595271
719 1 0.843459 0.229291 0.577829
1418 1 0.70885 0.153766 0.651041
951 1 0.883435 0.137599 0.750713
274 1 0.610523 0.60172 0.536561
1638 1 0.998924 0.888958 0.980229
1764 1 0.86308 0.11353 0.625229
1929 1 0.959746 0.133292 0.534858
2039 1 0.784032 0.226086 0.513916
866 1 0.512551 0.76693 0.510733
1772 1 0.876905 0.118285 0.552079
1324 1 0.924424 0.0590839 0.524008
763 1 0.579687 0.658221 0.509703
1701 1 0.932163 0.0983044 0.594802
939 1 0.557918 0.924561 0.977086
183 1 0.619882 0.18676 0.619191
227 1 0.513174 0.0879917 0.519111
1545 1 0.558267 0.0661555 0.574555
536 1 0.57192 0.56394 0.937753
2021 1 0.512817 0.253429 0.51776
1301 1 0.519774 0.176352 0.549961
1176 1 0.687082 0.215752 0.621611
618 1 0.732795 0.274977 0.578156
481 1 0.640539 0.203712 0.547546
426 1 0.963028 0.022203 0.933568
1175 1 0.645855 0.558161 0.891287
986 1 0.877194 0.311295 0.598742
590 1 0.823556 0.354689 0.564179
1495 1 0.723264 0.230902 0.701828
350 1 0.753642 0.299457 0.66509
533 1 0.794909 0.127181 0.63757
782 1 0.978061 0.290654 0.776547
1636 1 0.634826 0.726454 0.510029
1785 1 0.921626 0.278148 0.645772
1801 1 0.772126 0.35697 0.622597
1158 1 0.871552 0.176145 0.695222
1891 1 0.656638 0.31051 0.567237
1562 1 0.618429 0.319418 0.722161
458 1 0.768564 0.0926859 0.528416
203 1 0.591845 0.462175 0.590239
842 1 0.620944 0.0419616 0.554625
1279 1 0.575456 0.238608 0.57637
1800 1 0.699212 0.355622 0.606984
2003 1 0.670782 0.293989 0.674247
629 1 0.884528 0.402198 0.705835
396 1 0.818745 0.464171 0.651692
1909 1 0.678142 0.372439 0.532226
2 1 0.793479 0.530063 0.671164
956 1 0.745415 0.34607 0.550727
841 1 0.813615 0.161531 0.5414
496 1 0.881762 0.302561 0.715809
705 1 0.931965 0.359552 0.587272
1460 1 0.725581 0.887582 0.50203
677 1 0.539194 0.747752 0.779321
86 1 0.81811 0.424741 0.588504
2001 1 0.855593 0.438429 0.515941
643 1 0.969988 0.259843 0.974549
1550 1 0.99115 0.855277 0.701619
973 1 0.530106 0.634079 0.709136
220 1 0.652678 0.425387 0.602173
1105 1 0.689578 0.450499 0.692154
1121 1 0.990117 0.967331 0.856051
1731 1 0.5635 0.537828 0.593607
968 1 0.959522 0.648535 0.782481
19 1 1.00047 0.134741 0.669906
1577 1 0.653426 0.552147 0.598687
211 1 0.565775 0.746214 0.991934
902 1 0.604862 0.639134 0.69007
615 1 0.608621 0.5539 0.666818
417 1 0.988247 0.119893 0.863648
419 1 0.986757 0.484776 0.769274
294 1 0.585774 0.414165 0.673223
1422 1 0.72464 0.421589 0.580346
346 1 0.937422 0.428891 0.783976
1593 1 0.830076 0.0436065 0.516971
257 1 0.754086 0.491659 0.614649
359 1 0.732008 0.587957 0.632154
819 1 0.803416 0.527254 0.577449
687 1 0.720996 0.548353 0.562534
391 1 0.930547 0.976433 0.511691
245 1 0.906717 0.495983 0.532077
1433 1 0.51152 0.387276 0.808607
1411 1 0.545284 0.565575 0.524146
963 1 0.862058 0.56475 0.532956
319 1 0.837486 0.596165 0.643503
1628 1 0.944859 0.472421 0.670887
1029 1 0.872178 0.495614 0.616078
1906 1 0.569713 0.744178 0.55575
1173 1 0.595233 0.729715 0.635955
1007 1 0.64267 0.67736 0.639216
1798 1 0.680719 0.634342 0.571124
1554 1 0.542758 0.67167 0.652683
1998 1 0.597838 0.236701 0.980689
94 1 0.805248 0.598864 0.577327
816 1 0.868458 0.668304 0.632236
240 1 0.566586 0.618039 0.590206
2017 1 0.72542 0.72743 0.611156
981 1 0.725435 0.66335 0.638262
1790 1 0.7453 0.617763 0.539058
1489 1 0.799005 0.689652 0.638458
1555 1 0.606909 0.680243 0.573664
1407 1 0.597808 0.00236326 0.7708
424 1 0.898651 0.612939 0.58905
20 1 0.636235 0.934082 0.534504
349 1 0.50502 0.463336 0.837164
1983 1 0.921615 0.165015 0.647139
1736 1 0.972687 0.714951 0.554863
1087 1 0.842816 0.643065 0.70244
1536 1 0.527563 0.027512 0.748805
1075 1 0.61194 0.797578 0.54852
1542 1 0.519411 0.318553 0.76321
289 1 0.594601 0.867849 0.501445
301 1 0.571451 0.871427 0.590773
107 1 0.547642 0.922654 0.53903
1519 1 0.682629 0.833946 0.572906
1608 1 0.689061 0.859026 0.65399
1845 1 0.516871 0.734425 0.619552
1683 1 0.75806 0.92546 0.65022
1098 1 0.540566 0.315269 0.564492
654 1 0.668837 0.826367 0.500815
943 1 0.714737 0.900616 0.585097
49 1 0.787186 0.760841 0.639855
148 1 0.729111 0.782013 0.563717
1605 1 0.7533 0.835624 0.61303
22 1 0.850174 0.729355 0.668757
205 1 0.846244 0.718235 0.580512
1581 1 0.828588 0.803533 0.701646
1890 1 0.972621 0.849468 0.622727
787 1 0.904269 0.699959 0.534081
62 1 0.939743 0.016534 0.736286
207 1 0.949948 0.573071 0.553262
1598 1 0.950968 0.676664 0.642883
550 1 0.909055 0.734162 0.615041
204 1 0.822249 0.80728 0.597649
152 1 0.621531 0.430032 0.537781
1592 1 0.666662 0.970999 0.598649
1700 1 0.54324 0.595052 0.993854
1478 1 0.530833 0.347356 0.500454
1278 1 0.846959 0.96609 0.562867
1054 1 0.676127 0.251112 0.993029
1467 1 0.841992 0.927184 0.953147
953 1 0.632423 0.902833 0.618304
1653 1 0.960639 0.789256 0.740125
780 1 0.937012 0.0560482 0.794397
1627 1 0.738042 0.996834 0.578645
101 1 0.779388 0.855018 0.546143
1146 1 0.787417 0.951387 0.712624
1040 1 0.668645 0.940684 0.67914
1224 1 0.714552 0.00249947 0.661184
1417 1 0.604068 0.36065 0.55259
1036 1 0.522661 0.222393 0.964752
803 1 0.894268 0.924772 0.814691
1741 1 0.796947 0.784352 0.532408
732 1 0.816076 0.902143 0.603137
985 1 0.871334 0.843523 0.565967
535 1 0.85222 0.906874 0.517586
447 1 0.796817 0.988699 0.640765
1128 1 0.880377 0.80432 0.645593
36 1 0.891082 0.0284371 0.579976
463 1 0.527642 0.385074 0.558667
1706 1 0.741488 0.535116 0.935955
1051 1 0.592843 0.0426594 0.716761
921 1 0.685277 0.195712 0.783422
1860 1 0.610296 0.122701 0.640719
849 1 0.657697 0.184859 0.708296
2023 1 0.606079 0.156185 0.834292
1803 1 0.628951 0.291516 0.816362
818 1 0.682654 0.0623883 0.594116
1003 1 0.551706 0.102325 0.742359
1454 1 0.654868 0.00839513 0.720326
1371 1 0.566766 0.0701739 0.854549
960 1 0.751299 0.064381 0.65785
920 1 0.810796 0.278165 0.703026
594 1 0.72511 0.11697 0.710994
162 1 0.755334 0.144921 0.786185
713 1 0.800528 0.0984726 0.84542
141 1 0.723688 0.0543058 0.849541
462 1 0.646635 0.280209 0.500375
197 1 0.974409 0.414176 0.99071
556 1 0.930861 0.0975977 0.695155
542 1 0.92639 0.0185873 0.650624
1233 1 0.837392 0.191042 0.761798
1217 1 0.851337 0.0833024 0.792069
1623 1 0.9935 0.966621 0.687196
1162 1 0.804914 0.131016 0.731419
1977 1 0.75766 0.0794351 0.761021
579 1 0.5527 0.303518 0.687947
967 1 0.790933 0.285132 0.842555
1817 1 0.682277 0.338979 0.780644
895 1 0.594384 0.184367 0.753322
1927 1 0.790325 0.808757 0.937673
189 1 0.578459 0.280442 0.766989
621 1 0.525015 0.216824 0.713425
2016 1 0.564025 0.231602 0.822621
1399 1 0.811579 0.879307 0.995579
1155 1 0.657393 0.267719 0.747295
123 1 0.76281 0.227881 0.640696
1610 1 0.595899 0.241826 0.680976
597 1 0.84641 0.374684 0.649586
740 1 0.740644 0.223614 0.857403
1366 1 0.742078 0.252972 0.78531
1383 1 0.757818 0.171556 0.900408
1498 1 0.81561 0.258002 0.782626
1588 1 0.857037 0.30937 0.814545
526 1 0.761042 0.917845 0.876498
1894 1 0.873316 0.226984 0.827335
344 1 0.884158 0.240267 0.900469
1708 1 0.81581 0.197659 0.865865
1718 1 0.857757 0.152683 0.826615
2043 1 0.940548 0.347099 0.748916
315 1 0.941491 0.156235 0.802107
623 1 0.937007 0.729105 0.954291
215 1 0.590584 0.528108 0.844877
1857 1 0.603351 0.296265 0.627154
343 1 0.97236 0.813944 0.987563
288 1 0.7189 0.396968 0.74347
127 1 0.943619 0.164381 0.715513
1151 1 0.567623 0.380067 0.728748
1778 1 0.653399 0.375564 0.692947
1753 1 0.49926 0.351717 0.689778
540 1 0.531617 0.483312 0.983712
979 1 0.709313 0.294532 0.849036
855 1 0.643327 0.397661 0.796348
2019 1 0.78693 0.371314 0.711933
1612 1 0.719827 0.457258 0.794216
934 1 0.733618 0.298259 0.734501
827 1 0.833656 0.312875 0.90021
837 1 0.633756 0.477149 0.655293
321 1 0.740449 0.359045 0.83295
1021 1 0.823105 0.378123 0.847602
1828 1 0.791312 0.492131 0.772521
500 1 0.53485 0.72598 0.860957
1079 1 0.904968 0.424307 0.619734
453 1 0.899897 0.36518 0.891279
177 1 0.888409 0.174114 0.889951
1372 1 0.946206 0.272346 0.711172
1429 1 0.791899 0.321559 0.772635
1645 1 0.972073 0.334293 0.668694
1596 1 0.663701 0.316012 0.952408
300 1 0.862076 0.433062 0.78949
1473 1 0.733468 0.779889 0.990485
1937 1 0.661577 0.607048 0.656495
1570 1 0.578153 0.216706 0.906167
1621 1 0.537331 0.569935 0.665743
2012 1 0.552949 0.201482 0.636819
320 1 0.621036 0.531103 0.753643
1509 1 0.56254 0.560204 0.726639
120 1 0.558274 0.476219 0.770484
278 1 0.706399 0.517197 0.672043
113 1 0.743631 0.651005 0.729161
379 1 0.788757 0.398934 0.780634
174 1 0.686758 0.502301 0.749477
777 1 0.753578 0.562614 0.751763
698 1 0.779629 0.598024 0.685565
104 1 0.936755 0.265007 0.530836
1002 1 0.905185 0.496099 0.801467
1647 1 0.939016 0.563567 0.623196
2036 1 0.805934 0.511464 0.506039
1286 1 0.97712 0.456459 0.831776
716 1 0.817365 0.548059 0.898585
886 1 0.83233 0.548793 0.759812
402 1 0.580286 0.603206 0.860972
1271 1 0.556569 0.733361 0.710964
1458 1 0.67032 0.667211 0.731671
1678 1 0.631314 0.766807 0.805232
1836 1 0.720495 0.736986 0.759677
1381 1 0.692802 0.688681 0.83969
1685 1 0.591732 0.605232 0.783895
1502 1 0.806664 0.620073 0.755237
1506 1 0.687197 0.579718 0.718171
1284 1 0.755947 0.656601 0.803306
2031 1 0.815658 0.694659 0.750771
1276 1 0.66148 0.596556 0.832512
467 1 0.767114 0.611796 0.923382
548 1 0.800147 0.642933 0.861604
281 1 0.667749 0.72696 0.690307
1294 1 0.871224 0.534836 0.683524
328 1 0.978836 0.701912 0.891677
1167 1 0.847938 0.574088 0.844833
60 1 0.53087 0.43073 0.624876
363 1 0.907213 0.600264 0.741446
297 1 0.666264 0.154307 0.96782
477 1 0.960423 0.771998 0.862278
1971 1 0.879848 0.637288 0.80628
82 1 0.616038 0.736001 0.740945
2022 1 0.672083 0.783633 0.635101
1867 1 0.588371 0.801604 0.642202
1503 1 0.576942 0.680643 0.753432
1251 1 0.525791 0.677914 0.555646
423 1 0.536438 0.951394 0.776204
966 1 0.634259 0.838418 0.791615
937 1 0.547089 0.891373 0.731655
93 1 0.569126 0.816298 0.755189
112 1 0.648617 0.804638 0.720339
595 1 0.720284 0.787503 0.700461
1527 1 0.761958 0.718478 0.695965
176 1 0.839432 0.833272 0.781952
1667 1 0.86884 0.765142 0.751589
1264 1 0.761865 0.700521 0.874952
442 1 0.789577 0.765148 0.745754
1186 1 0.868379 0.809507 0.905477
1152 1 0.579705 0.378546 0.964244
1253 1 0.942858 0.90589 0.685272
1691 1 0.902807 0.682 0.736139
755 1 0.772264 0.86997 0.728641
413 1 0.916279 0.829547 0.709457
847 1 0.910532 0.744459 0.697283
1985 1 0.87382 0.777604 0.830337
39 1 0.616868 0.867571 0.714815
2044 1 0.806046 0.282729 0.606431
646 1 0.524346 0.156758 1.00103
1084 1 0.633428 0.785225 0.992185
858 1 0.679851 0.0667317 0.674555
1584 1 0.9456 0.623491 0.846892
378 1 0.608054 0.932541 0.767727
6 1 0.534963 0.920318 0.866501
1558 1 0.717908 0.027077 0.730777
108 1 0.798496 0.866061 0.665486
1047 1 0.697961 0.866343 0.726006
1377 1 0.66292 0.0587611 0.767673
944 1 0.706379 0.897997 0.793339
704 1 0.59598 0.953199 0.834119
1212 1 0.865888 0.0485696 0.657892
665 1 0.801811 0.0446937 0.702307
846 1 0.873354 0.981978 0.774042
1180 1 0.927237 0.868148 0.773477
1225 1 0.824989 0.951378 0.84053
318 1 0.559201 0.818841 0.981914
196 1 0.686392 0.945092 0.941915
1326 1 0.85812 0.901394 0.753126
73 1 0.874588 0.0621303 0.727006
1157 1 0.868557 0.986238 0.699985
558 1 0.858681 0.909617 0.682
338 1 0.541901 0.997055 0.839636
491 1 0.591509 0.00845036 0.967258
1213 1 0.541029 0.0443004 0.919259
1922 1 0.506514 0.111112 0.602188
764 1 0.655435 0.0857838 0.851712
326 1 0.635067 0.125969 0.761947
1546 1 0.884201 0.332005 0.510082
258 1 0.739383 0.964526 0.808312
38 1 0.794424 0.0086675 0.779912
451 1 0.72411 0.289248 0.501549
208 1 0.736512 0.0156676 0.938333
1873 1 0.773651 0.946992 0.94629
306 1 0.779366 0.993608 0.865163
1134 1 0.606222 0.99705 0.901587
1990 1 0.508571 0.598876 0.898456
1400 1 0.771321 0.997185 0.511213
1705 1 0.78686 0.402781 0.522951
1164 1 0.98434 0.365319 0.823219
273 1 0.805551 0.0539324 0.969469
1672 1 0.984758 0.0434965 0.856426
1232 1 0.709161 0.651476 0.989754
178 1 0.880561 0.0493261 0.910877
1331 1 0.936316 0.981537 0.801721
879 1 0.919658 0.114291 0.928676
1840 1 0.597231 0.152129 0.916261
1188 1 0.548434 0.388733 0.881048
88 1 0.986749 0.917096 0.557872
664 1 0.947823 0.481956 0.995222
1292 1 0.554998 0.31495 0.848863
752 1 0.735216 0.227318 0.951168
1091 1 0.659619 0.209637 0.920709
688 1 0.517064 0.429833 0.700928
1886 1 0.646558 0.273073 0.895628
1214 1 0.75679 0.0924929 0.913584
268 1 0.692569 0.140581 0.90474
309 1 0.643674 0.221446 0.847769
269 1 0.993284 0.0646117 0.72927
1565 1 0.789374 0.471216 0.949687
1893 1 0.803793 0.259809 0.950523
1231 1 0.819384 0.168429 0.981877
64 1 0.981484 0.619324 0.702998
773 1 0.988946 0.192135 0.931953
1147 1 0.757887 0.578422 0.996833
14 1 0.928125 0.632449 0.509114
737 1 0.955647 0.209901 0.864099
1846 1 0.834408 0.122573 0.908774
581 1 0.916053 0.0899415 0.85611
1777 1 0.9103 0.170055 0.977609
129 1 0.895524 0.245399 0.761346
693 1 0.515144 0.805138 0.596885
685 1 0.638528 0.35348 0.898075
1787 1 0.510941 0.270014 0.889022
962 1 0.592991 0.344761 0.791346
1465 1 0.600109 0.451889 0.907111
783 1 0.721961 0.346708 0.906568
262 1 0.661918 0.382155 0.987645
1727 1 0.851149 0.453497 0.905989
1298 1 0.690721 0.165635 0.508619
1063 1 0.743821 0.381037 0.986581
1597 1 0.899196 0.413201 0.956347
584 1 0.802683 0.391056 0.928937
272 1 0.978746 0.21846 0.746806
1185 1 0.838287 0.34789 0.965959
1410 1 0.531723 0.797258 0.693143
295 1 0.971422 0.317249 0.928457
437 1 0.895538 0.42687 0.853395
234 1 0.817972 0.538132 0.969291
466 1 0.935642 0.286783 0.863866
1190 1 0.90972 0.562428 0.810085
302 1 0.964287 0.536638 0.924341
1559 1 0.555514 0.101467 0.957445
1534 1 0.855437 0.26955 0.519918
482 1 0.6536 0.50156 0.81797
1917 1 0.625009 0.436183 0.733543
1300 1 0.597353 0.440823 0.830159
563 1 0.692049 0.409789 0.850016
1811 1 0.744709 0.476535 0.874649
1149 1 0.551051 0.502115 0.909212
59 1 0.551464 0.322886 0.926047
1526 1 0.716761 0.427038 0.932988
1402 1 0.769628 0.449263 0.71495
9 1 0.776475 0.659057 0.980018
856 1 0.78455 0.55607 0.823834
1096 1 0.839157 0.486629 0.834474
42 1 0.691915 0.824132 0.838506
452 1 0.725005 0.583332 0.860547
1426 1 0.921892 0.426431 0.550688
469 1 0.640011 0.505474 0.547451
411 1 0.554889 0.496842 0.653587
1013 1 0.90937 0.97946 0.875006
1809 1 0.894568 0.503392 0.892775
1019 1 0.945777 0.459902 0.917683
92 1 0.672074 0.990034 0.519626
833 1 0.885763 0.501649 0.969936
1851 1 0.830888 0.992372 0.918673
602 1 0.975549 0.526828 0.83994
435 1 0.677995 0.503556 0.919028
1200 1 0.524239 0.612216 0.816075
831 1 0.640135 0.640818 0.900945
76 1 0.500632 0.717522 0.953753
1516 1 0.843621 0.430676 0.989205
37 1 0.949392 0.87721 0.51184
416 1 0.623939 0.524302 0.965858
1879 1 0.691421 0.580003 0.962315
1342 1 0.566584 0.362441 0.620717
171 1 0.734849 0.72528 0.937881
660 1 0.714851 0.755242 0.86095
1057 1 0.981016 0.111467 0.971121
358 1 0.836844 0.617 0.96022
1043 1 0.86983 0.663899 0.872741
1074 1 0.905853 0.0533773 0.975576
1436 1 0.983998 0.738113 0.794001
1820 1 0.665659 0.734217 0.569936
1640 1 0.996235 0.583757 0.890362
1905 1 0.945044 0.632168 0.914389
598 1 0.890733 0.668239 0.973634
1379 1 0.639998 0.0636397 0.939914
788 1 0.89087 0.575585 0.914071
179 1 0.683553 0.80059 0.920256
1974 1 0.616322 0.843362 0.931805
1444 1 0.561497 0.777949 0.915814
1337 1 0.568701 0.668627 0.833545
1642 1 0.566995 0.875606 0.798783
131 1 0.612467 0.809244 0.86512
1211 1 0.733087 0.827899 0.777767
1415 1 0.685482 0.902528 0.871782
1141 1 0.778223 0.881251 0.801653
695 1 0.510744 0.539793 0.817708
1631 1 0.67885 0.970982 0.876005
1462 1 0.631475 0.716075 0.869993
1808 1 0.807033 0.769163 0.823936
1304 1 0.72551 0.935149 0.735228
1984 1 0.817648 0.689414 0.928526
1729 1 0.891836 0.909898 0.895171
1363 1 0.817116 0.864426 0.884641
377 1 0.891298 0.735195 0.890412
1166 1 0.734726 0.861623 0.914906
984 1 0.954562 0.979519 0.580757
938 1 0.908114 0.840601 0.841892
609 1 0.868677 0.782678 0.518677
994 1 0.932985 0.707574 0.829057
1648 1 0.945498 0.827879 0.912995
47 1 0.618846 0.886571 0.854396
23 1 0.659866 0.961623 0.802359
792 1 0.549056 0.970245 0.92409
940 1 0.602324 0.924131 0.927946
567 1 0.629161 0.0255365 0.830368
541 1 0.901191 0.937784 0.600856
2041 1 0.543315 0.859191 0.903105
1862 1 0.888072 0.783177 0.966459
180 1 0.848938 0.0287615 0.8411
958 1 0.766653 0.818082 0.854854
913 1 0.755397 0.708676 0.554973
1625 1 0.541261 0.811012 0.836937
1992 1 0.987439 0.903024 0.754873
575 1 0.996184 0.728197 0.719436
1572 1 0.502182 0.247736 0.591546
1317 1 0.49962 0.00343709 0.965863
988 1 0.992795 0.524164 0.690209
2026 1 0.517106 0.840671 0.506215
310 1 0.953446 0.555779 0.995529
369 1 0.85318 0.654951 0.500881
1769 1 0.551821 0.286594 0.988603
| [
"[email protected]"
] | |
3033e6d1836363f7bb5afdf44a0c0c1d5e093bf0 | ad372f7753c70e3997d035097ee03f740a5fb068 | /pygym/custom_storage.py | 83d139b7b163479af1d7152929f4ca060d13b04d | [] | no_license | Insper/servidor-de-desafios | a5f09fe9368887b06b98800f2bb8f35ff13f80a9 | 9875e9b9248c14237161ca73983595f7d929e963 | refs/heads/master | 2022-12-14T17:28:42.963112 | 2022-09-12T19:18:36 | 2022-09-12T19:18:36 | 167,026,050 | 3 | 42 | null | 2022-12-08T07:36:47 | 2019-01-22T16:19:46 | Python | UTF-8 | Python | false | false | 1,053 | py | # Source: https://github.com/druids/django-chamber/blob/master/chamber/storages/boto3.py
from django.core.files.base import ContentFile
from storages.backends.s3boto3 import S3Boto3Storage
def force_bytes_content(content, blocksize=1024):
    """Return a ``(content, was_converted)`` tuple.

    Peeks at the first *blocksize* units of *content* (a file-like
    object) to determine whether it yields ``bytes``.  If it already
    does, the original object is handed back untouched together with
    ``False``.  Otherwise the full text is read, encoded (using
    ``content.encoding`` when present and not ``None``, UTF-8
    otherwise), wrapped in a ``ContentFile``, and returned with
    ``True``.
    """
    sample = content.read(blocksize)
    # Rewind so the caller (or the re-read below) sees the full stream.
    content.seek(0)

    if isinstance(sample, bytes):
        # Already binary: nothing to convert.
        return content, False

    encoding = getattr(content, 'encoding', None)
    if encoding is None:
        encoding = 'utf-8'
    encoded = bytes(content.read(), encoding)
    return ContentFile(encoded), True
class MediaStorage(S3Boto3Storage):
    """S3 backend for uploaded media in the ``softdes-static`` bucket.

    Files are stored under the ``media/`` prefix.  Compared to the stock
    ``S3Boto3Storage`` it accepts ``pathlib.Path`` names and coerces
    text-mode content to bytes before uploading.
    """

    bucket_name = 'softdes-static'
    location = 'media'

    def _clean_name(self, name):
        # Coerce pathlib.Path objects to str before delegating upstream.
        return super()._clean_name(str(name))

    def save(self, name, content, max_length=None):
        # Ensure the payload is a bytes-yielding file object; boto3
        # uploads require binary content.
        coerced, _converted = force_bytes_content(content)
        return super().save(name, coerced, max_length)
| [
"[email protected]"
] | |
e6aaeb095bc5f80a528def54a281d8534449f9a1 | a1566f5733611a930c82990e4c7c5d05664c1eb0 | /jspy1/test.py | 8809144780e3fc2a8e4db10d0a0ba8223aaefaa0 | [] | no_license | sjbrown/misc_work | 95e7020c9fb0a2fe840c7a4e25bf30a30b26aaea | 4215e3d612811426686b1a90db4f975052c4023d | refs/heads/master | 2022-10-21T02:05:04.928161 | 2022-10-12T23:43:56 | 2022-10-12T23:43:56 | 18,151,228 | 5 | 3 | null | 2021-05-29T21:47:17 | 2014-03-26T19:38:07 | Python | UTF-8 | Python | false | false | 47 | py | import os
def foo() -> bool:
    """Placeholder predicate; always returns ``False``."""
    return False
# Smoke-call at import time; the return value is discarded.
foo()
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.