Dataset schema (one row per source file):

| column | type | observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 distinct values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 distinct values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string, nullable | 22 distinct values |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string, nullable | 149 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 distinct values |
| is_generated | bool | 2 distinct values |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 distinct values |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
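Each record below follows this schema, one source file per record. As a minimal sketch of how such a dump could be consumed, assuming a Hugging Face-style dataset (the dataset id below is a placeholder, not the real one):

from datasets import load_dataset

# placeholder id -- substitute the dataset this dump was actually exported from
ds = load_dataset("example-org/python-code-dump", split="train", streaming=True)
for row in ds:
    # each row carries the columns listed above, including the raw file text
    if not row["is_vendor"] and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
        break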
2ae90b6dffc7987765ffacf37be52a03f335f474 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_WeekOfYear_SVR.py | 87afffb284e8c95e2d354f9e473c99b687fc4781 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 162 | py |

import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['MovingMedian'] , ['Seasonal_WeekOfYear'] , ['SVR'] );

 | [
"[email protected]"
] | |
114635dbdd614a124e3ed8f049bf0f44250abed7 | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_6000~/B_6749.py | 679d26e42d51d74c4f4a7e4ff51b701aad91d01 | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 47 | py |

X = int(input())
Y = int(input())
print(2*Y-X)

 | [
"[email protected]"
] | |
7a4946bf049ebf1ed6ee4360fa21fb2fa3271c02 | fe19d2fac4580d463132e61509bd6e3cc2cf958d | /toontown/coghq/MoleFieldBase.py | 508dfa3fef04f5eda4c74bb6ba48a4b973d9e3de | [] | no_license | t00nt0wn1dk/c0d3 | 3e6db6dd42c3aa36ad77709cf9016176a3f3a44f | 7de105d7f3de0f8704b020e32fd063ee2fad8d0d | refs/heads/master | 2021-01-01T16:00:15.367822 | 2015-03-21T21:25:52 | 2015-03-21T21:25:55 | 32,647,654 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py |

# 2013.08.22 22:19:18 Pacific Daylight Time
# Embedded file name: toontown.coghq.MoleFieldBase
import random
HILL_MOLE = 0
HILL_BOMB = 1
HILL_WHACKED = 2
HILL_COGWHACKED = 3
class MoleFieldBase():
__module__ = __name__
WHACKED = 1
MoveUpTimeMax = 1
MoveUpTimeMultiplier = 0.95
MoveUpTimeMin = 0.5
StayUpTimeMax = 7
StayUpTimeMultiplier = 0.95
StayUpTimeMin = 3
MoveDownTimeMax = 1
MoveDownTimeMultiplier = 0.95
MoveDownTimeMin = 0.5
TimeBetweenPopupMax = 1.5
TimeBetweenPopupMultiplier = 0.95
TimeBetweenPopupMin = 0.25
DamageOnFailure = 20
def getRng(self):
return random.Random(self.entId * self.level.doId)
def scheduleMoles(self):
self.schedule = []
totalTime = 0
curMoveUpTime = self.MoveUpTimeMax
curMoveDownTime = self.MoveDownTimeMax
curTimeBetweenPopup = self.TimeBetweenPopupMax
curStayUpTime = self.StayUpTimeMax
curTime = 3
eligibleMoles = range(self.numMoles)
self.getRng().shuffle(eligibleMoles)
usedMoles = []
self.notify.debug('eligibleMoles=%s' % eligibleMoles)
self.endingTime = 0
randOb = random.Random(self.entId * self.level.doId)
while self.endingTime < self.GameDuration:
if len(eligibleMoles) == 0:
eligibleMoles = usedMoles
self.getRng().shuffle(usedMoles)
usedMoles = []
self.notify.debug('eligibleMoles=%s' % eligibleMoles)
moleIndex = eligibleMoles[0]
eligibleMoles.remove(moleIndex)
usedMoles.append(moleIndex)
moleType = randOb.choice([HILL_MOLE,
HILL_MOLE,
HILL_MOLE,
HILL_BOMB])
self.schedule.append((curTime,
moleIndex,
curMoveUpTime,
curStayUpTime,
curMoveDownTime,
moleType))
curTime += curTimeBetweenPopup
curMoveUpTime = self.calcNextMoveUpTime(curTime, curMoveUpTime)
curStayUpTime = self.calcNextStayUpTime(curTime, curStayUpTime)
curMoveDownTime = self.calcNextMoveDownTime(curTime, curMoveDownTime)
curTimeBetweenPopup = self.calcNextTimeBetweenPopup(curTime, curTimeBetweenPopup)
self.endingTime = curTime + curMoveUpTime + curStayUpTime + curMoveDownTime
self.schedule.pop()
self.endingTime = self.schedule[-1][0] + self.schedule[-1][2] + self.schedule[-1][3] + self.schedule[-1][4]
self.notify.debug('schedule length = %d, endingTime=%f' % (len(self.schedule), self.endingTime))
def calcNextMoveUpTime(self, curTime, curMoveUpTime):
newMoveUpTime = curMoveUpTime * self.MoveUpTimeMultiplier
if newMoveUpTime < self.MoveDownTimeMin:
newMoveUpTime = self.MoveDownTimeMin
return newMoveUpTime
def calcNextStayUpTime(self, curTime, curStayUpTime):
newStayUpTime = curStayUpTime * self.StayUpTimeMultiplier
if newStayUpTime < self.StayUpTimeMin:
newStayUpTime = self.StayUpTimeMin
return newStayUpTime
def calcNextMoveDownTime(self, curTime, curMoveDownTime):
newMoveDownTime = curMoveDownTime * self.MoveDownTimeMultiplier
if newMoveDownTime < self.MoveDownTimeMin:
newMoveDownTime = self.MoveDownTimeMin
return newMoveDownTime
def calcNextTimeBetweenPopup(self, curTime, curTimeBetweenPopup):
newTimeBetweenPopup = curTimeBetweenPopup * self.TimeBetweenPopupMultiplier
if newTimeBetweenPopup < self.TimeBetweenPopupMin:
newTimeBetweenPopup = self.TimeBetweenPopupMin
return newTimeBetweenPopup
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\MoleFieldBase.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:19:18 Pacific Daylight Time
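# Pacing sketch (not part of the decompiled file): each Max/Multiplier/Min
# triple above decays geometrically toward its floor, so the field speeds up
# early in the game and then plateaus, e.g. for the time between popups:
#
#   t = MoleFieldBase.TimeBetweenPopupMax             # 1.5 s at the start
#   for _ in range(30):
#       t = max(t * MoleFieldBase.TimeBetweenPopupMultiplier,
#               MoleFieldBase.TimeBetweenPopupMin)    # 5% faster per popup, 0.25 s floor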
| [
"[email protected]"
] | |
d4d403b01e00e44d6866bca55752f5509a8b15f8 | 748b8c66d8d9f77e047033e07142328ea6939138 | /utils/create-captions.py | 3a8f8271fec5c0ed4ec02b5ca917f56240ba6ce5 | [
"Apache-2.0"
] | permissive | nakamura196/neural-neighbors | 68f073511f10fb2df4c9efb2e504d2f9fb3b8455 | 277cb6e6a2102ad9d850c4397ca454ecb347dd1b | refs/heads/master | 2022-01-24T15:16:49.170431 | 2019-08-11T14:37:23 | 2019-08-11T14:37:23 | 201,567,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py |

import glob, random, os, json
files = glob.glob(
"assets/images/thumbs/*.jpg")
captions = {}
for file in files:
filename = file.split("/")[-1].split(".")[0]
captions[filename] = filename
fw = open("data/full-captions.json", 'w')
json.dump(captions, fw, ensure_ascii=False, indent=4,
          sort_keys=True, separators=(',', ': '))
fw.close()
| [
"[email protected]"
] | |
a01fac3aa0ec2d33e854eac1c3973308a9e2b23e | 8c0b804f1cc8cbf2f8788727df22a2cc149f7b5c | /gala/integrate/core.py | d7150e9b565f316c3bb7011aa15040378aa0d4e6 | [
"MIT"
] | permissive | adrn/gala | 579cc5a4ecb22df118e1c8a2322a46e935825054 | f62e1a6ae7a8466a4db5c8407471b524cf085637 | refs/heads/main | 2023-09-04T11:42:07.278388 | 2023-08-18T18:04:35 | 2023-08-18T18:04:35 | 17,577,779 | 115 | 89 | MIT | 2023-09-05T11:40:10 | 2014-03-10T00:56:18 | Python | UTF-8 | Python | false | false | 4,338 | py |

""" Base class for integrators. """
# Third-party
import numpy as np
# This project
from gala.units import UnitSystem, DimensionlessUnitSystem
__all__ = ["Integrator"]
class Integrator(object):
def __init__(
self,
func,
func_args=(),
func_units=None,
progress=False,
store_all=True,
):
if not hasattr(func, "__call__"):
raise ValueError(
"func must be a callable object, e.g., a function."
)
self.F = func
self._func_args = func_args
if func_units is not None and not isinstance(
func_units, DimensionlessUnitSystem
):
func_units = UnitSystem(func_units)
else:
func_units = DimensionlessUnitSystem()
self._func_units = func_units
self.progress = bool(progress)
self.store_all = store_all
def _get_range_func(self):
if self.progress:
try:
from tqdm import trange
return trange
except ImportError:
raise ImportError(
"tqdm must be installed to use progress=True when running "
f"{self.__class__.__name__}"
)
return range
def _prepare_ws(self, w0, mmap, n_steps):
"""
Decide how to make the return array. If ``mmap`` is False, this returns a full
array of zeros, but with the correct shape as the output. If ``mmap`` is True,
return a pointer to a memory-mapped array. The latter is particularly useful for
integrating a large number of orbits or integrating a large number of time
steps.
"""
from ..dynamics import PhaseSpacePosition
if not isinstance(w0, PhaseSpacePosition):
w0 = PhaseSpacePosition.from_w(w0)
arr_w0 = w0.w(self._func_units)
self.ndim, self.norbits = arr_w0.shape
self.ndim = self.ndim // 2
if self.store_all:
return_shape = (2 * self.ndim, n_steps + 1, self.norbits)
else:
return_shape = (2 * self.ndim, self.norbits)
if mmap is None:
# create the return arrays
ws = np.zeros(return_shape, dtype=float)
else:
if mmap.shape != return_shape:
raise ValueError(
"Shape of memory-mapped array doesn't match expected shape of "
f"return array ({mmap.shape} vs {return_shape})"
)
if not mmap.flags.writeable:
raise TypeError(
f"Memory-mapped array must be a writable mode, not '{mmap.mode}'"
)
ws = mmap
return w0, arr_w0, ws
def _handle_output(self, w0, t, w):
""" """
if w.shape[-1] == 1:
w = w[..., 0]
pos_unit = self._func_units["length"]
t_unit = self._func_units["time"]
vel_unit = pos_unit / t_unit
from ..dynamics import Orbit
orbit = Orbit(
pos=w[:self.ndim] * pos_unit,
vel=w[self.ndim:] * vel_unit,
t=t * t_unit,
)
return orbit
def run(self):
"""
Run the integrator starting from the specified phase-space position.
The initial conditions ``w0`` should be a
`~gala.dynamics.PhaseSpacePosition` instance.
There are a few combinations of keyword arguments accepted for
specifying the timestepping. For example, you can specify a fixed
timestep (``dt``) and a number of steps (``n_steps``), or an array of
times::
dt, n_steps[, t1] : (numeric, int[, numeric])
A fixed timestep dt and a number of steps to run for.
dt, t1, t2 : (numeric, numeric, numeric)
A fixed timestep dt, an initial time, and a final time.
t : array-like
An array of times to solve on.
Parameters
----------
w0 : `~gala.dynamics.PhaseSpacePosition`
Initial conditions.
**time_spec
Timestep information passed to
`~gala.integrate.time_spec.parse_time_specification`.
Returns
-------
orbit : `~gala.dynamics.Orbit`
"""
pass
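# Example usage (a sketch, not part of this file): ``run`` is implemented by
# concrete subclasses such as ``gala.integrate.LeapfrogIntegrator``. Assuming
# gala's documented API, integrating a pendulum-style system looks like:
#
#   import numpy as np
#   import gala.integrate as gi
#
#   def F(t, w):              # derivative function dw/dt for q'' = -sin(q)
#       q, p = w
#       return np.array([p, -np.sin(q)])
#
#   integrator = gi.LeapfrogIntegrator(F)
#   orbit = integrator.run(np.array([0., 1.]), dt=0.5, n_steps=100)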
| [
"[email protected]"
] | |
6a4b0552b744dec84088346b499869e5c6e2f442 | 284b88b3ff07430e17c04503f646db50677f627b | /Algorithm_w_Python/breakingRecords/breakin.py | bcea1259a3ac5551c57727e3d6f685001859b3ce | [] | no_license | ybgirgin3/hackerrank-solutions | ae61c27173c24c920f6e002a12a1acd20928cf59 | 58c4f62585d115eff3e1a43595d6a8375f185696 | refs/heads/master | 2023-05-27T11:32:06.099639 | 2021-06-13T18:12:22 | 2021-06-13T18:12:22 | 280,741,585 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py |

#!/usr/bin/python3
import math
import os
import random
import re
import sys


# Complete the breakingRecords function below.
def breakingRecords(scores):
    # Count how many times the season's best and worst records are broken:
    # track the running max/min and bump a counter on each new record.
    # (The original version, commented "not working" in Turkish, printed the
    # indices of the max/min scores instead of counting broken records.)
    best = worst = scores[0]
    best_count = worst_count = 0
    for score in scores[1:]:
        if score > best:
            best = score
            best_count += 1
        elif score < worst:
            worst = score
            worst_count += 1
    print(best_count, worst_count)
if __name__ == '__main__':
n = int(input())
scores = list(map(int, input().rstrip().split()))
result = breakingRecords(scores)
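# Sample run (HackerRank's example): the best record is broken twice (20, 25)
# and the worst four times (5, 4, 2, 1):
#
#   $ printf '9\n10 5 20 20 4 5 2 25 1\n' | python3 breakin.py
#   2 4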
| [
"[email protected]"
] | |
8d86da40269921bffa1c814385522d20c36cf4da | 28b0928057e96da28b268cbe1fe64ede3a2a20c5 | /addons/edi_product/models/edi_product_record.py | eeff16a7f064d0c8c1e241ec2e87f49a8939bba3 | [] | no_license | sasakuma/odoo-edi | 370061221e09f7ade1a7753ff237ebec24b55694 | 31a0ff761be3984adc5d6ceaabe781801715ad14 | refs/heads/master | 2020-03-29T18:24:55.576689 | 2018-10-29T04:50:51 | 2018-10-29T04:50:51 | 150,211,179 | 0 | 0 | null | 2018-10-29T04:50:52 | 2018-09-25T05:14:48 | Python | UTF-8 | Python | false | false | 2,829 | py |

"""EDI product records"""
from odoo import api, fields, models
class EdiDocument(models.Model):
"""Extend ``edi.document`` to include EDI product records"""
_inherit = 'edi.document'
product_ids = fields.One2many('edi.product.record', 'doc_id',
string="Products")
inactive_product_ids = fields.One2many('edi.inactive.product.record',
'doc_id', string="Inactive Products")
class EdiProductRecord(models.Model):
"""EDI product record
This is the base model for EDI product records. Each row
represents a product that will be created or updated when the
document is executed.
The fields within each record represent the fields within the
source document, which may not exactly correspond to fields of the
``product.product`` model. For example: the source document may
define a weight as an integer number of grams, whereas the
``product.product.weight`` field is defined as a floating point
number of kilograms.
Derived models should implement :meth:`~.target_values`.
"""
_name = 'edi.product.record'
_inherit = 'edi.record.sync.active'
_description = "Product"
_edi_sync_target = 'product_id'
_edi_sync_via = 'default_code'
product_id = fields.Many2one('product.product', string="Product",
required=False, readonly=True, index=True,
auto_join=True)
description = fields.Char(string="Description", required=True,
readonly=True, default="Unknown")
@api.model
def targets_by_key(self, vlist):
"""Construct lookup cache of target records indexed by key field"""
products_by_key = super().targets_by_key(vlist)
# Cache product templates to minimise subsequent database lookups
Product = self.browse()[self._edi_sync_target].with_context(
active_test=False
)
Template = Product.product_tmpl_id
products = Product.browse([x.id for x in products_by_key.values()])
templates = Template.browse(products.mapped('product_tmpl_id.id'))
templates.mapped('name')
return products_by_key
@api.model
def target_values(self, record_vals):
"""Construct ``product.product`` field value dictionary"""
product_vals = super().target_values(record_vals)
product_vals.update({
'name': record_vals['description'],
})
return product_vals
class EdiInactiveProductRecord(models.Model):
"""EDI inactive product record"""
_name = 'edi.inactive.product.record'
_inherit = 'edi.record.deactivator'
_description = "Inactive Product"
target_id = fields.Many2one('product.product', string="Product")
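# Sketch of a derived record model (hypothetical names, not part of this
# module), mapping a source-document field onto ``product.product`` values as
# the ``EdiProductRecord`` docstring describes (integer grams -> float kg):
#
#   class EdiAcmeProductRecord(models.Model):
#       _name = 'edi.acme.product.record'
#       _inherit = 'edi.product.record'
#
#       weight_grams = fields.Integer(string="Weight (g)", readonly=True)
#
#       @api.model
#       def target_values(self, record_vals):
#           product_vals = super().target_values(record_vals)
#           # source document stores grams; product.product.weight is kilograms
#           product_vals['weight'] = record_vals['weight_grams'] / 1000.0
#           return product_vals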
| [
"[email protected]"
] | |
942fb16d6341d7adf7dec90c86062803d46ebb56 | db053c220094368ecb784fbe62375378c97457c2 | /92.reverse-linked-list-ii.py | 01812d1c9e2e4dc30141d4f409ac1430a357065b | [] | no_license | thegamingcoder/leetcode | 8c16e7ac9bda3e34ba15955671a91ad072e87d94 | 131facec0a0c70d319982e78e772ed1cb94bc461 | refs/heads/master | 2020-03-22T14:51:45.246495 | 2018-07-09T00:00:06 | 2018-07-09T00:00:06 | 140,211,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py |

#
# [92] Reverse Linked List II
#
# https://leetcode.com/problems/reverse-linked-list-ii/description/
#
# algorithms
# Medium (31.88%)
# Total Accepted: 145.3K
# Total Submissions: 455.9K
# Testcase Example: '[1,2,3,4,5]\n2\n4'
#
# Reverse a linked list from position m to n. Do it in one-pass.
#
# Note: 1 ≤ m ≤ n ≤ length of list.
#
# Example:
#
#
# Input: 1->2->3->4->5->NULL, m = 2, n = 4
# Output: 1->4->3->2->5->NULL
#
#
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reverseBetween(self, head, m, n):
        """
        :type head: ListNode
        :type m: int
        :type n: int
        :rtype: ListNode
        """
        # ListNode is supplied by the judge (see the commented stub above)
        dummy = ListNode(0)
        dummy.next = head
        prev = dummy
        # walk to the node just before position m
        for _ in range(m - 1):
            prev = prev.next
        cur = prev.next
        # head-insertion: repeatedly splice the node after cur to the
        # front of the m..n sublist
        for _ in range(n - m):
            nxt = cur.next
            cur.next = nxt.next
            nxt.next = prev.next
            prev.next = nxt
        return dummy.next
| [
"[email protected]"
] | |
ed93f6359fc8c624ea8f4a3efb3595b551de4a00 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyinst/pyinst-000/postinst.py | fe27556ec624ea8f4a3efb3595b551de4a003f33 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py |

#!/usr/bin/env python
import os, sys, getopt, signal, select, time
import subprocess
verbose = False
# ------------------------------------------------------------------------
def exec_program2(fname, arg1, arg2, arg3):
global verbose
try:
if verbose:
print "Started", fname
pp = subprocess.Popen([fname, arg1, arg2, arg3])
ret = os.waitpid(pp.pid, 0)
if ret[1] != 0:
print "Warninig: ", fname, "returned with", ret[1]
if verbose:
print "Ended ", fname
except:
print "Cannot execute script", fname, sys.exc_info()
raise
return True
# ------------------------------------------------------------------------
if __name__ == '__main__':
#print "In install.py"
#time.sleep(1)
pass
# Create menus for your app.
# Edit entry.desktop to taste
exec_program2("xdg-desktop-menu", "install", "--novendor", "entry.desktop")
| [
"[email protected]"
] | |
a4e291834a035910169cf5c8fa887a2feef65ec6 | 7b13e6acb2a1f26936462ed795ee4508b4088042 | /算法题目/算法题目/二分查找/LeetCode69求开方.py | cdee55e3cdee90c86062803d46ebb5637be52a03 | [] | no_license | guojia60180/algorithm | ed2b0fd63108f30cd596390e64ae659666d1c2c6 | ea81ff2722c7c350be5e1f0cd6d4290d366f2988 | refs/heads/master | 2020-04-19T08:25:55.110548 | 2019-05-13T13:29:39 | 2019-05-13T13:29:39 | 168,076,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py |

#Author guo
class Solution:
def mySqrt(self, x):
        # binary search for the unique m with m*m <= x < (m+1)*(m+1);
        # this invariant also covers x == 0 and x == 1 without special cases
l = 0
r = x
while l <= r:
mid = (l + r) // 2
if mid * mid <= x < (mid + 1) * (mid + 1):
return mid
elif mid * mid > x:
r = mid - 1
else:
l = mid + 1
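# e.g. Solution().mySqrt(8) == 2 (since 2*2 <= 8 < 3*3) and Solution().mySqrt(16) == 4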
| [
"[email protected]"
] | |
7281d23d02e3de9896dd3690adde741d708c1fe7 | 1b60b5c4d2a873b643dbd04fb77504d596237ba2 | /runtests.py | d8b2a28529b2ecfaaa4faf76e40c0f1029086034 | [
"BSD-3-Clause"
] | permissive | amolm-cuelogic/django-charsleft-widget | 32d832c90c9d269efd5877e83983c78a2ff5a0db | bbe0196c597c7b25f51d204acb10d05ab348d703 | refs/heads/master | 2021-01-22T19:09:15.856856 | 2016-10-14T08:15:51 | 2016-10-14T08:15:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py |

#!/usr/bin/env python
import sys
from os import path
import django
from django.conf import settings, global_settings
from django.core.management import execute_from_command_line
if not settings.configured:
BASE_DIR = path.dirname(path.realpath(__file__))
settings.configure(
DEBUG = False,
TEMPLATE_DEBUG = True,
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
],
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.staticfiles',
'django.contrib.contenttypes',
'charsleft_widget',
),
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner' if django.VERSION < (1,6) else 'django.test.runner.DiscoverRunner',
STATIC_URL = '/static/',
)
def runtests():
argv = sys.argv[:1] + ['test', 'charsleft_widget'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
| [
"[email protected]"
] | |
131fa69d7c88902d3272e686e559688f3e1406f6 | 5c7f2ff956b1fd1477d56486e239b6e661a08efd | /reinforcement_learning/0x00-q_learning/3-q_learning.py | 38289878256043dd1d61c99f45447b4db0addd4e | [] | no_license | diego0096/holbertonschool-machine_learning | 60c5f40e185df04d02d9887d966542e85a981896 | 64b8984846c2b2b88bbf11125b55b482c7b74eea | refs/heads/master | 2023-04-02T01:27:59.263397 | 2021-04-02T21:33:51 | 2021-04-02T21:33:51 | 279,229,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py |

#!/usr/bin/env python3
"""3-q_learning.py"""
import numpy as np
epsilon_greedy = __import__('2-epsilon_greedy').epsilon_greedy
def train(env, Q, episodes=5000, max_steps=100, alpha=0.1,
gamma=0.99, epsilon=1, min_epsilon=0.1, epsilon_decay=0.05):
"""function that performs Q-learning"""
total_rewards = []
for episode in range(episodes):
state = env.reset()
done = False
for step in range(max_steps):
action = epsilon_greedy(Q, state, epsilon)
new_state, reward, done, info = env.step(action)
map_size = env.desc.shape[0]
new_state_on_map = env.desc[int(np.floor(new_state / map_size)),
new_state % map_size]
if new_state_on_map == b'H':
reward = -1.0
Q[state, action] = ((1 - alpha) * Q[state, action] + alpha *
(reward + gamma * np.max(Q[new_state, :])))
state = new_state
if done is True:
break
max_epsilon = 1
epsilon = (min_epsilon + (max_epsilon - min_epsilon) *
np.exp(-epsilon_decay * episode))
total_rewards.append(reward)
return Q, total_rewards
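# Example usage (a sketch, assuming the FrozenLake environment and Q-table
# initialization used by the earlier tasks in this project):
#
#   import gym
#   import numpy as np
#
#   env = gym.make('FrozenLake-v0')
#   Q = np.zeros((env.observation_space.n, env.action_space.n))
#   Q, total_rewards = train(env, Q, episodes=5000)
#   print(np.mean(total_rewards))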
| [
"[email protected]"
] | |
674b0a7991768af96255c795dd3126b23dd92600 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_172/ch87_2020_04_29_11_23_49_782896.py | b473d466d2fb573a99a1bf4c984260fa1ce298e1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py |

with open('churras.txt', 'r') as arquivo:
    conteudo = arquivo.readlines()
    y = 0
    for item in conteudo:
        x = item.split(',')
        # the fields arrive as strings; convert before multiplying quantity by price
        y = y + float(x[1]) * float(x[2])
    print(y)
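# e.g. assuming each churras.txt line is "item,quantity,unit_price", the line
# "picanha,2,35.90" contributes 2 * 35.90 = 71.80 to the total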
| [
"[email protected]"
] | |
acbc151d7b384b7d09b14aef7407e39844b2cb9e | a55d515cf59f4ee898892fbf358c327ff53bce96 | /djangodialogs/auth/urls.py | b76c9847559dc83f03b63bbafd5035c1a6a268a5 | [] | no_license | furious-luke/django-dialogs | 859a3c30970f0a40813f828a0e0909cf5de29d24 | 142b5075910c940091a1f58a69d5192f74c10f9c | refs/heads/master | 2016-09-05T17:23:22.244282 | 2012-03-14T04:24:59 | 2012-03-14T04:24:59 | 1,964,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py |

from django.conf.urls.defaults import *
from views import *
urlpatterns = patterns('',
url(r'^accounts/login/ajax/$', login),
url(r'^accounts/register/ajax/$', register),
)
| [
"[email protected]"
] | |
25a115ac2ed7f5a2cae20b819f3d0d0d7c73959f | a50e906945260351f43d57e014081bcdef5b65a4 | /collections/ansible_collections/fortinet/fortios/plugins/modules/fortios_system_3g_modem_custom.py | bfa0a35321a02ba247ba88189f58c418996fe034 | [] | no_license | alhamdubello/evpn-ipsec-dci-ansible | 210cb31f4710bb55dc6d2443a590f3eb65545cf5 | 2dcc7c915167cd3b25ef3651f2119d54a18efdff | refs/heads/main | 2023-06-08T10:42:35.939341 | 2021-06-28T09:52:45 | 2021-06-28T09:52:45 | 380,860,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,173 | py |

#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_3g_modem_custom
short_description: 3G MODEM custom in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system_3g_modem feature and custom category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.4.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_3g_modem_custom:
description:
- 3G MODEM custom.
default: null
type: dict
suboptions:
class_id:
description:
- USB interface class in hexadecimal format (00-ff).
type: str
id:
description:
- ID.
required: true
type: int
init_string:
description:
- Init string in hexadecimal format (even length).
type: str
model:
description:
- MODEM model name.
type: str
product_id:
description:
- USB product ID in hexadecimal format (0000-ffff).
type: str
vendor:
description:
- MODEM vendor name.
type: str
vendor_id:
description:
- USB vendor ID in hexadecimal format (0000-ffff).
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: 3G MODEM custom.
fortios_system_3g_modem_custom:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
system_3g_modem_custom:
class_id: "<your_own_value>"
id: "4"
init_string: "<your_own_value>"
model: "<your_own_value>"
product_id: "<your_own_value>"
vendor: "<your_own_value>"
vendor_id: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_system_3g_modem_custom_data(json):
option_list = ['class_id', 'id', 'init_string',
'model', 'product_id', 'vendor',
'vendor_id']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def system_3g_modem_custom(data, fos):
vdom = data['vdom']
state = data['state']
system_3g_modem_custom_data = data['system_3g_modem_custom']
filtered_data = underscore_to_hyphen(filter_system_3g_modem_custom_data(system_3g_modem_custom_data))
if state == "present":
return fos.set('system.3g-modem',
'custom',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.3g-modem',
'custom',
mkey=filtered_data['id'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_3g_modem(data, fos):
if data['system_3g_modem_custom']:
resp = system_3g_modem_custom(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('system_3g_modem_custom'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = 'id'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_3g_modem_custom": {
"required": False, "type": "dict", "default": None,
"options": {
"class_id": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"init_string": {"required": False, "type": "str"},
"model": {"required": False, "type": "str"},
"product_id": {"required": False, "type": "str"},
"vendor": {"required": False, "type": "str"},
"vendor_id": {"required": False, "type": "str"}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_system_3g_modem(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
772d2710213f53600f2da917a56490ae3251092a | 7e5a7171eb3e97d9d18b928092f94142e5cb51e3 | /system/t06_publish/snapshot.py | 5fac3d2521971707f55d158fddf4f02dda5f9e6d | [
"MIT"
] | permissive | sbadia/aptly | e67ef68c15bad196b6723644113179a8495277fb | 5d16cf06cf23b5a70b0d375fcc1a43eaa7053071 | refs/heads/master | 2020-12-28T19:24:16.093595 | 2015-02-04T22:49:29 | 2015-02-04T22:49:29 | 28,808,010 | 1 | 0 | null | 2015-01-05T10:27:37 | 2015-01-05T10:27:37 | null | UTF-8 | Python | false | false | 40,731 | py |

import os
import hashlib
import inspect
from lib import BaseTest
def strip_processor(output):
return "\n".join([l for l in output.split("\n") if not l.startswith(' ') and not l.startswith('Date:')])
def sorted_processor(output):
return "\n".join(sorted(output.split("\n")))
class PublishSnapshot1Test(BaseTest):
"""
publish snapshot: defaults
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec snap1"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot1Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Release')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/binary-amd64/Release')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_not_exists('public/dists/maverick/main/debian-installer/binary-i386/Packages')
self.check_not_exists('public/dists/maverick/main/debian-installer/binary-amd64/Packages')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
# verify contents except of sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/main/binary-i386/Release', 'release_i386')
self.check_file_contents('public/dists/maverick/main/binary-amd64/Release', 'release_amd64')
self.check_file_contents('public/dists/maverick/main/binary-i386/Packages', 'packages_i386', match_prepare=sorted_processor)
self.check_file_contents('public/dists/maverick/main/binary-amd64/Packages', 'packages_amd64', match_prepare=sorted_processor)
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
else:
h = hashlib.sha256()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['main/binary-amd64/Packages', 'main/binary-i386/Packages', 'main/binary-i386/Packages.gz',
'main/binary-amd64/Packages.gz', 'main/binary-amd64/Packages.bz2', 'main/binary-i386/Packages.bz2',
'main/binary-amd64/Release', 'main/binary-i386/Release']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
class PublishSnapshot2Test(BaseTest):
"""
publish snapshot: different distribution
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap2 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze snap2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot2Test, self).check()
self.check_exists('public/dists/squeeze/InRelease')
self.check_exists('public/dists/squeeze/Release')
self.check_exists('public/dists/squeeze/Release.gpg')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages.gz')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/squeeze/main/binary-amd64/Packages')
self.check_exists('public/dists/squeeze/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/squeeze/main/binary-amd64/Packages.bz2')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
# verify contents except of sums
self.check_file_contents('public/dists/squeeze/Release', 'release', match_prepare=strip_processor)
class PublishSnapshot3Test(BaseTest):
"""
publish snapshot: different distribution and component
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap3 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze -component=contrib snap3"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot3Test, self).check()
self.check_exists('public/dists/squeeze/InRelease')
self.check_exists('public/dists/squeeze/Release')
self.check_exists('public/dists/squeeze/Release.gpg')
self.check_exists('public/dists/squeeze/contrib/binary-i386/Packages')
self.check_exists('public/dists/squeeze/contrib/binary-i386/Packages.gz')
self.check_exists('public/dists/squeeze/contrib/binary-i386/Packages.bz2')
self.check_exists('public/dists/squeeze/contrib/binary-amd64/Packages')
self.check_exists('public/dists/squeeze/contrib/binary-amd64/Packages.gz')
self.check_exists('public/dists/squeeze/contrib/binary-amd64/Packages.bz2')
self.check_exists('public/pool/contrib/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
# verify contents except of sums
self.check_file_contents('public/dists/squeeze/Release', 'release', match_prepare=strip_processor)
class PublishSnapshot4Test(BaseTest):
"""
publish snapshot: limit architectures
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap4 from mirror gnuplot-maverick",
]
runCmd = "aptly -architectures=i386 publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze snap4"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot4Test, self).check()
self.check_exists('public/dists/squeeze/InRelease')
self.check_exists('public/dists/squeeze/Release')
self.check_exists('public/dists/squeeze/Release.gpg')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages.gz')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages.bz2')
self.check_not_exists('public/dists/squeeze/main/binary-amd64/Packages')
self.check_not_exists('public/dists/squeeze/main/binary-amd64/Packages.gz')
self.check_not_exists('public/dists/squeeze/main/binary-amd64/Packages.bz2')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
# verify contents except of sums
self.check_file_contents('public/dists/squeeze/Release', 'release', match_prepare=strip_processor)
class PublishSnapshot5Test(BaseTest):
"""
publish snapshot: under prefix
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap5 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze snap5 ppa/smira"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot5Test, self).check()
self.check_exists('public/ppa/smira/dists/squeeze/InRelease')
self.check_exists('public/ppa/smira/dists/squeeze/Release')
self.check_exists('public/ppa/smira/dists/squeeze/Release.gpg')
self.check_exists('public/ppa/smira/dists/squeeze/main/binary-i386/Packages')
self.check_exists('public/ppa/smira/dists/squeeze/main/binary-i386/Packages.gz')
self.check_exists('public/ppa/smira/dists/squeeze/main/binary-i386/Packages.bz2')
self.check_exists('public/ppa/smira/dists/squeeze/main/binary-amd64/Packages')
self.check_exists('public/ppa/smira/dists/squeeze/main/binary-amd64/Packages.gz')
self.check_exists('public/ppa/smira/dists/squeeze/main/binary-amd64/Packages.bz2')
self.check_exists('public/ppa/smira/pool/main/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
class PublishSnapshot6Test(BaseTest):
"""
publish snapshot: specify distribution
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap from mirror gnuplot-maverick",
"aptly snapshot create snap2 from mirror wheezy-main",
"aptly snapshot merge snap6 snap2 snap"
]
runCmd = "aptly publish snapshot snap6"
expectedCode = 1
class PublishSnapshot7Test(BaseTest):
"""
publish snapshot: double publish under root
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap7 from mirror gnuplot-maverick",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec snap7",
]
runCmd = "aptly publish snapshot snap7"
expectedCode = 1
class PublishSnapshot8Test(BaseTest):
"""
publish snapshot: double publish under prefix
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap8 from mirror gnuplot-maverick",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec snap8 ./ppa",
]
runCmd = "aptly publish snapshot snap8 ppa"
expectedCode = 1
class PublishSnapshot9Test(BaseTest):
"""
publish snapshot: wrong prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap9 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot snap9 ppa/dists/la"
expectedCode = 1
class PublishSnapshot10Test(BaseTest):
"""
publish snapshot: wrong prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap10 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot snap10 ppa/pool/la"
expectedCode = 1
class PublishSnapshot11Test(BaseTest):
"""
publish snapshot: wrong prefix
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap11 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot snap11 ../la"
expectedCode = 1
class PublishSnapshot12Test(BaseTest):
"""
publish snapshot: no snapshot
"""
fixtureDB = True
runCmd = "aptly publish snapshot snap12"
expectedCode = 1
class PublishSnapshot13Test(BaseTest):
"""
publish snapshot: -skip-signing
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap13 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot -skip-signing snap13"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot13Test, self).check()
self.check_not_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_not_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
# verify contents except of sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
class PublishSnapshot14Test(BaseTest):
"""
publish snapshot: empty snapshot is not publishable w/o architectures list
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap14 empty",
]
runCmd = "aptly publish snapshot --distribution=mars --skip-signing snap14"
expectedCode = 1
class PublishSnapshot15Test(BaseTest):
"""
publish snapshot: skip signing via config
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap15 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot snap15"
configOverride = {"gpgDisableSign": True}
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot15Test, self).check()
self.check_not_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_not_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
# verify contents except of sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
class PublishSnapshot16Test(BaseTest):
"""
publish snapshot: with sources
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap16 from mirror gnuplot-maverick-src",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec snap16"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot16Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
self.check_exists('public/pool/main/g/gnuplot/gnuplot_4.6.1-1~maverick2.debian.tar.gz')
self.check_exists('public/pool/main/g/gnuplot/gnuplot_4.6.1-1~maverick2.dsc')
self.check_exists('public/pool/main/g/gnuplot/gnuplot_4.6.1.orig.tar.gz')
# verify contents except of sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/main/source/Sources', 'sources', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
class PublishSnapshot17Test(BaseTest):
"""
publish snapshot: from local repo
"""
fixtureCmds = [
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap17 from repo local-repo",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap17"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot17Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
# verify contents except of sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/maverick/main/source/Sources', 'sources', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
self.check_file_contents('public/dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
class PublishSnapshot18Test(BaseTest):
"""
publish snapshot: specify distribution from local repo
"""
fixtureCmds = [
"aptly repo create repo1",
"aptly repo add repo1 ${files}",
"aptly snapshot create snap1 from repo repo1",
]
runCmd = "aptly publish snapshot snap1"
expectedCode = 1
class PublishSnapshot19Test(BaseTest):
"""
publish snapshot: guess distribution from long chain
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly snapshot create snap2 from mirror gnuplot-maverick",
"aptly snapshot create snap3 from mirror gnuplot-maverick",
"aptly snapshot merge snap4 snap1 snap2",
"aptly snapshot pull snap4 snap1 snap5 gnuplot",
]
runCmd = "aptly publish snapshot -skip-signing snap5"
gold_processor = BaseTest.expand_environ
class PublishSnapshot20Test(BaseTest):
"""
publish snapshot: guess distribution from long chain including local repo
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly repo create -distribution=maverick local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap2 from repo local-repo",
"aptly snapshot merge snap3 snap1 snap2",
]
runCmd = "aptly publish snapshot -skip-signing snap3"
gold_processor = BaseTest.expand_environ
class PublishSnapshot21Test(BaseTest):
"""
publish snapshot: conflict in distributions
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly repo create -distribution=squeeze local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap2 from repo local-repo",
"aptly snapshot merge snap3 snap1 snap2",
]
runCmd = "aptly publish snapshot -skip-signing snap3"
gold_processor = BaseTest.expand_environ
expectedCode = 1
class PublishSnapshot22Test(BaseTest):
"""
publish snapshot: conflict in components
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly repo create -component=contrib -distribution=maverick local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap2 from repo local-repo",
"aptly snapshot merge snap3 snap1 snap2",
]
runCmd = "aptly publish snapshot -skip-signing snap3"
gold_processor = BaseTest.expand_environ
class PublishSnapshot23Test(BaseTest):
"""
publish snapshot: distribution empty plus distribution maverick
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap1 from mirror gnuplot-maverick",
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap2 from repo local-repo",
"aptly snapshot merge snap3 snap1 snap2",
]
runCmd = "aptly publish snapshot -skip-signing snap3"
gold_processor = BaseTest.expand_environ
class PublishSnapshot24Test(BaseTest):
"""
publish snapshot: custom origin
"""
fixtureDB = True
fixturePool = True
fixtureCmds = [
"aptly snapshot create snap24 from mirror gnuplot-maverick",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze -origin=aptly24 snap24"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot24Test, self).check()
# verify contents except of sums
self.check_file_contents('public/dists/squeeze/Release', 'release', match_prepare=strip_processor)
class PublishSnapshot25Test(BaseTest):
"""
publish snapshot: empty snapshot is publishable with architectures list
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap25 empty",
]
runCmd = "aptly publish snapshot -architectures=amd64 --distribution=maverick -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec snap25"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot25Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_not_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_not_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_not_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
class PublishSnapshot26Test(BaseTest):
"""
publish snapshot: multiple component
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap26.1 from mirror gnuplot-maverick",
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap26.2 from repo local-repo",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=main,contrib snap26.1 snap26.2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot26Test, self).check()
self.check_exists('public/dists/maverick/InRelease')
self.check_exists('public/dists/maverick/Release')
self.check_exists('public/dists/maverick/Release.gpg')
self.check_exists('public/dists/maverick/main/binary-i386/Packages')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/main/binary-amd64/Packages.bz2')
self.check_exists('public/dists/maverick/main/source/Sources')
self.check_exists('public/dists/maverick/main/source/Sources.gz')
self.check_exists('public/dists/maverick/main/source/Sources.bz2')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.gz')
self.check_exists('public/dists/maverick/contrib/binary-i386/Packages.bz2')
self.check_exists('public/dists/maverick/contrib/binary-amd64/Packages')
self.check_exists('public/dists/maverick/contrib/binary-amd64/Packages.gz')
self.check_exists('public/dists/maverick/contrib/binary-amd64/Packages.bz2')
self.check_exists('public/dists/maverick/contrib/source/Sources')
self.check_exists('public/dists/maverick/contrib/source/Sources.gz')
self.check_exists('public/dists/maverick/contrib/source/Sources.bz2')
self.check_exists('public/pool/main/g/gnuplot/gnuplot-doc_4.6.1-1~maverick2_all.deb')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.dsc')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi_0.6.1.orig.tar.gz')
self.check_exists('public/pool/contrib/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
self.check_exists('public/pool/contrib/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
# verify contents except of sums
self.check_file_contents('public/dists/maverick/Release', 'release', match_prepare=strip_processor)
# verify signatures
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/InRelease')])
self.run_cmd(["gpg", "--no-auto-check-trustdb", "--keyring", os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "files", "aptly.pub"),
"--verify", os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release.gpg'),
os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/Release')])
# verify sums
release = self.read_file('public/dists/maverick/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/maverick/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
else:
h = hashlib.sha256()
h.update(self.read_file(os.path.join('public/dists/maverick', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
if pathsSeen != set(['main/binary-amd64/Packages', 'main/binary-i386/Packages', 'main/binary-i386/Packages.gz',
'main/binary-amd64/Packages.gz', 'main/binary-amd64/Packages.bz2', 'main/binary-i386/Packages.bz2',
'main/source/Sources', 'main/source/Sources.gz', 'main/source/Sources.bz2',
'contrib/binary-amd64/Packages', 'contrib/binary-i386/Packages', 'contrib/binary-i386/Packages.gz',
'contrib/binary-amd64/Packages.gz', 'contrib/binary-amd64/Packages.bz2', 'contrib/binary-i386/Packages.bz2',
'contrib/source/Sources', 'contrib/source/Sources.gz', 'contrib/source/Sources.bz2',
'main/binary-amd64/Release', 'main/binary-i386/Release', 'main/source/Release',
'contrib/binary-amd64/Release', 'contrib/binary-i386/Release', 'contrib/source/Release']):
raise Exception("path seen wrong: %r" % (pathsSeen, ))
class PublishSnapshot27Test(BaseTest):
"""
    publish snapshot: multiple components, guessing component names
"""
fixtureDB = True
fixturePoolCopy = True
fixtureCmds = [
"aptly snapshot create snap27.1 from mirror gnuplot-maverick",
"aptly repo create -component=contrib local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap27.2 from repo local-repo",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -component=, snap27.1 snap27.2"
gold_processor = BaseTest.expand_environ
class PublishSnapshot28Test(BaseTest):
"""
publish snapshot: duplicate component name (guessed)
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap28.1 from mirror gnuplot-maverick",
"aptly repo create local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap28.2 from repo local-repo",
]
runCmd = "aptly publish snapshot -component=, snap28.1 snap28.2"
expectedCode = 1
class PublishSnapshot29Test(BaseTest):
"""
publish snapshot: duplicate component name (manual)
"""
fixtureCmds = [
"aptly snapshot create snap29.1 empty",
"aptly snapshot create snap29.2 empty",
]
runCmd = "aptly publish snapshot -component=b,b snap29.1 snap29.2"
expectedCode = 1
class PublishSnapshot30Test(BaseTest):
"""
publish snapshot: distribution conflict
"""
fixtureDB = True
fixtureCmds = [
"aptly snapshot create snap30.1 from mirror gnuplot-maverick",
"aptly repo create -distribution=squeeze local-repo",
"aptly repo add local-repo ${files}",
"aptly snapshot create snap30.2 from repo local-repo",
]
runCmd = "aptly publish snapshot -component=main,contrib snap30.1 snap30.2"
expectedCode = 1
class PublishSnapshot31Test(BaseTest):
"""
publish snapshot: no such snapshot
"""
fixtureCmds = [
"aptly snapshot create snap31.1 empty",
]
runCmd = "aptly publish snapshot -component=main,contrib snap31.1 snap31.2"
expectedCode = 1
class PublishSnapshot32Test(BaseTest):
"""
publish snapshot: mismatch in count
"""
fixtureCmds = [
"aptly snapshot create snap32.1 empty",
]
runCmd = "aptly publish snapshot -component=main,contrib snap32.1"
expectedCode = 2
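    # Compare only the ERROR lines; the surrounding usage output is ignored.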
outputMatchPrepare = lambda _, s: "\n".join([l for l in s.split("\n") if l.startswith("ERROR")])
class PublishSnapshot33Test(BaseTest):
"""
publish snapshot: conflicting files in the snapshot
"""
fixtureCmds = [
"aptly repo create local-repo1",
"aptly repo add local-repo1 ${files}",
"aptly snapshot create snap1 from repo local-repo1",
"aptly repo create local-repo2",
"aptly repo add local-repo2 ${testfiles}",
"aptly snapshot create snap2 from repo local-repo2",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze snap2"
expectedCode = 1
gold_processor = BaseTest.expand_environ
class PublishSnapshot34Test(BaseTest):
"""
publish snapshot: -force-overwrite
"""
fixtureCmds = [
"aptly repo create local-repo1",
"aptly repo add local-repo1 ${files}",
"aptly snapshot create snap1 from repo local-repo1",
"aptly repo create local-repo2",
"aptly repo add local-repo2 ${testfiles}",
"aptly snapshot create snap2 from repo local-repo2",
"aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1",
]
runCmd = "aptly publish snapshot -force-overwrite -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=squeeze snap2"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot34Test, self).check()
self.check_file_contents("public/pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz", "file")
class PublishSnapshot35Test(BaseTest):
"""
publish snapshot: mirror with udebs
"""
fixtureGpg = True
fixtureCmds = [
"aptly -architectures=i386,amd64 mirror create -keyring=aptlytest.gpg -filter='$$Source (dmraid)' -with-udebs squeeze http://mirror.yandex.ru/debian/ squeeze main non-free",
"aptly mirror update -keyring=aptlytest.gpg squeeze",
"aptly snapshot create squeeze from mirror squeeze",
]
runCmd = "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec squeeze"
gold_processor = BaseTest.expand_environ
def check(self):
super(PublishSnapshot35Test, self).check()
self.check_exists('public/dists/squeeze/InRelease')
self.check_exists('public/dists/squeeze/Release')
self.check_exists('public/dists/squeeze/Release.gpg')
self.check_exists('public/dists/squeeze/main/binary-i386/Release')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages.gz')
self.check_exists('public/dists/squeeze/main/binary-i386/Packages.bz2')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Release')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Packages')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Packages.gz')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-i386/Packages.bz2')
self.check_exists('public/dists/squeeze/main/binary-amd64/Release')
self.check_exists('public/dists/squeeze/main/binary-amd64/Packages')
self.check_exists('public/dists/squeeze/main/binary-amd64/Packages.gz')
self.check_exists('public/dists/squeeze/main/binary-amd64/Packages.bz2')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Release')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Packages')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Packages.gz')
self.check_exists('public/dists/squeeze/main/debian-installer/binary-amd64/Packages.bz2')
self.check_not_exists('public/dists/squeeze/main/source/Sources')
self.check_not_exists('public/dists/squeeze/main/source/Sources.gz')
self.check_not_exists('public/dists/squeeze/main/source/Sources.bz2')
self.check_exists('public/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_amd64.udeb')
self.check_exists('public/pool/main/d/dmraid/dmraid-udeb_1.0.0.rc16-4.1_i386.udeb')
self.check_exists('public/pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_amd64.deb')
self.check_exists('public/pool/main/d/dmraid/dmraid_1.0.0.rc16-4.1_i386.deb')
self.check_file_contents('public/dists/squeeze/main/binary-i386/Packages', 'packages_i386', match_prepare=sorted_processor)
self.check_file_contents('public/dists/squeeze/main/debian-installer/binary-i386/Packages', 'packages_udeb_i386', match_prepare=sorted_processor)
self.check_file_contents('public/dists/squeeze/main/binary-amd64/Packages', 'packages_amd64', match_prepare=sorted_processor)
self.check_file_contents('public/dists/squeeze/main/debian-installer/binary-amd64/Packages', 'packages_udeb_amd64', match_prepare=sorted_processor)
# verify contents except of sums
self.check_file_contents('public/dists/squeeze/Release', 'release', match_prepare=strip_processor)
self.check_file_contents('public/dists/squeeze/main/debian-installer/binary-i386/Release', 'release_udeb_i386', match_prepare=strip_processor)
# verify sums
release = self.read_file('public/dists/squeeze/Release').split("\n")
release = [l for l in release if l.startswith(" ")]
pathsSeen = set()
for l in release:
fileHash, fileSize, path = l.split()
pathsSeen.add(path)
fileSize = int(fileSize)
st = os.stat(os.path.join(os.environ["HOME"], ".aptly", 'public/dists/squeeze/', path))
if fileSize != st.st_size:
raise Exception("file size doesn't match for %s: %d != %d" % (path, fileSize, st.st_size))
if len(fileHash) == 32:
h = hashlib.md5()
elif len(fileHash) == 40:
h = hashlib.sha1()
else:
h = hashlib.sha256()
h.update(self.read_file(os.path.join('public/dists/squeeze', path)))
if h.hexdigest() != fileHash:
raise Exception("file hash doesn't match for %s: %s != %s" % (path, fileHash, h.hexdigest()))
        pathsExpected = set()
        for arch in ("i386", "amd64"):
            for udeb in ("", "debian-installer/"):
                for ext in ("", ".gz", ".bz2"):
                    pathsExpected.add("main/%sbinary-%s/Packages%s" % (udeb, arch, ext))
                pathsExpected.add("main/%sbinary-%s/Release" % (udeb, arch))
        if pathsSeen != pathsExpected:
            raise Exception("paths seen wrong: %r != %r" % (pathsSeen, pathsExpected))
| [
"[email protected]"
] | |
a3853c891ca016a85276919f21c11e6ea1299f0e | 490f5e517942f529ddc8c1e0d421a208ff1ca29b | /02_code/listinstance2.py | c198804a387d5ef0f574c63f87e3a85e035230d8 | [] | no_license | emnglang/py-lab | facdc464a8c84b90f06b5cb639315981c0b4ba8d | bc3566da81e0b2cfa9ce563ffc198d35294971a1 | refs/heads/master | 2020-03-25T15:10:42.856062 | 2018-08-24T14:54:33 | 2018-08-24T14:54:33 | 143,869,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!python
# File listinstance.py (2.X + 3.X)
class ListInstance:
"""
Mix-in class that provides a formatted print() or str() of instances via
inheritance of __str__ coded here; displays instance attrs only; self is
instance of lowest class; __X names avoid clashing with client's attrs
"""
def __attrnames(self):
return ''.join('\t%s=%s\n' % (attr, self.__dict__[attr])
for attr in sorted(self.__dict__))
def __str__(self):
return '<Instance of %s, address %s:\n%s>' % (
self.__class__.__name__, # My class's name
id(self), # My address
self.__attrnames()) # name=value list
if __name__ == '__main__':
import testmixin
testmixin.tester(ListInstance)
| [
"[email protected]"
] | |
eacab20918d79ac5d79f76307ea5cd4283e90349 | 0b76e4db1f08f2d6d7b9379a884c2075f6e258c3 | /w10/G3/demo/demo/urls.py | 6a76c0435cad61a73f5dc7da4dae78ad382bcce9 | [] | no_license | bobur554396/WD2020Spring | 244ec8b491f297646d1d37f1feeb3767b68b9180 | 2b833c9043701ebaa4d122f717c8465af8fd5677 | refs/heads/master | 2020-12-26T19:01:46.605344 | 2020-04-18T05:33:42 | 2020-04-18T05:33:42 | 237,606,624 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | """demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('main/', include('main.urls')),
path('core/', include('core.urls')),
]
| [
"[email protected]"
] | |
4c6f6c224597b05b7e7ca61fee80f16589525350 | 248cf77b8a24b3b35e658d81b4c9cb8c450c2ca4 | /cryptex/streamers/bitbay/__init__.py | cdb4ef4c0f13d264b7f1b3ca9447113e54d3cb0b | [] | no_license | Reynaldo61/cryptex | fee1e124f544a049a74775fab4540cfd89d405df | deb34a449bdeb4e26009ef21035383ecf463cb3e | refs/heads/master | 2020-09-09T21:04:44.855833 | 2018-03-04T17:41:32 | 2018-03-04T17:41:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .bitbay import BitbayBase as base
from .wrapper import BitbayWrapper as wrapper
from .streamer import BitbayStreamer as streamer
__version__ = 0.1
__exchange__ = "bitbay"
__method__ = "rest"
| [
"[email protected]"
] | |
5628e326c354f7cd3c1d230e5e6e83140c249278 | a40950330ea44c2721f35aeeab8f3a0a11846b68 | /INTERACTION1/DRIVER/Interaction/PumpsStation/Liquid/_OneSensor.py | b7ac8d7e289207ed76be73e90b0fa19808b3bab0 | [] | no_license | huang443765159/kai | 7726bcad4e204629edb453aeabcc97242af7132b | 0d66ae4da5a6973e24e1e512fd0df32335e710c5 | refs/heads/master | 2023-03-06T23:13:59.600011 | 2023-03-04T06:14:12 | 2023-03-04T06:14:12 | 233,500,005 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | import time
import random
import pigpio
import threading
BAUD = 9600
class OneSensor(object):
def __init__(self, pi, sid, rx_pin, tx_pin, data_cb):
self._pi = pi
self._sid = sid
self._rx_pin = rx_pin
self._tx_pin = tx_pin
self._data_cb = data_cb
# PI
self._pi.set_mode(self._rx_pin, pigpio.INPUT)
self._pi.set_mode(self._tx_pin, pigpio.OUTPUT)
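        # Close any stale software-serial reader on the RX pin first; pigpio
        # exceptions are suppressed so the close is harmless if none was open.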
pigpio.exceptions = False
self._pi.bb_serial_read_close(self._rx_pin)
pigpio.exceptions = True
self._pi.bb_serial_read_open(self._rx_pin, BAUD, 8)
self._msg = bytes()
# THREAD
self._thread = threading.Thread(target=self._working)
self._thread.daemon = True
self._thread.start()
def _working(self):
while 1:
count, data = self._pi.bb_serial_read(self._rx_pin)
if count:
self._msg += data
if len(self._msg) == 4:
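                    # Assumed 4-byte frame: [header, data high byte, data low byte, checksum],
                    # where the checksum is the low 8 bits of the sum of the first three bytes.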
if (self._msg[0] + self._msg[1] + self._msg[2]) & 0x00ff == self._msg[3]:
self._data_cb(sid=self._sid, data=self._msg[1] * 256 + self._msg[2])
self._msg = bytes()
time.sleep(1)
if __name__ == '__main__':
import os
def _data_cb(sid, data):
print(sid, data)
_pi = pigpio.pi()
if not _pi.connected:
os.system('sudo pigpiod')
_pi = pigpio.pi()
sensor = OneSensor(pi=_pi, sid=1, rx_pin=15, tx_pin=14, data_cb=_data_cb)
# rx_pin=15, 24, 8, 12, 20
# tx_pin=14, 23, 25, 7, 16
| [
"[email protected]"
] | |
60413c7f96d539ba2b325c06b0678d2f48db7667 | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/scipy/fft/setup.py | f0d5dade992f334540d4759fc4100bb1c8bdaf3c | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f32208914b4d7de0ea26596a8be64ef729f4cc5799cd04da84cd8f639c4796de
size 448
| [
"[email protected]"
] | |
67cf67841c5e1908a4086e5b9737233c7076141c | 2698b0148191078f36efe266c3572d9f30724255 | /sharedslides.py | d49c4cdff40d8d397bccf9a8082771f798ea1678 | [] | no_license | sugar-activities/4196-activity | 8c81dc6c0aa0c7e7fd8bd08da79c5a1279d7d400 | 8eaefd6bc6429694e2d765b70f5bdd42b1a5286a | refs/heads/master | 2021-01-19T23:15:28.535962 | 2017-04-21T05:45:34 | 2017-04-21T05:45:34 | 88,937,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,085 | py | # -*- mode:python; tab-width:4; indent-tabs-mode:nil; -*-
# sharedslides.py
#
# Class that performs all work relating to the sharing of slide decks and ink.
# Kris Plunkett <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import sys
import os
import time
import random
import gobject
import telepathy
import telepathy.client
import dbus
from dbus.service import method, signal
from dbus.gobject_service import ExportedGObject
from sugar.presence import presenceservice
from sugar import network
from sugar.presence.tubeconn import TubeConnection
SERVICE = "edu.washington.cs.ClassroomPresenterXO"
IFACE = SERVICE
PATH = "/edu/washington/cs/ClassroomPresenterXO"
# Define a simple HTTP server for sharing data.
class ReadHTTPRequestHandler(network.ChunkedGlibHTTPRequestHandler):
def translate_path(self, path):
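        # Serve the shared deck file regardless of the requested path.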
return self.server._filepath
class ReadHTTPServer(network.GlibTCPServer):
def __init__(self, server_address, filepath):
self._filepath = filepath
network.GlibTCPServer.__init__(self, server_address, ReadHTTPRequestHandler)
class SharedSlides(gobject.GObject):
""" Handles all sharing of slides and ink """
__gsignals__ = {
'deck-download-complete' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, init, cpxo_path, shared_activity, read_file_cb):
gobject.GObject.__init__(self)
self.__is_initiating = init
self.__cpxo_path = cpxo_path
self.__shared_activity = shared_activity
self.read_file_cb = read_file_cb
self.__logger = logging.getLogger('SharedSlides')
self.__tubes_chan = self.__shared_activity.telepathy_tubes_chan
self.__iface = self.__tubes_chan[telepathy.CHANNEL_TYPE_TUBES]
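        # The same Tubes interface is used both to offer a stream tube (sharer)
        # and to list/accept one (joiner).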
if (self.__is_initiating):
self.__logger.debug('Hello from SharedSlides (sharer).')
self.__have_deck = True
self.share_deck()
else:
# find a stream tube to download the slide deck from
self.__logger.debug('Hello from SharedSlides (joiner).')
self.__iface.connect_to_signal('NewTube', self.new_tube_cb)
self.__have_deck = False
self.get_stream_tube()
def get_stream_tube(self):
""" Attempts to download the slide deck from an available stream tube """
self.__iface.ListTubes(
reply_handler=self.list_tubes_reply_cb,
error_handler=self.list_tubes_error_cb)
def handle_download_fail(self):
""" If an attempt to download the deck fails, this method takes care of it """
self.__logger.error('Download failed! Sleeping five seconds and trying again.')
time.sleep(5)
self.get_stream_tube()
def list_tubes_reply_cb(self, tubes):
for tube_info in tubes:
self.new_tube_cb(*tube_info)
def list_tubes_error_cb(self, e):
self.__logger.error('ListTubes() failed: %s', e)
        self.handle_download_fail()
def new_tube_cb(self, id, initiator, type, service, params, state):
self.__logger.debug('New tube: ID=%d initiator=%d type=%d service=%s params=%r state=%d',
id, initiator, type, service, params, state)
if (not self.__have_deck and
type == telepathy.TUBE_TYPE_STREAM and
service == SERVICE and
state == telepathy.TUBE_STATE_LOCAL_PENDING):
addr = self.__iface.AcceptStreamTube(id,
telepathy.SOCKET_ADDRESS_TYPE_IPV4,
telepathy.SOCKET_ACCESS_CONTROL_LOCALHOST, 0,
utf8_strings=True)
self.__logger.debug("Got a stream tube!")
# sanity checks
assert isinstance(addr, dbus.Struct)
assert len(addr) == 2
assert isinstance(addr[0], str)
assert isinstance(addr[1], (int, long))
assert addr[1] > 0 and addr[1] < 65536
ip_addr = addr[0]
port = int(addr[1])
self.__logger.debug("The stream tube is good!")
self.download_file(ip_addr, port, id)
def download_file(self, ip_addr, port, tube_id):
""" Performs the actual download of the slide deck """
self.__logger.debug("Downloading from ip %s and port %d.", ip_addr, port)
getter = network.GlibURLDownloader("http://%s:%d/document" % (ip_addr, port))
getter.connect("finished", self.download_result_cb, tube_id)
getter.connect("progress", self.download_progress_cb, tube_id)
getter.connect("error", self.download_error_cb, tube_id)
self.__logger.debug("Starting download to %s...", self.__cpxo_path)
getter.start(self.__cpxo_path)
def download_result_cb(self, getter, tempfile, suggested_name, tube_id):
""" Called when the file download was successful """
self.__logger.debug("Got file %s (%s) from tube %u",
tempfile, suggested_name, tube_id)
self.emit('deck-download-complete')
self.read_file_cb(self.__cpxo_path)
def download_progress_cb(self, getter, bytes_downloaded, tube_id):
        # Per-chunk progress logging is too chatty, so this callback is a no-op.
        #self.__logger.debug("Bytes downloaded from tube %u: %u", tube_id, bytes_downloaded)
        pass
def download_error_cb(self, getter, err, tube_id):
self.__logger.error('Download failed on tube %u: %s', tube_id, err)
self.handle_download_fail()
def share_deck(self):
""" As the instructor XO, or as a student that has completed the deck download
share the deck with others in the activity """
# get a somewhat random port number
self.__port = random.randint(1024, 65535)
self.__ip_addr = "127.0.0.1"
self._fileserver = ReadHTTPServer(("", self.__port), self.__cpxo_path)
self.__logger.debug('Started an HTTP server on port %d', self.__port)
self.__iface.OfferStreamTube(SERVICE, {},
telepathy.SOCKET_ADDRESS_TYPE_IPV4,
(self.__ip_addr, dbus.UInt16(self.__port)),
telepathy.SOCKET_ACCESS_CONTROL_LOCALHOST, 0)
self.__logger.debug('Made a stream tube.')
gobject.type_register(SharedSlides)
| [
"[email protected]"
] | |
6b822a81c52ad1a954af151406c8f6fad2de0bae | 2940f5416082dadd9c646cd9a46d2d0a99883efb | /venv/Lib/site-packages/networkx/testing/__init__.py | 884ac83d194f33d3597f6095678a2801e2c8fecc | [
"MIT"
] | permissive | tpike3/SugarScape | 4813e4fefbfb0a701f5913d74f045fd0eaed1942 | 39efe4007fba2b12b75c72f7795827a1f74d640b | refs/heads/main | 2021-06-20T03:55:46.288721 | 2021-01-20T17:06:35 | 2021-01-20T17:06:35 | 168,583,530 | 11 | 3 | MIT | 2021-01-20T17:19:53 | 2019-01-31T19:29:40 | Jupyter Notebook | UTF-8 | Python | false | false | 75 | py | from networkx.testing.utils import *
from networkx.testing.test import run
| [
"[email protected]"
] | |
a3e4ee41e561ef8abd0f4c96f80857d31f2980d3 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_031858.28+002325.7/sdB_sdssj_031858.28+002325.7_lc.py | 015e2ab508983a3ff321694093bcee12e4301b9c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[49.742833,0.390472], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_031858.28+002325.7/sdB_sdssj_031858.28+002325.7_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
27ea5d9ee615c2727ec11f2710fc2f03b5a30bfc | 7d9d3d5ce2ac19221163d54a94c025993db0af4f | /autotest/ogr/ogr_dxf.py | 2222aacac9eb29e918c8f8f18981e317beeaa58a | [
"MIT"
] | permissive | dcgull/gdal | 5408adad77d001db32173bba547b447220b5e9a2 | a5e2a7b54db955bd061ebfc6d69aa2dd752b120c | refs/heads/master | 2020-04-03T13:30:40.013172 | 2013-10-11T12:07:57 | 2013-10-11T12:07:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,233 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_dxf.py 26354 2013-08-21 15:28:27Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR DXF driver functionality.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2009, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
from sys import version_info
sys.path.append( '../pymod' )
import ogrtest
import gdaltest
from osgeo import gdal, ogr
###############################################################################
# Check some general things to see if they meet expectations.
def ogr_dxf_1():
gdaltest.dxf_ds = ogr.Open( 'data/assorted.dxf' )
if gdaltest.dxf_ds is None:
return 'fail'
if gdaltest.dxf_ds.GetLayerCount() != 1:
gdaltest.post_reason( 'expected exactly one layer!' )
return 'fail'
gdaltest.dxf_layer = gdaltest.dxf_ds.GetLayer(0)
if gdaltest.dxf_layer.GetName() != 'entities':
gdaltest.post_reason( 'did not get expected layer name.' )
return 'fail'
defn = gdaltest.dxf_layer.GetLayerDefn()
if defn.GetFieldCount() != 6:
gdaltest.post_reason( 'did not get expected number of fields.' )
return 'fail'
fc = gdaltest.dxf_layer.GetFeatureCount()
if fc != 16:
gdaltest.post_reason( 'did not get expected feature count, got %d' % fc)
return 'fail'
    # Set up the utf-8 string.
if version_info >= (3,0,0):
gdaltest.sample_text = 'Text Sample1\u00BF\u03BB\n"abc"'
gdaltest.sample_style = 'Text Sample1\u00BF\u03BB\n\\"abc\\"'
else:
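        # Built via exec() so the u'' literal (a syntax error on Python 3.0-3.2)
        # never reaches the Python 3 parser.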
exec("gdaltest.sample_text = u'Text Sample1\u00BF\u03BB'")
gdaltest.sample_text += chr(10)
gdaltest.sample_style = gdaltest.sample_text + '\\"abc\\"'
gdaltest.sample_style = gdaltest.sample_style.encode('utf-8')
gdaltest.sample_text += '"abc"'
gdaltest.sample_text = gdaltest.sample_text.encode('utf-8')
return 'success'
###############################################################################
# Read the first feature, an ellipse and see if it generally meets expectations.
def ogr_dxf_2():
gdaltest.dxf_layer.ResetReading()
feat = gdaltest.dxf_layer.GetNextFeature()
if feat.Layer != '0':
gdaltest.post_reason( 'did not get expected layer for feature 0' )
return 'fail'
if feat.GetFID() != 0:
gdaltest.post_reason( 'did not get expected fid for feature 0' )
return 'fail'
if feat.SubClasses != 'AcDbEntity:AcDbEllipse':
gdaltest.post_reason( 'did not get expected SubClasses on feature 0.' )
return 'fail'
if feat.LineType != 'ByLayer':
gdaltest.post_reason( 'Did not get expected LineType' )
return 'fail'
if feat.EntityHandle != '43':
gdaltest.post_reason( 'did not get expected EntityHandle' )
return 'fail'
if feat.GetStyleString() != 'PEN(c:#000000)':
print( '%s' % feat.GetStyleString())
gdaltest.post_reason( 'did not get expected style string on feat 0.' )
return 'fail'
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbLineString25D:
gdaltest.post_reason( 'did not get expected geometry type.' )
return 'fail'
envelope = geom.GetEnvelope()
area = (envelope[1] - envelope[0]) * (envelope[3] - envelope[2])
exp_area = 1596.12
if area < exp_area - 0.5 or area > exp_area + 0.5:
gdaltest.post_reason( 'envelope area not as expected, got %g.' % area )
return 'fail'
if abs(geom.GetX(0)-73.25) > 0.001 or abs(geom.GetY(0)-139.75) > 0.001:
gdaltest.post_reason( 'first point (%g,%g) not expected location.' \
% (geom.GetX(0),geom.GetY(0)) )
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Second feature should be a partial ellipse.
def ogr_dxf_3():
feat = gdaltest.dxf_layer.GetNextFeature()
geom = feat.GetGeometryRef()
envelope = geom.GetEnvelope()
area = (envelope[1] - envelope[0]) * (envelope[3] - envelope[2])
exp_area = 311.864
if area < exp_area - 0.5 or area > exp_area + 0.5:
gdaltest.post_reason( 'envelope area not as expected, got %g.' % area )
return 'fail'
if abs(geom.GetX(0)-61.133) > 0.01 or abs(geom.GetY(0)-103.592) > 0.01:
gdaltest.post_reason( 'first point (%g,%g) not expected location.' \
% (geom.GetX(0),geom.GetY(0)) )
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Third feature: point.
def ogr_dxf_4():
feat = gdaltest.dxf_layer.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POINT (83.5 160.0 0)' ):
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Fourth feature: LINE
def ogr_dxf_5():
feat = gdaltest.dxf_layer.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'LINESTRING (97.0 159.5 0,108.5 132.25 0)' ):
return 'fail'
if feat.GetGeometryRef().GetGeometryType() == ogr.wkbLineString:
gdaltest.post_reason( 'not keeping 3D linestring as 3D' )
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Fourth feature: MTEXT
def ogr_dxf_6():
feat = gdaltest.dxf_layer.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POINT (84 126)' ):
return 'fail'
if feat.GetGeometryRef().GetGeometryType() == ogr.wkbPoint25D:
gdaltest.post_reason( 'not keeping 2D text as 2D' )
return 'fail'
if feat.GetStyleString() != 'LABEL(f:"Arial",t:"Test",a:30,s:5g,p:7,c:#000000)':
print(feat.GetStyleString())
gdaltest.post_reason( 'got wrong style string' )
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Partial CIRCLE
def ogr_dxf_7():
feat = gdaltest.dxf_layer.GetNextFeature()
geom = feat.GetGeometryRef()
envelope = geom.GetEnvelope()
area = (envelope[1] - envelope[0]) * (envelope[3] - envelope[2])
exp_area = 445.748
if area < exp_area - 0.5 or area > exp_area + 0.5:
print(envelope)
gdaltest.post_reason( 'envelope area not as expected, got %g.' % area )
return 'fail'
if abs(geom.GetX(0)-115.258) > 0.01 or abs(geom.GetY(0)-107.791) > 0.01:
gdaltest.post_reason( 'first point (%g,%g) not expected location.' \
% (geom.GetX(0),geom.GetY(0)) )
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Dimension
def ogr_dxf_8():
# Skip boring line.
feat = gdaltest.dxf_layer.GetNextFeature()
feat.Destroy()
# Dimension lines
feat = gdaltest.dxf_layer.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbMultiLineString:
gdaltest.post_reason( 'did not get expected geometry type.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING ((63.862871944482457 149.209935992088333,24.341960668550669 111.934531038652722),(72.754404848874373 139.782768575383642,62.744609795879391 150.395563330366286),(33.233493572942614 102.507363621948002,23.2236985199476 113.120158376930675),(63.862871944482457 149.209935992088333,59.187727781045531 147.04077688455709),(63.862871944482457 149.209935992088333,61.424252078251662 144.669522208001183),(24.341960668550669 111.934531038652722,26.78058053478146 116.474944822739886),(24.341960668550669 111.934531038652722,29.017104831987599 114.103690146183979))' ):
return 'fail'
feat.Destroy()
# Dimension text
feat = gdaltest.dxf_layer.GetNextFeature()
geom = feat.GetGeometryRef()
if ogrtest.check_feature_geometry( feat, 'POINT (42.815907752635709 131.936242584545397)' ):
return 'fail'
expected_style = 'LABEL(f:"Arial",t:"54.3264",p:5,a:43.3,s:2.5g)'
if feat.GetStyleString() != expected_style:
gdaltest.post_reason( 'Got unexpected style string:\n%s\ninstead of:\n%s.' % (feat.GetStyleString(),expected_style) )
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# BLOCK (inlined)
def ogr_dxf_9():
# Skip two dimensions each with a line and text.
for x in range(4):
feat = gdaltest.dxf_layer.GetNextFeature()
feat.Destroy()
# block (merged geometries)
feat = gdaltest.dxf_layer.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbGeometryCollection25D:
gdaltest.post_reason( 'did not get expected geometry type.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'GEOMETRYCOLLECTION (LINESTRING (79.069506278985116 121.003652476272777 0,79.716898725419625 118.892590150942851 0),LINESTRING (79.716898725419625 118.892590150942851 0,78.140638855839953 120.440702522851453 0),LINESTRING (78.140638855839953 120.440702522851453 0,80.139111190485622 120.328112532167196 0),LINESTRING (80.139111190485622 120.328112532167196 0,78.619146316248077 118.920737648613908 0),LINESTRING (78.619146316248077 118.920737648613908 0,79.041358781314059 120.975504978601705 0))' ):
return 'fail'
feat.Destroy()
# First of two MTEXTs
feat = gdaltest.dxf_layer.GetNextFeature()
if feat.GetField( 'Text' ) != gdaltest.sample_text:
gdaltest.post_reason( 'Did not get expected first mtext.' )
return 'fail'
expected_style = 'LABEL(f:"Arial",t:"'+gdaltest.sample_style+'",a:45,s:0.5g,p:5,c:#000000)'
if feat.GetStyleString() != expected_style:
gdaltest.post_reason( 'Got unexpected style string:\n%s\ninstead of:\n%s.' % (feat.GetStyleString(),expected_style) )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT (77.602201427662891 120.775897075866169 0)' ):
return 'fail'
# Second of two MTEXTs
feat = gdaltest.dxf_layer.GetNextFeature()
if feat.GetField( 'Text' ) != 'Second':
gdaltest.post_reason( 'Did not get expected second mtext.' )
return 'fail'
if feat.GetField( 'SubClasses' ) != 'AcDbEntity:AcDbMText':
gdaltest.post_reason( 'Did not get expected subclasses.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT (79.977331629005178 119.698291706738644 0)' ):
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# LWPOLYLINE in an Object Coordinate System.
def ogr_dxf_10():
ocs_ds = ogr.Open('data/LWPOLYLINE-OCS.dxf')
ocs_lyr = ocs_ds.GetLayer(0)
# Skip boring line.
feat = ocs_lyr.GetNextFeature()
feat.Destroy()
# LWPOLYLINE in OCS
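    # The driver maps OCS coordinates to world coordinates from the entity's
    # extrusion direction (the DXF "arbitrary axis" algorithm).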
feat = ocs_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbLineString25D:
print(geom.GetGeometryType())
gdaltest.post_reason( 'did not get expected geometry type.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'LINESTRING (600325.567999998573214 3153021.253000000491738 562.760000000052969,600255.215999998385087 3151973.98600000096485 536.950000000069849,597873.927999997511506 3152247.628000000491738 602.705000000089058)' ):
return 'fail'
feat.Destroy()
ocs_lyr = None
ocs_ds.Destroy()
ocs_ds = None
return 'success'
###############################################################################
# Test reading from an entities-only dxf file (#3412)
def ogr_dxf_11():
eo_ds = ogr.Open('data/entities_only.dxf')
eo_lyr = eo_ds.GetLayer(0)
# Check first point.
feat = eo_lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'POINT (672500.0 242000.0 539.986)' ):
return 'fail'
feat.Destroy()
# Check second point.
feat = eo_lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'POINT (672750.0 242000.0 558.974)' ):
return 'fail'
feat.Destroy()
eo_lyr = None
eo_ds.Destroy()
eo_ds = None
return 'success'
###############################################################################
# Write a simple file with a polygon and a line, and read back.
def ogr_dxf_12():
ds = ogr.GetDriverByName('DXF').CreateDataSource('tmp/dxf_11.dxf' )
lyr = ds.CreateLayer( 'entities' )
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'LINESTRING(10 12, 60 65)' ) )
lyr.CreateFeature( dst_feat )
dst_feat = None
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POLYGON((0 0,100 0,100 100,0 0))' ) )
lyr.CreateFeature( dst_feat )
dst_feat = None
# Test 25D linestring with constant Z (#5210)
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'LINESTRING(1 2 10,3 4 10)' ) )
lyr.CreateFeature( dst_feat )
dst_feat = None
# Test 25D linestring with different Z (#5210)
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'LINESTRING(1 2 -10,3 4 10)' ) )
lyr.CreateFeature( dst_feat )
dst_feat = None
lyr = None
ds = None
# Read back.
ds = ogr.Open('tmp/dxf_11.dxf')
lyr = ds.GetLayer(0)
# Check first feature
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'LINESTRING(10 12, 60 65)' ):
print(feat.GetGeometryRef().ExportToWkt())
return 'fail'
if feat.GetGeometryRef().GetGeometryType() != ogr.wkbLineString:
gdaltest.post_reason( 'not linestring 2D' )
return 'fail'
feat = None
# Check second feature
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'POLYGON((0 0,100 0,100 100,0 0))' ):
print(feat.GetGeometryRef().ExportToWkt())
return 'fail'
if feat.GetGeometryRef().GetGeometryType() != ogr.wkbPolygon:
gdaltest.post_reason( 'not keeping polygon 2D' )
return 'fail'
feat = None
# Check third feature
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'LINESTRING(1 2 10,3 4 10)' ):
print(feat.GetGeometryRef().ExportToWkt())
return 'fail'
feat = None
# Check fourth feature
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'LINESTRING(1 2 -10,3 4 10)' ):
print(feat.GetGeometryRef().ExportToWkt())
return 'fail'
feat = None
lyr = None
ds = None
ds = None
os.unlink( 'tmp/dxf_11.dxf' )
return 'success'
###############################################################################
# Check smoothed polyline.
def ogr_dxf_13():
ds = ogr.Open( 'data/polyline_smooth.dxf' )
layer = ds.GetLayer(0)
feat = layer.GetNextFeature()
if feat.Layer != '1':
gdaltest.post_reason( 'did not get expected layer for feature 0' )
return 'fail'
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbLineString25D:
gdaltest.post_reason( 'did not get expected geometry type.' )
return 'fail'
envelope = geom.GetEnvelope()
area = (envelope[1] - envelope[0]) * (envelope[3] - envelope[2])
exp_area = 1350.43
if area < exp_area - 0.5 or area > exp_area + 0.5:
gdaltest.post_reason( 'envelope area not as expected, got %g.' % area )
return 'fail'
    # Check for specific number of points from tessellated arc(s).
    # Note that this number depends on the tessellation algorithm and
# possibly the default global arc_stepsize variable; therefore it is
# not guaranteed to remain constant even if the input DXF file is constant.
# If you retain this test, you may need to update the point count if
# changes are made to the aforementioned items. Ideally, one would test
# only that more points are returned than in the original polyline, and
# that the points lie along (or reasonably close to) said path.
if geom.GetPointCount() != 146:
        gdaltest.post_reason( 'did not get expected number of points, got %d' % (geom.GetPointCount()) )
return 'fail'
if abs(geom.GetX(0)-251297.8179) > 0.001 \
or abs(geom.GetY(0)-412226.8286) > 0.001:
gdaltest.post_reason( 'first point (%g,%g) not expected location.' \
% (geom.GetX(0),geom.GetY(0)) )
return 'fail'
# Other possible tests:
# Polylines with no explicit Z coordinates (e.g., no attribute 38 for
# LWPOLYLINE and no attribute 30 for POLYLINE) should always return
# geometry type ogr.wkbPolygon. Otherwise, ogr.wkbPolygon25D should be
# returned even if the Z coordinate values are zero.
# If the arc_stepsize global is used, one could test that returned adjacent
# points do not slope-diverge greater than that value.
feat.Destroy()
ds = None
return 'success'
###############################################################################
# Check smooth LWPOLYLINE entity.
def ogr_dxf_14():
# This test is identical to the previous one except the
# newer lwpolyline entity is used. See the comments in the
# previous test regarding caveats, etc.
ds = ogr.Open( 'data/lwpolyline_smooth.dxf' )
layer = ds.GetLayer(0)
feat = layer.GetNextFeature()
if feat.Layer != '1':
gdaltest.post_reason( 'did not get expected layer for feature 0' )
return 'fail'
geom = feat.GetGeometryRef()
if geom.GetGeometryType() != ogr.wkbLineString:
gdaltest.post_reason( 'did not get expected geometry type.' )
return 'fail'
envelope = geom.GetEnvelope()
area = (envelope[1] - envelope[0]) * (envelope[3] - envelope[2])
exp_area = 1350.43
if area < exp_area - 0.5 or area > exp_area + 0.5:
gdaltest.post_reason( 'envelope area not as expected, got %g.' % area )
return 'fail'
if geom.GetPointCount() != 146:
gdaltest.post_reason( 'did not get expected number of points, got %d' % (geom.GetPointCount()) )
return 'fail'
if abs(geom.GetX(0)-251297.8179) > 0.001 \
or abs(geom.GetY(0)-412226.8286) > 0.001:
gdaltest.post_reason( 'first point (%g,%g) not expected location.' \
% (geom.GetX(0),geom.GetY(0)) )
return 'fail'
feat.Destroy()
ds = None
return 'success'
###############################################################################
# Write a file with dynamic layer creation and confirm that the
# dynamically created layer 'abc' matches the definition of the default
# layer '0'.
def ogr_dxf_15():
ds = ogr.GetDriverByName('DXF').CreateDataSource('tmp/dxf_14.dxf',
['FIRST_ENTITY=80'] )
lyr = ds.CreateLayer( 'entities' )
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'LINESTRING(10 12, 60 65)' ) )
dst_feat.SetField( 'Layer', 'abc' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POLYGON((0 0,100 0,100 100,0 0))' ) )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
lyr = None
ds = None
# Read back.
ds = ogr.Open('tmp/dxf_14.dxf')
lyr = ds.GetLayer(0)
# Check first feature
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'LINESTRING(10 12, 60 65)' ):
print(feat.GetGeometryRef().ExportToWkt())
return 'fail'
if feat.GetGeometryRef().GetGeometryType() == ogr.wkbLineString25D:
gdaltest.post_reason( 'not linestring 2D' )
return 'fail'
if feat.GetField('Layer') != 'abc':
gdaltest.post_reason( 'Did not get expected layer, abc.' )
return 'fail'
feat.Destroy()
# Check second point.
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat,
'POLYGON((0 0,100 0,100 100,0 0))' ):
print(feat.GetGeometryRef().ExportToWkt())
return 'fail'
if feat.GetGeometryRef().GetGeometryType() == ogr.wkbPolygon25D:
gdaltest.post_reason( 'not keeping polygon 2D' )
return 'fail'
if feat.GetField('Layer') != '0':
print(feat.GetField('Layer'))
gdaltest.post_reason( 'Did not get expected layer, 0.' )
return 'fail'
feat.Destroy()
lyr = None
ds.Destroy()
ds = None
# Check the DXF file itself to try and ensure that the layer
# is defined essentially as we expect. We assume the only thing
# that will be different is the layer name is 'abc' instead of '0'
# and the entity id.
outdxf = open('tmp/dxf_14.dxf').read()
start_1 = outdxf.find(' 0\nLAYER')
start_2 = outdxf.find(' 0\nLAYER',start_1+10)
txt_1 = outdxf[start_1:start_2]
txt_2 = outdxf[start_2:start_2+len(txt_1)+2]
abc_off = txt_2.find('abc\n')
if txt_2[16:abc_off] + '0' + txt_2[abc_off+3:] != txt_1[16:]:
        print(txt_2[16:abc_off] + '0' + txt_2[abc_off+3:])
print(txt_1)
gdaltest.post_reason( 'Layer abc does not seem to match layer 0.' )
return 'fail'
# Check that $HANDSEED was set as expected.
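    # $HANDSEED is the next free entity handle, so it reflects the
    # FIRST_ENTITY=80 creation option plus the handles consumed above.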
start_seed = outdxf.find('$HANDSEED')
handseed = outdxf[start_seed+10+4:start_seed+10+4+8]
if handseed != '00000053':
gdaltest.post_reason( 'Did not get expected HANDSEED, got %s.' % handseed)
return 'fail'
os.unlink( 'tmp/dxf_14.dxf' )
return 'success'
###############################################################################
# Test reading without DXF blocks inlined.
def ogr_dxf_16():
gdal.SetConfigOption( 'DXF_INLINE_BLOCKS', 'FALSE' )
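    # With inlining disabled, block definitions appear as a separate 'blocks'
    # layer and INSERT entities keep their BlockName/BlockAngle/BlockScale
    # attributes instead of being expanded in place.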
dxf_ds = ogr.Open( 'data/assorted.dxf' )
if dxf_ds is None:
return 'fail'
if dxf_ds.GetLayerCount() != 2:
gdaltest.post_reason( 'expected exactly two layers!' )
return 'fail'
dxf_layer = dxf_ds.GetLayer(1)
if dxf_layer.GetName() != 'entities':
gdaltest.post_reason( 'did not get expected layer name.' )
return 'fail'
# read through till we encounter the block reference.
feat = dxf_layer.GetNextFeature()
while feat.GetField('EntityHandle') != '55':
feat = dxf_layer.GetNextFeature()
# check contents.
if feat.GetField('BlockName') != 'STAR':
gdaltest.post_reason( 'Did not get blockname!' )
return 'fail'
if feat.GetField('BlockAngle') != 0.0:
gdaltest.post_reason( 'Did not get expected angle.' )
return 'fail'
if feat.GetField('BlockScale') != [1.0,1.0,1.0]:
print(feat.GetField('BlockScale'))
gdaltest.post_reason( 'Did not get expected BlockScale' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT (79.097653776656188 119.962195062443342 0)' ):
return 'fail'
feat = None
# Now we need to check the blocks layer and ensure it is as expected.
dxf_layer = dxf_ds.GetLayer(0)
if dxf_layer.GetName() != 'blocks':
gdaltest.post_reason( 'did not get expected layer name.' )
return 'fail'
# First MTEXT
feat = dxf_layer.GetNextFeature()
if feat.GetField( 'Text' ) != gdaltest.sample_text:
gdaltest.post_reason( 'Did not get expected first mtext.' )
return 'fail'
expected_style = 'LABEL(f:"Arial",t:"'+gdaltest.sample_style+'",a:45,s:0.5g,p:5,c:#000000)'
if feat.GetStyleString() != expected_style:
gdaltest.post_reason( 'Got unexpected style string:\n%s\ninstead of:\n%s.' % (feat.GetStyleString(),expected_style) )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT (-1.495452348993292 0.813702013422821 0)' ):
return 'fail'
# Second MTEXT
feat = dxf_layer.GetNextFeature()
if feat.GetField( 'Text' ) != 'Second':
gdaltest.post_reason( 'Did not get expected second mtext.' )
return 'fail'
if feat.GetField( 'SubClasses' ) != 'AcDbEntity:AcDbMText':
gdaltest.post_reason( 'Did not get expected subclasses.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT (0.879677852348995 -0.263903355704699 0)' ):
return 'fail'
# STAR geometry
feat = dxf_layer.GetNextFeature()
if feat.GetField('BlockName') != 'STAR':
gdaltest.post_reason( 'Did not get expected block name.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'GEOMETRYCOLLECTION (LINESTRING (-0.028147497671066 1.041457413829428 0,0.619244948763444 -1.069604911500494 0),LINESTRING (0.619244948763444 -1.069604911500494 0,-0.957014920816232 0.478507460408116 0),LINESTRING (-0.957014920816232 0.478507460408116 0,1.041457413829428 0.365917469723853 0),LINESTRING (1.041457413829428 0.365917469723853 0,-0.478507460408116 -1.041457413829428 0),LINESTRING (-0.478507460408116 -1.041457413829428 0,-0.056294995342131 1.013309916158363 0))' ):
return 'fail'
feat = None
# cleanup
gdal.SetConfigOption( 'DXF_INLINE_BLOCKS', 'TRUE' )
return 'success'
###############################################################################
# Write a file with blocks defined from a source blocks layer.
def ogr_dxf_17():
ds = ogr.GetDriverByName('DXF').CreateDataSource('tmp/dxf_17.dxf',
['HEADER=data/header_extended.dxf'])
blyr = ds.CreateLayer( 'blocks' )
lyr = ds.CreateLayer( 'entities' )
dst_feat = ogr.Feature( feature_def = blyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'GEOMETRYCOLLECTION( LINESTRING(0 0,1 1),LINESTRING(1 0,0 1))' ) )
dst_feat.SetField( 'BlockName', 'XMark' )
blyr.CreateFeature( dst_feat )
dst_feat.Destroy()
# Write a block reference feature.
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POINT(200 100)' ))
dst_feat.SetField( 'Layer', 'abc' )
dst_feat.SetField( 'BlockName', 'XMark' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
    # Write a block reference feature for a non-existent block.
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POINT(300 50)' ))
dst_feat.SetField( 'Layer', 'abc' )
dst_feat.SetField( 'BlockName', 'DoesNotExist' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
# Write a block reference feature for a template defined block
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POINT(250 200)' ))
dst_feat.SetField( 'Layer', 'abc' )
dst_feat.SetField( 'BlockName', 'STAR' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
# Write a block reference feature with scaling and rotation
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POINT(300 100)' ))
dst_feat.SetField( 'BlockName', 'XMark' )
dst_feat.SetField( 'BlockAngle', '30' )
dst_feat.SetFieldDoubleList(lyr.GetLayerDefn().GetFieldIndex('BlockScale'),
[4.0,5.0,6.0] )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
ds = None
# Reopen and check contents.
ds = ogr.Open('tmp/dxf_17.dxf')
lyr = ds.GetLayer(0)
# Check first feature.
feat = lyr.GetNextFeature()
if feat.GetField('SubClasses') != 'AcDbEntity:AcDbBlockReference':
gdaltest.post_reason( 'Got wrong subclasses for feature 1.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'GEOMETRYCOLLECTION (LINESTRING (200 100,201 101),LINESTRING (201 100,200 101))' ):
print( 'Feature 1' )
return 'fail'
# Check second feature.
feat = lyr.GetNextFeature()
if feat.GetField('SubClasses') != 'AcDbEntity:AcDbPoint':
gdaltest.post_reason( 'Got wrong subclasses for feature 2.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT (300 50)' ):
print( 'Feature 2' )
return 'fail'
# Check third feature.
feat = lyr.GetNextFeature()
if feat.GetField('SubClasses') != 'AcDbEntity:AcDbBlockReference':
gdaltest.post_reason( 'Got wrong subclasses for feature 3.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'GEOMETRYCOLLECTION (LINESTRING (249.971852502328943 201.04145741382942 0,250.619244948763452 198.930395088499495 0),LINESTRING (250.619244948763452 198.930395088499495 0,249.042985079183779 200.47850746040811 0),LINESTRING (249.042985079183779 200.47850746040811 0,251.04145741382942 200.365917469723854 0),LINESTRING (251.04145741382942 200.365917469723854 0,249.52149253959189 198.95854258617058 0),LINESTRING (249.52149253959189 198.95854258617058 0,249.943705004657858 201.013309916158363 0))' ):
print( 'Feature 3' )
return 'fail'
# Check fourth feature (scaled and rotated)
feat = lyr.GetNextFeature()
if feat.GetField('SubClasses') != 'AcDbEntity:AcDbBlockReference':
gdaltest.post_reason( 'Got wrong subclasses for feature 4.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'GEOMETRYCOLLECTION (LINESTRING (300 100,300.964101615137736 106.330127018922198),LINESTRING (303.464101615137736 102.0,297.5 104.330127018922198))' ):
print( 'Feature 4' )
return 'fail'
# Cleanup
lyr = None
ds = None
os.unlink( 'tmp/dxf_17.dxf' )
return 'success'
###############################################################################
# Write a file with line patterns, and make sure corresponding Linetypes are
# created.
def ogr_dxf_18():
ds = ogr.GetDriverByName('DXF').CreateDataSource('tmp/dxf_18.dxf',
['HEADER=data/header_extended.dxf'])
lyr = ds.CreateLayer( 'entities' )
# Write a feature with a predefined LTYPE in the header.
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt('LINESTRING(0 0,25 25)') )
dst_feat.SetField( 'Linetype', 'DASHED' )
dst_feat.SetStyleString( 'PEN(c:#ffff00,w:2g,p:"12.0g 6.0g")' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
# Write a feature with a named linetype but that isn't predefined in the header.
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt('LINESTRING(5 5,30 30)') )
dst_feat.SetField( 'Linetype', 'DOTTED' )
dst_feat.SetStyleString( 'PEN(c:#ffff00,w:2g,p:"0.0g 4.0g")' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
# Write a feature without a linetype name - it will be created.
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt('LINESTRING(5 5,40 30)') )
dst_feat.SetStyleString( 'PEN(c:#ffff00,w:2g,p:"3.0g 4.0g")' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
ds = None
# Reopen and check contents.
ds = ogr.Open('tmp/dxf_18.dxf')
lyr = ds.GetLayer(0)
# Check first feature.
feat = lyr.GetNextFeature()
if feat.GetField('Linetype') != 'DASHED':
gdaltest.post_reason( 'Got wrong linetype. (1)' )
return 'fail'
if feat.GetStyleString() != 'PEN(c:#ffff00,w:2g,p:"12.6999999999999993g 6.3499999999999996g")':
print(feat.GetStyleString())
gdaltest.post_reason( "got wrong style string (1)" )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'LINESTRING (0 0,25 25)' ):
return 'fail'
# Check second feature.
feat = lyr.GetNextFeature()
if feat.GetField('Linetype') != 'DOTTED':
gdaltest.post_reason( 'Got wrong linetype. (2)' )
return 'fail'
if feat.GetStyleString() != 'PEN(c:#ffff00,w:2g,p:"0.0g 4.0g")':
print(feat.GetStyleString())
gdaltest.post_reason( "got wrong style string (2)" )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'LINESTRING (5 5,30 30)' ):
return 'fail'
# Check third feature.
feat = lyr.GetNextFeature()
if feat.GetField('Linetype') != 'AutoLineType-1':
gdaltest.post_reason( 'Got wrong linetype. (3)' )
return 'fail'
if feat.GetStyleString() != 'PEN(c:#ffff00,w:2g,p:"3.0g 4.0g")':
print(feat.GetStyleString())
gdaltest.post_reason( "got wrong style string (3)" )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'LINESTRING (5 5,40 30)' ):
return 'fail'
# Cleanup
lyr = None
ds = None
os.unlink( 'tmp/dxf_18.dxf' )
return 'success'
###############################################################################
# Test writing a file using references to blocks defined entirely in the
# template - no blocks layer transferred.
def ogr_dxf_19():
ds = ogr.GetDriverByName('DXF').CreateDataSource('tmp/dxf_19.dxf',
['HEADER=data/header_extended.dxf'])
lyr = ds.CreateLayer( 'entities' )
# Write a block reference feature for a template defined block
dst_feat = ogr.Feature( feature_def = lyr.GetLayerDefn() )
dst_feat.SetGeometryDirectly( ogr.CreateGeometryFromWkt( 'POINT(250 200)' ))
dst_feat.SetField( 'Layer', 'abc' )
dst_feat.SetField( 'BlockName', 'STAR' )
lyr.CreateFeature( dst_feat )
dst_feat.Destroy()
ds = None
# Reopen and check contents.
ds = ogr.Open('tmp/dxf_19.dxf')
lyr = ds.GetLayer(0)
# Check first feature.
feat = lyr.GetNextFeature()
if feat.GetField('SubClasses') != 'AcDbEntity:AcDbBlockReference':
gdaltest.post_reason( 'Got wrong subclasses for feature 1.' )
return 'fail'
if ogrtest.check_feature_geometry( feat, 'GEOMETRYCOLLECTION (LINESTRING (249.971852502328943 201.04145741382942 0,250.619244948763452 198.930395088499495 0),LINESTRING (250.619244948763452 198.930395088499495 0,249.042985079183779 200.47850746040811 0),LINESTRING (249.042985079183779 200.47850746040811 0,251.04145741382942 200.365917469723854 0),LINESTRING (251.04145741382942 200.365917469723854 0,249.52149253959189 198.95854258617058 0),LINESTRING (249.52149253959189 198.95854258617058 0,249.943705004657858 201.013309916158363 0))' ):
return 'fail'
# Cleanup
lyr = None
ds = None
os.unlink( 'tmp/dxf_19.dxf' )
return 'success'
###############################################################################
# SPLINE
def ogr_dxf_20():
ds = ogr.Open('data/spline_qcad.dxf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'LINESTRING (10.75 62.75,20.637752769146068 63.434832501489716,29.283239084385464 63.396838394381845,36.766943814562865 62.711565975596599,43.169351828522906 61.454563542054103,48.570947995110252 59.70137939067456,53.05221718316956 57.527561818378146,56.693644261545501 55.008659122085049,59.575714099082703 52.220219598715438,61.778911564625851 49.237791545189509,63.383721527019588 46.136923258427423,64.470628855108572 42.993163035349369,65.120118417737459 39.882059172875508,65.412419131869868 36.878358785215056,65.417809785093752 34.025663008687722,65.193643595004147 31.327113252708507,64.796409941597645 28.783146935042897,64.282598204870823 26.394201473456341,63.708697764820236 24.16071428571431,63.131198001442392 22.083122789582241,62.606588294733939 20.161864402825621,62.191358024691354 18.397376543209894,61.941996571311265 16.790096628500525,61.914993314590184 15.340462076462975,62.166837634524704 14.0489103048627,62.754018911111373 12.915878731465167,63.723652286703427 11.940700981548817,65.053571428571416 11.114552964042769,66.690557841792398 10.424954275262921,68.581246558980226 9.859407264767562,70.672272612748785 9.405414282114966,72.910271035711943 9.050477676863418,75.241876860483572 8.782099798571203,77.613725119677511 8.587782996796603,79.97245084590763 8.4550296210979,82.264689071787842 8.371342021033378,84.437074829931987 8.324222546161321,86.436243152953921 8.301173546040012,88.208926721776336 8.289771106365336,89.722559658784164 8.293223374005688,90.990763736417563 8.349615688917151,92.033410218878885 8.501752503862612,92.870370370370395 8.792438271604945,93.521515455094473 9.264477444907039,94.006716737253413 9.960674476531764,94.345845481049565 10.923833819242011,94.558772950685281 12.196759925800654,94.665370410362868 13.82225724897058,94.685509124284636 15.843130241514663,94.639060356652948 18.302183356195791,94.545895371670113 21.242221045776841,94.421471763308503 24.702030018356666,94.215205541358216 28.660279617432039,93.825673773330607 33.049360720184715,93.15014577259474 37.800473760933045,92.085890852519697 42.844819173995376,90.530178326474584 48.113597393690064,88.380277507828495 53.538008854335445,85.533457709950525 59.049253990249873,81.886988246209697 64.578533235751706,77.338138429975174 70.057047025159264,71.784177574615995 75.415995792790937,65.122374993501282 80.586579972965055,57.25 85.5)' ):
return 'fail'
ds = None
return 'success'
###############################################################################
# CIRCLE
def ogr_dxf_21():
ds = ogr.Open('data/circle.dxf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'LINESTRING (5 2 3,4.990256201039297 1.720974105023499 3,4.961072274966281 1.443307596159738 3,4.912590402935223 1.168353236728963 3,4.845046783753276 0.897450576732003 3,4.758770483143634 0.631919426697325 3,4.654181830570403 0.373053427696799 3,4.531790371435708 0.122113748856437 3,4.392192384625703 -0.11967705693282 3,4.23606797749979 -0.351141009169893 3,4.064177772475912 -0.571150438746157 3,3.877359201354605 -0.778633481835989 3,3.676522425435433 -0.972579301909577 3,3.462645901302633 -1.152043014426888 3,3.236771613882987 -1.316150290220167 3,3.0 -1.464101615137754 3,2.75348458715631 -1.595176185196668 3,2.498426373663648 -1.70873541826715 3,2.23606797749979 -1.804226065180614 3,1.967687582398672 -1.881182905103986 3,1.694592710667722 -1.939231012048832 3,1.418113853070614 -1.978087581473093 3,1.139597986810004 -1.997563308076383 3,0.860402013189997 -1.997563308076383 3,0.581886146929387 -1.978087581473094 3,0.305407289332279 -1.939231012048832 3,0.032312417601329 -1.881182905103986 3,-0.236067977499789 -1.804226065180615 3,-0.498426373663648 -1.70873541826715 3,-0.75348458715631 -1.595176185196668 3,-1.0 -1.464101615137755 3,-1.236771613882987 -1.316150290220167 3,-1.462645901302633 -1.152043014426888 3,-1.676522425435433 -0.972579301909577 3,-1.877359201354605 -0.778633481835989 3,-2.064177772475912 -0.571150438746158 3,-2.236067977499789 -0.351141009169893 3,-2.392192384625704 -0.11967705693282 3,-2.531790371435707 0.122113748856436 3,-2.654181830570403 0.373053427696798 3,-2.758770483143633 0.631919426697324 3,-2.845046783753275 0.897450576732001 3,-2.912590402935223 1.168353236728963 3,-2.961072274966281 1.443307596159737 3,-2.990256201039297 1.720974105023498 3,-3.0 2.0 3,-2.990256201039297 2.279025894976499 3,-2.961072274966281 2.556692403840262 3,-2.912590402935223 2.831646763271036 3,-2.845046783753276 3.102549423267996 3,-2.758770483143634 3.368080573302675 3,-2.654181830570404 3.626946572303199 3,-2.531790371435708 3.877886251143563 3,-2.392192384625704 4.119677056932819 3,-2.23606797749979 4.351141009169892 3,-2.064177772475912 4.571150438746157 3,-1.877359201354604 4.778633481835989 3,-1.676522425435434 4.972579301909576 3,-1.462645901302632 5.152043014426889 3,-1.236771613882989 5.316150290220166 3,-1.0 5.464101615137753 3,-0.753484587156311 5.595176185196667 3,-0.498426373663649 5.70873541826715 3,-0.23606797749979 5.804226065180615 3,0.032312417601329 5.881182905103985 3,0.305407289332279 5.939231012048833 3,0.581886146929387 5.978087581473094 3,0.860402013189993 5.997563308076383 3,1.139597986810005 5.997563308076383 3,1.418113853070612 5.978087581473094 3,1.69459271066772 5.939231012048833 3,1.96768758239867 5.881182905103986 3,2.236067977499789 5.804226065180615 3,2.498426373663648 5.70873541826715 3,2.75348458715631 5.595176185196668 3,3.0 5.464101615137754 3,3.236771613882985 5.316150290220168 3,3.462645901302634 5.152043014426887 3,3.676522425435431 4.972579301909578 3,3.877359201354603 4.778633481835991 3,4.064177772475912 4.571150438746159 3,4.23606797749979 4.351141009169893 3,4.392192384625702 4.119677056932823 3,4.531790371435708 3.877886251143563 3,4.654181830570404 3.626946572303201 3,4.758770483143634 3.368080573302675 3,4.845046783753275 3.102549423267999 3,4.912590402935223 2.831646763271039 3,4.961072274966281 2.556692403840263 3,4.990256201039298 2.279025894976499 3,5.0 2.0 3)' ):
return 'fail'
ds = None
return 'success'
###############################################################################
# TEXT
def ogr_dxf_22():
# Read TEXT feature
ds = ogr.Open('data/text.dxf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetFieldAsString('Text') != 'test_text':
gdaltest.post_reason('bad attribute')
return 'fail'
style = feat.GetStyleString()
if style != 'LABEL(f:"Arial",t:"test_text",a:45,s:10g,c:#ff0000)':
gdaltest.post_reason('bad style')
print(style)
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT(1 2 3)' ):
gdaltest.post_reason('bad geometry')
return 'fail'
# Write text feature
out_ds = ogr.GetDriverByName('DXF').CreateDataSource('/vsimem/ogr_dxf_22.dxf')
out_lyr = out_ds.CreateLayer( 'entities' )
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
out_feat.SetStyleString(style)
out_feat.SetGeometry(feat.GetGeometryRef())
out_lyr.CreateFeature(out_feat)
out_feat = None
out_lyr = None
out_ds = None
ds = None
# Check written file
ds = ogr.Open('/vsimem/ogr_dxf_22.dxf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetFieldAsString('Text') != 'test_text':
gdaltest.post_reason('bad attribute')
return 'fail'
style = feat.GetStyleString()
if style != 'LABEL(f:"Arial",t:"test_text",a:45,s:10g,c:#ff0000)':
gdaltest.post_reason('bad style')
print(style)
return 'fail'
if ogrtest.check_feature_geometry( feat, 'POINT(1 2 3)' ):
gdaltest.post_reason('bad geometry')
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_dxf_22.dxf')
return 'success'
###############################################################################
# POLYGON with hole
def ogr_dxf_23():
# Write polygon
out_ds = ogr.GetDriverByName('DXF').CreateDataSource('/vsimem/ogr_dxf_23.dxf')
out_lyr = out_ds.CreateLayer( 'entities' )
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
out_feat.SetStyleString('BRUSH(fc:#ff0000)')
wkt = 'POLYGON ((0 0,0 10,10 10,10 0,0 0),(1 1,1 9,9 9,9 1,1 1))'
out_feat.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
out_lyr.CreateFeature(out_feat)
out_feat = None
out_lyr = None
out_ds = None
ds = None
# Check written file
ds = ogr.Open('/vsimem/ogr_dxf_23.dxf')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
style = feat.GetStyleString()
if style != 'BRUSH(fc:#ff0000)':
gdaltest.post_reason('bad style')
print(style)
return 'fail'
if ogrtest.check_feature_geometry( feat, wkt ):
gdaltest.post_reason('bad geometry')
return 'fail'
ds = None
gdal.Unlink('/vsimem/ogr_dxf_23.dxf')
return 'success'
###############################################################################
# cleanup
def ogr_dxf_cleanup():
gdaltest.dxf_layer = None
gdaltest.dxf_ds.Destroy()
gdaltest.dxf_ds = None
return 'success'
###############################################################################
#
gdaltest_list = [
ogr_dxf_1,
ogr_dxf_2,
ogr_dxf_3,
ogr_dxf_4,
ogr_dxf_5,
ogr_dxf_6,
ogr_dxf_7,
ogr_dxf_8,
ogr_dxf_9,
ogr_dxf_10,
ogr_dxf_11,
ogr_dxf_12,
ogr_dxf_13,
ogr_dxf_14,
ogr_dxf_15,
ogr_dxf_16,
ogr_dxf_17,
ogr_dxf_18,
ogr_dxf_19,
ogr_dxf_20,
ogr_dxf_21,
ogr_dxf_22,
ogr_dxf_23,
ogr_dxf_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_dxf' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
# ===== source file: /venv/Lib/site-packages/nltk/chat/iesha.py (repo: msainTesting/TwitterAnalysis) =====
# Natural Language Toolkit: Teen Chatbot
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Selina Dennis <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
This chatbot is a tongue-in-cheek take on the average teen
anime junky that frequents YahooMessenger or MSNM.
All spelling mistakes and flawed grammar are intentional.
"""
from nltk.chat.util import Chat
reflections = {
"am": "r",
"was": "were",
"i": "u",
"i'd": "u'd",
"i've": "u'v",
"ive": "u'v",
"i'll": "u'll",
"my": "ur",
"are": "am",
"you're": "im",
"you've": "ive",
"you'll": "i'll",
"your": "my",
"yours": "mine",
"you": "me",
"u": "me",
"ur": "my",
"urs": "mine",
"me": "u",
}
# Note: %1/2/etc are used without spaces prior as the chat bot seems
# to add a superfluous space when matching.
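# (Annotation, not part of the original module.) Each pair below is
# (input_regex, response_tuple): nltk.chat.util.Chat picks a random response
# and substitutes %1, %2, ... with the corresponding regex groups, after
# mapping pronouns through the `reflections` dict above (e.g. "i" -> "u").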
pairs = (
(
r"I\'m (.*)",
(
"ur%1?? that's so cool! kekekekeke ^_^ tell me more!",
"ur%1? neat!! kekeke >_<",
),
),
(
r"(.*) don\'t you (.*)",
(
r"u think I can%2??! really?? kekeke \<_\<",
"what do u mean%2??!",
"i could if i wanted, don't you think!! kekeke",
),
),
(r"ye[as] [iI] (.*)", ("u%1? cool!! how?", "how come u%1??", "u%1? so do i!!")),
(
r"do (you|u) (.*)\??",
("do i%2? only on tuesdays! kekeke *_*", "i dunno! do u%2??"),
),
(
r"(.*)\?",
(
"man u ask lots of questions!",
"booooring! how old r u??",
"boooooring!! ur not very fun",
),
),
(
r"(cos|because) (.*)",
("hee! i don't believe u! >_<", "nuh-uh! >_<", "ooooh i agree!"),
),
(
r"why can\'t [iI] (.*)",
(
"i dunno! y u askin me for!",
"try harder, silly! hee! ^_^",
"i dunno! but when i can't%1 i jump up and down!",
),
),
(
r"I can\'t (.*)",
(
"u can't what??! >_<",
"that's ok! i can't%1 either! kekekekeke ^_^",
"try harder, silly! hee! ^&^",
),
),
(
r"(.*) (like|love|watch) anime",
(
"omg i love anime!! do u like sailor moon??! ^&^",
"anime yay! anime rocks sooooo much!",
"oooh anime! i love anime more than anything!",
"anime is the bestest evar! evangelion is the best!",
"hee anime is the best! do you have ur fav??",
),
),
(
r"I (like|love|watch|play) (.*)",
("yay! %2 rocks!", "yay! %2 is neat!", "cool! do u like other stuff?? ^_^"),
),
(
r"anime sucks|(.*) (hate|detest) anime",
(
"ur a liar! i'm not gonna talk to u nemore if u h8 anime *;*",
"no way! anime is the best ever!",
"nuh-uh, anime is the best!",
),
),
(
r"(are|r) (you|u) (.*)",
("am i%1??! how come u ask that!", "maybe! y shud i tell u?? kekeke >_>"),
),
(
r"what (.*)",
("hee u think im gonna tell u? .v.", "booooooooring! ask me somethin else!"),
),
(r"how (.*)", ("not tellin!! kekekekekeke ^_^",)),
(r"(hi|hello|hey) (.*)", ("hi!!! how r u!!",)),
(
r"quit",
(
"mom says i have to go eat dinner now :,( bye!!",
"awww u have to go?? see u next time!!",
"how to see u again soon! ^_^",
),
),
(
r"(.*)",
(
"ur funny! kekeke",
"boooooring! talk about something else! tell me wat u like!",
"do u like anime??",
"do u watch anime? i like sailor moon! ^_^",
"i wish i was a kitty!! kekekeke ^_^",
),
),
)
iesha_chatbot = Chat(pairs, reflections)
def iesha_chat():
print("Iesha the TeenBoT\n---------")
print("Talk to the program by typing in plain English, using normal upper-")
print('and lower-case letters and punctuation. Enter "quit" when done.')
print("=" * 72)
print("hi!! i'm iesha! who r u??!")
iesha_chatbot.converse()
def demo():
iesha_chat()
if __name__ == "__main__":
demo()
# ===== source file: /customers/management/commands/generate_purchases.py (repo: jeremy886/bookstore, MIT) =====
from django.core.management.base import BaseCommand, CommandError
import factory
from ...factories import PurchaseFactory
from ...models import Purchase
class Command(BaseCommand):
    help = 'Generate the given number of Purchases'
def add_arguments(self, parser):
parser.add_argument('num', nargs='?', default=10, type=int)
parser.add_argument(
'--clear',
action='store_true',
dest='clear',
default=False,
help='Clear out all Purchases before generating new'
)
def handle(self, *args, **options):
if options['clear']:
Purchase.objects.all().delete()
        # create_batch persists the generated objects; build_batch would only
        # build unsaved instances, so nothing would reach the database.
        factory.create_batch(PurchaseFactory, size=options['num'])
self.stdout.write(
self.style.SUCCESS('Successfully generated %s purchase(s)' % options['num'])
        )
# ===== source file: /python/1263A.py (repo: npkhanhh/codeforces) =====
n = int(input())
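# (Annotation, not part of the original solution.) This matches Codeforces
# 1263A: three piles are reduced by taking one item from two different piles
# per move. With a <= b <= c, if c > a + b the big pile is the bottleneck and
# only a + b moves fit; otherwise moves pair items off until at most one is
# left, giving (a + b + c) // 2.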
for _ in range(n):
a, b, c = sorted(list(map(int, input().split())))
if a + b >= c:
        print((a+b+c)//2)
else:
print(a+b)
# ===== source file: /40.py (repo: HHariHHaran/python-programming) =====
n11 = 0
n12 = 1
count = 0
nterms = int(input("How many terms? "))
if nterms <= 0:
   print("Please enter a positive integer")
elif nterms == 1:
print("Fibonacci sequence upto",nterms,":")
print(n11)
else:
print("Fibonacci sequence upto",nterms,":")
while count < nterms:
print(n11,end=' ')
nth = n11 + n12
n11 = n12
n12 = nth
count += 1
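# (Annotation, not part of the original script.) Example run for input 5:
#   Fibonacci sequence up to 5 :
#   0 1 1 2 3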
# ===== source file: /pentai/run_ai.py (repo: cropleyb/pentai, MIT) =====
#!/usr/bin/env python
'''
from guppy import hpy
h = hpy()
h.setref()
'''
import pentai.base.rules as r_m
import pentai.base.game as g_m
import time
from pentai.ai.ai_player import *
#from pentai.db.evaluator import *
from pentai.db.ai_factory import *
from pentai.db.openings_book import *
from pentai.db.games_mgr import *
import pentai.db.zodb_dict as z_m
import pentai.ai.ab_game as abg_m
class TwoTimer:
def __init__(self):
self.totals = [0.0, 0.0]
self.current = 0
def __enter__(self):
self.start = time.clock()
return self
def __exit__(self, *args):
self.end = time.clock()
self.totals[self.current] += self.end - self.start
self.current = 1 - self.current
def __repr__(self):
tot = self.totals
return "B: %.2fs, W: %.2fs, B/W: %.2f" % (tot[0], tot[1], tot[0]/tot[1])
class MatchResults():
def __init__(self):
self.results = []
self.bw_total = [0, 0, 0]
self.dc_total = {"Defender":0, "Contender":0}
self.total_ratio = 0.
self.games_played = 0
def __repr__(self):
return "\n".join(self.results) + "\nB/W:" + str(self.bw_total) \
+ " won: " + str(self.dc_total) \
+ " C/D avg. time: " + str(self.total_ratio / self.games_played)
def add(self, result):
res_str, winner_colour, winner_name, ratio = result
self.results.append(res_str)
self.bw_total[winner_colour] += 1
self.dc_total[str(winner_name)] += 1
self.total_ratio += ratio
self.games_played += 1
class Match():
def __init__(self):
self.genome1 = AIGenome("Defender")
self.genome2 = AIGenome("Contender")
# We're not doing player lookups, so we don't need the players_mgr
self.games_mgr = GamesMgr()
self.openings_book = OpeningsBook()
def set_up(self, game_length, p2_depth_boost=0):
aif = AIFactory()
self.genome1.max_depth = game_length
self.genome2.max_depth = game_length + p2_depth_boost
self.p1 = aif.create_player(self.genome1)
self.p2 = aif.create_player(self.genome2)
def play_one_game(self, board_size, rules_type, p1, p2):
r = r_m.Rules(board_size, rules_type)
self.game = self.games_mgr.create_game(r, p1, p2)
#self.evaluator = Evaluator(self.game.current_state)
tt = TwoTimer()
while not self.game.finished():
p = self.game.get_current_player()
with tt:
turn, prev_move, m = p.do_the_search()
self.game.make_move(m)
#print self.evaluator.utility()
winner_name = self.game.winner_name()
winner = self.game.get_won_by()
if self.games_mgr:
self.games_mgr.save(self.game)
self.games_mgr.sync_all()
if self.openings_book:
self.openings_book.add_game(self.game, winner)
if p1.get_name() == "Contender":
ratio = tt.totals[0] / tt.totals[1]
else:
ratio = tt.totals[1] / tt.totals[0]
print "Game %s %s %s was won by: %s %s" % (board_size, p1.max_depth, rules_type, ["?", "B", "W"][winner], winner_name)
print tt
ret = "%s vs. %s: %s (%sx%s %s) %s" % (p1.get_name(), p2.get_name(), winner_name,
board_size, board_size, p1.max_depth, tt), winner, winner_name, ratio
return ret
def play_some_games(self):
aip_m.set_skip_openings_book(True) # For deterministic results
self.genome1.use_openings_book = False
self.genome2.use_openings_book = False
self.genome2.filter_num = 2
#self.genome1.capture_score_base = 300
#self.genome2.capture_score_base = 300
#self.genome2.capture_score_base = 500
#self.genome1.enclosed_four_base = 400
#self.genome2.enclosed_four_base = 400
#self.genome2.enclosed_four_base = 300
#self.genome1.threat_score_base = 20
#self.genome2.threat_score_base = 20
#self.genome2.threat_score_base = 25
#self.genome1.length_factor = 35
#self.genome2.length_factor = 35
#self.genome2.length_factor = 22
#self.genome1.move_factor = 45
#self.genome2.move_factor = 45
#self.genome2.move_factor = 80
#self.genome1.checkerboard_value = 0
#self.genome1.checkerboard_value = 30
#self.genome2.checkerboard_value = 35
#self.threat_score_base = 20
#self.take_score_base = 80
#self.capture_score_base = 300
#self.genome1.utility_filter = True
#self.genome2.filter_num = 5
#self.genome2.chokes = [(3,3)]
#self.genome2.mmpdl = 13
#self.genome2.utility_filter = True
#self.genome1.move_factor = 5
#self.genome2.use_net_captures = False
#self.genome1.move_factor = 5
#l_boosts = []
#l_boosts.append((2,1.15))
#l_boosts.append((3,1.05))
#l_boosts.append((4,1.1))
#l_b1 = l_boosts[:]
#self.genome1.length_boosts = l_b1
#l_boosts.append((3,1.05))
#l_boosts.append((4,1.05))
#self.genome2.length_boosts = l_boosts
'''
#self.genome2.move_factor = 5
# All round config:
# self.genome1.take_score_base = 72
self.genome2.take_score_base = 72
# self.genome1.length_factor = 32
self.genome2.length_factor = 32
#self.genome2.threat_score_base = 20 # OK as is.
# self.genome1.capture_score_base = 350 # Try this again for high depth
self.genome2.capture_score_base = 350 # Try this again for high depth
# self.genome1.enclosed_four_base = 315
self.genome2.enclosed_four_base = 315
l_boosts = []
l_boosts.append((2,1.15))
l_boosts.append((3,1.05))
l_boosts.append((4,1.1))
l_b1 = l_boosts[:]
# self.genome1.length_boosts = l_b1
#l_boosts.append((4,1.05))
self.genome2.length_boosts = l_boosts
st_boosts = []
st_boosts.append((2,0,0.95))
st_boosts.append((2,2,1.1))
st_boosts.append((3,0,1.1))
st_boosts.append((3,2,1.1))
st_boosts.append((4,0,1.1))
st_boosts.append((4,2,.75))
st_b1 = st_boosts[:]
# self.genome1.sub_type_boosts = st_b1
#st_b1.append((2,0,0.95))
self.genome2.sub_type_boosts = st_boosts
'''
#"length_boosts": [], # length, boost
#"sub_type_boosts": [], # length, sub_type, boost):
#self.genome1.enclosed_four_base = 300
#self.genome1.vision = 0.98
#self.genome2.vision = 0.98
#self.genome1.filter_num = 4
#self.genome2.filter_num = 5
#self.genome2.narrowing = 3
#self.genome2.max_depth += 2 # Setting max_depth here doesn't work
#self.genome1.mmpdl = 15
#self.genome1.chokes = []
#self.genome2.mmpdl = 15
#self.genome2.chokes = [(4,2),(6,1)]
#self.genome2.chokes = [(3,1)]
#self.genome2.bl_cutoff = True
#self.genome2.chokes = [(4,5)]
#self.genome2.chokes = [(4,3)]
#self.genome2.chokes = [(2,2)]
#self.genome1.max_depth_boost = 2
#self.genome2.max_depth_boost = 2
#self.genome2.captures_scale = [1, 1, 1, 1, 2, 4]
#self.genome2.captures_scale = [0, 0, 0, 0, 0, 0]
#self.genome2.length_scale = [0, 0, 0, 0, 0, 0]
#self.genome2.move_factor = 10000000
#self.genome1.calc_mode = 3
#self.genome2.calc_mode = 2
#self.genome2.use_net_captures = False
#self.genome2.scale_pob = True
#self.genome2.move_factor = 50
#self.genome2.move_factor = 45
#self.genome2.move_factor = 8
#self.genome2.move_factor = 10
#self.genome2.force_depth = 4 FAIL ;)
#self.genome2.misjudgement = 8
results = MatchResults()
#for game_length in range(2,3):
#for game_length in range(2,4):
for game_length in range(2,5):
#for game_length in range(2,6):
#for game_length in range(2,7):
#for game_length in range(2,8):
#for game_length in range(2,9):
#for game_length in range(2,10):
#for board_size in [19]:
for board_size in [13, 19]:
for first_player in [0, 1]:
#for rules_type in ['s', 't']:
#for rules_type in ['t']:
for rules_type in ['s']:
#self.set_up(game_length, 1) # For p2 depth boost
self.set_up(game_length)
players = [self.p1, self.p2]
second_player = 1 - first_player
res = self.play_one_game(board_size, rules_type,
players[first_player],
players[second_player])
#hits = [players[i].ab_game.transposition_hits for i in [0,1]]
#print "Hits: %s" % hits
results.add(res)
print results
import sys
def memory_usage_resource():
import resource
rusage_denom = 1024.
if sys.platform == 'darwin':
# ... it seems that in OSX the output is different units ...
rusage_denom = rusage_denom * rusage_denom
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
return mem
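# (Annotation, not part of the original script.) ru_maxrss is reported in
# kilobytes on Linux but in bytes on OSX, hence the extra division above;
# either way the function returns the peak RSS in megabytes.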
import random
import pstats, cProfile
import gc
if __name__ == "__main__":
z_m.set_db("db.fs")
'''
random.seed()
# the code you want to memory-profile
'''
'''
#while True:
m = Match()
m.play_some_games()
m = None
gc.collect()
mem = memory_usage_resource()
print mem
'''
'''
heap_data = h.heap()
print heap_data
print heap_data.more
st()
'''
m = Match()
cProfile.runctx("m.play_some_games()", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("cumulative").print_stats(20) # or "time"
#s.strip_dirs().sort_stats("time").print_stats(20)
# ===== source file: /poi_stock_account_consolidate/models/__init__.py (repo: blue-connect/illuminati) =====
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock_account
# ===== source file: /unsourced/scrape.py (repo: bcampbell/unsourced) =====
import urllib
import collections
import json
import datetime
from unsourced import util,analyser,highlight
from unsourced.forms import EnterArticleForm
from unsourced.models import Article,ArticleURL,Action
class Status:
""" status codes returned by scrapomat """
SUCCESS = 0
NET_ERROR = 1
BAD_REQ = 2
PAYWALLED = 3
PARSE_ERROR = 4
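# (Annotation, not part of the original module.) Shape of a scrapomat reply,
# as inferred from the handling in process_scraped() below; values are
# illustrative:
#
#   {"status": 0,
#    "article": {"headline": "...", "pubdate": 1325376000,
#                "permalink": "http://...", "urls": ["http://...", ...]}}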
def process_scraped(url,response):
""" process http response from scrapomat, return an article (or raise exception) """
scraped_art = None
enter_form = EnterArticleForm(url=url)
err_msg = None
if response.error:
# scrapomat down :-(
raise Exception("Sorry, there was a problem reading the article.")
results = json.loads(response.body)
if results['status'] != Status.SUCCESS:
error_messages = {
Status.PAYWALLED: u"Sorry, that article seems to be behind a paywall.",
Status.PARSE_ERROR: u"Sorry, we couldn't read the article",
Status.BAD_REQ: u"Sorry, that URL doesn't look like an article",
Status.NET_ERROR: u"Sorry, we couldn't read that article - is the URL correct?",
}
err_msg = error_messages.get(results['status'],"Unknown error")
raise Exception(err_msg)
scraped_art = results['article']
scraped_art['pubdate'] = datetime.datetime.fromtimestamp(scraped_art['pubdate'])
# use entry form to validate everything's there (ugh!)
enter_form.url.data = url
enter_form.title.data = scraped_art['headline']
enter_form.pubdate.data = scraped_art['pubdate']
if not enter_form.validate():
scraped_art = None
err_msg = u"Sorry, we weren't able to automatically read all the details"
raise Exception(err_msg)
# if we've got this far, we now have all the details needed to load the article into the DB. Yay!
url_objs = [ArticleURL(url=u) for u in scraped_art['urls']]
art = Article(scraped_art['headline'],scraped_art['permalink'], scraped_art['pubdate'], url_objs)
return art
# ===== source file: /python/ray/tune/execution/ray_trial_executor.py (repo: AmeerHajAli/ray, Apache-2.0/MIT/BSD-3-Clause) =====
# coding: utf-8
import copy
import logging
import os
import random
import time
import traceback
from collections import deque
from enum import Enum
from functools import partial
from typing import Callable, Dict, Iterable, Optional, Set, Union
import ray
from ray.actor import ActorHandle
from ray.air import Checkpoint, AcquiredResources
from ray.air._internal.checkpoint_manager import CheckpointStorage, _TrackedCheckpoint
from ray.air.constants import (
COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV,
DISABLE_LAZY_CHECKPOINTING_ENV,
)
from ray.air.execution import ResourceManager
from ray.air.execution.resources.placement_group import (
PlacementGroupResourceManager,
)
from ray.exceptions import GetTimeoutError, RayTaskError
from ray.tune.error import (
TuneError,
_AbortTrialExecution,
_TuneNoNextExecutorEventError,
_TuneStartTrialError,
)
from ray.tune.result import STDERR_FILE, STDOUT_FILE, TRIAL_INFO
from ray.tune.experiment.trial import (
Trial,
_Location,
_TrialInfo,
_change_working_directory,
_get_trainable_kwargs,
)
from ray.tune.utils import warn_if_slow
from ray.tune.utils.object_cache import _ObjectCache
from ray.tune.utils.resource_updater import _ResourceUpdater
from ray.tune.trainable.util import TrainableUtil
from ray.util import log_once
from ray.util.annotations import DeveloperAPI
logger = logging.getLogger(__name__)
DEFAULT_GET_TIMEOUT = 60.0 # seconds
DEFAULT_ENV_VARS = {
# https://github.com/ray-project/ray/issues/28197
"PL_DISABLE_FORK": "1"
}
ENV_VARS_TO_PROPAGATE = {
DISABLE_LAZY_CHECKPOINTING_ENV,
COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV,
"TUNE_CHECKPOINT_CLOUD_RETRY_NUM",
"TUNE_CHECKPOINT_CLOUD_RETRY_WAIT_TIME_S",
}
class _ActorClassCache:
"""Caches actor classes.
ray.remote is a registration call. It sends the serialized object to the
    key value store (redis), where it will be fetched by an arbitrary worker
    later. Registration does not use any Ray scheduling resources.
Later, class.remote() actually creates the remote actor. The
actor will be instantiated on some arbitrary machine,
according to the underlying Ray scheduler.
Without this cache, you would register the same serialized object
over and over again. Naturally, since redis doesn’t spill to disk,
this can easily nuke the redis instance (and basically blow up Ray).
This cache instead allows us to register once and only once.
Note that we assume there can be multiple trainables in the
system at once.
"""
def __init__(self):
self._cache = {}
def get(self, trainable_cls):
"""Gets the wrapped trainable_cls, otherwise calls ray.remote."""
env_vars = DEFAULT_ENV_VARS.copy()
for env_var_to_propagate in ENV_VARS_TO_PROPAGATE:
if env_var_to_propagate in os.environ:
env_vars[env_var_to_propagate] = os.environ[env_var_to_propagate]
runtime_env = {"env_vars": env_vars}
if trainable_cls not in self._cache:
remote_cls = ray.remote(runtime_env=runtime_env)(trainable_cls)
self._cache[trainable_cls] = remote_cls
return self._cache[trainable_cls]
_class_cache = _ActorClassCache()
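# (Annotation, not in the original source.) Sketch of the cache's effect:
# repeated lookups for the same trainable class return the same remote
# wrapper, so the class body is serialized and registered with Ray only once.
# `MyTrainable` is a hypothetical user-defined Trainable subclass:
#
#   remote_cls = _class_cache.get(MyTrainable)   # registers on first call
#   assert _class_cache.get(MyTrainable) is remote_cls
#   actor = remote_cls.remote(config=...)        # normal actor creation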
class _LocalWrapper:
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
class _TrialCleanup:
"""Responsible for triggering force cleanup of remote actors,
without waiting for `Trainable.stop()` to finish.
Only instantiated when `TUNE_FORCE_TRIAL_CLEANUP_S` is set up.
"""
def __init__(self, force_cleanup):
assert force_cleanup
self._force_cleanup = force_cleanup
self._future_to_insert_time = deque()
def add(self, future):
self._future_to_insert_time.append((future, time.time()))
def get_next(self):
"""Get the next future that is eligible to be cleaned up forcibly."""
if len(self._future_to_insert_time) > 0 and (
self._future_to_insert_time[0][1] + self._force_cleanup < time.time()
):
future, _time = self._future_to_insert_time.popleft()
return future
else:
return None
def is_empty(self):
return len(self._future_to_insert_time) == 0
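# (Annotation, not in the original source.) Intended flow, assuming the
# default TUNE_FORCE_TRIAL_CLEANUP_S of 600 seconds:
#
#   cleanup = _TrialCleanup(force_cleanup=600)
#   cleanup.add(actor.stop.remote())   # remember when stop() was issued
#   ...
#   future = cleanup.get_next()        # non-None once 600s have elapsed,
#                                      # signalling a force-kill is due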
class _ExecutorEventType(Enum):
"""The executor event type.
Some of the events are internal events to executor while others
are handled by runner."""
NO_RUNNING_TRIAL_TIMEOUT = 1
PG_READY = 2
TRAINING_RESULT = 3
SAVING_RESULT = 4
RESTORING_RESULT = 5
STOP_RESULT = 6 # Internally to executor only.
YIELD = 7 # Yielding back to TrialRunner's main event loop.
class _ExecutorEvent:
"""A struct that describes the event to be processed by TrialRunner.
Attributes:
result: A dict with keys of "future_result" and "exception".
"future_result" is the corresponding result when future returns
successfully.
"exception" is the exception as caught during ``ray.get(future)``.
"""
KEY_FUTURE_RESULT = "future_result"
KEY_EXCEPTION = "exception"
def __init__(
self,
event_type: _ExecutorEventType,
trial: Optional[Trial] = None,
result: Optional[Dict] = None,
):
self.type = event_type
self.trial = trial
self.result = result
def __repr__(self):
return f"[{self.type}] for {self.trial}"
@DeveloperAPI
class RayTrialExecutor:
"""An implementation of TrialExecutor based on Ray."""
def __init__(
self,
resource_manager: Optional[ResourceManager] = None,
reuse_actors: bool = False,
result_buffer_length: Optional[int] = None,
refresh_period: Optional[float] = None,
chdir_to_trial_dir: bool = False,
):
# Trial metadata
self._cached_trial_state = {}
self._trials_to_cache = set()
# future --> (type, trial/pg)
self._futures = {}
# Cache futures that are ready to reduce the number times we iterate through
# all futures (and e.g. shuffle them)
self._cached_ready_futures = []
# Cleanup
force_trial_cleanup = int(os.environ.get("TUNE_FORCE_TRIAL_CLEANUP_S", "600"))
self._get_next_event_wait = int(
os.environ.get("TUNE_GET_EXECUTOR_EVENT_WAIT_S", "5")
)
if force_trial_cleanup:
self._trial_cleanup = _TrialCleanup(force_trial_cleanup)
else:
self._trial_cleanup = None
# For printing used resources
self._resource_updater = _ResourceUpdater(refresh_period)
# Resource management.
# For details, see docstring of `_stage_and_update_status()`
self._max_staged_actors = 1
self._resource_manager = resource_manager or PlacementGroupResourceManager()
# Actor re-use.
# For details, see docstring of `_maybe_cache_trial_actor()`
self._reuse_actors = reuse_actors
self._actor_cache = _ObjectCache(may_keep_one=True)
# Trials for which we requested resources
self._staged_trials = set() # Staged trials
self._trial_to_acquired_resources: Dict[Trial, AcquiredResources] = {}
# Result buffer
self._buffer_length = result_buffer_length or int(
os.getenv("TUNE_RESULT_BUFFER_LENGTH", 1)
)
self._buffer_min_time_s = float(os.getenv("TUNE_RESULT_BUFFER_MIN_TIME_S", 0.0))
self._buffer_max_time_s = float(
os.getenv("TUNE_RESULT_BUFFER_MAX_TIME_S", 100.0)
)
# Default kwargs to pass to trainable
self._trainable_kwargs = {}
# Trial dir behavior
self._chdir_to_trial_dir = chdir_to_trial_dir
def setup(
self, max_pending_trials: int, trainable_kwargs: Optional[Dict] = None
) -> None:
if self._actor_cache.num_cached_objects:
logger.warning(
"Cannot update maximum number of queued actors for reuse "
"during a run."
)
else:
self._max_staged_actors = max_pending_trials
self._trainable_kwargs = trainable_kwargs or {}
def set_status(self, trial: Trial, status: str) -> None:
"""Sets status and checkpoints metadata if needed.
Only checkpoints metadata if trial status is a terminal condition.
PENDING, PAUSED, and RUNNING switches have checkpoints taken care of
in the TrialRunner.
Args:
trial: Trial to checkpoint.
status: Status to set trial to.
"""
if trial.status == status:
logger.debug("Trial %s: Status %s unchanged.", trial, trial.status)
else:
logger.debug(
"Trial %s: Changing status from %s to %s.", trial, trial.status, status
)
trial.set_status(status)
if status in [Trial.TERMINATED, Trial.ERROR]:
self._trials_to_cache.add(trial)
def mark_trial_to_checkpoint(self, trial: Trial) -> None:
self._trials_to_cache.add(trial)
def get_checkpoints(self) -> Dict[str, str]:
"""Returns a copy of mapping of the trial ID to pickled metadata."""
for trial in self._trials_to_cache:
self._cached_trial_state[trial.trial_id] = trial.get_json_state()
self._trials_to_cache.clear()
return self._cached_trial_state
def _stage_and_update_status(self, trials: Iterable[Trial]):
"""Check and update statuses of scheduled placement groups.
Stages placement groups of all trials.
We will never request resources for more than `_max_staged_actors` at the same
time. This does not include running actors.
Thus, if max_staged_actors=4 and e.g. 8 trials can run at the same time,
we will occupy resources for up to 8 actors and have requests pending
for 4 more.
"""
for trial in trials:
if len(self._staged_trials) >= self._max_staged_actors:
break
if trial.status not in (Trial.PENDING, Trial.PAUSED):
continue
if trial in self._staged_trials:
continue
resource_request = trial.placement_group_factory
self._staged_trials.add(trial)
self._actor_cache.increase_max(resource_request)
self._resource_manager.request_resources(resource_request=resource_request)
self._resource_manager.update_state()
def get_ready_trial(self) -> Optional[Trial]:
"""Get a trial whose resources are ready and that thus can be started.
Can also return None if no trial is available.
Returns:
Trial object or None.
"""
for trial in self._staged_trials:
resource_request = trial.placement_group_factory
# If we have a cached actor for these resources, return
if self._actor_cache.has_cached_object(resource_request):
return trial
# If the resources are available from the resource manager, return
if self._resource_manager.has_resources_ready(
resource_request=resource_request
):
return trial
return None
def _maybe_use_cached_actor(self, trial, logger_creator) -> Optional[ActorHandle]:
if not self._reuse_actors:
return None
resource_request = trial.placement_group_factory
if not self._actor_cache.has_cached_object(resource_request):
return None
actor, acquired_resources = self._actor_cache.pop_cached_object(
resource_request
)
logger.debug(f"Trial {trial}: Reusing cached actor {actor}")
trial.set_runner(actor)
if not self.reset_trial(
trial, trial.config, trial.experiment_tag, logger_creator
):
raise _AbortTrialExecution(
"Trainable runner reuse requires reset_config() to be "
"implemented and return True."
)
self._trial_to_acquired_resources[trial] = acquired_resources
# We are reusing an existing actor (and its resources),
# so we need to cancel the resource request that we originally scheduled
# for this trial.
self._resource_manager.cancel_resource_request(resource_request)
return actor
def _setup_remote_runner(self, trial):
# We checkpoint metadata here to try mitigating logdir duplication
self._trials_to_cache.add(trial)
trainable_kwargs = _get_trainable_kwargs(
trial,
additional_kwargs=self._trainable_kwargs,
should_chdir=self._chdir_to_trial_dir,
)
logger_creator = trainable_kwargs["logger_creator"]
existing_runner = self._maybe_use_cached_actor(trial, logger_creator)
if existing_runner:
return existing_runner
trainable_cls = trial.get_trainable_cls()
if not trainable_cls:
raise _AbortTrialExecution(
f"Invalid trainable: {trial.trainable_name}. If you passed "
f"a string, make sure the trainable was registered before."
)
_actor_cls = _class_cache.get(trainable_cls)
resource_request = trial.placement_group_factory
acquired_resources = self._resource_manager.acquire_resources(
resource_request=resource_request
)
if not acquired_resources:
return None
self._trial_to_acquired_resources[trial] = acquired_resources
[full_actor_class] = acquired_resources.annotate_remote_entities([_actor_cls])
# Clear the Trial's location (to be updated later on result)
# since we don't know where the remote runner is placed.
trial.set_location(_Location())
logger.debug("Trial %s: Setting up new remote runner.", trial)
with _change_working_directory(trial):
return full_actor_class.remote(**trainable_kwargs)
def _train(self, trial):
"""Start one iteration of training and save remote id."""
if self._find_future(trial):
logging.debug(
"Trial {} already has a queued future. Skipping this "
"`train` call. This may occur if a trial has "
"been unpaused within a scheduler callback.".format(str(trial))
)
return
assert trial.status == Trial.RUNNING, trial.status
buffer_time_s = max(
self._buffer_min_time_s,
min(self._buffer_max_time_s, len(self._futures) // 10),
)
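        # (Annotation, not in the original source.) With the default min/max
        # bounds (0s/100s), roughly one second of buffering is allowed per 10
        # in-flight futures: e.g. 35 tracked futures -> train_buffered() may
        # collect results for up to 3 seconds before returning a batch.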
with _change_working_directory(trial):
buffer_length = self._buffer_length
if buffer_length > 1 and trial.checkpoint_at_end:
# If a trial checkpoint can be triggered externally,
# it is not safe to buffer results.
if log_once("trial_executor_buffer_checkpoint"):
logger.warning(
"Disabling buffered training as you passed "
"`checkpoint_at_end` to `air.CheckpointConfig()`."
)
buffer_length = 1
if buffer_length > 1:
if trial.checkpoint_freq > 0:
buffer_length = min(buffer_length, trial.checkpoint_freq)
remote = trial.runner.train_buffered.remote(
buffer_time_s, buffer_length
)
else:
remote = trial.runner.train.remote()
# Local Mode
if isinstance(remote, dict):
remote = _LocalWrapper(remote)
self._futures[remote] = (_ExecutorEventType.TRAINING_RESULT, trial)
trial_item = self._find_future(trial)
assert len(trial_item) < 2, trial_item
def _start_trial(self, trial: Trial) -> bool:
"""Starts trial and restores last result if trial was paused.
Args:
trial: The trial to start.
Returns:
True if trial was started successfully, False otherwise.
See `RayTrialExecutor.restore` for possible errors raised.
"""
self.set_status(trial, Trial.PENDING)
runner = self._setup_remote_runner(trial)
if not runner:
return False
trial.set_runner(runner)
self.restore(trial)
self.set_status(trial, Trial.RUNNING)
self._unstage_trial_with_resources(trial)
if not trial.is_restoring:
self._train(trial)
return True
def _unstage_trial_with_resources(self, trial: Trial):
# Case 1: The trial we started was staged. Just remove it
if trial in self._staged_trials:
self._staged_trials.remove(trial)
self._actor_cache.decrease_max(trial.placement_group_factory)
return
# Case 2: We staged a trial "A" with the same resources, but our trial "B"
# was selected by the scheduler to run. The resource manager does not care
# about "trials", it just cares about resources being available. Thus we
# look for a staged trial with the same resource requirements and remove it
resource_request = trial.placement_group_factory
# Remove staged trial with same resource requirements
candidate_trial = None
for staged_trial in self._staged_trials:
staged_resources = staged_trial.placement_group_factory
if staged_resources == resource_request:
candidate_trial = staged_trial
break
if candidate_trial:
self._staged_trials.remove(candidate_trial)
self._actor_cache.decrease_max(candidate_trial.placement_group_factory)
return
raise RuntimeError(
"Started a trial with resources requested by a different trial, but "
"this trial was lost. This is an error in Ray Tune's execution "
"logic. Please raise a GitHub issue at "
"https://github.com/ray-project/ray/issues"
)
def _maybe_cache_trial_actor(self, trial: Trial) -> bool:
"""Cache trial actor for reuse, if needed.
We will only cache as many actors as are needed to fulfill any pending
resource requests for actors with the same resource requirements.
E.g. if we have 6 running trials and 4 additional staged actors, we will only
cache up to 4 of the running trial actors when they finish.
One exception is the case when we have no cached actors, yet. In that case,
we will always cache the actor in this method.
Later, in `_cleanup_cached_actors`, we will check again if we need this cached
actor. That method will keep the actor if we don't have any staged trials,
because we don't know at that point if the next trial might require the same
resources. But because there is no staged trial, it is safe to keep the actor
around, as it won't occupy resources needed by another trial until it's staged.
"""
if not self._reuse_actors:
return False
acquired_resources = self._trial_to_acquired_resources[trial]
cached_resource_request = acquired_resources.resource_request
if not self._actor_cache.cache_object(
cached_resource_request, (trial.runner, acquired_resources)
):
logger.debug(
f"Could not cache actor of trial {trial} for "
"reuse, as there are no pending trials "
"requiring its resources."
)
return False
logger.debug(f"Caching actor of trial {trial} for re-use")
self._trial_to_acquired_resources.pop(trial)
trial.set_runner(None)
return True
def _stop_trial(
self,
trial: Trial,
error: bool = False,
exc: Optional[Union[TuneError, RayTaskError]] = None,
):
"""Stops this trial.
Stops this trial, releasing all allocating resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error: Whether to mark this trial as terminated in error.
exc: Optional exception.
"""
self.set_status(trial, Trial.ERROR if error or exc else Trial.TERMINATED)
trial.set_location(_Location())
if not hasattr(trial, "runner") or not trial.runner:
return
if exc:
trial.handle_error(exc=exc)
if not error and self._maybe_cache_trial_actor(trial):
# Trial runner has been cached
return
try:
logger.debug("Trial %s: Destroying actor.", trial)
with _change_working_directory(trial):
future = trial.runner.stop.remote()
acquired_resources = self._trial_to_acquired_resources.pop(trial)
self._futures[future] = (
_ExecutorEventType.STOP_RESULT,
acquired_resources,
)
if self._trial_cleanup: # force trial cleanup within a deadline
self._trial_cleanup.add(future)
except Exception:
logger.exception("Trial %s: Error stopping runner.", trial)
self.set_status(trial, Trial.ERROR)
finally:
trial.set_runner(None)
def start_trial(self, trial: Trial) -> bool:
"""Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial: Trial to be started.
Returns:
True if the remote runner has been started. False if trial was
not started (e.g. because of lacking resources/pending PG).
"""
try:
return self._start_trial(trial)
except _AbortTrialExecution as e:
logger.exception("Trial %s: Error starting runner, aborting!", trial)
time.sleep(2)
self._stop_trial(trial, exc=e)
return False
except Exception as e:
logger.exception("Trial %s: Unexpected error starting runner.", trial)
time.sleep(2)
if isinstance(e, TuneError):
self._stop_trial(trial, exc=e)
else:
self._stop_trial(
trial, exc=_TuneStartTrialError(traceback.format_exc())
)
# Note that we don't return the resources, since they may
# have been lost. TODO(ujvl): is this the right thing to do?
return False
def _find_future(self, trial):
out = [rid for rid, t in self._futures.items() if t[1] is trial]
assert (
len(out) <= 1
), "Expecting one future for any given trial at any given time."
return out
def stop_trial(
self,
trial: Trial,
error: bool = False,
exc: Optional[Union[TuneError, RayTaskError]] = None,
) -> None:
"""Stops the trial, releasing held resources and removing futures related to
this trial from the execution queue.
Args:
trial: Trial to stop.
error: Whether to mark this trial as terminated in error. The trial status
will be set to either `Trial.ERROR` or `Trial.TERMINATED` based on this.
Defaults to False.
exc: Optional exception to log (as a reason for stopping). Defaults to None.
"""
prior_status = trial.status
if prior_status == Trial.RUNNING:
logger.debug("Trial %s: Returning resources.", trial)
out = self._find_future(trial)
for result_id in out:
self._futures.pop(result_id)
trial.saving_to = None
trial.restoring_from = None
self._stop_trial(
trial,
error=error or exc,
exc=exc,
)
def continue_training(self, trial: Trial) -> None:
"""Continues the training of this trial."""
self._train(trial)
def pause_trial(self, trial: Trial, should_checkpoint: bool = True) -> None:
"""Pauses the trial, releasing resources (specifically GPUs)
We do this by:
1. Checkpoint the trial (if `should_checkpoint`) in memory to allow us to resume
from this state in the future. We may not always want to checkpoint, if we
know that the checkpoint will not be used.
2. Stop the trial and release resources, see `RayTrialExecutor.stop_trial` above
3. Set the trial status to `Trial.PAUSED`, which is similar to
`Trial.TERMINATED`, except we have the intention of resuming the trial.
Args:
trial: Trial to pause.
should_checkpoint: Whether to save an in-memory checkpoint before stopping.
"""
assert trial.status == Trial.RUNNING, trial.status
try:
if should_checkpoint:
self.save(trial, CheckpointStorage.MEMORY)
self.stop_trial(trial)
self.set_status(trial, Trial.PAUSED)
except Exception:
logger.exception("Error pausing runner.")
self.set_status(trial, Trial.ERROR)
def reset_trial(
self,
trial: Trial,
new_config: Dict,
new_experiment_tag: str,
logger_creator: Optional[Callable[[Dict], "ray.tune.Logger"]] = None,
) -> bool:
"""Tries to invoke `Trainable.reset()` to reset trial.
Args:
trial: Trial to be reset.
new_config: New configuration for Trial trainable.
new_experiment_tag: New experiment name for trial.
logger_creator: Function that instantiates a logger on the
actor process.
Returns:
True if `reset_config` is successful else False.
"""
trial.set_experiment_tag(new_experiment_tag)
trial.set_config(new_config)
trainable = trial.runner
# Pass magic variables
extra_config = copy.deepcopy(new_config)
extra_config[TRIAL_INFO] = _TrialInfo(trial)
stdout_file, stderr_file = trial.log_to_file
extra_config[STDOUT_FILE] = stdout_file
extra_config[STDERR_FILE] = stderr_file
with _change_working_directory(trial):
with warn_if_slow("reset"):
try:
reset_val = ray.get(
trainable.reset.remote(
extra_config,
logger_creator=logger_creator,
remote_checkpoint_dir=trial.remote_path,
),
timeout=DEFAULT_GET_TIMEOUT,
)
except GetTimeoutError:
logger.exception("Trial %s: reset timed out.", trial)
return False
return reset_val
def has_resources_for_trial(self, trial: Trial) -> bool:
"""Returns whether there are resources available for this trial.
This will return True as long as we didn't reach the maximum number
of pending trials. It will also return True if the trial placement
group is already staged.
Args:
trial: Trial object which should be scheduled.
Returns:
boolean
"""
resource_request = trial.placement_group_factory
return (
trial in self._staged_trials
or self._actor_cache.has_cached_object(resource_request)
or len(self._staged_trials) < self._max_staged_actors
or self._resource_manager.has_resources_ready(resource_request)
)
def _allocated_resources(self) -> dict:
total_resources = {"CPU": 0, "GPU": 0}
for allocated_resource in self._trial_to_acquired_resources.values():
resource_request = allocated_resource.resource_request
for bundle_resources in resource_request.bundles:
for key, val in bundle_resources.items():
total_resources[key] = total_resources.get(key, 0) + val
return total_resources
def debug_string(self) -> str:
"""Returns a human readable message for printing to the console."""
allocated_resources = self._allocated_resources()
return self._resource_updater.debug_string(allocated_resources)
def on_step_begin(self) -> None:
"""Before step() is called, update the available resources."""
self._resource_updater.update_avail_resources()
def on_step_end(self, search_ended: bool = False) -> None:
self._cleanup_cached_actors(search_ended=search_ended)
self._do_force_trial_cleanup()
def _cleanup_cached_actors(
self, search_ended: bool = False, force_all: bool = False
):
"""Clean up unneeded cached actors.
Ray Tune caches actors for re-use to avoid initialization overhead. This is
useful in two situations: a) to avoid scheduling overhead of actors when
trials run for a short time (e.g. < 1 second), and b) to avoid setup overhead
when trials initialize a heavy environment/dependencies (e.g. rllib).
Actors are cached when a trial is stopped. However, at that point in time
we don't always know if we will need the actor later or not. E.g. if all
subsequent trials have different resource requirements, we wouldn't need to
cache the actor.
Tune will generate a new trial in the next iteration of step(). Only once
this new trial is generated can we know if we will actually use the cached
        actor soon. This is especially the case when we only run one trial at a time
and don't allow more pending trials: At the point of caching there would be
_no_ pending trial, so we would never cache the actor.
To circumvent this problem, we always cache an actor once the trial is
gracefully stopped (and when `reuse_actors=True`). We only remove this
cached actor once we have at least one new staged trial, so that we know
if we need to keep the actor or not. So if we create a new trial in the next
iteration, we can either reuse the cached actor (if resources match) or
remove it (if resources don't match and the cached actor is thus not required).
This method fetches the required resources for all pending trials and the
resources for all cached actors. If we cached more actors than we need, we
terminate the excess actors and free the resources.
"""
if not self._staged_trials and not force_all and not search_ended:
# If we don't have any staged trials, keep cached actors,
# unless cleanup is forced or no new trials are going to be generated
# (if the search ended).
return
if (
search_ended
and not self._staged_trials
and self._actor_cache.total_max_objects == 0
):
# If there are no more trials coming in, no trials are pending execution,
# and we don't explicitly want to cache objects, we can evict the full
# cache.
force_all = True
for actor, acquired_resources in self._actor_cache.flush_cached_objects(
force_all=force_all
):
future = actor.stop.remote()
self._futures[future] = (
_ExecutorEventType.STOP_RESULT,
acquired_resources,
)
if self._trial_cleanup: # force trial cleanup within a deadline
self._trial_cleanup.add(future)
def _resolve_stop_event(
self,
future: ray.ObjectRef,
acquired_resources: AcquiredResources,
timeout: Optional[float] = None,
):
"""Resolve stopping future (Trainable.cleanup() and free resources."""
try:
# Let's check one more time if the future resolved. If not,
# we remove the PG which will terminate the actor.
ray.get(future, timeout=timeout)
except GetTimeoutError:
if log_once("tune_trial_cleanup_timeout"):
logger.error(
"Timed out when trying to stop the Ray actor gracefully. "
"Consider making `stop` a faster operation."
)
except Exception:
if log_once("tune_trial_cleanup_exception"):
logger.error(
f"An exception occurred when trying to stop the Ray actor:"
f"{traceback.format_exc()}"
)
finally:
self._resource_manager.free_resources(acquired_resources)
def _do_force_trial_cleanup(self) -> None:
if self._trial_cleanup:
while True:
next_future_to_clean = self._trial_cleanup.get_next()
if not next_future_to_clean:
break
if next_future_to_clean in self._futures:
event_type, acquired_resources = self._futures.pop(
next_future_to_clean
)
assert event_type == _ExecutorEventType.STOP_RESULT
# Clean this future
self._resolve_stop_event(
next_future_to_clean, acquired_resources, timeout=0.01
)
else:
# This just means that before the deadline reaches,
# the future is already cleaned up.
pass
def force_reconcilation_on_next_step_end(self) -> None:
self._resource_manager.update_state()
def save(
self,
trial: Trial,
storage: CheckpointStorage = CheckpointStorage.PERSISTENT,
result: Optional[Dict] = None,
) -> _TrackedCheckpoint:
"""Saves the trial's state to a checkpoint asynchronously.
Args:
trial: The trial to be saved.
storage: Where to store the checkpoint. Defaults to
PERSISTENT.
result: The state of this trial as a dictionary to be saved.
If result is None, the trial's last result will be used.
Returns:
Checkpoint object, or None if an Exception occurs.
"""
logger.debug(f"saving trial {trial}")
result = result or trial.last_result
with _change_working_directory(trial):
if storage == CheckpointStorage.MEMORY:
value = trial.runner.save_to_object.remote()
checkpoint = _TrackedCheckpoint(
dir_or_data=value, storage_mode=storage, metrics=result
)
trial.on_checkpoint(checkpoint)
else:
value = trial.runner.save.remote()
checkpoint = _TrackedCheckpoint(
dir_or_data=value,
storage_mode=storage,
metrics=result,
local_to_remote_path_fn=partial(
TrainableUtil.get_remote_storage_path,
logdir=trial.local_path,
remote_checkpoint_dir=trial.remote_path,
)
if trial.uses_cloud_checkpointing
else None,
)
trial.saving_to = checkpoint
self._futures[value] = (_ExecutorEventType.SAVING_RESULT, trial)
return checkpoint
def restore(self, trial: Trial) -> None:
"""Restores training state from a given model checkpoint.
Args:
trial: The trial to be restored.
Raises:
RuntimeError: This error is raised if no runner is found.
AbortTrialExecution: This error is raised if the trial is
ineligible for restoration, given the Tune input arguments.
"""
checkpoint = trial.checkpoint
if checkpoint.dir_or_data is None:
return
if trial.runner is None:
raise RuntimeError(
"Trial {}: Unable to restore - no runner found.".format(trial)
)
checkpoint_dir = checkpoint.dir_or_data
node_ip = checkpoint.node_ip
if checkpoint.storage_mode == CheckpointStorage.MEMORY:
logger.debug("Trial %s: Attempting restore from object", trial)
# Note that we don't store the remote since in-memory checkpoints
# don't guarantee fault tolerance and don't need to be waited on.
with _change_working_directory(trial):
trial.runner.restore_from_object.remote(checkpoint_dir)
else:
logger.debug("Trial %s: Attempting restore from %s", trial, checkpoint_dir)
if (
trial.uses_cloud_checkpointing
or not trial.sync_on_checkpoint
or not os.path.exists(checkpoint_dir)
):
# If using cloud checkpointing, trial will get cp from cloud.
# If not syncing to driver, assume it has access to the cp
# on the local fs.
fallback_to_latest = bool(
int(os.environ.get("TUNE_FALLBACK_TO_LATEST_CHECKPOINT", "1"))
)
with _change_working_directory(trial):
remote = trial.runner.restore.remote(
checkpoint_dir,
checkpoint_node_ip=node_ip,
fallback_to_latest=fallback_to_latest,
)
elif trial.sync_on_checkpoint:
# This provides FT backwards compatibility in the
# case where no cloud checkpoints are provided.
logger.debug("Trial %s: Reading checkpoint into memory", trial)
checkpoint_path = TrainableUtil.find_checkpoint_dir(checkpoint_dir)
obj = Checkpoint.from_directory(checkpoint_path).to_bytes()
with _change_working_directory(trial):
remote = trial.runner.restore_from_object.remote(obj)
else:
raise _AbortTrialExecution(
"Pass in `sync_on_checkpoint=True` for driver-based trial"
"restoration. Pass in an `upload_dir` for remote "
"storage-based restoration"
)
self._futures[remote] = (_ExecutorEventType.RESTORING_RESULT, trial)
trial.restoring_from = checkpoint
def export_trial_if_needed(self, trial: Trial) -> Dict:
"""Exports model of this trial based on trial.export_formats.
        Returns:
A dict that maps ExportFormats to successfully exported models.
"""
if trial.export_formats and len(trial.export_formats) > 0:
with _change_working_directory(trial):
return ray.get(
trial.runner.export_model.remote(trial.export_formats),
timeout=DEFAULT_GET_TIMEOUT,
)
return {}
def has_gpus(self) -> bool:
return self._resource_updater.get_num_gpus() > 0
def cleanup(self) -> None:
self._cleanup_cached_actors(force_all=True)
while self._futures:
if self._trial_cleanup and self._trial_cleanup.is_empty():
break
# Non-blocking trial cleanup futures
self._do_force_trial_cleanup()
# Deal with other futures
ready, _ = ray.wait(list(self._futures.keys()), timeout=0)
if not ready:
continue
event_type, acquired_resources = self._futures.pop(ready[0])
# It could be STOP future after all, if so, deal with it here.
if event_type == _ExecutorEventType.STOP_RESULT:
# Blocking here is ok as the future returned
self._resolve_stop_event(ready[0], acquired_resources, timeout=None)
for staged_trial in self._staged_trials:
resource_request = staged_trial.placement_group_factory
self._resource_manager.cancel_resource_request(
resource_request=resource_request
)
self._resource_manager.clear()
def get_next_executor_event(
self, live_trials: Set[Trial], next_trial_exists: bool
) -> _ExecutorEvent:
"""Get the next executor event to be processed in TrialRunner.
In case there are multiple events available for handling, the next
event is determined by the following priority:
        1. if `next_trial_exists` and there are cached resources
        to use, PG_READY is emitted.
        2. if `next_trial_exists` and there are no cached resources
        to use, wait on the pg future and randomized other futures. If multiple
futures are ready, pg future will take priority to be handled first.
3. if there is no `next_trial_exists`, wait on just randomized other
futures.
An example of #3 would be synchronous hyperband. Although there are pgs
ready, the scheduler is holding back scheduling new trials since the
whole band of trials is waiting for the slowest trial to finish. In
this case, we prioritize handling training result to avoid deadlock
situation.
This is a blocking wait with a timeout (specified with env var).
        The reason for the timeout is that
we still want to print status info periodically in TrialRunner for
better user experience.
        The handling of `ExecutorEvent.STOP_RESULT` is purely internal to
RayTrialExecutor itself. All the other future results are handled by
TrialRunner.
        In the future we may want to do most of the handling of
`ExecutorEvent.RESTORE_RESULT` and `SAVING_RESULT` in
RayTrialExecutor itself and only notify TrialRunner to invoke
corresponding callbacks. This view is more consistent with our goal
of TrialRunner responsible for external facing Trial state transition,
while RayTrialExecutor responsible for internal facing transitions,
namely, `is_saving`, `is_restoring` etc.
Also you may notice that the boundary between RayTrialExecutor and
PlacementGroupManager right now is really blurry. This will be
improved once we move to an ActorPool abstraction.
`next_trial_exists` means that there is a trial to run - prioritize
returning PG_READY in this case.
"""
# First update status of staged placement groups
self._stage_and_update_status(live_trials)
while True:
###################################################################
# when next_trial_exists and there are cached resources
###################################################################
# There could be existing PGs from either
# `self._actor_cache`
# or from ready trials. If so and if there is indeed
# a next trial to run, we return `PG_READY` future for trial
# runner. The next trial can then be scheduled on this PG.
if next_trial_exists:
if self._actor_cache.num_cached_objects > 0:
return _ExecutorEvent(_ExecutorEventType.PG_READY)
# TODO(xwjiang): Expose proper API when we decide to do
# ActorPool abstraction.
if any(
self._resource_manager.has_resources_ready(
trial.placement_group_factory
)
for trial in self._staged_trials
):
return _ExecutorEvent(_ExecutorEventType.PG_READY)
###################################################################
# Prepare for futures to wait
###################################################################
if self._cached_ready_futures and not next_trial_exists:
# If there are cached ready futures, handle the first.
# But: If next trial exists, we want to prioritize PG_READY events.
ready_futures = [self._cached_ready_futures.pop(0)]
else:
# Otherwise, wait for new futures
futures_to_wait = list(self._futures.keys())
random.shuffle(futures_to_wait)
if next_trial_exists:
# Only wait for pg explicitly if there is next trial to run.
                    # In that case, handling PG_READY trumps handling other events,
                    # since we want to place the pending trial ASAP.
futures_to_wait = (
self._resource_manager.get_resource_futures() + futures_to_wait
)
logger.debug(
f"get_next_executor_event before wait with futures "
f"{futures_to_wait} and "
f"next_trial_exists={next_trial_exists}"
)
# Try to resolve all ready futures that are immediately ready
ready, _ = ray.wait(
futures_to_wait, num_returns=len(futures_to_wait), timeout=0
)
if ready:
# If at least one future resolved, use that one. Cache the other
# ones.
ready_futures = [ready.pop(0)]
self._cached_ready_futures = ready
else:
# Otherwise, wait for next future with timeout.
ready_futures, _ = ray.wait(
futures_to_wait,
num_returns=1,
timeout=self._get_next_event_wait,
)
###################################################################
# Dealing with no future returned case.
###################################################################
if len(ready_futures) == 0:
if len(self._futures) == 0:
# No running trial and timing out with wait, could be we may
# have insufficient cluster resources that makes tune run
# infeasible.
# TODO: Move InsufficientResourceManager's logic
# to TrialExecutor. It is not Runner's responsibility!
return _ExecutorEvent(_ExecutorEventType.NO_RUNNING_TRIAL_TIMEOUT)
else:
# Training simply takes long time, yield the control back to main
# event loop to print progress info etc.
return _ExecutorEvent(_ExecutorEventType.YIELD)
###################################################################
# If there is future returned.
###################################################################
assert len(ready_futures) == 1
ready_future = ready_futures[0]
###################################################################
# If it is a PG_READY event.
###################################################################
if ready_future not in self._futures:
self._resource_manager.update_state()
return _ExecutorEvent(_ExecutorEventType.PG_READY)
###################################################################
# non PG_READY event
###################################################################
result_type, trial_or_acquired_resources = self._futures.pop(ready_future)
if result_type == _ExecutorEventType.STOP_RESULT:
# This will block, which is ok as the stop future returned
self._resolve_stop_event(
ready_future, trial_or_acquired_resources, timeout=None
)
else:
trial = trial_or_acquired_resources
assert isinstance(trial, Trial)
assert result_type in (
_ExecutorEventType.TRAINING_RESULT,
_ExecutorEventType.SAVING_RESULT,
_ExecutorEventType.RESTORING_RESULT,
)
try:
future_result = ray.get(ready_future)
# For local mode
if isinstance(future_result, _LocalWrapper):
future_result = future_result.unwrap()
logger.debug(f"Returning [{result_type}] for trial {trial}")
return _ExecutorEvent(
result_type,
trial,
result={_ExecutorEvent.KEY_FUTURE_RESULT: future_result},
)
except Exception as e:
return _ExecutorEvent(
result_type,
trial,
result={
_ExecutorEvent.KEY_EXCEPTION: e.as_instanceof_cause()
if isinstance(e, RayTaskError)
else _TuneNoNextExecutorEventError(traceback.format_exc())
},
)
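# Illustrative sketch (editor's note, not part of the original module): one way
# a driver loop might consume these events; `executor`, `live_trials`, `done`
# and the `handle_*` helpers are hypothetical stand-ins, not names from this file.
#
#     while not done:
#         event = executor.get_next_executor_event(live_trials, next_trial_exists=True)
#         if event.type == _ExecutorEventType.PG_READY:
#             handle_start_next_trial()
#         elif event.type == _ExecutorEventType.YIELD:
#             handle_report_progress()
#         elif event.type == _ExecutorEventType.TRAINING_RESULT:
#             handle_training_result(event.trial, event.result)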
| [
"[email protected]"
] | |
5c5b4e283559db0b607779e73d531b6dba39f937 | d66993b0383ee7a97c9d5fe761269a3cb8e67e22 | /Ejercicios/POO_Listas.py | 63a2dd591e2cff1c2b2819cdc5c99362c94d6104 | [] | no_license | rramosaveros/CursoPythonCisco | 09828e3d8190490c0dc30861ae241f5222e108d6 | 1508e67873adfcf31b8c78d3f5cb2a0572dfeb1c | refs/heads/master | 2023-06-27T12:07:52.652780 | 2021-08-01T14:19:19 | 2021-08-01T14:19:19 | 391,647,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | lst = []
for x in range(10):
lst.append(1 if x % 2 == 0 else 0)
print(lst) | [
"[email protected]"
] | |
ca1be42514d3cf0cd7dd055208df6e2fb2b5309b | bbeecb7cff56a96c580709b425823cde53f21621 | /msw/spots/north_america/nova_scotia.py | 7d3025323b4dad586bac40ff88083a92701a9107 | [] | no_license | hhubbell/python-msw | f8a2ef8628d545b3d57a5e54468222177dc47b37 | 5df38db1dc7b3239a6d00e0516f2942077f97099 | refs/heads/master | 2020-04-05T23:46:21.209888 | 2015-06-16T01:36:43 | 2015-06-16T01:36:43 | 37,476,303 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | LAWRENCETOWN = 342
WESTERN_HEAD = 343
POINT_MICHAUD = 758
COW_BAY = 786
SUMMERVILLE = 788
MARTINIQUE_BEACH = 814
NOVA_SCOTIA_HURRICANE = 1096
| [
"[email protected]"
] | |
f404316472bc8104bb2812ba039e0dd0f54ed07e | adbedf9626c52748aa048f2b17c18d25262b4d56 | /robot_framework_message_queue/build/lib/MessageQueue/MessageQueue.py | ac1ace0da6944b8ec8340417a2d56aaf98fb71ea | [] | no_license | sanjitroy1992/robot_framework_custom_libraries | 3ef91ea6d4705215f86c83d276d67ce7c5af673a | e5fde8f428a4d46d5cacb2c5369f9c59529f5c91 | refs/heads/master | 2022-11-06T09:11:02.148601 | 2020-06-29T09:35:46 | 2020-06-29T09:35:46 | 274,330,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,554 | py | try:
import pymqi
flag = True
except:
flag = False
from stompest.config import StompConfig
from stompest.sync import Stomp
import os
class MessageQueue(object):
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    def _ibm_queue_configure(self, host, port, queue_manager, channel):
""" To configure IBM Queue"""
host = str(host)
port = str(port)
channel = str(channel)
if not queue_manager:
raise AssertionError("queue_manager argument is required.!! Please check and pass queue_manager value")
if not channel:
raise AssertionError("channel argument is required.!! Please check and pass channel value")
conn_info = "%s(%s)" % (host, port)
qmgr = None
try:
qmgr = pymqi.connect(queue_manager, channel, conn_info)
except Exception as e:
raise AssertionError("Exception : {}".format(e))
return qmgr
    def _active_queue_configure(self, host, port, user_name, password):
""" TO connect to Active MQ """
host = str(host)
port = str(port)
user_name = str(user_name)
password = str(password)
if not user_name:
raise AssertionError("user_Name argument is required.!! Please check and pass user_Name value")
if not password:
raise AssertionError("password argument is required.!! Please check and pass password value")
ActiveMQ_url = "tcp://{}:{}".format(host, port)
ActiveMQ_Client = None
config = StompConfig(ActiveMQ_url, login=user_name, passcode=password)
ActiveMQ_Client = Stomp(config)
ActiveMQ_Client.connect()
return ActiveMQ_Client
    def connect_to_message_queue(self, queue_type, host, port, user_name=None, password=None, queue_manager=None, channel=None):
"""|Usage| To create connection to the IBM MQ and Active MQ.
It returns the session which can be used to putting, getting the message and clearing the queue.
|Arguments|
'queue_type' = It takes "IBM" or "Active" as argument in order to specify the queue type.
'host' = It takes the host name for the connection to the queue
'port' = It takes the port name for the connection to the queue
'user_name' = It takes the user name for the connection. This argument is mandatory for Active MQ but optional for IBM MQ.
'password' = It takes the password for the connection. This argument is mandatory for Active MQ but optional for IBM MQ.
'queue_manager' = Name of the queue manger used for IBM Connection only. It is mandatory for IBM MQ
'channel' = Name of the channel which is mandatory for IBM MQ only
Example:
        ${session} Connect To Message Queue Active ${host} ${port} user_name=${user_name} password=${password}
${session} Connect To Message Queue IBM ${host} ${port} queue_manager=${queue_manager} channel=${channel}
"""
if queue_type.upper() == "IBM":
if not flag:
raise AssertionError("IBM MQ Client is not installed.Please install IBM MQ Client to work with IBM MQ Messages")
session = self._ibm_queue_configure(host, port, queue_manager, channel)
elif queue_type.upper() == "ACTIVE":
session = self._active_queue_configure(host, port, user_name, password)
else:
raise AssertionError("Passed queue type is {} is not supported !!".format(queue_type))
if not session:
raise AssertionError("Connection is not established.")
return session
def _put_message_in_ibm_queue(self, session, queue_name, message):
""" It is Used to put the messages in IBM MQ queue. It takes the session instance to perform action."""
queue_name = str(queue_name)
queue = pymqi.Queue(session, queue_name)
queue.put(message)
queue.close()
def _put_message_in_active_queue(self, session, queue_name, message, headers=None):
""" It is Used to put the messages in Active MQ queue.It takes the session instance to perform action."""
queue_name = str(queue_name)
if headers != None:
headers = eval(str(headers))
session.send(queue_name, message.encode(), headers)
    def put_message_to_queue(self, session, queue_type, queue_name, inputfilepath, headers=None):
"""|Usage| To put message to the queue.
|Arguments|
'session' = the return value of the "Connect To Message Queue" keyword.
It uses the connection reference to put message to the queue.
'queue_type' = It takes "IBM" or "Active" as argument in order to specify the queue type.
'queue_name' = Name of IBM MQ or Active MQ queue
'inputfilepath' = file path of the message
Example:
${session} Connect To Message Queue IBM ${host} ${port} queue_manager=${queue_manager} channel=${channel}
Put Message To Queue ${session} IBM ${queue_name} ${inputfilepath}
"""
if inputfilepath:
if not os.path.exists(str(inputfilepath)):
                raise AssertionError('File {} does not exist'.format(inputfilepath))
with open(inputfilepath, 'r') as f:
message = f.read()
try:
if queue_type.upper() == "IBM":
self._put_message_in_ibm_queue(session, queue_name, message)
elif queue_type.upper() == "ACTIVE":
self._put_message_in_active_queue(session, queue_name, message, headers)
else:
raise AssertionError("Passed queue type is {} is not supported !!".format(queue_type))
except Exception as e:
raise AssertionError("Exception : {}".format(e))
def get_message_from_queue(self, queue_type, queue_name, session, uniquemessageid=None, outputfilepath=None):
"""|Usage| To Get Message From Queue
== Arguments ==
'queue_type' = It takes "IBM" or "Active" as argument in order to specify the queue type.
        'queue_name' = Name of the queue from which the message will be retrieved
'session' = The return value of the "Connect To Message Queue" keyword. It uses the connection reference to get message from the queue.
        'uniquemessageid' [Optional] = A unique message id used to retrieve a particular message from the queue. If not provided, the first message from the queue is returned.
        'outputfilepath' [Optional] = The file path to which a retrieved message from the queue can be saved.
== Example Test Cases ==
1. To Get The First Message From Active MQ Queue:
        |${session} | Connect To Message Queue | ACTIVE | ${host} | ${port} | user_name=${user_name} | password=${password}
|${message} | Get Message From Queue | ACTIVE | SAMPLE.Q | ${session}
2. To Get The First Message From IBM MQ Queue:
|${session} | Connect To Message Queue | IBM | ${host} | ${port} | queue_manager=${queue_manager} | channel=${channel}
|${message} | Get Message From Queue | IBM | ADVISING | ${session}
        3. To Get The Particular Message From Active MQ Queue Using UniqueMessageID:
        |${session} | Connect To Message Queue | ACTIVE | ${host} | ${port} | user_name=${user_name} | password=${password}
|${message} | Get Message From Queue | ACTIVE | SAMPLE.Q | ${session} | uniquemessageid="RefID = 00001"
        4. To Get The Particular Message From IBM MQ Queue Using UniqueMessageID:
|${session} | Connect To Message Queue | IBM | ${host} | ${port} | queue_manager=${queue_manager} | channel=${channel}
|${message} | Get Message From Queue | IBM | ADVISING | ${session} | uniquemessageid="RefID = 00001"
Note: If "uniquemessageid" not provided as an argument to this keyword, it returns the first message from the queue.
"""
if outputfilepath:
if not os.path.exists(str(outputfilepath)):
                raise AssertionError('File {} does not exist'.format(outputfilepath))
if queue_type.upper() == "IBM":
message = self._get_message_from_ibm_queue(queue_name, session, uniquemessageid)
elif queue_type.upper() == "ACTIVE":
message = self._get_message_from_active_mq_queue(queue_name, session, uniquemessageid)
else:
raise AssertionError("Passed queue type is {} is not supported !!".format(queue_type))
if outputfilepath:
with open(outputfilepath,'w') as myfile:
myfile.write(str(message))
myfile.close()
print("Message content has been saved to '{}' file".format(outputfilepath))
return message
def _get_message_from_ibm_queue(self, queue_name, session, uniquemessageid=None):
""" It is Used to get the messages from IBM MQ queue. It takes the session instance to perform action."""
if uniquemessageid:
queue = pymqi.Queue(session, str(queue_name),pymqi.CMQC.MQOO_FAIL_IF_QUIESCING | pymqi.CMQC.MQOO_INPUT_SHARED | pymqi.CMQC.MQOO_BROWSE)
current_options = pymqi.GMO()
current_options.Options = pymqi.CMQC.MQGMO_BROWSE_NEXT
while True:
try:
md = pymqi.MD()
message = queue.get(None, md, current_options)
find = str(message).find(str(uniquemessageid))
if find != -1:
break
except pymqi.MQMIError as e:
if e.comp == pymqi.CMQC.MQCC_FAILED and e.reason == pymqi.CMQC.MQRC_NO_MSG_AVAILABLE:
raise AssertionError("No Message With '{0}' Unique Message ID Found in '{1}' Queue".format(uniquemessageid,queue_name))
queue.close()
else:
queue = pymqi.Queue(session, str(queue_name),pymqi.CMQC.MQOO_FAIL_IF_QUIESCING | pymqi.CMQC.MQOO_INPUT_SHARED | pymqi.CMQC.MQOO_BROWSE)
current_options = pymqi.GMO()
current_options.Options = pymqi.CMQC.MQGMO_BROWSE_FIRST
try:
md = pymqi.MD()
message = queue.get(None, md, current_options)
except pymqi.MQMIError as e:
if e.comp == pymqi.CMQC.MQCC_FAILED and e.reason == pymqi.CMQC.MQRC_NO_MSG_AVAILABLE:
raise AssertionError("'{}' queue is empty!".format(str(queue_name)))
finally:
queue.close()
print("Message content: {}".format(message))
return message
def _get_message_from_active_mq_queue(self, queue_name, session, uniquemessageid=None):
""" It is Used to get the messages from Active MQ queue.It takes the session instance to perform action."""
token = session.subscribe(queue_name, {"ack": "client-individual", "id": "0"})
if not (session.canRead(timeout=2)):
raise AssertionError("'{}' queue is empty!".format(queue_name))
if uniquemessageid:
find = -1
while(session.canRead(timeout=2)):
frame = session.receiveFrame()
find = str(frame.body).find(uniquemessageid)
if find != -1:
break
if find == -1:
raise AssertionError("No Message With '{0}' Unique Message ID Found in '{1}' Queue".format(uniquemessageid,queue_name))
else:
frame = session.receiveFrame()
message = frame.body
session.unsubscribe(token)
print("Message content : {}".format(message))
return message
def _clear_IBM_queue(self, session, queue_name):
""" It used to clear the IBM MQ queue.This keyword pops all the message in the IBM Queue."""
queue_name = str(queue_name)
while True:
queue = pymqi.Queue(session, queue_name)
try:
queue.get()
except Exception as e:
if "FAILED: MQRC_UNKNOWN_OBJECT_NAME" in str(e):
raise AssertionError("Given queue_name = {} is not present !!!".format(queue_name))
print("All messages for queue {} have been cleared").format(queue_name)
break
def _clear_active_mq(self, session, queue_name):
""" It used to clear the Active MQ queue.This keyword pops all the message in the Active Queue."""
queue_name = str(queue_name)
token = session.subscribe(queue_name, {"ack": "client-individual", "id": "0"})
while (session.canRead(timeout=2)):
frame = session.receiveFrame()
session.ack(frame)
else:
print("All messages for queue {} have been cleared").format(queue_name)
session.unsubscribe(token)
def clear_queue(self, session, queue_type, queue_name):
"""|Usage| To clear all messages from IBM or Active MQ
|Arguments|
'session' = the return value of the "Connect To Message Queue" keyword.
It uses the connection reference to put message to the queue.
'queue_type' = It takes "IBM" or "Active" as argument in order to specify the queue type.
'queue_name' = Name of IBM MQ or Active MQ queue
Example:
Clear Queue ${var} Active sample.q
"""
if queue_type.upper() == "IBM":
self._clear_IBM_queue(session,queue_name)
elif queue_type.upper() == "ACTIVE":
self._clear_active_mq(session,queue_name)
else:
raise AssertionError("Passed queue type is {} is not supported !!".format(queue_type))
def disconnect_message_queue(self, session):
"""|Usage| To Disconnect Active MQ or IBM MQ
|Arguments|
'session' = the return value of the "Connect To Message Queue" keyword.
It uses the connection reference to put message to the queue.
Example:
Disconnect From Queue ${var}
"""
session.disconnect()
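# --- Illustrative usage sketch (editor's addition, not part of the library) ---
# Driving the keyword library directly from Python; the broker address,
# credentials, queue name and message file below are placeholders.
if __name__ == '__main__':
    mq = MessageQueue()
    session = mq.connect_to_message_queue('ACTIVE', 'localhost', 61613,
                                          user_name='admin', password='admin')
    mq.put_message_to_queue(session, 'ACTIVE', '/queue/sample.q', 'message.xml')
    print(mq.get_message_from_queue('ACTIVE', '/queue/sample.q', session))
    mq.disconnect_message_queue(session)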
| [
"[email protected]"
] | |
217faec137550783954ec982386548f78c4d0443 | b483c598fa375e9af02348960f210b9f482bd655 | /cursoemvideo/desafios/Desafio037.py | 7c82f9d600b1128328fb5824c6e2d85828beca8b | [
"MIT"
] | permissive | brunofonsousa/python | 6f766d08bf193180ea9a4903cb93ffd167db588d | 8f2f26c77015c0baaa76174e004406b4115272c7 | refs/heads/master | 2022-09-30T14:58:01.080749 | 2020-06-08T09:55:35 | 2020-06-08T09:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | '''
Write a Python program that reads any integer and asks the user to choose
the conversion base: 1 for binary, 2 for octal
and 3 for hexadecimal.
'''
num = int(input('Enter an integer: '))
print('Choose one of the bases for conversion: ')
print('[ 1 ] convert to BINARY')
print('[ 2 ] convert to OCTAL')
print('[ 3 ] convert to HEXADECIMAL')
opcao = int(input('Your option: '))
if opcao == 1:
    print('{} converted to BINARY equals {}'.format(num, bin(num)[2:]))
elif opcao == 2:
    print('{} converted to OCTAL equals {}'.format(num, oct(num)[2:]))
elif opcao == 3:
    print('{} converted to HEXADECIMAL equals {}'.format(num, hex(num)[2:]))
else:
    print('Invalid option, please try again!')
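# Worked example (editor's note): for num = 10 the outputs are binary '1010',
# octal '12' and hexadecimal 'a'; the [2:] slice strips the '0b'/'0o'/'0x' prefix.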
| [
"[email protected]"
] | |
2c3ae2bb82f2ad88bc2dc1a2824b1a4620858ef4 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/NETFINITYMANAGER-MIB.py | 4b221449307734e8d201c53c2843e368aaded33a | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 13,378 | py | #
# PySNMP MIB module NETFINITYMANAGER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETFINITYMANAGER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:08:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
dmiMibs, = mibBuilder.importSymbols("NETFINITYSERVICES-MIB", "dmiMibs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, IpAddress, TimeTicks, Gauge32, Integer32, iso, Counter32, ObjectIdentity, Counter64, Bits, enterprises, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "IpAddress", "TimeTicks", "Gauge32", "Integer32", "iso", "Counter32", "ObjectIdentity", "Counter64", "Bits", "enterprises", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class DmiInteger(Integer32):
pass
class DmiOctetstring(OctetString):
pass
class DmiDisplaystring(DisplayString):
pass
class DmiDate(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(28, 28)
fixedLength = 28
class DmiComponentIndex(Integer32):
pass
netFinityManagerMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3))
dmtfGroups2 = MibIdentifier((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1))
tComponentid2 = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1), )
if mibBuilder.loadTexts: tComponentid2.setStatus('mandatory')
eComponentid2 = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"))
if mibBuilder.loadTexts: eComponentid2.setStatus('mandatory')
a1Manufacturer = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 1), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Manufacturer.setStatus('mandatory')
a1Product = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Product.setStatus('mandatory')
a1Version = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 3), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Version.setStatus('mandatory')
a1SerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1SerialNumber.setStatus('mandatory')
tRemoteSystems = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11), )
if mibBuilder.loadTexts: tRemoteSystems.setStatus('mandatory')
eRemoteSystems = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a11SystemTag"))
if mibBuilder.loadTexts: eRemoteSystems.setStatus('mandatory')
a11SystemTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemTag.setStatus('mandatory')
a11SystemName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemName.setStatus('mandatory')
a11ProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 3), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11ProtocolName.setStatus('mandatory')
a11NetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11NetworkAddress.setStatus('mandatory')
a11SystemState = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("vOff-line", 0), ("vOn-line", 1), ("vOff-lineWithErrorCondition", 2), ("vOn-lineWithErrorCondition", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemState.setStatus('mandatory')
a11Server = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vFalse", 0), ("vTrue", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11Server.setStatus('mandatory')
a11Manager = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vFalse", 0), ("vTrue", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11Manager.setStatus('mandatory')
a11OperatingSystemType = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11))).clone(namedValues=NamedValues(("vUnknown", 0), ("vIbmOs2", 1), ("vMicrosoftWindows", 2), ("vNovellNetware", 3), ("vMicrosoftWindowsNt", 4), ("vIbmAix", 5), ("vBanyanVines", 6), ("vIbmPc-dos", 7), ("vScoXenix", 8), ("vUnixSystemV", 9), ("vMicrosoftWindows95", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OperatingSystemType.setStatus('mandatory')
a11OsMajorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 9), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OsMajorVersion.setStatus('mandatory')
a11OsMinorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 10), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OsMinorVersion.setStatus('mandatory')
a11SystemModelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 11), DmiOctetstring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemModelId.setStatus('mandatory')
a11SystemModelName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 12), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemModelName.setStatus('mandatory')
a11SystemOn_lineNotify = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a11SystemOn-lineNotify").setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemOn_lineNotify.setStatus('mandatory')
a11SystemOff_lineNotify = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a11SystemOff-lineNotify").setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemOff_lineNotify.setStatus('mandatory')
a11PresenceCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 15), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11PresenceCheckInterval.setStatus('mandatory')
a11MacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 16), DmiOctetstring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11MacAddress.setStatus('mandatory')
tRemoteSystemGroups = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12), )
if mibBuilder.loadTexts: tRemoteSystemGroups.setStatus('mandatory')
eRemoteSystemGroups = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a12GroupTag"))
if mibBuilder.loadTexts: eRemoteSystemGroups.setStatus('mandatory')
a12GroupTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12GroupTag.setStatus('mandatory')
a12GroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12GroupName.setStatus('mandatory')
a12RequiredKeywordsCombination = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("vAllKeywordsMustMatch", 0), ("vAnyOfTheKeywordsMayMatch", 1), ("vExactlyOneOfTheKeywordsMustMatch", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12RequiredKeywordsCombination.setStatus('mandatory')
a12Keywords = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12Keywords.setStatus('mandatory')
a12SystemOn_lineNotifyDefault = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a12SystemOn-lineNotifyDefault").setMaxAccess("readonly")
if mibBuilder.loadTexts: a12SystemOn_lineNotifyDefault.setStatus('mandatory')
a12SystemOff_lineNotifyDefault = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a12SystemOff-lineNotifyDefault").setMaxAccess("readonly")
if mibBuilder.loadTexts: a12SystemOff_lineNotifyDefault.setStatus('mandatory')
a12DefaultPresenceCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 7), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12DefaultPresenceCheckInterval.setStatus('mandatory')
a12DiscoveryStartFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 8), DmiInteger()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a12DiscoveryStartFlag.setStatus('mandatory')
tRemoteSystemGroupMap = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13), )
if mibBuilder.loadTexts: tRemoteSystemGroupMap.setStatus('mandatory')
eRemoteSystemGroupMap = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a13SystemTag"), (0, "NETFINITYMANAGER-MIB", "a13GroupTag"))
if mibBuilder.loadTexts: eRemoteSystemGroupMap.setStatus('mandatory')
a13SystemTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a13SystemTag.setStatus('mandatory')
a13GroupTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1, 2), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a13GroupTag.setStatus('mandatory')
mibBuilder.exportSymbols("NETFINITYMANAGER-MIB", a1Manufacturer=a1Manufacturer, a11SystemTag=a11SystemTag, DmiComponentIndex=DmiComponentIndex, tRemoteSystems=tRemoteSystems, DmiDisplaystring=DmiDisplaystring, eComponentid2=eComponentid2, a12GroupName=a12GroupName, a11MacAddress=a11MacAddress, a11Server=a11Server, a11OperatingSystemType=a11OperatingSystemType, a11SystemModelId=a11SystemModelId, a11PresenceCheckInterval=a11PresenceCheckInterval, a12Keywords=a12Keywords, a11Manager=a11Manager, eRemoteSystemGroupMap=eRemoteSystemGroupMap, a13SystemTag=a13SystemTag, tComponentid2=tComponentid2, a11SystemName=a11SystemName, a11SystemOn_lineNotify=a11SystemOn_lineNotify, a11OsMajorVersion=a11OsMajorVersion, tRemoteSystemGroupMap=tRemoteSystemGroupMap, a11SystemState=a11SystemState, dmtfGroups2=dmtfGroups2, a1Version=a1Version, eRemoteSystems=eRemoteSystems, a12DiscoveryStartFlag=a12DiscoveryStartFlag, netFinityManagerMIB=netFinityManagerMIB, DmiOctetstring=DmiOctetstring, a11OsMinorVersion=a11OsMinorVersion, a11SystemOff_lineNotify=a11SystemOff_lineNotify, a12SystemOff_lineNotifyDefault=a12SystemOff_lineNotifyDefault, a11ProtocolName=a11ProtocolName, a13GroupTag=a13GroupTag, DmiInteger=DmiInteger, a12RequiredKeywordsCombination=a12RequiredKeywordsCombination, a12GroupTag=a12GroupTag, DmiDate=DmiDate, a12SystemOn_lineNotifyDefault=a12SystemOn_lineNotifyDefault, tRemoteSystemGroups=tRemoteSystemGroups, a1Product=a1Product, a1SerialNumber=a1SerialNumber, a12DefaultPresenceCheckInterval=a12DefaultPresenceCheckInterval, a11NetworkAddress=a11NetworkAddress, eRemoteSystemGroups=eRemoteSystemGroups, a11SystemModelName=a11SystemModelName)
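# Editor's note (assumed usage, not part of the generated module): files like
# this are normally loaded through pysnmp's MIB builder rather than imported
# directly, e.g.:
#   from pysnmp.smi import builder
#   mib_builder = builder.MibBuilder()
#   mib_builder.loadModules('NETFINITYMANAGER-MIB')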
| [
"[email protected]"
] | |
9ed32859540a3be231e4c075717f88ae4e513b0d | c6cef87f0fc72df793b6151b3a60b60c026d9af0 | /measurements/forms.py | 41108293c5da115e1580f41279c5b5c442e8d104 | [] | no_license | rohitrajput-42/My_Map | 50ff393bd256d3bc922b2601aaad845d1a5c1094 | 22605ba8cea5f709bc6dc9f686431bd0d89d541e | refs/heads/main | 2023-06-05T15:15:02.206103 | 2021-07-01T04:26:48 | 2021-07-01T04:26:48 | 381,810,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from django import forms
from django.forms.models import ModelForm
from .models import Measurement
class MeasurementModelForm(forms.ModelForm):
class Meta:
model = Measurement
fields = ('destination',)
widgets = {
'destination': forms.TextInput(attrs = {'placeholder': 'Enter your destination'}),
} | [
"[email protected]"
] | |
58526088e3fb0f233400ca6bb5eefe05cf3affce | aeeaf40350a652d96a392010071df8a486c6e79f | /archive/python/Python/binary_tree/236.lowest-common-ancestor-of-a-binary-tree.0.py | 439011598294d17040086a1c52c0b366b1525b26 | [
"MIT"
] | permissive | linfengzhou/LeetCode | 11e6c12ce43cf0053d86437b369a2337e6009be3 | cb2ed3524431aea2b204fe66797f9850bbe506a9 | refs/heads/master | 2021-01-23T19:34:37.016755 | 2018-04-30T20:44:40 | 2018-04-30T20:44:40 | 53,916,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# what if p or q not in the tree
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
lca, is_p, is_q = self.helper(root, p, q)
if is_p and is_q:
return lca
else:
return None
def helper(self, root, p, q):
if not root:
return root, False, False
left_lca, left_p, left_q = self.helper(root.left, p, q)
right_lca, right_p, right_q = self.helper(root.right, p, q)
is_p = left_p or right_p or root == p
is_q = left_q or right_q or root == q
        # propagate an LCA already found deeper in either subtree
        if left_lca:
            return left_lca, is_p, is_q
        if right_lca:
            return right_lca, is_p, is_q
        # the lowest node whose subtree contains both p and q is their LCA
        if is_p and is_q:
            return root, is_p, is_q
        return None, is_p, is_q
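# Minimal smoke test (editor's sketch); TreeNode mirrors the commented stub above.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(3)
    root.left, root.right = TreeNode(5), TreeNode(1)
    assert Solution().lowestCommonAncestor(root, root.left, root.right) is root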
| [
"[email protected]"
] | |
bbbc8ede49f033fcaa8cfe5937eff44ec53222a0 | f9462f3768fa058bd895a56b151da694664ce588 | /examples/201_visualize_model_rhino.py | acacdc3daa62bc676a7299833e03bf0c92d3c5dd | [
"MIT"
] | permissive | ryanpennings/workshop_swinburne_2021 | 16a9a7e2c7134832f8f714b7b430376f1b67dfb2 | 820ef4e36e73ac950f40e1846739087180af2e1c | refs/heads/main | 2023-05-31T16:35:16.535310 | 2021-06-17T06:24:51 | 2021-06-17T06:24:51 | 377,373,107 | 0 | 0 | MIT | 2021-06-17T06:24:51 | 2021-06-16T04:45:02 | null | UTF-8 | Python | false | false | 256 | py | from compas_rhino.artists import RobotModelArtist
from compas.robots import RobotModel
model = RobotModel.from_urdf_file('models/05_with_colors.urdf')
artist = RobotModelArtist(model, layer='COMPAS::Robot Viz')
artist.clear_layer()
artist.draw_visual()
| [
"[email protected]"
] | |
a1e209bed0477352863b8d389058b400cebac1b3 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/ui/shared/info/panels/__init__.py | a211e9aae9803d9b8c33f8e2fe8f3a05f3e5139b | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 143 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\info\panels\__init__.py
pass
| [
"[email protected]"
] | |
17092c071cf752cfa5953bed7125cf311ef11293 | a9b6243dad4b86f75401a6ee0d6e8505fa57aa83 | /test.py | f02e47c3ec23bca07523acac72bf183601844ae1 | [] | no_license | minji-o-j/system-for-visually-impaired | b4503078b9a74896a3786683f9f14485466bc7d5 | 748b9cdf97259d7f3d0cd5d15167ad5a629f6089 | refs/heads/master | 2023-02-06T21:14:37.840155 | 2020-12-30T11:03:49 | 2020-12-30T11:03:49 | 256,681,461 | 7 | 5 | null | 2020-12-06T08:27:27 | 2020-04-18T06:02:48 | Jupyter Notebook | UTF-8 | Python | false | false | 4,461 | py | from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
import os
import sys
import time
import datetime
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
import torch.optim as optim
def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size, batch_size):
model.eval()
# Get dataloader
dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn, pin_memory=True
)
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
labels = []
sample_metrics = [] # List of tuples (TP, confs, pred)
for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
# Extract labels
labels += targets[:, 1].tolist()
# Rescale target
targets[:, 2:] = xywh2xyxy(targets[:, 2:])
targets[:, 2:] *= img_size
imgs = Variable(imgs.type(Tensor), requires_grad=False)
with torch.no_grad():
outputs = model(imgs)
outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)
sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)
# Concatenate sample statistics
true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)
return precision, recall, AP, f1, ap_class
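# Editor's note: `evaluate` can also be called directly from another script;
# the path and thresholds below are placeholders mirroring the CLI defaults
# defined in the __main__ block underneath.
#   precision, recall, AP, f1, ap_class = evaluate(
#       model, path="data/custom/valid.txt", iou_thres=0.5, conf_thres=0.5,
#       nms_thres=0.4, img_size=416, batch_size=8)
#   print(AP.mean())  # mAP over all classes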
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=8, help="size of each image batch")
parser.add_argument("--model_def", type=str, default="config/yolov4.cfg", help="path to model definition file")
parser.add_argument("--data_config", type=str, default="config/coco.data", help="path to data config file")
parser.add_argument("--weights_path", type=str, default="weights/yolov4.weights", help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
parser.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
parser.add_argument("--use_custom", type=bool, default=False, help="trained weight")
opt = parser.parse_args()
# Use custom weight
if opt.use_custom:
opt.model_def = 'config/yolov4-custom.cfg'
opt.class_path = 'data/custom/classes.names'
opt.data_config = 'config/custom.data'
ls = sorted(os.listdir('./weights/custom'))
if len(ls) > 0:
opt.weights_path = 'weights/custom/'+ls[-1]
print(opt)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_config = parse_data_config(opt.data_config)
valid_path = data_config["valid"]
class_names = load_classes(data_config["names"])
# Initiate model
model = Darknet(opt.model_def).to(device)
if opt.weights_path.endswith(".weights"):
# Load darknet weights
model.load_darknet_weights(opt.weights_path)
else:
# Load checkpoint weights
model.load_state_dict(torch.load(opt.weights_path))
print("Compute mAP...")
precision, recall, AP, f1, ap_class = evaluate(
model,
path=valid_path,
iou_thres=opt.iou_thres,
conf_thres=opt.conf_thres,
nms_thres=opt.nms_thres,
img_size=opt.img_size,
batch_size=opt.batch_size,
)
print("Average Precisions:")
for i, c in enumerate(ap_class):
print(f"+ Class '{c}' ({class_names[c]}) - AP: {AP[i]}")
print(f"mAP: {AP.mean()}")
| [
"[email protected]"
] | |
212e3c7f9eed689458556700d2b64f75b0d4b956 | 425aba1a7c134c78e8d5710890d426d7d6b0bd45 | /tests/settings.py | f5b9b7ceb97006a73fcfd1b1add3b90442fa9338 | [
"BSD-3-Clause"
] | permissive | curiousTauseef/django-multiple-form-view | bd1f4558879382e9ae1b6c173ecbb2102350c12e | de13d124d913f12aa01aeeb6ea2f7b1768cd93cb | refs/heads/master | 2021-08-19T22:28:12.921932 | 2017-11-27T15:58:16 | 2017-11-27T15:58:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'not-a-secret'
DEBUG = True
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'testapp',
]
MIDDLEWARE = []
ROOT_URLCONF = 'testapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'TEST': {
'NAME': ':memory:',
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
8c3b83d33ccb9923cd38f392fa462b54697237c9 | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/test/test_com_day_cq_mailer_impl_cq_mailing_service_properties.py | 224a540ecb8cd7059da3fee3c5ac2c652c422627 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 1,257 | py | # coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_day_cq_mailer_impl_cq_mailing_service_properties import ComDayCqMailerImplCqMailingServiceProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComDayCqMailerImplCqMailingServiceProperties(unittest.TestCase):
"""ComDayCqMailerImplCqMailingServiceProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComDayCqMailerImplCqMailingServiceProperties(self):
"""Test ComDayCqMailerImplCqMailingServiceProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_day_cq_mailer_impl_cq_mailing_service_properties.ComDayCqMailerImplCqMailingServiceProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
218dc82bd8eed4a2d5859956054fc24b06364e5a | f8b5aafac15f408a48fabf853a918015c927e6fe | /backup/virtualenv/venv27/lib/python2.7/site-packages/heatclient/tests/test_events.py | e94b7eb9702afaf92195edca367bae7b1ce1b5b4 | [] | no_license | to30/tmp | bda1ac0ca3fc61e96c2a1c491367b698d7e97937 | ec809683970af6787728c2c41f161f416155982a | refs/heads/master | 2021-01-01T04:25:52.040770 | 2016-05-13T16:34:59 | 2016-05-13T16:34:59 | 58,756,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,855 | py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import utils
from heatclient.v1 import events
import mock
from mox3 import mox
import testtools
class EventManagerTest(testtools.TestCase):
def setUp(self):
super(EventManagerTest, self).setUp()
self.m = mox.Mox()
self.addCleanup(self.m.UnsetStubs)
self.addCleanup(self.m.ResetAll)
def test_list_event(self):
stack_id = 'teststack',
resource_name = 'testresource'
manager = events.EventManager(None)
self.m.StubOutWithMock(manager, '_resolve_stack_id')
manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
self.m.ReplayAll()
manager._list = mock.MagicMock()
manager.list(stack_id, resource_name)
# Make sure url is correct.
manager._list.assert_called_once_with('/stacks/teststack%2Fabcd1234/'
'resources/testresource/events',
"events")
def test_list_event_with_unicode_resource_name(self):
stack_id = 'teststack',
resource_name = u'\u5de5\u4f5c'
manager = events.EventManager(None)
self.m.StubOutWithMock(manager, '_resolve_stack_id')
manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
self.m.ReplayAll()
manager._list = mock.MagicMock()
manager.list(stack_id, resource_name)
# Make sure url is correct.
manager._list.assert_called_once_with('/stacks/teststack%2Fabcd1234/'
'resources/%E5%B7%A5%E4%BD%9C/'
'events', "events")
def test_list_event_with_none_resource_name(self):
stack_id = 'teststack',
manager = events.EventManager(None)
manager._list = mock.MagicMock()
manager.list(stack_id)
# Make sure url is correct.
manager._list.assert_called_once_with('/stacks/teststack/'
'events', "events")
def test_list_event_with_kwargs(self):
stack_id = 'teststack',
resource_name = 'testresource'
kwargs = {'limit': 2,
'marker': '6d6935f4-0ae5',
'filters': {
'resource_action': 'CREATE',
'resource_status': 'COMPLETE'
}}
manager = events.EventManager(None)
self.m.StubOutWithMock(manager, '_resolve_stack_id')
manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
self.m.ReplayAll()
manager._list = mock.MagicMock()
manager.list(stack_id, resource_name, **kwargs)
# Make sure url is correct.
self.assertEqual(1, manager._list.call_count)
args = manager._list.call_args
self.assertEqual(2, len(args[0]))
url, param = args[0]
self.assertEqual("events", param)
base_url, query_params = utils.parse_query_url(url)
expected_base_url = ('/stacks/teststack%2Fabcd1234/'
'resources/testresource/events')
self.assertEqual(expected_base_url, base_url)
expected_query_dict = {'marker': ['6d6935f4-0ae5'],
'limit': ['2'],
'resource_action': ['CREATE'],
'resource_status': ['COMPLETE']}
self.assertEqual(expected_query_dict, query_params)
def test_get_event(self):
fields = {'stack_id': 'teststack',
'resource_name': 'testresource',
'event_id': '1'}
class FakeAPI(object):
"""Fake API and ensure request url is correct."""
def json_request(self, *args, **kwargs):
expect = ('GET',
'/stacks/teststack%2Fabcd1234/resources'
'/testresource/events/1')
assert args == expect
return {}, {'event': []}
manager = events.EventManager(FakeAPI())
with mock.patch('heatclient.v1.events.Event'):
self.m.StubOutWithMock(manager, '_resolve_stack_id')
manager._resolve_stack_id('teststack').AndReturn(
'teststack/abcd1234')
self.m.ReplayAll()
manager.get(**fields)
def test_get_event_with_unicode_resource_name(self):
fields = {'stack_id': 'teststack',
'resource_name': u'\u5de5\u4f5c',
'event_id': '1'}
class FakeAPI(object):
"""Fake API and ensure request url is correct."""
def json_request(self, *args, **kwargs):
expect = ('GET',
'/stacks/teststack%2Fabcd1234/resources'
'/%E5%B7%A5%E4%BD%9C/events/1')
assert args == expect
return {}, {'event': []}
manager = events.EventManager(FakeAPI())
with mock.patch('heatclient.v1.events.Event'):
self.m.StubOutWithMock(manager, '_resolve_stack_id')
manager._resolve_stack_id('teststack').AndReturn(
'teststack/abcd1234')
self.m.ReplayAll()
manager.get(**fields)
| [
"[email protected]"
] | |
9b3fc0aadb7a94e0c3921ce7159d230d74acf942 | c89e59b4d018e8a2d7dc0dbc3bb7a3768024f849 | /before2021/python/190922/2_D4_1824_혁진이의프로그램검증.py | 35739cf8ff47cfc4a6a46240d6cabeed99ea85a6 | [] | no_license | leeiopd/algorithm | ff32103a43e467a5a091257cc07cf35365ecbf91 | e41647d3918c3099110d97f455c5ebf9a38d571e | refs/heads/master | 2023-03-08T23:46:34.919991 | 2023-02-22T09:39:46 | 2023-02-22T09:39:46 | 166,131,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,328 | py | '''
The Samsung Collegiate Programming Cup is a collegiate programming festival that Samsung holds every year.
The winner of the festival gets to join Samsung, while everyone else falls into the storm of space-time.
Participants may write their entry in whatever language they prefer.
Hyukjin entered the contest using Hyuksembly, a language he developed himself.
Hyukjin, who badly wants to win, would like to verify whether the program he wrote can eventually stop.
Hyuksembly works as follows:
- The commands the program executes are given as characters laid out in a 2D grid. The following is an example Hyuksembly program.
6>--v.
.^--_@
- The program executes the command at the current position, then moves to the next character along the current direction.
The starting position is the top-left character, and the initial direction is right.
- While processing commands the direction may change to up, down, left or right.
If the next move would step outside the 2D grid, the pointer wraps around to the opposite side.
For example, moving right from the rightmost cell of the first row lands on the leftmost cell of the first row.
Hyuksembly has a single memory cell that stores one integer between 0 and 15. Initially it stores 0.
The available commands are:
Character  Command
<  Change the direction to left.
>  Change the direction to right.
^  Change the direction to up.
v  Change the direction to down.
_  If the memory holds 0, change the direction to right; otherwise to left.
|  If the memory holds 0, change the direction to down; otherwise to up.
?  Change the direction to one of up/down/left/right at random, each with equal probability.
.  Do nothing.
@  Stop the program.
0~9  Store the value the character represents in memory.
+  Add 1 to the value in memory. If the value was 15 before adding, it becomes 0.
-  Subtract 1 from the value in memory. If the value was 0 before subtracting, it becomes 15.
[Input]
The first line contains the number of test cases T.
The first line of each test case contains two integers R, C (2 ≤ R, C ≤ 20) separated by a space,
meaning the program consists of R rows and C columns of characters.
Each of the following R lines contains a string of C characters, drawn from the characters listed above.
[Output]
For each test case print '#x' (x is the test case number, starting from 1), then
print "YES" if the given program can stop, and "NO" otherwise.
'''
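# Approach sketch: simulate the machine over states (x, y, memory, direction).
# The state space is bounded by R * C * 16 * 4 <= 20 * 20 * 16 * 4 = 25,600
# states, so a program that never reaches '@' must eventually repeat a state
# and loop forever. The caps used below (per-cell/per-direction visit count
# <= 20, stack size < 1000) are heuristics standing in for an exact
# visited-state check.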
import sys
sys.stdin = open('1824.txt')
T = int(input())
dx = [1, -1, 0, 0]
dy = [0, 0, -1, 1]
for case in range(1, T+1):
R, C = map(int, input().split())
maps = []
for r in range(R):
maps.append(input())
flag = 0
for y in range(R):
if '@' in maps[y]:
flag = 1
if not flag:
print('#{} NO'.format(case))
else:
tmp = [[0, 0, 0, 0]]
check = [[[0, 0, 0, 0] for c in range(C)] for r in range(R)]
result = 'NO'
while tmp:
if len(tmp) >= 1000:
break
x, y, memory, arrow = map(int, tmp.pop())
check[y][x][arrow] += 1
if maps[y][x] == '<':
arrow = 1
X = x + dx[arrow]
X %= C
if check[y][X][arrow] <= 20:
tmp.append([X, y, memory, arrow])
elif maps[y][x] == '>':
arrow = 0
X = x + dx[arrow]
X %= C
if check[y][X][arrow] <= 20:
tmp.append([X, y, memory, arrow])
elif maps[y][x] == '^':
arrow = 2
Y = y + dy[arrow]
Y %= R
if check[Y][x][arrow] <= 20:
tmp.append([x, Y, memory, arrow])
elif maps[y][x] == 'v':
arrow = 3
Y = y + dy[arrow]
Y %= R
if check[Y][x][arrow] <= 20:
tmp.append([x, Y, memory, arrow])
elif maps[y][x] == '_':
if memory == 0:
arrow = 0
else:
arrow = 1
X = x + dx[arrow]
X %= C
if check[y][X][arrow] <= 20:
tmp.append([X, y, memory, arrow])
elif maps[y][x] == '|':
if memory == 0:
arrow = 3
else:
arrow = 2
Y = y + dy[arrow]
Y %= R
if check[Y][x][arrow] <= 20:
tmp.append([x, Y, memory, arrow])
elif maps[y][x] == '?':
for i in range(4):
X = x + dx[i]
Y = y + dy[i]
X %= C
Y %= R
                    if check[Y][X][i] <= 20:
tmp.append([X, Y, memory, i])
elif maps[y][x] == '.':
X = x + dx[arrow]
Y = y + dy[arrow]
X %= C
Y %= R
if check[Y][X][arrow] <= 20:
tmp.append([X, Y, memory, arrow])
elif maps[y][x] == '@':
result = 'YES'
break
elif maps[y][x] == '+':
if memory == 15:
memory = 0
else:
memory += 1
X = x + dx[arrow]
Y = y + dy[arrow]
X %= C
Y %= R
if check[Y][X][arrow] <= 20:
tmp.append([X, Y, memory, arrow])
elif maps[y][x] == '-':
if memory == 0:
memory = 15
else:
memory -= 1
X = x + dx[arrow]
Y = y + dy[arrow]
X %= C
Y %= R
if check[Y][X][arrow] <= 20:
tmp.append([X, Y, memory, arrow])
else:
memory = int(maps[y][x])
X = x + dx[arrow]
Y = y + dy[arrow]
X %= C
Y %= R
if check[Y][X][arrow] <= 20:
tmp.append([X, Y, memory, arrow])
print('#{} {}'.format(case, result))
| [
"[email protected]"
] | |
1112cad995c7f9cfcf9ea20d70efdbb239b37b36 | 2e26bf9c44f349ee308e63e067d93da654daf69d | /projecteuler/euler036.py | 878cb071e730b422a0ee722e5900f8f5af658ac0 | [
"MIT"
] | permissive | RelativeTech/PYTHON_PRAC | 034e44484d63d50a9c4295aa7e1dc63ef786fb37 | 7fa145dece99089706460466a89901e00eef9d28 | refs/heads/master | 2023-06-04T18:59:45.059403 | 2021-06-07T19:40:10 | 2021-06-07T19:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/env python
"""
Solution to Project Euler Problem 36
http://projecteuler.net/
by Apalala <[email protected]>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in
base 10 and base 2.
(Please note that the palindromic number, in either base, may not include
leading zeros.)
"""
from palindromes import is_palindrome
def to_binary(n):
return '{:b}'.format(n)
def dec_and_bin_palindromes(m):
for n in range(1, m):
if is_palindrome(n) and is_palindrome(to_binary(n)):
yield n
def sum_dec_and_bin_palindromes(m):
return sum(x for x in dec_and_bin_palindromes(m))
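# e.g. sum_dec_and_bin_palindromes(1000) == 1772
# (1 + 3 + 5 + 7 + 9 + 33 + 99 + 313 + 585 + 717)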
def test():
assert is_palindrome(585) and is_palindrome(to_binary(585))
def run():
print(sum_dec_and_bin_palindromes(10 ** 6))
if __name__ == '__main__':
test()
run()
| [
"[email protected]"
] | |
b811df8852a8bc944150eb81bb5b2b5cdb6b8914 | 5cc954e27fd924da0f6f44e7d58691d612a77f80 | /coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py | a659bb2d3a72c417e763c9e2730b9e92c562bf9e | [
"BSD-3-Clause"
] | permissive | 1duo/coremltools | e25f1a8423ec368bf1e7dabfaa36e77952578e79 | 37e619d99bf603d2cb9ea0839fa3ebe649996b0a | refs/heads/master | 2021-07-15T08:48:51.930217 | 2020-07-27T20:58:33 | 2020-07-27T20:58:33 | 203,466,876 | 2 | 0 | BSD-3-Clause | 2020-07-22T00:05:02 | 2019-08-20T22:59:50 | Python | UTF-8 | Python | false | false | 732 | py | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# Import all passes in this dir
from os.path import dirname, basename, isfile, join
import glob
excluded_files = [
"__init__.py",
"tf_passes.py",
]
modules = glob.glob(join(dirname(__file__), "*.py"))
pass_modules = [
basename(f)[:-3]
for f in modules
if isfile(f)
and basename(f)[:1] != "_" # Follow python convention to hide _* files.
and basename(f)[:4] != "test"
and basename(f) not in excluded_files
]
__all__ = pass_modules
from . import * # import everything in __all__
| [
"[email protected]"
] | |
8d1ab7912a785809077016e666d282153dd9da31 | 426aed70aa6925105f10c7fcb7b611b277bf8b84 | /python/dgl/distributed/__init__.py | 462e347f48493808c907b3d4968a92bfd18ca25f | [
"Apache-2.0"
] | permissive | hengruizhang98/dgl | 0ce7201ca7380482440f031cb8ced6ca0e8c8dc1 | 195f99362d883f8b6d131b70a7868a537e55b786 | refs/heads/master | 2023-06-10T22:21:45.835646 | 2021-04-13T12:29:43 | 2021-04-13T12:29:43 | 336,804,001 | 3 | 0 | Apache-2.0 | 2021-02-07T14:16:20 | 2021-02-07T14:16:20 | null | UTF-8 | Python | false | false | 1,054 | py | """DGL distributed module contains classes and functions to support
distributed graph neural network training and inference in a cluster of
machines.
This includes a few submodules:
* distributed data structures including distributed graph, distributed tensor
and distributed embeddings.
* distributed sampling.
* distributed workload split at runtime.
* graph partition.
"""
import os
import sys
from .dist_graph import DistGraphServer, DistGraph, node_split, edge_split
from .dist_tensor import DistTensor
from .partition import partition_graph, load_partition, load_partition_book
from .graph_partition_book import GraphPartitionBook, PartitionPolicy
from .sparse_emb import SparseAdagrad, DistEmbedding
from .rpc import *
from .rpc_server import start_server
from .rpc_client import connect_to_server
from .dist_context import initialize, exit_client
from .kvstore import KVServer, KVClient
from .server_state import ServerState
from .dist_dataloader import DistDataLoader
from .graph_services import sample_neighbors, in_subgraph, find_edges
| [
"[email protected]"
] | |
da5ef632a7cf8fee5a2e5b4c2148620481985735 | 7a0625ef4c271ed9992a736de7bb93215b7013fd | /leetcode70.py | 69d3a74dd79759a429e6c7c53286866920e7b5a1 | [] | no_license | yuchien302/LeetCode | 6576b93c005ea2275646df7b9547c22683d3b45c | c9a53ef2fc1fd1fea7377c3633689fa87601dba6 | refs/heads/master | 2020-12-11T01:42:36.980414 | 2015-12-03T02:53:51 | 2015-12-03T02:53:51 | 36,424,494 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import unittest
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
steps = []
steps.append(1)
steps.append(1)
for i in range(2, n+1):
steps.append(steps[i-1] + steps[i-2])
return steps[-1]
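# The recurrence ways(n) = ways(n-1) + ways(n-2) is the Fibonacci relation;
# e.g. climbStairs(3) == 3 via the step sequences 1+1+1, 1+2 and 2+1.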
class Test(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def test_0(self):
self.assertEqual(self.solution.climbStairs(3), 3)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
dcd43c82ed320c62fa992c05e7d3c179dd40a3ce | 5b4b1866571453f78db5b06a08ff0eda17b91b04 | /test/vanilla/Expected/AcceptanceTests/Validation/validation/_configuration.py | 058eb79f711e66e68abbc33d48dc1d80d856909a | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | koek67/autorest.azure-functions-python | ba345f1d194ca7431daab1210a0cd801d4946991 | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | refs/heads/main | 2022-12-20T13:27:56.405901 | 2020-09-30T08:23:11 | 2020-09-30T08:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
class AutoRestValidationTestConfiguration(Configuration):
"""Configuration for AutoRestValidationTest.
Note that all parameters used to create this instance are saved as instance
attributes.
:param subscription_id: Subscription ID.
:type subscription_id: str
"""
def __init__(
self,
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(AutoRestValidationTestConfiguration, self).__init__(**kwargs)
self.subscription_id = subscription_id
self.api_version = "1.0.0"
kwargs.setdefault('sdk_moniker', 'autorestvalidationtest/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
| [
"[email protected]"
] | |
8aa0b4016e82d440ca3696056f79e03f23b463be | afddf2a9a26e2a779d293bd7c76bde68769fd537 | /server/transformer.py | 8aa25c6947f1098224f3534e9fc691b713e66981 | [] | no_license | getChan/ADV | 54b5534cfa01dff49143ec0ce053fa397d4b1c6d | 263ada0fd2241abda5ae8a4a7dbb0f13bff51816 | refs/heads/master | 2020-06-21T22:28:38.155235 | 2020-01-15T07:52:21 | 2020-01-15T07:52:21 | 197,566,824 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,380 | py | import tensorflow as tf
import numpy as np
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
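# Example (hypothetical shapes): positional_encoding(50, 512) has shape
# (1, 50, 512); even feature channels carry sin terms, odd channels cos terms.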
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
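# Example: create_padding_mask(tf.constant([[7, 6, 0, 0, 1]])) yields a
# (1, 1, 1, 5) mask with 1.0 at the two zero-padded positions.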
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
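# Example: create_look_ahead_mask(3) ->
# [[0., 1., 1.],
#  [0., 0., 1.],
#  [0., 0., 0.]]  (1 marks a masked/future position)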
def scaled_dot_product_attention(q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
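# Quick sanity check with hypothetical values: a query that matches the
# second key should put (almost) all attention weight on the second value.
#
#   temp_k = tf.constant([[10, 0, 0], [0, 10, 0], [0, 0, 10], [0, 0, 10]], dtype=tf.float32)  # (4, 3)
#   temp_v = tf.constant([[1, 0], [10, 0], [100, 5], [1000, 6]], dtype=tf.float32)            # (4, 2)
#   temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32)                                      # (1, 3)
#   out, attn = scaled_dot_product_attention(temp_q, temp_k, temp_v, None)
#   # attn ~ [[0., 1., 0., 0.]]  ->  out ~ [[10., 0.]]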
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
])
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
attn_output, _ = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.mha1 = MultiHeadAttention(d_model, num_heads)
self.mha2 = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,
look_ahead_mask, padding_mask):
# enc_output.shape == (batch_size, input_seq_len, d_model)
attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2(
enc_output, enc_output, out1, padding_mask) # (batch_size, target_seq_len, d_model)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding,
self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, training, mask):
seq_len = tf.shape(x)[1]
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
maximum_position_encoding, rate=0.1):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, x, enc_output, training,
look_ahead_mask, padding_mask):
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](x, enc_output, training,
look_ahead_mask, padding_mask)
attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
attention_weights['decoder_layer{}_block2'.format(i+1)] = block2
# x.shape == (batch_size, target_seq_len, d_model)
return x, attention_weights
class Transformer(tf.keras.Model):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
target_vocab_size, pe_input, pe_target, rate=0.1):
super(Transformer, self).__init__()
self.encoder = Encoder(num_layers, d_model, num_heads, dff,
input_vocab_size, pe_input, rate)
self.decoder = Decoder(num_layers, d_model, num_heads, dff,
target_vocab_size, pe_target, rate)
self.final_layer = tf.keras.layers.Dense(target_vocab_size)
def call(self, inp, tar, training, enc_padding_mask,
look_ahead_mask, dec_padding_mask):
enc_output = self.encoder(inp, training, enc_padding_mask) # (batch_size, inp_seq_len, d_model)
# dec_output.shape == (batch_size, tar_seq_len, d_model)
dec_output, attention_weights = self.decoder(
tar, enc_output, training, look_ahead_mask, dec_padding_mask)
final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)
return final_output, attention_weights
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
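# This is the schedule from "Attention Is All You Need":
#   lrate = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. a linear warm-up for `warmup_steps` steps, then inverse-sqrt decay.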
def create_masks(inp, tar):
# Encoder padding mask
enc_padding_mask = create_padding_mask(inp)
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
dec_padding_mask = create_padding_mask(inp)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by
# the decoder.
look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
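# Minimal end-to-end sketch (hypothetical hyper-parameters, not from this project):
#
#   sample = Transformer(num_layers=2, d_model=64, num_heads=4, dff=128,
#                        input_vocab_size=8500, target_vocab_size=8000,
#                        pe_input=10000, pe_target=6000)
#   inp = tf.random.uniform((8, 38), maxval=8500, dtype=tf.int64)
#   tar = tf.random.uniform((8, 36), maxval=8000, dtype=tf.int64)
#   enc_mask, combined_mask, dec_mask = create_masks(inp, tar)
#   logits, _ = sample(inp, tar, False, enc_mask, combined_mask, dec_mask)
#   # logits.shape == (8, 36, 8000)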
| [
"[email protected]"
] | |
d1a77da2f46d84a5e90b64fd9a100a0ab730d597 | 44cf0bda12c8ca392a7000efa709adc2ac2aff42 | /26_다음_이메일_크롤링.py | cb07d2336b10220b005cf68307343167a7d1fd17 | [] | no_license | swj8905/Shinhan_Basic_Course | a3d0ebe9aa12a70f3da56cf78e7eca39c5a92238 | b59ad0eeaa4ef77bee5d41d504ecd76148dbf9f8 | refs/heads/master | 2023-06-03T07:30:24.585194 | 2021-06-29T09:32:02 | 2021-06-29T09:32:02 | 380,967,321 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | from selenium import webdriver
import time
import chromedriver_autoinstaller
chrome_path = chromedriver_autoinstaller.install()
browser = webdriver.Chrome(chrome_path)  # launch the Chrome browser
browser.get("https://logins.daum.net/accounts/signinform.do?url=https%3A%2F%2Fwww.daum.net%2F")
# Enter the user ID
id = browser.find_element_by_css_selector("input#id")
id.send_keys("talingpython")
# Enter the password
pw = browser.find_element_by_css_selector("input#inputPwd")
pw.send_keys("q1w2e3!@#")
# Click the login button
button = browser.find_element_by_css_selector("button#loginBtn")
button.click()
time.sleep(3)  # wait until login completes
# Go to the mailbox
browser.get("https://mail.daum.net/")
time.sleep(2)  # wait until the page has fully loaded
# Crawl the email subject lines
page_num = 2
while True:
title = browser.find_elements_by_css_selector("strong.tit_subject")
for i in title:
print(i.text)
    # Move to the next page
try:
next_button = browser.find_element_by_css_selector(f"span.paging_mail > a:nth-child({page_num+1})")
except:
print("======== 크롤링 끝! ===========")
break
next_button.click()
time.sleep(1)
page_num += 1
browser.close()
| [
"[email protected]"
] | |
8361438a1cee72c2f72855173f80fe01740cc2d8 | 6ab9a3229719f457e4883f8b9c5f1d4c7b349362 | /leetcode/00007_reverse_integer.py | 9db0a0e2296e3e0688f687c37ad5b73c6f853dd8 | [] | no_license | ajmarin/coding | 77c91ee760b3af34db7c45c64f90b23f6f5def16 | 8af901372ade9d3d913f69b1532df36fc9461603 | refs/heads/master | 2022-01-26T09:54:38.068385 | 2022-01-09T11:26:30 | 2022-01-09T11:26:30 | 2,166,262 | 33 | 15 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | class Solution:
def reverse(self, x: int) -> int:
negative = x < 0
x = -x if negative else x
res = 0
while x:
res = res * 10 + (x % 10)
x = x // 10
res = -res if negative else res
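        # e.g. reverse(123) -> 321, reverse(-120) -> -21; reverse(1534236469)
        # -> 0 because the reversed value 9646324351 overflows the 32-bit range.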
return res if -2**31 <= res and res <= 2**31 - 1 else 0 | [
"[email protected]"
] | |
303600ff623cfbd44383faae9c4b2835e9251e51 | 9b9512f0f63ddaca315c3eccb63ff19e884a56fa | /submitted/test.444.py | aa1d522ad0a863685e78384feb6341cde7af26ca | [] | no_license | yashton/compiler | d793cf69c668ee68928ab74af70e17c53ffadeef | 40007da3af04a7ee31380ae29e23dcfbb50a1c8d | refs/heads/master | 2020-08-02T15:43:35.181718 | 2019-09-27T23:04:37 | 2019-09-27T23:04:37 | 211,413,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5 | py | s3=1
| [
"[email protected]"
] | |
26455f7f887f002a25abe4aae6cbde984a2d68c6 | 17381d148b86fc4354d1ac0e4062a35215eafd09 | /paiza/C062.py | d97af758a417b3e2440ff2a2c8de5acb6614538a | [] | no_license | worldyone/workspace | 027a93e1f227eb1c10485f6f2082a92bd98710d5 | 1e3fa21e23d6e91268882c9e480b76c4a3c4920f | refs/heads/master | 2023-03-09T01:21:53.402440 | 2022-09-20T14:47:49 | 2022-09-20T14:47:49 | 174,147,113 | 0 | 1 | null | 2023-02-10T22:53:26 | 2019-03-06T13:07:08 | Python | UTF-8 | Python | false | false | 195 | py | cnt = 0
rcnt = 0
m = "melon"
T = int(input())
for t in range(T):
sushi = input()
rcnt -= 1
if sushi == m and rcnt <= 0:
cnt += 1
rcnt = 11
print(cnt)
| [
"[email protected]"
] | |
77ff9ab3b4d79c4946379a900be2fcb4a29ca7d3 | d4792ed2061380bb23cec6fcd1c18ea2939490ac | /examples/keras_recipes/antirectifier.py | 4798e2df9b4fff6c10b3d5e9854ee727815d9458 | [
"Apache-2.0"
] | permissive | tcglarry/keras-io | 47e813795204a4fc88512c90168d3f81d2aad8aa | 4f26aead10fd238c98d85fbb6a32679923d79c76 | refs/heads/master | 2022-06-09T09:21:50.376337 | 2020-05-07T21:53:26 | 2020-05-07T21:53:26 | 262,175,145 | 2 | 0 | Apache-2.0 | 2020-05-07T22:53:45 | 2020-05-07T22:53:44 | null | UTF-8 | Python | false | false | 3,072 | py | """
Title: Simple custom layer example: Antirectifier
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2016/01/06
Last modified: 2020/04/20
Description: Demonstration of custom layer creation.
"""
"""
## Introduction
This example shows how to create custom layers, using the Antirectifier layer
(originally proposed as a Keras example script in January 2016), an alternative
to ReLU. Instead of zeroing-out the negative part of the input, it splits the negative
and positive parts and returns the concatenation of the absolute value
of both. This avoids loss of information, at the cost of an increase in dimensionality.
To fix the dimensionality increase, we linearly combine the
features back to a space of the original size.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## The Antirectifier layer
"""
class Antirectifier(layers.Layer):
def __init__(self, initializer="he_normal", **kwargs):
super(Antirectifier, self).__init__(**kwargs)
self.initializer = keras.initializers.get(initializer)
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer=self.initializer,
name="kernel",
trainable=True,
)
def call(self, inputs):
inputs -= tf.reduce_mean(inputs, axis=-1, keepdims=True)
pos = tf.nn.relu(inputs)
neg = tf.nn.relu(-inputs)
concatenated = tf.concat([pos, neg], axis=-1)
mixed = tf.matmul(concatenated, self.kernel)
return mixed
def get_config(self):
# Implement get_config to enable serialization. This is optional.
base_config = super(Antirectifier, self).get_config()
config = {"initializer": keras.initializers.serialize(self.initializer)}
return dict(list(base_config.items()) + list(config.items()))
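"""
Shape check (hypothetical numbers): for an input of shape `(batch, 256)`, the
centered input is split into `pos` and `neg` parts of shape `(batch, 256)` each,
concatenated to `(batch, 512)`, then mapped back to `(batch, 256)` by the
trainable `(512, 256)` kernel.
"""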
"""
## Let's test-drive it on MNIST
"""
# Training parameters
batch_size = 128
num_classes = 10
epochs = 20
# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784)
x_test = x_test.reshape(-1, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Build the model
model = keras.Sequential(
[
keras.Input(shape=(784,)),
layers.Dense(256),
Antirectifier(),
layers.Dense(256),
Antirectifier(),
layers.Dropout(0.5),
layers.Dense(10),
]
)
# Compile the model
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15)
# Test the model
model.evaluate(x_test, y_test)
| [
"[email protected]"
] | |
6d1c82a577174ba581dfefe9fd5878004cf1c33d | 05ae2d651e6adbc4cfea04e2ab8a93c0a9e23aff | /core/roof/roof_props.py | d077cb45445b4acd8b5534a71210700108775101 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | thunderpwn/building_tool | 932087b2415607b41d3da374da1eb2d5e10dbb3d | 5aa4a476f93ae1b8fd8240439b3272d8cf33a0b1 | refs/heads/master | 2022-04-12T18:59:47.373480 | 2020-04-11T11:31:24 | 2020-04-11T11:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,963 | py | import bpy
from bpy.props import EnumProperty, FloatProperty, BoolProperty
class RoofProperty(bpy.types.PropertyGroup):
roof_items = [
("FLAT", "Flat", "", 0),
("GABLE", "Gable", "", 1),
("HIP", "Hip", "", 2),
]
type: EnumProperty(
name="Roof Type",
items=roof_items,
default="FLAT",
description="Type of roof to create",
)
thickness: FloatProperty(
name="Thickness",
min=0.01,
max=1000.0,
default=0.1,
description="Thickness of roof hangs",
)
outset: FloatProperty(
name="Outset",
min=0.01,
max=1.0,
default=0.1,
description="Outset of roof hangs",
)
height: FloatProperty(
name="Height",
min=0.01,
max=1000.0,
default=1,
description="Height of entire roof",
)
o_types = [("HORIZONTAL", "Horizontal", "", 0), ("VERTICAL", "Vertical", "", 1)]
orient: EnumProperty(
description="Orientation of gable", items=o_types, default="HORIZONTAL"
)
roof_hangs: BoolProperty(
name="Roof Hangs", default=True, description="Whether to add roof hangs"
)
def draw(self, context, layout):
layout.prop(self, "type", text="")
box = layout.box()
if self.type == "FLAT":
col = box.column(align=True)
col.prop(self, "thickness")
col.prop(self, "outset")
elif self.type == "GABLE":
row = box.row(align=True)
row.prop(self, "orient", expand=True)
col = box.column(align=True)
col.prop(self, "thickness")
col.prop(self, "outset")
col.prop(self, "height")
box.prop(self, "roof_hangs", toggle=True)
else:
col = box.column(align=True)
col.prop(self, "thickness")
col.prop(self, "outset")
col.prop(self, "height")
| [
"[email protected]"
] | |
2bc5c3b7f00dfa38d5f5b217488400d7f88be85b | 91ef6ab9391c7c78981b6a36a7daa47b48ac582a | /neupy/algorithms/steps/errdiff.py | fec7e685f5e29365892da5ffeead35a2bc97f9a2 | [
"MIT"
] | permissive | stczhc/neupy | f89505011d78b7ade5800b51057c4c1370dea83a | de09f5abd6667824f14806709de2afa1ac5daa09 | refs/heads/master | 2020-12-07T00:45:07.445414 | 2016-06-11T19:00:33 | 2016-06-11T19:00:33 | 58,636,539 | 0 | 1 | null | 2016-05-12T11:42:01 | 2016-05-12T11:42:01 | null | UTF-8 | Python | false | false | 2,809 | py | import theano
from theano.ifelse import ifelse
import numpy as np
from neupy.core.properties import (BoundedProperty,
ProperFractionProperty)
from .base import SingleStepConfigurable
__all__ = ('ErrDiffStepUpdate',)
class ErrDiffStepUpdate(SingleStepConfigurable):
""" This algorithm make step update base on error difference between
epochs.
Parameters
----------
update_for_smaller_error : float
        ``step`` is multiplied by this value if the error was smaller than
        in the previous epoch. Defaults to ``1.05``. Value can't be less
        than ``1``.
update_for_bigger_error : float
        ``step`` is multiplied by this value if the error was bigger than
        in the previous epoch. Defaults to ``0.7``.
error_difference : float
        How much the error has to increase compared to the previous
        epoch for the step to be reduced. Defaults
        to ``1.04``. Value can't be less than ``1``.
Warns
-----
{SingleStepConfigurable.Warns}
Examples
--------
>>> from neupy import algorithms
>>>
>>> bpnet = algorithms.GradientDescent(
... (2, 4, 1),
... step=0.1,
... verbose=False,
... addons=[algorithms.ErrDiffStepUpdate]
... )
>>>
"""
update_for_smaller_error = BoundedProperty(default=1.05, minval=1)
update_for_bigger_error = ProperFractionProperty(default=0.7)
error_difference = BoundedProperty(default=1.04, minval=1)
def init_variables(self):
self.variables.update(
last_error=theano.shared(name='last_error', value=np.nan),
previous_error=theano.shared(name='previous_error', value=np.nan),
)
super(ErrDiffStepUpdate, self).init_variables()
def init_train_updates(self):
updates = super(ErrDiffStepUpdate, self).init_train_updates()
step = self.variables.step
last_error = self.variables.last_error
previous_error = self.variables.previous_error
step_update_condition = ifelse(
last_error < previous_error,
self.update_for_smaller_error * step,
ifelse(
last_error > self.update_for_bigger_error * previous_error,
self.update_for_bigger_error * step,
step
)
)
updates.append((step, step_update_condition))
return updates
def on_epoch_start_update(self, epoch):
super(ErrDiffStepUpdate, self).on_epoch_start_update(epoch)
previous_error = self.errors.previous()
if previous_error:
last_error = self.errors.last()
self.variables.last_error.set_value(last_error)
self.variables.previous_error.set_value(previous_error)
| [
"[email protected]"
] | |
cba4cb0cc371d08ed47fec7e5feb685cd700f669 | 53c1eb6604f9e060bd6c9ce84395ab1a38d58f6f | /exercise/codewar/arara.py | cc30d2059afcc4569d4a2a045cf54c4e10176cbe | [] | no_license | turo62/exercise | 543c684ef3dfe138a5f0d6976b7ff0d9c19553f0 | 3d8d8d8a12bb3885b3015eff0032cd977c02957e | refs/heads/master | 2020-04-14T18:10:31.224244 | 2019-01-03T18:10:55 | 2019-01-03T18:10:55 | 164,008,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # Returns arara translation of a number. Wrong solution.
def count_arara(n):
    words = {1: "anane",
             2: "adak",
             3: "adak anane",
             4: "adak adak",
             5: "adak adak anane",
             6: "adak adak adak",
             7: "adak adak adak anane",
             8: "adak adak adak adak"
             }
    if n <= 8:
        return words.get(n, "")
    # Larger numbers are spoken as repeated groups of eight plus a remainder.
    groups, rest = divmod(n, 8)
    parts = [words[8]] * groups
    if rest:
        parts.append(words[rest])
    return " ".join(parts)
def main():
val = count_arara(5)
print(val)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
44c56b7babb4f9b8f08de5e547e5d173f72309fe | 16303902855d9a3b18b39e46c58567b16d907c02 | /setup.py | b9e8a6224c7cfcccdc1de8c06c6b1bfffbea86ca | [] | no_license | ConstClub/pyconst | 194f2d1f53113dec9a5178b56905c3a5e3892909 | 863fd2c0617d769f392cab4e1bf33555ee8f011c | refs/heads/master | 2021-01-24T16:52:09.458058 | 2019-10-18T08:23:29 | 2019-10-18T08:23:29 | 123,215,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # -*- coding: utf-8 -*-
from setuptools import setup
version = '1.0.0'
setup(
name='pyconstant',
version=version,
keywords='',
description='Const for Python',
long_description=open('README.rst').read(),
url='https://github.com/ConstClub/pyconst',
author='Hackathon',
author_email='[email protected]',
packages=['pyconstant'],
py_modules=[],
install_requires=[],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| [
"[email protected]"
] | |
9e416dd9d02fb2dfe9f230adaee649b8bf5ab6d7 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenMiniMembersHuobanCreateModel.py | e1ea8e7fdf3f890762367a9dcafff368584a3256 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,430 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniMembersHuobanCreateModel(object):
def __init__(self):
self._domain_account = None
self._login_id = None
self._operate_app_id = None
self._type = None
@property
def domain_account(self):
return self._domain_account
@domain_account.setter
def domain_account(self, value):
self._domain_account = value
@property
def login_id(self):
return self._login_id
@login_id.setter
def login_id(self, value):
self._login_id = value
@property
def operate_app_id(self):
return self._operate_app_id
@operate_app_id.setter
def operate_app_id(self, value):
self._operate_app_id = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.domain_account:
if hasattr(self.domain_account, 'to_alipay_dict'):
params['domain_account'] = self.domain_account.to_alipay_dict()
else:
params['domain_account'] = self.domain_account
if self.login_id:
if hasattr(self.login_id, 'to_alipay_dict'):
params['login_id'] = self.login_id.to_alipay_dict()
else:
params['login_id'] = self.login_id
if self.operate_app_id:
if hasattr(self.operate_app_id, 'to_alipay_dict'):
params['operate_app_id'] = self.operate_app_id.to_alipay_dict()
else:
params['operate_app_id'] = self.operate_app_id
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniMembersHuobanCreateModel()
if 'domain_account' in d:
o.domain_account = d['domain_account']
if 'login_id' in d:
o.login_id = d['login_id']
if 'operate_app_id' in d:
o.operate_app_id = d['operate_app_id']
if 'type' in d:
o.type = d['type']
return o
| [
"[email protected]"
] | |
ab443ab24b7f89149ef4fcfb7e441860e9d2030c | 5261e3c72259991fbdb9737c4c764eb0686860d3 | /tests/test_docs/test_cli_commands.py | 47dcb09ac7d77549be4329b7f7f7d7f6f3a6308f | [
"Apache-2.0"
] | permissive | eorituz/agents-aea | 45dfb9729718421290c71da91ac4c51f9cc6a608 | 197451196728141a27ec73fd8210c05cb74501f7 | refs/heads/main | 2023-03-24T02:40:27.132664 | 2021-03-23T14:42:58 | 2021-03-23T14:42:58 | 350,744,268 | 0 | 0 | Apache-2.0 | 2021-03-23T14:40:13 | 2021-03-23T14:32:29 | null | UTF-8 | Python | false | false | 2,632 | py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the content of cli-commands.md file."""
import pprint
import re
from pathlib import Path
from aea.cli import cli
from tests.conftest import ROOT_DIR
from tests.test_docs.helper import BaseTestMarkdownDocs
IGNORE_MATCHES = ["`-v DEBUG run`", "`config set [path] [--type TYPE]`"]
class TestCliCommands(BaseTestMarkdownDocs):
"""Test cli-commands.md documentation."""
DOC_PATH = Path(ROOT_DIR, "docs", "cli-commands.md")
def test_cli_commands(self):
"""Test CLI commands."""
commands_raw = re.compile(r"\| `.*` +\|").findall(self.doc_content)
commands_raw = [
re.compile(r"`([A-Za-z0-9\-_]+) ?.*`").search(s) for s in commands_raw
]
commands_raw = list(
filter(lambda x: x.group(0) not in IGNORE_MATCHES, commands_raw)
)
actual_commands = list(map(lambda match: match.group(1), commands_raw))
actual_commands_set = set(actual_commands)
expected_commands = set(cli.commands.keys())
# test no duplicates
assert len(actual_commands) == len(
actual_commands_set
), "Found duplicate commands in the documentation."
# test that there is no missing command
missing = expected_commands.difference(actual_commands)
assert (
len(missing) == 0
), f"Missing the following commands: {pprint.pformat(missing)}"
# test that there are no more commands
more = actual_commands_set.difference(expected_commands)
assert len(more) == 0, f"There are unknown commands: {pprint.pformat(missing)}"
# test that they are in the same order.
actual = actual_commands
expected = sorted(expected_commands)
assert actual == expected, "Commands are not in alphabetical order."
| [
"[email protected]"
] | |
e8d244e1575403819544217a2d429d5ef5a0d1af | 77dcf5ebad9512843742741c20cd412972d3261d | /alien.py | 83eda05822e131264b074b9f2c51b8590a2cbc4c | [] | no_license | crystalDf/Python-Crash-Course-2nd-Edition-Chapter-06-Dictionaries | 5742236d071e16cbb672be7f010f9fbc82033c06 | cc602b56134d63b6baf286e718daf08a2955967c | refs/heads/master | 2023-06-03T09:47:26.659353 | 2021-06-20T12:55:09 | 2021-06-20T12:55:09 | 378,645,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | alien_0 = {'color': 'green', 'points': 5}
print(alien_0['color'])
print(alien_0['points'])
new_points = alien_0['points']
print(f"You just earned {new_points} points!")
print(alien_0)
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)
alien_0 = {}
alien_0['color'] = 'green'
alien_0['points'] = 5
print(alien_0)
alien_0 = {'color': 'green'}
print(f"The alien is {alien_0['color']}.")
alien_0['color'] = 'yellow'
print(f"The alien is now {alien_0['color']}.")
alien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
print(f"Original position: {alien_0['x_position']}")
if alien_0['speed'] == 'slow':
x_increment = 1
elif alien_0['speed'] == 'medium':
x_increment = 2
else:
x_increment = 3
alien_0['x_position'] = alien_0['x_position'] + x_increment
print(f"New position: {alien_0['x_position']}")
alien_0 = {'color': 'green', 'points': 5}
print(alien_0)
del alien_0['points']
print(alien_0)
| [
"[email protected]"
] | |
94c48e5e215007b0d4439de04f8f769da3f0762b | a20ae2286d7055de8c533bc954c18b22d2a3cf5a | /sstones/ss_app/migrations/0022_auto_20190125_2146.py | 89a1a1c7637a8511bde7ab64191b1c4ea7b1f968 | [] | no_license | jflynn87/sstones | 695442c2c1745c83a37100d4b163938891afe184 | 3a2548b5457ef6386e808b6d984e376f2c83a254 | refs/heads/master | 2021-07-13T19:04:59.898493 | 2020-06-09T03:07:43 | 2020-06-09T03:07:43 | 139,449,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # Generated by Django 2.0.4 on 2019-01-25 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ss_app', '0021_auto_20190125_2110'),
]
operations = [
migrations.AlterField(
model_name='invoice',
name='principal',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='invoice',
name='tax',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='invoice',
name='total',
field=models.IntegerField(),
),
]
| [
"[email protected]"
] | |
72ba6c385460dab2b1036368b0325f806107877f | fe6775ca8c5b42710785e3a923974ae079f92c8f | /秋招/电信/3.py | 05b38997f944a3eb85cdd29b02167a6d0624197c | [] | no_license | AiZhanghan/Leetcode | 41bda6676fa1a25fa19e393553c1148ed51fdf72 | 101bce2fac8b188a4eb2f5e017293d21ad0ecb21 | refs/heads/master | 2021-06-28T10:48:07.865968 | 2020-11-20T09:45:15 | 2020-11-20T09:45:15 | 188,155,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | class Solution:
def func(self, nums):
"""
        Dynamic programming (house-robber style: max sum with no two adjacent picks)
Args:
nums: list[int]
Return:
int
"""
dp = [[0 for _ in range(2)] for _ in range(len(nums))]
dp[0][1] = nums[0]
for i in range(1, len(nums)):
dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])
dp[i][1] = dp[i - 1][0] + nums[i]
return max(dp[-1][0], dp[-1][1])
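# Example: Solution().func([2, 7, 9, 3, 1]) == 12  (picks 2 + 9 + 1).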
if __name__ == "__main__":
nums = list(map(int, input().split(",")))
print(Solution().func(nums)) | [
"[email protected]"
] | |
a88a544ffd2d07eb0de1c4220d463733c1ad7f92 | 3fda3ff2e9334433554b6cf923506f428d9e9366 | /hipeac/migrations/0003_auto_20181203_1702.py | 7b70e8202885094bb723ff32f5c3484853d1a689 | [
"MIT"
] | permissive | CreativeOthman/hipeac | 12adb61099886a6719dfccfa5ce26fdec8951bf9 | 2ce98da17cac2c6a87ec88df1b7676db4c200607 | refs/heads/master | 2022-07-20T10:06:58.771811 | 2020-05-07T11:39:13 | 2020-05-07T11:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | # Generated by Django 2.1.3 on 2018-12-03 16:02
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("hipeac", "0002_auto_20181203_1217"),
]
operations = [
migrations.AddField(
model_name="magazine",
name="application_areas",
field=models.CharField(
blank=True,
max_length=250,
validators=[
django.core.validators.RegexValidator(
re.compile("^\\d+(?:,\\d+)*\\Z"),
code="invalid",
message="Enter only digits separated by commas.",
)
],
),
),
migrations.AddField(
model_name="magazine",
name="projects",
field=models.ManyToManyField(blank=True, related_name="magazines", to="hipeac.Project"),
),
migrations.AddField(
model_name="magazine",
name="topics",
field=models.CharField(
blank=True,
max_length=250,
validators=[
django.core.validators.RegexValidator(
re.compile("^\\d+(?:,\\d+)*\\Z"),
code="invalid",
message="Enter only digits separated by commas.",
)
],
),
),
migrations.AddField(
model_name="magazine",
name="users",
field=models.ManyToManyField(blank=True, related_name="magazines", to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
cff3977d4479674d5ad613e5cf07ee5c36761581 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/knight-dialer/403821504.py | fe51e2c85fa1fd0b3cc504be5cf71dc4954c155d | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # title: knight-dialer
# detail: https://leetcode.com/submissions/detail/403821504/
# datetime: Sat Oct 3 15:34:15 2020
# runtime: 2428 ms
# memory: 45.6 MB
from functools import lru_cache  # pre-imported on LeetCode; needed to run standalone
class Solution:
def knightDialer(self, n: int) -> int:
@lru_cache(None)
def dp(i, k):
if k == 0:
return 1
result = 0
for j in jump[i]:
result = (result + dp(j, k - 1)) % MOD
return result
MOD = 10 ** 9 + 7
jump = [[4, 6], [6, 8], [7, 9], [4, 8], [3, 9, 0], [], [0, 1, 7], [2, 6], [1, 3], [2, 4], list(range(10))]
return dp(10, n)
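# Sanity checks: knightDialer(1) == 10 (any single key), knightDialer(2) == 20.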
| [
"[email protected]"
] | |
54ff96ab34c33c8f01b1916ca42287abe1003e46 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/hoework01/gettop10frommaoyam01_20200626092144.py | 117a3cb23b43109ab0bbd9c834519eb7bb082518 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 2,464 | py | # 使用requests,bs4库,爬取猫眼电影top10的电影名称、电影类型、上映时间,并以utf-8的字符集保存到csv文件中
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4"
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593100662316.1593100664951.15; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-session-id={"id":"435818e6a726415f46defffa27f7abc6","time":1593100221937}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100665; mojo-trace-id=17; _lxsdk_s=172ec2bff67-0c2-e9f-c64%7C%7C24__mta=251934006.1593072991075.1593100690175.1593100868002.17; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100868; _lxsdk_s=172ee2f4a3e-1c2-3a1-5a4%7C%7C1',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
response = requests.get(maoyanUrl,headers=header)
# response.encoding = 'utf-8'
bs_info = bs(response.text,"html.parser")
# print(response.text)
# Assumes Maoyan's board markup, where each movie title sits in <p class="name">.
for tag_p in bs_info.find_all('p', class_='name'):
    # Print the movie name
    print(tag_p.get_text(strip=True))
#
| [
"[email protected]"
] | |
2a9ff450f3c482b34eb8687346f3c7e4801cec7c | 50ba981bc65efea92f61c698cecfbbe3214a724e | /Django_Forms_Demo/Django_Forms_Demo/urls.py | 819f017ce66d33152fbe7e2a4d1447170c5cd234 | [] | no_license | shubhamjain31/demorepo | ff0a4283fc866ea94df1c340da430271daf93cb6 | 90639b8622e68155ff19bfec0bb6925b421f04cf | refs/heads/master | 2023-04-27T03:42:10.057387 | 2022-06-28T06:14:44 | 2022-06-28T06:14:44 | 229,792,545 | 1 | 0 | null | 2023-04-21T21:36:24 | 2019-12-23T17:04:22 | Python | UTF-8 | Python | false | false | 803 | py | """Django_Forms_Demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('App.urls')),
]
| [
"[email protected]"
] | |
871f0d67060d51a0f99c1d015b019564e3419060 | 6f4f8a8406f5101653fba51fc595ad572f32829e | /thonny/plugins/cpython/__init__.py | 0ba29cc668058820f6d0ef91495e2cd6139e1351 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | codecopy/thonny | 4664ebe1e19665bb2e4e60134f8112c225a5726c | 36d81fe6a13f79ee7a0bf2a85e8128e610a9b843 | refs/heads/master | 2023-02-17T00:08:17.295990 | 2021-01-07T17:21:25 | 2021-01-07T17:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,238 | py | import logging
import os.path
import subprocess
import sys
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
import thonny
from thonny import get_workbench, get_runner, ui_utils, THONNY_USER_DIR, running
from thonny.common import (
ToplevelCommand,
InlineCommand,
is_same_path,
normpath_with_actual_case,
get_python_version_string,
)
from thonny.languages import tr
from thonny.misc_utils import running_on_windows, running_on_mac_os, running_on_linux
from thonny.plugins.backend_config_page import BackendDetailsConfigPage
from thonny.running import (
SubprocessProxy,
create_frontend_python_process,
get_interpreter_for_subprocess,
is_bundled_python,
WINDOWS_EXE,
)
from thonny.terminal import run_in_terminal
from thonny.ui_utils import askdirectory, askopenfilename, create_string_var
logger = logging.getLogger(__name__)
class CPythonProxy(SubprocessProxy):
"abstract class"
def __init__(self, clean: bool, executable: str) -> None:
super().__init__(clean, executable)
self._send_msg(ToplevelCommand("get_environment_info"))
def _get_initial_cwd(self):
return get_workbench().get_local_cwd()
def _get_launch_cwd(self):
# launch in the directory containing thonny package, so that other interpreters can import it as well
return os.path.dirname(os.path.dirname(thonny.__file__))
def _get_launcher_with_args(self):
return ["-m", "thonny.plugins.cpython", self.get_cwd()]
def _store_state_info(self, msg):
super()._store_state_info(msg)
if "gui_is_active" in msg:
self._update_gui_updating(msg)
def _clear_environment(self):
self._close_backend()
self._start_background_process()
def _close_backend(self):
self._cancel_gui_update_loop()
super()._close_backend()
def get_local_executable(self):
return self._executable
def _update_gui_updating(self, msg):
"""Enables running Tkinter or Qt programs which doesn't call mainloop.
When mainloop is omitted, then program can be interacted with
from the shell after it runs to the end.
        Each ToplevelResponse is supposed to tell whether the GUI is active
        and needs updating.
"""
if not "gui_is_active" in msg:
return
if msg["gui_is_active"] and self._gui_update_loop_id is None:
# Start updating
self._loop_gui_update(True)
elif not msg["gui_is_active"] and self._gui_update_loop_id is not None:
self._cancel_gui_update_loop()
def _loop_gui_update(self, force=False):
if force or get_runner().is_waiting_toplevel_command():
try:
self.send_command(InlineCommand("process_gui_events"))
except OSError:
# the backend process may have been closed already
# https://github.com/thonny/thonny/issues/966
logger.exception("Could not send process_gui_events")
self._gui_update_loop_id = get_workbench().after(50, self._loop_gui_update)
def _cancel_gui_update_loop(self):
if self._gui_update_loop_id is not None:
try:
get_workbench().after_cancel(self._gui_update_loop_id)
finally:
self._gui_update_loop_id = None
def interrupt(self):
import signal
if self._proc is not None and self._proc.poll() is None:
if running_on_windows():
try:
os.kill(self._proc.pid, signal.CTRL_BREAK_EVENT) # pylint: disable=no-member
except Exception:
logging.exception("Could not interrupt backend process")
else:
self._proc.send_signal(signal.SIGINT)
def run_script_in_terminal(self, script_path, args, interactive, keep_open):
cmd = [self._executable]
if interactive:
cmd.append("-i")
cmd.append(os.path.basename(script_path))
cmd.extend(args)
run_in_terminal(cmd, os.path.dirname(script_path), keep_open=keep_open)
def get_supported_features(self):
return {"run", "debug", "run_in_terminal", "pip_gui", "system_shell"}
def get_pip_gui_class(self):
from thonny.plugins.pip_gui import CPythonBackendPipDialog
return CPythonBackendPipDialog
def can_run_remote_files(self):
return False
def can_run_local_files(self):
return True
class PrivateVenvCPythonProxy(CPythonProxy):
def __init__(self, clean):
self._prepare_private_venv()
super().__init__(clean, get_private_venv_executable())
def _prepare_private_venv(self):
path = get_private_venv_path()
if os.path.isdir(path) and os.path.isfile(os.path.join(path, "pyvenv.cfg")):
self._check_upgrade_private_venv(path)
else:
self._create_private_venv(
path, "Please wait!\nThonny prepares its virtual environment."
)
def _check_upgrade_private_venv(self, path):
# If home is wrong then regenerate
# If only micro version is different, then upgrade
info = _get_venv_info(path)
if not is_same_path(info["home"], os.path.dirname(sys.executable)):
self._create_private_venv(
path,
"Thonny's virtual environment was created for another interpreter.\n"
+ "Regenerating the virtual environment for current interpreter.\n"
+ "(You may need to reinstall your 3rd party packages)\n"
+ "Please wait!.",
clear=True,
)
else:
venv_version = tuple(map(int, info["version"].split(".")))
sys_version = sys.version_info[:3]
assert venv_version[0] == sys_version[0]
assert venv_version[1] == sys_version[1]
if venv_version[2] != sys_version[2]:
self._create_private_venv(
path, "Please wait!\nUpgrading Thonny's virtual environment.", upgrade=True
)
def _create_private_venv(self, path, description, clear=False, upgrade=False):
if not check_venv_installed(self):
return
# Don't include system site packages
# This way all students will have similar configuration
# independently of system Python (if Thonny is used with system Python)
    # NB! Can't run venv.create directly, because in Windows bundle
# it tries to link venv to thonny.exe.
# Need to run it via proper python
args = ["-m", "venv"]
if clear:
args.append("--clear")
if upgrade:
args.append("--upgrade")
try:
import ensurepip
except ImportError:
args.append("--without-pip")
args.append(path)
proc = create_frontend_python_process(args)
from thonny.workdlg import SubprocessDialog
dlg = SubprocessDialog(
get_workbench(),
proc,
"Preparing the backend",
long_description=description,
autostart=True,
)
try:
ui_utils.show_dialog(dlg)
except Exception:
# if using --without-pip the dialog may close very quickly
# and for some reason wait_window would give error then
logging.exception("Problem with waiting for venv creation dialog")
get_workbench().become_active_window() # Otherwise focus may get stuck somewhere
bindir = os.path.dirname(get_private_venv_executable())
# create private env marker
marker_path = os.path.join(bindir, "is_private")
with open(marker_path, mode="w") as fp:
fp.write("# This file marks Thonny-private venv")
# Create recommended pip conf to get rid of list deprecation warning
# https://github.com/pypa/pip/issues/4058
pip_conf = "pip.ini" if running_on_windows() else "pip.conf"
with open(os.path.join(path, pip_conf), mode="w") as fp:
fp.write("[list]\nformat = columns")
assert os.path.isdir(path)
@classmethod
def get_switcher_entries(cls):
return []
class SameAsFrontendCPythonProxy(CPythonProxy):
def __init__(self, clean):
super().__init__(clean, get_interpreter_for_subprocess())
def fetch_next_message(self):
msg = super().fetch_next_message()
if msg and "welcome_text" in msg:
if is_bundled_python(self._executable):
msg["welcome_text"] += " (bundled)"
else:
msg["welcome_text"] += " (" + self._executable + ")"
return msg
def get_clean_description(self):
return "Python " + get_python_version_string()
class CustomCPythonProxy(CPythonProxy):
def __init__(self, clean):
executable = get_workbench().get_option("CustomInterpreter.path")
        # Remember the usage of this non-default interpreter
used_interpreters = get_workbench().get_option("CustomInterpreter.used_paths")
if executable not in used_interpreters:
used_interpreters.append(executable)
get_workbench().set_option("CustomInterpreter.used_paths", used_interpreters)
super().__init__(clean, get_interpreter_for_subprocess(executable))
def fetch_next_message(self):
msg = super().fetch_next_message()
if msg and "welcome_text" in msg:
msg["welcome_text"] += " (" + self._executable + ")"
return msg
def get_clean_description(self):
desc = get_workbench().get_option("CustomInterpreter.path")
if not desc:
desc = sys.executable
return desc
@classmethod
def _get_switcher_entry_for_executable(cls, executable):
return (
{"run.backend_name": cls.backend_name, "CustomInterpreter.path": executable},
executable,
)
@classmethod
def get_current_switcher_configuration(cls):
return cls._get_switcher_entry_for_executable(
get_workbench().get_option("CustomInterpreter.path")
)[0]
@classmethod
def get_switcher_entries(cls):
return [
cls._get_switcher_entry_for_executable(executable)
for executable in _get_interpreters()
if os.path.exists(executable)
]
def get_private_venv_path():
if is_bundled_python(sys.executable.lower()):
prefix = "BundledPython"
else:
prefix = "Python"
return os.path.join(
THONNY_USER_DIR, prefix + "%d%d" % (sys.version_info[0], sys.version_info[1])
)
def get_private_venv_executable():
venv_path = get_private_venv_path()
if running_on_windows():
exe = os.path.join(venv_path, "Scripts", WINDOWS_EXE)
else:
exe = os.path.join(venv_path, "bin", "python3")
return exe
def _get_venv_info(venv_path):
cfg_path = os.path.join(venv_path, "pyvenv.cfg")
result = {}
with open(cfg_path, encoding="UTF-8") as fp:
for line in fp:
if "=" in line:
key, val = line.split("=", maxsplit=1)
result[key.strip()] = val.strip()
return result
class SameAsFrontEndConfigurationPage(BackendDetailsConfigPage):
def __init__(self, master):
super().__init__(master)
label = ttk.Label(self, text=get_interpreter_for_subprocess())
label.grid()
def should_restart(self):
return False
class PrivateVenvConfigurationPage(BackendDetailsConfigPage):
def __init__(self, master):
super().__init__(master)
text = (
tr("This virtual environment is automatically maintained by Thonny.\n")
+ tr("Location: ")
+ get_private_venv_path()
)
label = ttk.Label(self, text=text)
label.grid()
def should_restart(self):
return False
class CustomCPythonConfigurationPage(BackendDetailsConfigPage):
def __init__(self, master):
super().__init__(master)
self._configuration_variable = create_string_var(
get_workbench().get_option("CustomInterpreter.path")
)
entry_label = ttk.Label(self, text=tr("Python executable"))
entry_label.grid(row=0, column=1, columnspan=2, sticky=tk.W)
self._entry = ttk.Combobox(
self,
exportselection=False,
textvariable=self._configuration_variable,
values=_get_interpreters(),
)
self._entry.grid(row=1, column=1, sticky=tk.NSEW)
self._select_button = ttk.Button(
self,
text="...",
width=3,
command=self._select_executable,
)
self._select_button.grid(row=1, column=2, sticky="e", padx=(10, 0))
self.columnconfigure(1, weight=1)
extra_text = tr("NB! Thonny only supports Python 3.5 and later")
if running_on_mac_os():
extra_text += "\n\n" + tr(
"NB! File selection button may not work properly when selecting executables\n"
+ "from a virtual environment. In this case choose the 'activate' script instead\n"
+ "of the interpreter (or enter the path directly to the box)!"
)
extra_label = ttk.Label(self, text=extra_text)
extra_label.grid(row=2, column=1, columnspan=2, pady=10, sticky="w")
last_row = ttk.Frame(self)
last_row.grid(row=100, sticky="swe", column=1, columnspan=2)
self.rowconfigure(100, weight=1)
last_row.columnconfigure(1, weight=1)
new_venv_link = ui_utils.create_action_label(
last_row,
"New virtual environment",
self._create_venv,
)
new_venv_link.grid(row=0, column=1, sticky="e", pady=10)
# self.columnconfigure(1, weight=1)
def _select_executable(self):
# TODO: get dir of current interpreter
options = {"parent": self.winfo_toplevel()}
if running_on_windows():
options["filetypes"] = [
(tr("Python interpreters"), "python.exe"),
(tr("all files"), ".*"),
]
filename = askopenfilename(**options)
if not filename:
return
if filename.endswith("/activate"):
filename = filename[: -len("activate")] + "python3"
if filename:
self._configuration_variable.set(filename)
def _create_venv(self, event=None):
if not check_venv_installed(self):
return
messagebox.showinfo(
"Creating new virtual environment",
"After clicking 'OK' you need to choose an empty directory, "
"which will be the root of your new virtual environment.",
parent=self,
)
path = None
while True:
path = askdirectory(
parent=self.winfo_toplevel(),
initialdir=path,
title=tr("Select empty directory for new virtual environment"),
)
if not path:
return
if os.listdir(path):
messagebox.showerror(
tr("Bad directory"),
tr("Selected directory is not empty.\nSelect another or cancel."),
master=self,
)
else:
break
assert os.path.isdir(path)
path = normpath_with_actual_case(path)
proc = subprocess.Popen(
[running.get_interpreter_for_subprocess(), "-m", "venv", path],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
from thonny.workdlg import SubprocessDialog
dlg = SubprocessDialog(self, proc, tr("Creating virtual environment"), autostart=True)
ui_utils.show_dialog(dlg)
if running_on_windows():
exe_path = normpath_with_actual_case(os.path.join(path, "Scripts", "python.exe"))
else:
exe_path = os.path.join(path, "bin", "python3")
if os.path.exists(exe_path):
self._configuration_variable.set(exe_path)
def should_restart(self):
return self._configuration_variable.modified
def apply(self):
if not self.should_restart():
return
path = self._configuration_variable.get()
if os.path.isfile(path):
get_workbench().set_option("CustomInterpreter.path", path)
def _get_interpreters():
result = set()
if running_on_windows():
# registry
result.update(_get_interpreters_from_windows_registry())
for minor in [6, 7, 8, 9]:
for dir_ in [
"C:\\Python3%d" % minor,
"C:\\Python3%d-32" % minor,
"C:\\Python3%d-64" % minor,
"C:\\Program Files\\Python 3.%d" % minor,
"C:\\Program Files\\Python 3.%d-64" % minor,
"C:\\Program Files (x86)\\Python 3.%d" % minor,
"C:\\Program Files (x86)\\Python 3.%d-32" % minor,
]:
path = os.path.join(dir_, WINDOWS_EXE)
if os.path.exists(path):
result.add(normpath_with_actual_case(path))
# other locations
for dir_ in ["C:\\Anaconda3", os.path.expanduser("~/Anaconda3")]:
path = os.path.join(dir_, WINDOWS_EXE)
if os.path.exists(path):
result.add(normpath_with_actual_case(path))
else:
# Common unix locations
dirs = ["/bin", "/usr/bin", "/usr/local/bin", os.path.expanduser("~/.local/bin")]
for dir_ in dirs:
# if the dir_ is just a link to another dir_, skip it
# (not to show items twice)
# for example on Fedora /bin -> usr/bin
if not os.path.exists(dir_):
continue
apath = normpath_with_actual_case(dir_)
if apath != dir_ and apath in dirs:
continue
for name in ["python3", "python3.5", "python3.6", "python3.7", "python3.8"]:
path = os.path.join(dir_, name)
if os.path.exists(path):
result.add(path)
if running_on_mac_os():
for version in ["3.6", "3.7", "3.8", "3.9"]:
dir_ = os.path.join("/Library/Frameworks/Python.framework/Versions", version, "bin")
path = os.path.join(dir_, "python3")
if os.path.exists(path):
result.add(path)
from shutil import which
for command in ["python3", "python3.6", "python3.7", "python3.8", "python3.9"]:
path = which(command)
if path is not None and os.path.isabs(path):
result.add(path)
for path in get_workbench().get_option("CustomInterpreter.used_paths"):
if os.path.exists(path):
result.add(normpath_with_actual_case(path))
return sorted(result)
def _get_interpreters_from_windows_registry():
# https://github.com/python/cpython/blob/master/Tools/msi/README.txt
import winreg
result = set()
for key in [winreg.HKEY_LOCAL_MACHINE, winreg.HKEY_CURRENT_USER]:
for version in [
"3.6",
"3.6-32",
"3.6-64",
"3.7",
"3.7-32",
"3.7-64",
"3.8",
"3.8-32",
"3.8-64",
"3.9",
"3.9-32",
"3.9-64",
]:
try:
for subkey in [
"SOFTWARE\\Python\\PythonCore\\" + version + "\\InstallPath",
"SOFTWARE\\Python\\PythonCore\\Wow6432Node\\" + version + "\\InstallPath",
]:
dir_ = winreg.QueryValue(key, subkey)
if dir_:
path = os.path.join(dir_, WINDOWS_EXE)
if os.path.exists(path):
result.add(path)
except Exception:
pass
return result
def check_venv_installed(parent):
try:
import venv
return True
except ImportError:
messagebox.showerror("Error", "Package 'venv' is not available.", parent=parent)
return False
def load_plugin():
wb = get_workbench()
wb.set_default("run.backend_name", "SameAsFrontend")
wb.set_default("CustomInterpreter.used_paths", [])
wb.set_default("CustomInterpreter.path", "")
wb.add_backend(
"SameAsFrontend",
SameAsFrontendCPythonProxy,
tr("The same interpreter which runs Thonny (default)"),
SameAsFrontEndConfigurationPage,
"01",
)
wb.add_backend(
"CustomCPython",
CustomCPythonProxy,
tr("Alternative Python 3 interpreter or virtual environment"),
CustomCPythonConfigurationPage,
"02",
)
wb.add_backend(
"PrivateVenv",
PrivateVenvCPythonProxy,
tr("A special virtual environment (deprecated)"),
PrivateVenvConfigurationPage,
"zz",
)
| [
"[email protected]"
] | |
f73f479e605321b60ed24838163882b728e952fb | 488dfea1687152188bb0a6ca38a9bd24594f7010 | /qiskit/transpiler/passes/calibration/builders.py | ee53551265388b2789dface04f34b9f347ba3ece | [
"Apache-2.0"
] | permissive | CoolProgrammerX/qiskit-terra | e5b00a8efd5aae8c2de4ff52836640c70f09729a | 6262127f9f18e4c01846ec23a757bf1c97624fd9 | refs/heads/main | 2023-07-19T07:49:07.296112 | 2021-09-07T16:32:01 | 2021-09-07T16:32:01 | 404,226,586 | 1 | 0 | Apache-2.0 | 2021-09-08T05:49:23 | 2021-09-08T05:49:23 | null | UTF-8 | Python | false | false | 18,071 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Calibration creators."""
from abc import abstractmethod
from typing import List, Union
import math
import numpy as np
from qiskit.circuit import Instruction as CircuitInst
from qiskit.circuit.library.standard_gates import RZXGate
from qiskit.dagcircuit import DAGCircuit
from qiskit.exceptions import QiskitError
from qiskit.providers import basebackend
from qiskit.pulse import (
Play,
Delay,
ShiftPhase,
Schedule,
ScheduleBlock,
ControlChannel,
DriveChannel,
GaussianSquare,
)
from qiskit.pulse.instruction_schedule_map import InstructionScheduleMap, CalibrationPublisher
from qiskit.pulse.instructions.instruction import Instruction as PulseInst
from qiskit.transpiler.basepasses import TransformationPass
class CalibrationBuilder(TransformationPass):
"""Abstract base class to inject calibrations into circuits."""
@abstractmethod
def supported(self, node_op: CircuitInst, qubits: List) -> bool:
"""Determine if a given node supports the calibration.
Args:
node_op: Target instruction object.
qubits: Integer qubit indices to check.
Returns:
            Return ``True`` if calibration can be provided.
"""
@abstractmethod
def get_calibration(self, node_op: CircuitInst, qubits: List) -> Union[Schedule, ScheduleBlock]:
"""Gets the calibrated schedule for the given instruction and qubits.
Args:
node_op: Target instruction object.
qubits: Integer qubit indices to check.
Returns:
Return Schedule of target gate instruction.
"""
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the calibration adder pass on `dag`.
Args:
dag: DAG to schedule.
Returns:
A DAG with calibrations added to it.
"""
for node in dag.gate_nodes():
qubits = list(dag.qubits.index(q) for q in node.qargs)
if self.supported(node.op, qubits) and not dag.has_calibration_for(node):
# calibration can be provided and no user-defined calibration is already provided
schedule = self.get_calibration(node.op, qubits)
publisher = schedule.metadata.get("publisher", CalibrationPublisher.QISKIT)
# add calibration if it is not backend default
if publisher != CalibrationPublisher.BACKEND_PROVIDER:
dag.add_calibration(gate=node.op, qubits=qubits, schedule=schedule)
return dag
class RZXCalibrationBuilder(CalibrationBuilder):
"""
Creates calibrations for RZXGate(theta) by stretching and compressing
Gaussian square pulses in the CX gate. This is done by retrieving (for a given pair of
qubits) the CX schedule in the instruction schedule map of the backend defaults.
The CX schedule must be an echoed cross-resonance gate optionally with rotary tones.
The cross-resonance drive tones and rotary pulses must be Gaussian square pulses.
The width of the Gaussian square pulse is adjusted so as to match the desired rotation angle.
If the rotation angle is small such that the width disappears then the amplitude of the
zero width Gaussian square pulse (i.e. a Gaussian) is reduced to reach the target rotation
angle. Additional details can be found in https://arxiv.org/abs/2012.11660.
"""
def __init__(self, backend: basebackend):
"""
Initializes a RZXGate calibration builder.
Args:
backend: Backend for which to construct the gates.
Raises:
QiskitError: if open pulse is not supported by the backend.
"""
super().__init__()
if not backend.configuration().open_pulse:
raise QiskitError(
"Calibrations can only be added to Pulse-enabled backends, "
"but {} is not enabled with Pulse.".format(backend.name())
)
self._inst_map = backend.defaults().instruction_schedule_map
self._config = backend.configuration()
self._channel_map = backend.configuration().qubit_channel_mapping
def supported(self, node_op: CircuitInst, qubits: List) -> bool:
"""Determine if a given node supports the calibration.
Args:
node_op: Target instruction object.
qubits: Integer qubit indices to check.
Returns:
            Return ``True`` if calibration can be provided.
"""
return isinstance(node_op, RZXGate)
@staticmethod
def rescale_cr_inst(instruction: Play, theta: float, sample_mult: int = 16) -> Play:
"""
Args:
instruction: The instruction from which to create a new shortened or lengthened pulse.
theta: desired angle, pi/2 is assumed to be the angle that the pulse in the given
play instruction implements.
sample_mult: All pulses must be a multiple of sample_mult.
Returns:
qiskit.pulse.Play: The play instruction with the stretched compressed
GaussianSquare pulse.
Raises:
QiskitError: if the pulses are not GaussianSquare.
"""
pulse_ = instruction.pulse
if isinstance(pulse_, GaussianSquare):
amp = pulse_.amp
width = pulse_.width
sigma = pulse_.sigma
n_sigmas = (pulse_.duration - width) / sigma
# The error function is used because the Gaussian may have chopped tails.
gaussian_area = abs(amp) * sigma * np.sqrt(2 * np.pi) * math.erf(n_sigmas)
area = gaussian_area + abs(amp) * width
target_area = abs(theta) / (np.pi / 2.0) * area
sign = theta / abs(theta)
if target_area > gaussian_area:
width = (target_area - gaussian_area) / abs(amp)
duration = math.ceil((width + n_sigmas * sigma) / sample_mult) * sample_mult
return Play(
GaussianSquare(amp=sign * amp, width=width, sigma=sigma, duration=duration),
channel=instruction.channel,
)
else:
amp_scale = sign * target_area / gaussian_area
duration = math.ceil(n_sigmas * sigma / sample_mult) * sample_mult
return Play(
GaussianSquare(amp=amp * amp_scale, width=0, sigma=sigma, duration=duration),
channel=instruction.channel,
)
else:
raise QiskitError("RZXCalibrationBuilder only stretches/compresses GaussianSquare.")
def get_calibration(self, node_op: CircuitInst, qubits: List) -> Union[Schedule, ScheduleBlock]:
"""Builds the calibration schedule for the RZXGate(theta) with echos.
Args:
node_op: Instruction of the RZXGate(theta). I.e. params[0] is theta.
qubits: List of qubits for which to get the schedules. The first qubit is
the control and the second is the target.
Returns:
schedule: The calibration schedule for the RZXGate(theta).
Raises:
QiskitError: if the control and target qubits cannot be identified or the backend
does not support cx between the qubits.
"""
theta = node_op.params[0]
q1, q2 = qubits[0], qubits[1]
if not self._inst_map.has("cx", qubits):
raise QiskitError(
"This transpilation pass requires the backend to support cx "
"between qubits %i and %i." % (q1, q2)
)
cx_sched = self._inst_map.get("cx", qubits=(q1, q2))
rzx_theta = Schedule(name="rzx(%.3f)" % theta)
rzx_theta.metadata["publisher"] = CalibrationPublisher.QISKIT
if theta == 0.0:
return rzx_theta
crs, comp_tones = [], []
control, target = None, None
for time, inst in cx_sched.instructions:
# Identify the CR pulses.
if isinstance(inst, Play) and not isinstance(inst, ShiftPhase):
if isinstance(inst.channel, ControlChannel):
crs.append((time, inst))
# Identify the compensation tones.
if isinstance(inst.channel, DriveChannel) and not isinstance(inst, ShiftPhase):
if isinstance(inst.pulse, GaussianSquare):
comp_tones.append((time, inst))
target = inst.channel.index
control = q1 if target == q2 else q2
if control is None:
raise QiskitError("Control qubit is None.")
if target is None:
raise QiskitError("Target qubit is None.")
echo_x = self._inst_map.get("x", qubits=control)
# Build the schedule
# Stretch/compress the CR gates and compensation tones
cr1 = self.rescale_cr_inst(crs[0][1], theta)
cr2 = self.rescale_cr_inst(crs[1][1], theta)
if len(comp_tones) == 0:
comp1, comp2 = None, None
elif len(comp_tones) == 2:
comp1 = self.rescale_cr_inst(comp_tones[0][1], theta)
comp2 = self.rescale_cr_inst(comp_tones[1][1], theta)
else:
raise QiskitError(
"CX must have either 0 or 2 rotary tones between qubits %i and %i "
"but %i were found." % (control, target, len(comp_tones))
)
# Build the schedule for the RZXGate
rzx_theta = rzx_theta.insert(0, cr1)
if comp1 is not None:
rzx_theta = rzx_theta.insert(0, comp1)
rzx_theta = rzx_theta.insert(comp1.duration, echo_x)
time = comp1.duration + echo_x.duration
rzx_theta = rzx_theta.insert(time, cr2)
if comp2 is not None:
rzx_theta = rzx_theta.insert(time, comp2)
time = 2 * comp1.duration + echo_x.duration
rzx_theta = rzx_theta.insert(time, echo_x)
# Reverse direction of the ZX with Hadamard gates
if control == qubits[0]:
return rzx_theta
else:
rzc = self._inst_map.get("rz", [control], np.pi / 2)
sxc = self._inst_map.get("sx", [control])
rzt = self._inst_map.get("rz", [target], np.pi / 2)
sxt = self._inst_map.get("sx", [target])
h_sched = Schedule(name="hadamards")
h_sched = h_sched.insert(0, rzc)
h_sched = h_sched.insert(0, sxc)
h_sched = h_sched.insert(sxc.duration, rzc)
h_sched = h_sched.insert(0, rzt)
h_sched = h_sched.insert(0, sxt)
h_sched = h_sched.insert(sxc.duration, rzt)
rzx_theta = h_sched.append(rzx_theta)
return rzx_theta.append(h_sched)
class RZXCalibrationBuilderNoEcho(RZXCalibrationBuilder):
"""
Creates calibrations for RZXGate(theta) by stretching and compressing
Gaussian square pulses in the CX gate.
The ``RZXCalibrationBuilderNoEcho`` is a variation of the
:class:`~qiskit.transpiler.passes.RZXCalibrationBuilder` pass
that creates calibrations for the cross-resonance pulses without inserting
the echo pulses in the pulse schedule. This enables exposing the echo in
the cross-resonance sequence as gates so that the transpiler can simplify them.
The ``RZXCalibrationBuilderNoEcho`` only supports the hardware-native direction
of the CX gate.
"""
@staticmethod
def _filter_control(inst: (int, Union["Schedule", PulseInst])) -> bool:
"""
Looks for Gaussian square pulses applied to control channels.
Args:
inst: Instructions to be filtered.
Returns:
match: True if the instruction is a Play instruction with
a Gaussian square pulse on the ControlChannel.
"""
if isinstance(inst[1], Play):
if isinstance(inst[1].pulse, GaussianSquare) and isinstance(
inst[1].channel, ControlChannel
):
return True
return False
@staticmethod
def _filter_drive(inst: (int, Union["Schedule", PulseInst])) -> bool:
"""
Looks for Gaussian square pulses applied to drive channels.
Args:
inst: Instructions to be filtered.
Returns:
match: True if the instruction is a Play instruction with
a Gaussian square pulse on the DriveChannel.
"""
if isinstance(inst[1], Play):
if isinstance(inst[1].pulse, GaussianSquare) and isinstance(
inst[1].channel, DriveChannel
):
return True
return False
def get_calibration(self, node_op: CircuitInst, qubits: List) -> Union[Schedule, ScheduleBlock]:
"""Builds the calibration schedule for the RZXGate(theta) without echos.
Args:
node_op: Instruction of the RZXGate(theta). I.e. params[0] is theta.
qubits: List of qubits for which to get the schedules. The first qubit is
the control and the second is the target.
Returns:
schedule: The calibration schedule for the RZXGate(theta).
Raises:
QiskitError: If the control and target qubits cannot be identified, or the backend
does not support a cx gate between the qubits, or the backend does not natively
support the specified direction of the cx.
"""
theta = node_op.params[0]
q1, q2 = qubits[0], qubits[1]
if not self._inst_map.has("cx", qubits):
raise QiskitError(
"This transpilation pass requires the backend to support cx "
"between qubits %i and %i." % (q1, q2)
)
cx_sched = self._inst_map.get("cx", qubits=(q1, q2))
rzx_theta = Schedule(name="rzx(%.3f)" % theta)
rzx_theta.metadata["publisher"] = CalibrationPublisher.QISKIT
if theta == 0.0:
return rzx_theta
control, target = None, None
for _, inst in cx_sched.instructions:
# Identify the compensation tones.
if isinstance(inst.channel, DriveChannel) and isinstance(inst, Play):
if isinstance(inst.pulse, GaussianSquare):
target = inst.channel.index
control = q1 if target == q2 else q2
if control is None:
raise QiskitError("Control qubit is None.")
if target is None:
raise QiskitError("Target qubit is None.")
if control != qubits[0]:
raise QiskitError(
"RZXCalibrationBuilderNoEcho only supports hardware-native RZX gates."
)
# Get the filtered Schedule instructions for the CR gates and compensation tones.
crs = cx_sched.filter(*[self._filter_control]).instructions
rotaries = cx_sched.filter(*[self._filter_drive]).instructions
# Stretch/compress the CR gates and compensation tones.
cr = self.rescale_cr_inst(crs[0][1], 2 * theta)
rot = self.rescale_cr_inst(rotaries[0][1], 2 * theta)
# Build the schedule for the RZXGate without the echos.
rzx_theta = rzx_theta.insert(0, cr)
rzx_theta = rzx_theta.insert(0, rot)
rzx_theta = rzx_theta.insert(0, Delay(cr.duration, DriveChannel(control)))
return rzx_theta
class PulseGates(CalibrationBuilder):
"""Pulse gate adding pass.
This pass adds gate calibrations from the supplied ``InstructionScheduleMap``
to a quantum circuit.
This pass checks each DAG circuit node and acquires a corresponding schedule from
the instruction schedule map object that may be provided by the target backend.
Because this map is a mutable object, the end-user can provide a configured backend to
execute the circuit with customized gate implementations.
This mapping object returns a schedule with "publisher" metadata which is an integer Enum
value representing who created the gate schedule.
If the gate schedule is provided by end-users, this pass attaches the schedule to
the DAG circuit as a calibration.
This pass allows users to easily override quantum circuit with custom gate definitions
without directly dealing with those schedules.
References
* [1] OpenQASM 3: A broader and deeper quantum assembly language
https://arxiv.org/abs/2104.14722
"""
def __init__(
self,
inst_map: InstructionScheduleMap,
):
"""Create new pass.
Args:
inst_map: Instruction schedule map that user may override.
"""
super().__init__()
self.inst_map = inst_map
def supported(self, node_op: CircuitInst, qubits: List) -> bool:
"""Determine if a given node supports the calibration.
Args:
node_op: Target instruction object.
qubits: Integer qubit indices to check.
Returns:
            Return ``True`` if calibration can be provided.
"""
return self.inst_map.has(instruction=node_op.name, qubits=qubits)
def get_calibration(self, node_op: CircuitInst, qubits: List) -> Union[Schedule, ScheduleBlock]:
"""Gets the calibrated schedule for the given instruction and qubits.
Args:
node_op: Target instruction object.
qubits: Integer qubit indices to check.
Returns:
Return Schedule of target gate instruction.
"""
return self.inst_map.get(node_op.name, qubits, *node_op.params)
| [
"[email protected]"
] | |
7089ce2997caa0906fdb6ca385e862bf3d1ffb8e | a5f95d0564f03191b75171b00bfbaf25a86e57f1 | /hathi-0-match-registrations.py | 2eb85d6f28d5bf3537dc9ec9cfc9db82e90c9d9d | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | leonardr/cce-python | ec9b96f13c59abfa44b050be6762727f924b7ffc | 4240ad3f175c7b74ca160e260fa71d624b7681ca | refs/heads/master | 2020-06-28T18:47:47.078133 | 2019-09-12T20:39:50 | 2019-09-12T20:39:50 | 200,312,152 | 36 | 2 | MIT | 2019-08-22T12:46:40 | 2019-08-03T00:10:55 | Python | UTF-8 | Python | false | false | 13,396 | py | import sys
import Levenshtein as lev
from pdb import set_trace
from model import Registration
import datetime
import re
import json
from collections import defaultdict
# Ignore CCE entries if they have more than this many matches on the
# Hathi side.
MATCH_CUTOFF = 50
# Only output potential matches if the quality score is above this level.
QUALITY_CUTOFF = 0
# Stuff published before this year is public domain.
CUTOFF_YEAR = datetime.datetime.today().year - 95
class Comparator(object):
    NON_ALPHABETIC = re.compile(r"[\W0-9]", re.I + re.UNICODE)
    NON_ALPHANUMERIC = re.compile(r"[\W_]", re.I + re.UNICODE)
    MULTIPLE_SPACES = re.compile(r"\s+")
GENERIC_TITLES = (
'annual report',
'special report',
'proceedings of',
'proceedings',
'general catalog',
'catalog',
'report',
'questions and answers',
'transactions',
'yearbook',
'year book',
'selected poems',
'poems',
'bulletin',
'papers',
)
GENERIC_TITLES_RE = re.compile("(%s)" % "|".join(GENERIC_TITLES))
TOTALLY_GENERIC_TITLES_RE = re.compile("^(%s)$" % "|".join(GENERIC_TITLES))
def __init__(self, hathi_text_file):
self.by_title_key = defaultdict(list)
self._normalized = dict()
self._normalized_names = dict()
self._name_words = dict()
for i, raw in enumerate(open(hathi_text_file)):
row = raw.strip().split("\t")
try:
htid,access,rights,ht_bib_key,description,source,source_bib_num,oclc_num,isbn,issn,lccn,title,imprint,rights_reason_code,rights_timestamp,us_gov_doc_flag,rights_date_used,pub_place,lang,bib_fmt,collection_code,content_provider_code,responsible_entity_code,digitization_agent_code,access_profile_code,author = row
except Exception as e:
continue
if bib_fmt != 'BK':
# Not a book proper
continue
# Already open access?
if us_gov_doc_flag != '0':
continue
if rights in ['pdus', 'pd']:
continue
# und?
if rights not in ['ic', 'und']:
continue
try:
year = int(rights_date_used)
except Exception as e:
continue
if year > 1963+5 or year < CUTOFF_YEAR:
# Don't consider works published more than 5 years out
# of the range we're considering. That's plenty of
# time to publish the work you registered, or to register
# the work you published.
continue
hathi_dict = dict(
title=title, author=author, identifier=ht_bib_key,
year=year
)
title = self.normalize(title)
author = self.normalize(author)
if not title:
continue
key = self.title_key(title)
self.by_title_key[key].append((ht_bib_key, title, author, year, hathi_dict, row))
def generic_title_penalties(self, title):
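        # Returns (author_penalty_multiplier, author_base_penalty,
        # year_penalty_multiplier) -- see the unpacking in evaluate_match.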
# A generic-looking title means that an author match
# and a close date match is relatively more important.
title = self.normalize(title)
if "telephone director" in title:
# Telephone directories are uniquely awful, and they're
# published every year. Hold them to the highest standards.
return 7, 1.0, 7
if self.TOTALLY_GENERIC_TITLES_RE.match(title):
return 6, 0.8, 5
if self.GENERIC_TITLES_RE.match(title):
return 4, 0.7, 4
return 1, 0, 1
def normalize(self, text):
if isinstance(text, list):
if len(text) == 2:
# title + subtitle
text = ": ".join(text)
else:
# book just has variant titles.
text = text[0]
original = text
if original in self._normalized:
return self._normalized[original]
text = text.lower()
text = self.NON_ALPHANUMERIC.sub(" ", text)
text = self.MULTIPLE_SPACES.sub(" ", text)
# Just ignore these stopwords -- they're commonly missing or
# duplicated.
for ignorable in (
' the ',
' a ',
' an ',
):
text = text.replace(ignorable, '')
text = text.strip()
self._normalized[original] = text
return text
def normalize_name(self, name):
if not name:
return None
# Normalize a person's name.
original = name
if original in self._normalized_names:
return self._normalized_names[original]
name = name.lower()
name = self.NON_ALPHABETIC.sub(" ", name)
name = self.MULTIPLE_SPACES.sub(" ", name)
name = name.strip()
self._normalized_names[original] = name
return name
def name_words(self, name):
if not name:
return None
original = name
if original in self._name_words:
return self._name_words[original]
words = sorted(name.split())
self._name_words[original] = words
return words
def title_key(self, normalized_title):
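        # Bucket titles by their two longest words so near-duplicate
        # titles land in the same by_title_key bin.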
words = [x for x in normalized_title.split(" ") if x]
        longest_words = sorted(words, key=lambda x: (-len(x), x))
return tuple(longest_words[:2])
def matches(self, registration):
if not registration.title:
return
registration_title = self.normalize(registration.title)
key = self.title_key(registration_title)
key_matches = self.by_title_key[key]
for hathi_data in key_matches:
quality = self.evaluate_match(hathi_data, registration, registration_title)
if quality > 0:
yield registration, hathi_data, quality
def evaluate_match(self, hathi_data, registration, registration_title):
# The basic quality evaluation is based on title similarity.
ht_bib_key, hathi_title, hathi_author, hathi_year, hathi_dict, hathi_row = hathi_data
title_quality = self.evaluate_titles(
hathi_title, registration_title
)
# A penalty is applied if the Hathi publication date is far away from the
# copyright registration date.
registration_date = registration.best_guess_registration_date
# Assume we don't know the registration date; there will be no penalty.
date_penalty = 0
if registration_date:
date_penalty = self.evaluate_years(hathi_year, registration_date.year)
# A penalty is applied if the authors are clearly divergent.
registration_authors = registration.authors or []
if registration_authors and hathi_author:
author_penalty = self.evaluate_authors(
hathi_author, registration_authors
)
else:
# Author data is missing from registration. Give a slight penalty.
author_penalty = 0.2
# A generic-looking title has a correspondingly greater emphasis on
# an author match and a close year match.
author_penalty_multiplier, author_base_penalty, year_penalty_multiplier = self.generic_title_penalties(
registration_title
)
if author_penalty == 0:
author_penalty = author_base_penalty
elif author_penalty > 0:
author_penalty *= author_penalty_multiplier
if date_penalty > 0:
date_penalty *= year_penalty_multiplier
return title_quality - date_penalty - author_penalty
def evaluate_titles(self, ia, registration):
normalized_registration = self.normalize(registration)
if not normalized_registration:
return -1
if ia == normalized_registration:
# The titles are a perfect match. Give a bonus -- unless
# the title is short or generic. That's not very impressive.
a, b, c = self.generic_title_penalties(title)
if len(title) < 15:
length_multiplier = 1- ((15 - len(title)) * 0.05)
else:
length_multiplier = 1
if a == 1:
# Not generic.
return 1.2 * length_multiplier
else:
# Generic.
return 1 * length_multiplier
# Calculate the Levenshtein distance between the two strings,
# as a proportion of the length of the longer string.
#
# This ~ the quality of the title match.
# If you have to change half of the characters to get from one
# string to another, that's a score of 50%, which isn't
# "okay", it's really bad. Multiply the distance by a
# constant to reflect this.
distance = lev.distance(ia, normalized_registration) * 1.5
longer_string = max(len(ia), len(normalized_registration))
proportional_changes = distance / float(longer_string)
proportional_distance = 1-(proportional_changes)
return proportional_distance
def evaluate_years(self, ia, registration):
if ia == registration:
# Exact match gets a slight negative penalty -- a bonus.
return -0.01
# Apply a penalty for every year of difference between the
# registration year and the publication year according to Hathi.
# The penalty has a slight exponential element -- 5 years in
# either direction really should be enough for a match.
return (abs(ia-registration) ** 1.15) * 0.1
def evaluate_authors(self, hathi_authors, registration_authors):
if not hathi_authors or not registration_authors:
# We don't have the information necessary to match up
# authors. No penalty (though if the title is generic, a
# base penalty will be applied.)
return 0
# Return the smallest penalty for the given list of authors.
if not isinstance(hathi_authors, list):
hathi_authors = [hathi_authors]
if not isinstance(registration_authors, list):
registration_authors = [registration_authors]
penalties = []
for ia in hathi_authors:
for ra in registration_authors:
penalty = self.evaluate_author(ia, ra)
if penalty is not None:
penalties.append(penalty)
if not penalties:
# We couldn't figure it out. No penalty.
return 0
# This will find the largest negative penalty (bonus) or the
# smallest positive penalty.
return min(penalties)
def evaluate_author(self, hathi_author, registration_author):
# Determine the size of the rating penalty due to the mismatch
# between these two authors.
hathi_author = self.normalize_name(hathi_author)
registration_author = self.normalize_name(registration_author)
if not hathi_author or not registration_author:
# We just don't know.
return None
if hathi_author == registration_author:
# Exact match gets a negative penalty -- a bonus.
return -0.25
hathi_words = self.name_words(hathi_author)
registration_words = self.name_words(registration_author)
if hathi_words == registration_words:
# These are probably the same author. Return a negative
# penalty -- a bonus.
return -0.2
distance = lev.distance(hathi_author, registration_author)
longer_string = max(len(hathi_author), len(registration_author))
proportional_changes = distance / float(longer_string)
penalty = 1 - proportional_changes
if penalty > 0:
penalty = min(penalty, 0.50)
return penalty
comparator = Comparator(sys.argv[1])
output = open("output/hathi-0-matched.ndjson", "w")
for filename in ["FINAL-not-renewed.ndjson"]: #"FINAL-possibly-renewed.ndjson"]:
for i in open("output/%s" % filename):
cce = Registration.from_json(json.loads(i))
title = cce.title
if not title or not comparator.normalize(title):
continue
matches = list(comparator.matches(cce))
# If there are a huge number of Hathi matches for a CCE title,
# penalize them -- it's probably a big mess that must be dealt
# with separately. Give a slight boost if there's only a single
# match.
if len(matches) == 1:
num_matches_coefficient = 1.1
elif len(matches) <= MATCH_CUTOFF:
num_matches_coefficient = 1
else:
            # Spread the penalty over how far the match count exceeds the cutoff.
            num_matches_coefficient = 1 - (
                (len(matches) - MATCH_CUTOFF) / float(MATCH_CUTOFF)
            )
for registration, hathi, quality in matches:
quality = quality * num_matches_coefficient
if quality <= QUALITY_CUTOFF:
continue
hathi_dict = hathi[-2]
output_data = dict(
quality=quality, hathi=hathi_dict, cce=registration.jsonable()
)
json.dump(output_data, output)
output.write("\n")
| [
"[email protected]"
] | |
75051d09b67149f3380eaf69db28304e417cc616 | 13569261f1f7808aa2f9424a957358da77a7a949 | /Monet_Harkin/Bike/server.py | 755633b8d945d7b8a725af4c53ff9e9d3b02838b | [] | no_license | CodingDojoDallas/python-nov-2016 | 75049d114116330c1898ae5e3d1fd202a999da5d | a6a50cc7cd4f50b59459d995d2df4707417b8f1c | refs/heads/master | 2021-01-12T12:21:18.972855 | 2016-11-23T21:24:53 | 2016-11-23T21:24:53 | 72,453,587 | 0 | 13 | null | 2016-11-23T21:24:54 | 2016-10-31T16:07:05 | Python | UTF-8 | Python | false | false | 792 | py | # Monet Harkin - Bike OOP test
class Bike(object):
def __init__(self, price, max_speed):
self.price = price
self.max_speed = max_speed
        self.miles = 0
def displayInfo(self):
print "Price: "+ str(self.price )
print "Max speed: "+ str(self.max_speed)
print "Miles: "+ str(self.miles)
def ride(self):
self.miles += 10
print "Riding"
def reverse(self):
if self.miles >= 5:
self.miles -= 5
else:
            self.miles = 0
print "Reversing"
bike1 = Bike(200, "25mph")
bike2 = Bike(300, "27mph")
bike3 = Bike(100, "10mph")
bike1.ride()
bike1.ride()
bike1.ride()
bike1.reverse()
bike1.displayInfo()
print"*" * 50
bike2.ride()
bike2.ride()
bike2.reverse()
bike2.reverse()
bike2.displayInfo()
print"*" * 50
bike3.reverse()
bike3.reverse()
bike3.reverse()
bike3.displayInfo()
| [
"[email protected]"
] | |
03184a2f469e1ce27641847dc30183d3a658359d | f2a5680231e205dc49a083578d9bd90e4603036c | /Grokking-Coding-Interview-Patterns/14. Top K Numbers/KpointsClosestTwoOrigin.py | ef7a6e0c1f638611eb0b6307af2de741ced1381e | [] | no_license | flogothetis/Technical-Coding-Interviews-Algorithms-LeetCode | d592451f7d297fd52395e33dc67686e9990a663c | 7c8473fce4b5b5affbfde5ed8c39fdb89cbc77d4 | refs/heads/master | 2023-01-13T15:56:07.706164 | 2020-11-18T18:54:52 | 2020-11-18T18:54:52 | 281,101,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | '''
Problem Statement #
Given an array of points in the a 2D2D2D plane, find ‘K’ closest points to the origin.
'''
from heapq import *
# Time Comp : O(NlogK)
# Space Comp : O(K)
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __lt__(self, other):
        # Max-heap ordering: a point farther from the origin compares "less",
        # so heap[0] is always the farthest of the k kept points.
        return self.euclidean_dist() > other.euclidean_dist()
def print_point(self):
print("[" + str(self.x) + ", " + str(self.y) + "] ", end='')
def euclidean_dist (self):
return (self.x ** 2 + self.y **2) ** (1/2)
def find_closest_points(points, k):
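    # Maintain a max-heap of size k (Point.__lt__ orders farther points
    # first), evicting the current farthest whenever a closer point arrives.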
max_heap = []
for i in range(k):
heappush(max_heap, points[i])
for i in range ( k, len(points)):
if (points[i].euclidean_dist() < max_heap[0].euclidean_dist()):
heappop(max_heap)
heappush(max_heap, points[i])
return list(max_heap)
def main():
result = find_closest_points([Point(1, 3), Point(3, 4), Point(2, -1)], 2)
print("Here are the k points closest the origin: ", end='')
for point in result:
point.print_point()
main()
| [
"[email protected]"
] | |
15ed73aa9d59827398ca3d5f8fe89d7baaf23d8d | f5645f685c8be36711f71c3a6763d6a4f93788b5 | /sandbox/settings/test.py | 9ef14ae7a9e7f1cd9b73a4a8de05e269cc2be55b | [
"MIT"
] | permissive | pincoin/rakmai | ef034c6b6d5501cd433869446275f4f3b622a73b | fe41fd0ab88bf143e65b450ceb798741d0f80330 | refs/heads/master | 2023-03-02T13:24:10.885714 | 2023-02-27T14:37:10 | 2023-02-27T14:37:10 | 112,416,258 | 13 | 4 | MIT | 2023-02-15T18:54:11 | 2017-11-29T02:34:15 | JavaScript | UTF-8 | Python | false | false | 2,098 | py | SECRET_KEY = 'rakmai_fake_key'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
INSTALLED_APPS += [
'mptt',
'taggit',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.kakao',
# 'allauth.socialaccount.providers.line',
'import_export',
'easy_thumbnails',
'crispy_forms',
'rakmai',
'member',
'blog',
'board',
'book',
'shop',
'help',
'rabop',
'banner',
]
ROOT_URLCONF = 'sandbox.urls'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db_test.sqlite3',
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ADMIN_URL = 'secret-admin/'
RABOP_URL = 'rabop/'
API_URL = 'api/'
# Allauth
# Member
# Blog
# Board
# Forum
# Bleach sanitizing text fragments
BLEACH_ALLOWED_TAGS = [
'h1', 'h2', 'h3', 'h4', 'h5', 'ol', 'ul', 'li', 'div', 'p', 'code', 'blockquote', 'pre', 'span', 'table', 'thead',
'tbody', 'tr', 'th', 'td', 'a', 'em', 'strong', 'hr', 'img'
]
BLEACH_ALLOWED_ATTRIBUTES = {
'*': ['class', 'id'],
'a': ['href', 'rel'],
'img': ['alt', 'src'],
}
# crispy-form
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Dummy cache for testing
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'TIMEOUT': 300,
}
}
GOOGLE_OTP_ENABLED = False
| [
"[email protected]"
] | |
50f57cdf741ff10781381d4764ff69474278b5b8 | 124b9b3aa4d6c4e1014e2119a32b711c9bfe3b78 | /Problem Solving/Birthday cake candles.py | b40d1a9bc11bff36ff280ded0ffb1d7af6ec3d14 | [] | no_license | Devendra33/HackerRank | 5f4929c1161fade3ed1a593b847403943e757bdb | 980f8577677e24da654627b35fbfccb69b17f218 | refs/heads/master | 2022-12-14T05:13:30.405197 | 2020-09-12T09:24:31 | 2020-09-12T09:24:31 | 264,129,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the birthdayCakeCandles function below.
def birthdayCakeCandles(ar, ar_count):
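    # Count how many candles share the maximum height.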
cnt = 0
num = max(ar)
for i in ar:
if i == num:
cnt += 1
return cnt
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
result = birthdayCakeCandles(ar,ar_count)
fptr.write(str(result) + '\n')
fptr.close()
| [
"[email protected]"
] | |
dcd82fd2a59803baeb191e7c636fb4021b62689e | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_46/run_cfg.py | d0f4e8c6b8e8c8ce3a1c19aadea1c9c7a03b7ca4 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_262.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_263.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_264.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_265.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_266.root')
)
| [
"[email protected]"
] | |
ac3f681ff49d500b4fa57354e421919c40815772 | d1bcd1a55221d52cab88f397183ee6492fc7f5a5 | /workmail-translate-email/src/translate_helper.py | 3fb6cccbe58ab4aa99341ff7ff29997ad198d655 | [
"Apache-2.0"
] | permissive | aws-samples/amazon-workmail-lambda-templates | fccd1b490d69a08b512e2e666d4a85000745b793 | d7b83d7f4499768f60fd115f5f995e1d8daccf89 | refs/heads/master | 2022-06-30T22:22:43.030710 | 2022-06-30T19:05:15 | 2022-06-30T19:05:15 | 168,240,710 | 39 | 18 | Apache-2.0 | 2022-06-30T19:05:16 | 2019-01-29T22:38:50 | Python | UTF-8 | Python | false | false | 1,323 | py | import boto3
comprehend = boto3.client(service_name='comprehend')
translate = boto3.client(service_name='translate')
def detect_language(text):
"""
Detects the dominant language in a text
Parameters
----------
text: string, required
Input text
Returns
-------
string
Representing language code of the dominant language
"""
# Sending call to get language
result = comprehend.detect_dominant_language(Text = text)['Languages']
# Since the result can contain more than one language find the one with the highest score.
high_score = 0
best_guess = ''
for lang in range(len(result)):
if result[lang]['Score'] > high_score:
high_score = result[lang]['Score']
best_guess = result[lang]['LanguageCode']
return best_guess
def translate_text(text, source_lang, destination_lang):
"""
Translates given text from source language into destination language
Parameters
----------
text: string, required
Input text in source language
Returns
-------
string
Translated text in destination language
"""
result = translate.translate_text(Text=text,
SourceLanguageCode=source_lang, TargetLanguageCode=destination_lang)
return result.get('TranslatedText')
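# Minimal usage sketch (assumes AWS credentials and region are configured):
if __name__ == "__main__":
    sample = "Hola mundo"
    print(translate_text(sample, detect_language(sample), "en"))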
| [
"[email protected]"
] | |
546a4af39360350465c0ca3bda6e5dabafad1e0c | 3208f15876e5ae0275366763f57380f383eae55b | /manage.py | baef8477b5382806b9d2514ca37998eb21f3ff95 | [
"MIT"
] | permissive | jpadilla/feedleap | 05d4abbd21408ec2c4d1f2a99aaba0fe22a7e3f7 | 06d87a680a4bd2ead550b3540e58a8c520a733ba | refs/heads/master | 2023-05-31T19:22:14.143515 | 2014-06-27T13:02:52 | 2014-06-27T13:02:52 | 9,079,296 | 1 | 1 | null | 2013-05-05T17:05:29 | 2013-03-28T14:53:25 | Python | UTF-8 | Python | false | false | 1,004 | py | #!/usr/bin/env python
import os
import sys
import re
def read_env():
"""Pulled from Honcho code with minor updates, reads local default
environment variables from a .env file located in the project root
directory.
"""
try:
with open('.env') as f:
content = f.read()
except IOError:
content = ''
for line in content.splitlines():
m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
if m1:
key, val = m1.group(1), m1.group(2)
m2 = re.match(r"\A'(.*)'\Z", val)
if m2:
val = m2.group(1)
m3 = re.match(r'\A"(.*)"\Z', val)
if m3:
val = re.sub(r'\\(.)', r'\1', m3.group(1))
os.environ.setdefault(key, val)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "feedleap.settings.base")
from django.core.management import execute_from_command_line
read_env()
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
ca93204a00373b06690345a47392831fa59520db | 477ddbb411b0b9d7431eed8e1d5be155f4940e05 | /feedly/serializers/base.py | 06687c2c8bd6ca74134fb5225350beb2598cde3b | [
"BSD-3-Clause"
] | permissive | atefzed/Feedly | 9ba531d183637e9d42bfdade4c831825f549e2d1 | f3824b95cefc360ebbfc2d2166b53a5505f49fbc | refs/heads/master | 2021-01-15T17:07:40.795228 | 2013-05-07T22:17:15 | 2013-05-07T22:17:15 | 10,055,814 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41 | py |
class BaseSerializer(object):
pass
| [
"[email protected]"
] | |
03d5325ec088f2701d3826c732407eeba995016a | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /g7sh7oLoArRLmM2ky_6.py | d4615fa5b39a2c4ba13bdd9812cce19b2372f521 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py |
import re
def baconify(msg, mask=""):
d_key={
"uuuuu" :"a" ,
"uuuul" :"b" ,
"uuulu" :"c" ,
"uuull" :"d" ,
"uuluu" :"e" ,
"uulul" :"f" ,
"uullu" :"g" ,
"uulll" :"h" ,
"uluuu" :"i" ,
"uluul" :"j" ,
"ululu" :"k" ,
"ulull" :"l" ,
"ulluu" :"m" ,
"ullul" :"n" ,
"ulllu" :"o" ,
"ullll" :"p" ,
"luuuu" :"q" ,
"luuul" :"r" ,
"luulu" :"s" ,
"luull" :"t" ,
"luluu" :"u" ,
"lulul" :"v" ,
"lullu" :"w" ,
"lulll" :"x" ,
"lluuu" :"y" ,
"lluul" :"z" ,
"llllu" :"." ,
"lllll" :" "
}
e_key={
"a": "uuuuu",
"b": "uuuul",
"c": "uuulu",
"d": "uuull",
"e": "uuluu",
"f": "uulul",
"g": "uullu",
"h": "uulll",
"i": "uluuu",
"j": "uluul",
"k": "ululu",
"l": "ulull",
"m": "ulluu",
"n": "ullul",
"o": "ulllu",
"p": "ullll",
"q": "luuuu",
"r": "luuul",
"s": "luulu",
"t": "luull",
"u": "luluu",
"v": "lulul",
"w": "lullu",
"x": "lulll",
"y": "lluuu",
"z": "lluul",
".": "llllu",
" ": "lllll"
}
return_statement=""
borken_word=""
if not mask:
msg=re.sub("[\d\W\s]","",msg)
for ch in msg:
borken_word+="l" if str.islower(ch) else "u"
if len(borken_word)==5:
return_statement+=(d_key[borken_word])
borken_word=""
return return_statement
else:
ch_count=0
        msg = re.sub(r"[!?:;'\"]", "", msg)
return_statement=list(mask)
for ch in str.lower(msg):
for x in (e_key[ch]):
while not str.isalpha(mask[ch_count]):
ch_count+=1
return_statement[ch_count]=str.upper(mask[ch_count]) if x=="u" else str.lower(mask[ch_count])
ch_count+=1
return "".join(return_statement)
| [
"[email protected]"
] | |
ea6d7a09a0098c84b4c48d440ab621b0b6a8bec7 | 98d832289b7437247ce03ea54ad3cb7b95451159 | /test/test_smtp_alert.py | 1dfe158f6bb2b04eba1df72525a8c0295f830931 | [
"MIT"
] | permissive | rmehilli-r7/vm-console-client-python | 7f02f13345dce4f4d4d85e18da7146daeefbceb9 | 069041c1c7b53c6b3d8bfdd81b974141bfca3c0c | refs/heads/master | 2020-03-23T11:20:33.364442 | 2018-08-10T20:06:37 | 2018-08-10T20:06:37 | 141,498,444 | 0 | 0 | MIT | 2018-08-08T19:58:45 | 2018-07-18T23:00:41 | Python | UTF-8 | Python | false | false | 48,842 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
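An illustrative request (not part of the generated specification) that combines Basic authentication, paging and sorting from Python -- the host, credentials, and the `/api/3/assets` endpoint below are assumptions:

```python
import requests

BASE = "https://nexpose.example.com:3780/api/3"  # assumed console host/port

resp = requests.get(
    BASE + "/assets",                        # a collection resource
    auth=("username", "password"),           # HTTP Basic authentication
    headers={"Accept": "application/json"},
    params={"page": 2, "size": 10,           # third page (zero-based), 10 per page
            "sort": "riskScore,DESC"},       # sort by risk score, descending
)
resp.raise_for_status()
page = resp.json()
print(page["page"]["totalResources"], "resources in total")
```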
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.smtp_alert import SmtpAlert # noqa: E501
from swagger_client.rest import ApiException
class TestSmtpAlert(unittest.TestCase):
"""SmtpAlert unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSmtpAlert(self):
"""Test SmtpAlert"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.smtp_alert.SmtpAlert() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
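# To run this stub directly (the module name is an assumption based on
# swagger-codegen's file-naming convention):
#   python -m unittest test_smtp_alert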
| [
"[email protected]"
] | |
e710851a689538325d804091ca103627905d86d5 | 8329282a8fda056d705c1af6dbcd0de1ed7ca25e | /.history/textutiles/textutiles/views_20210522225246.py | ec3d62029c51e1f60b0b7c91f234a8013ae9d2c6 | [] | no_license | ritikalohia/Django-beginners- | c069b16867407ef883bb00c6faf4f601921c118a | 829e28ab25201853de5c71a10ceff30496afea52 | refs/heads/main | 2023-05-04T03:34:29.082656 | 2021-05-22T17:38:21 | 2021-05-22T17:38:21 | 369,869,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,784 | py | #created
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
#params = {'name' : 'Ritika', 'place' : 'Mars'}
return render(request, 'index.html')
#return HttpResponse("Home")
def contact(request):
return render(request, 'contact.html')
def about(request):
return render(request, 'about_us.html')
def analyze(request):
    #get the text from the submitted form
    djtext = request.POST.get('text', 'default')
#check checkbox values
removepunc = request.POST.get('removepunc', 'off')
fullcaps = request.POST.get('fullcaps', 'off')
newlineremover = request.POST.get('newlineremover', 'off')
    spaceremover = request.POST.get('spaceremover', 'off')
charcount = request.POST.get('charcount', 'off')
if removepunc == "on":
#analyzed = djtext
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_'''
analyzed = ""
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
params ={'purpose':'removed punctuations', 'analyzed_text': analyzed}
#analyze the text
djtext = analyzed
#return render(request, 'analyze.html', params)
if(fullcaps == "on"):
analyzed =""
for char in djtext:
analyzed = analyzed + char.upper()
params ={'purpose':'changed to UPPERCASE', 'analyzed_text': analyzed}
#analyze the text
djtext = analyzed
#return render(request, 'analyze.html', params)
if(newlineremover== "on"):
analyzed =""
for char in djtext:
if char != '\n' and char !="\r":
analyzed = analyzed + char
params ={'purpose':'Removed new lines', 'analyzed_text': analyzed}
#analyze the text
djtext = analyzed
#return render(request, 'analyze.html', params)
    if(spaceremover== "on"):
        analyzed =""
        for index, char in enumerate(djtext):
            # keep the character unless it is a space followed by another space
            if char == " " and index + 1 < len(djtext) and djtext[index+1] == " ":
                continue
            analyzed = analyzed + char
        params ={'purpose':'extra space removed', 'analyzed_text': analyzed}
        #analyze the text
        djtext = analyzed
        #return render(request, 'analyze.html', params)
    if(charcount== "on"):
        a = 0
        for char in djtext:
            a = a + 1
        params ={'purpose':'character count', 'analyzed_text': a}
        #analyze the text
        djtext = str(a)
        #return render(request, 'analyze.html', params)
    if(removepunc == "on" or fullcaps == "on" or newlineremover == "on" or spaceremover == "on" or charcount == "on"):
        return render(request, 'analyze.html', params)
    else:
        return HttpResponse("Error: please select at least one operation")
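# Illustrative only -- these views would be routed in the app's urls.py
# roughly as follows (paths and module layout are assumptions based on the
# "textutiles" project name above):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('contact/', views.contact, name='contact'),
#     path('about/', views.about, name='about'),
#     path('analyze/', views.analyze, name='analyze'),
# ]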
# def capfirst(request):
# return HttpResponse("capitalize first") | [
"[email protected]"
] | |
6069be8064002ba6e48ace2e422ed62d65f716d8 | 51888119e10cdff12dafb060a54824632edccf3f | /Folders/Python/stamps.py | 2a699233a77790833fb5c81c1d8085e8eff222c2 | [
"BSD-2-Clause"
] | permissive | kuchinal/lamakaha | b64511ad8c6d2b36da5a84a266b9e7a69acd3106 | 24e3b2ff53bcac2ad1c0e5a3b9afd4593d85f22d | refs/heads/master | 2023-09-01T17:55:56.551183 | 2023-07-31T19:32:04 | 2023-07-31T19:32:04 | 182,849,747 | 0 | 0 | null | 2021-09-10T06:34:22 | 2019-04-22T19:00:02 | Python | UTF-8 | Python | false | false | 33,729 | py | """
Stamps rebuilt by Adrian Pueyo
Full original concept and Postage Stamps created by Alexey Kuchinski and Ernest Dios.
"""
version= "v0.2"
# Constants
STAMP_DEFAULTS = { "note_font_size":20, "hide_input":0 }
ANCHOR_DEFAULTS = { "tile_color" : int('%02x%02x%02x%02x' % (255,255,255,1),16),
"autolabel": 'nuke.thisNode().knob("title").value()',
"knobChanged":'import stamps; stamps.anchorKnobChanged()'}
WIRED_DEFAULTS = { "tile_color" : int('%02x%02x%02x%02x' % (1,0,0,1),16),
"autolabel": 'nuke.thisNode().knob("title").value()',
"knobChanged":'import stamps; stamps.wiredKnobChanged()'}
DeepExceptionClasses = ["DeepToImage","DeepHoldout"] # Nodes with "Deep" in their class that don't classify as Deep.
NodeExceptionClasses = ["Viewer"] # Nodes that won't accept stamps
ParticleExceptionClasses = ["ParticleToImage"] # Nodes with "Particle" in class and an input called "particles" that don't classify as particles.
StampClasses = {"2D":"NoOp", "Deep":"DeepExpression"}
AnchorClassColors = {"Camera":int('%02x%02x%02x%02x' % (255,0,0,1),16),
"3D":int('%02x%02x%02x%02x' % (255,0,0,1),16),}
WiredClassColors = {"Camera":int('%02x%02x%02x%02x' % (255,0,0,1),16),
"3D":int('%02x%02x%02x%02x' % (255,0,0,1),16),}
STAMPS_HELP = "Redone by Adrian Pueyo.\nUpdated 29 Mar 2019\n\nFull original concept and \"Postage Stamps\" created by Alexey Kuchinski."
VERSION_TOOLTIP = "Stamps \nRedone by Adrian Pueyo.\nFull original concept and \"Postage Stamps\" created by Alexey Kuchinski."
if 'Stamps_LastCreated' not in globals(): # dict.has_key() is Python 2 only
Stamps_LastCreated = None
import nuke
import nukescripts
import re
from functools import partial
# PySide import switch
try:
from PySide import QtGui, QtCore, QtGui as QtWidgets
except ImportError:
from PySide2 import QtWidgets, QtGui, QtCore
#################################
### FUNCTIONS INSIDE OF BUTTONS
#################################
def wiredShowAnchor():
n = nuke.thisNode()
if n.inputs():
nuke.show(n.input(0))
def wiredZoomAnchor():
n = nuke.thisNode()
if n.inputs():
ni = n.input(0)
nuke.zoom(nuke.zoom(),[ni.xpos()+ni.screenWidth()/2,ni.ypos()+ni.screenHeight()/2])
def wiredZoomThis():
n = nuke.thisNode()
nuke.zoom(nuke.zoom(),[n.xpos(),n.ypos()])
def wiredStyle(n, style = 0):
''' Change the style of a wired stamp, based on some presets '''
nf = n["note_font"].value().split(" Bold")[0].split(" bold")[0]
if style == 0: # DEFAULT
n["note_font_size"].setValue(20)
n["note_font_color"].setValue(0)
n["note_font"].setValue(nf)
elif style == 1: # BROKEN
n["note_font_size"].setValue(40)
n["note_font_color"].setValue(4278190335)
n["note_font"].setValue(nf+" Bold")
def wiredKnobChanged():
k = nuke.thisKnob()
kn = k.name()
if kn in ["xpos","ypos"]:
return
n = nuke.thisNode()
ni = n.inputs()
if n.knob("toReconnect") and n.knob("toReconnect").value() and nuke.GUI and not ni:
try:
inp = n.knob("anchor").value()
a = nuke.toNode(inp)
if a.knob("title") and n.knob("title") and a["title"].value() == n["title"].value():
nuke.thisNode().setInput(0,a)
else:
wiredStyle(n,1)
n.knob("toReconnect").setValue(False)
except:
pass
elif kn == "selected": #First time it's this knob, it will activate the first if, then, ignore.
return
elif kn == "inputChange" or not ni:
if not ni or (ni and not isAnchor(n.input(0))):
wiredStyle(n,1)
elif n.input(0).knob("title") and n.knob("title") and n.input(0)["title"].value() == n["title"].value():
wiredStyle(n,0)
elif kn == "title":
kv = k.value()
if titleIsLegal(kv):
if nuke.ask("Do you want to update the linked stamps' title?"):
a = retitleAnchor(n) # Retitle anchor
retitleWired(a) # Retitle wired stamps of anchor a
return
else:
nuke.message("Please set a valid title.")
try:
n["title"].setValue(n["prev_title"].value())
except:
pass
else:
try:
n.knob("toReconnect").setValue(False)
if ni:
if isAnchor(n.input(0)):
if n.knob("title").value() == n.input(0).knob("title").value():
n.knob("anchor").setValue(n.input(0).name())
elif nuke.ask("Do you want to change the anchor for the current stamp?"):
n.knob("anchor").setValue(n.input(0).name())
n.knob("title").setValue(n.input(0).knob("title").value())
n.knob("prev_title").setValue(n.input(0).knob("title").value())
else:
n.setInput(0,None)
except:
pass
def anchorKnobChanged():
k = nuke.thisKnob()
kn = k.name()
if kn in ["xpos","ypos"]:
return
n = nuke.thisNode()
if kn == "title":
kv = k.value()
if titleIsLegal(kv):
if nuke.ask("Do you want to update the linked stamps' title?"):
retitleWired(n) # Retitle wired stamps of anchor a
#print "Wired stamps retitled"
return
else:
nuke.message("Please set a valid title.")
try:
n["title"].setValue(n["prev_title"].value())
except:
pass
def retitleAnchor(ref = ""):
'''
Retitle Anchor of current wired stamp to match its title.
returns: anchor node
'''
if ref == "":
ref = nuke.thisNode()
try:
ref_title = ref["title"].value().strip()
ref_anchor = ref["anchor"].value()
na = nuke.toNode(ref_anchor)
for kn in ["title","prev_title"]:
na[kn].setValue(ref_title)
ref["prev_title"].setValue(ref_title)
return na
except:
return None
def retitleWired(anchor = ""):
'''
Retitle wired stamps connected to supplied anchor
'''
if anchor == "":
return
try:
anchor_title = anchor["title"].value()
anchor_name = anchor.name()
for nw in nuke.allNodes("NoOp"):
if all(nw.knob(i) for i in ["identifier","anchor","title","prev_title"]):
if nw["identifier"].value() == "wired" and nw["anchor"].value() == anchor_name:
nw["title"].setValue(anchor_title)
nw["prev_title"].setValue(anchor_title)
return True
except:
pass
def wiredSelectSimilar(anchor_name = ""):
if anchor_name=="":
anchor_name = nuke.thisNode().knob("anchor").value()
for i in nuke.allNodes("NoOp"):
if i.knob("identifier") and i.knob("anchor"):
if i.knob("identifier").value() == "wired" and i.knob("anchor").value() == anchor_name:
i.setSelected(True)
def wiredReconnectSimilar(anchor_name = ""):
if anchor_name=="":
anchor_name = nuke.thisNode().knob("anchor").value()
for i in nuke.allNodes("NoOp"):
if i.knob("identifier") and i.knob("anchor"):
if i.knob("identifier").value() == "wired" and i.knob("anchor").value() == anchor_name:
reconnectErrors = 0
try:
i.knob("reconnect").execute()
except:
reconnectErrors += 1
finally:
if reconnectErrors > 0:
nuke.message("Couldn't reconnect {} nodes".format(str(reconnectErrors)))
def anchorReconnectWired(anchor = ""):
if anchor=="":
anchor = nuke.thisNode()
anchor_name = anchor.name()
for i in nuke.allNodes("NoOp"):
if i.knob("identifier") and i.knob("anchor"):
if i.knob("identifier").value() == "wired" and i.knob("anchor").value() == anchor_name:
reconnectErrors = 0
try:
i.setInput(0,anchor)
except:
reconnectErrors += 1
finally:
if reconnectErrors > 0:
nuke.message("Couldn't reconnect {} nodes".format(str(reconnectErrors)))
def wiredZoomNext(anchor_name = ""):
if anchor_name=="":
anchor_name = nuke.thisNode().knob("anchor").value()
anchor = nuke.toNode(anchor_name)
showing_knob = anchor.knob("showing")
showing_value = showing_knob.value()
i = 0
for ni in nuke.allNodes("NoOp"):
if ni.knob("identifier") and ni.knob("anchor"):
if ni.knob("identifier").value() == "wired" and ni.knob("anchor").value() == anchor_name:
if i == showing_value:
nuke.zoom(nuke.zoom(),[ni.xpos(),ni.ypos()])
showing_knob.setValue(i+1)
return
i+=1
showing_knob.setValue(0)
nuke.message("Couldn't find any more similar wired stamps.")
def showHelp():
import os
os.system("firefox http://phabricator.trixter.intern/w/artist/compositing/nuke/enhancements/postage_stamps/ &")
def anchorSelectWireds(anchor = ""):
if anchor == "":
try:
anchor = nuke.selectedNode()
except:
pass
if isAnchor(anchor):
anchor.setSelected(False)
wiredSelectSimilar(anchor.name())
wiredOnCreate_code = """if nuke.GUI:
try:
nuke.thisNode().knob("toReconnect").setValue(1)
except:
pass
"""
wiredReconnectToTitle_code = """n = nuke.thisNode()
try:
nt = n.knob("title").value()
for a in nuke.allNodes("NoOp"):
if a.knob("identifier").value() == "anchor" and a.knob("title").value() == nt:
n.setInput(0,a)
break
except:
nuke.message("Unable to reconnect.")
"""
wiredReconnect_code = """n = nuke.thisNode()
try:
n.setInput(0,nuke.toNode(n.knob("anchor").value()))
except:
nuke.message("Unable to reconnect.")
"""
#################################
### STAMP, ANCHOR, WIRED
#################################
def anchor(title = "", tags = "", input_node = "", node_type = "2D"):
''' Anchor Stamp '''
try:
n = nuke.createNode(StampClasses[node_type])
except:
n = nuke.createNode("NoOp")
n["name"].setValue(getAvailableName("Anchor"))
# Set default knob values
defaults = STAMP_DEFAULTS.copy()
defaults.update(ANCHOR_DEFAULTS)
for i,j in defaults.items():
try:
n.knob(i).setValue(j)
except:
pass
# Main knobs
anchorTab_knob = nuke.Tab_Knob('anchor_tab','Anchor Stamp')
identifier_knob = nuke.Text_Knob('identifier','identifier', 'anchor')
identifier_knob.setVisible(False)
title_knob = nuke.String_Knob('title','Title', title)
prev_title_knob = nuke.Text_Knob('prev_title','', title)
prev_title_knob.setVisible(False)
showing_knob = nuke.Int_Knob('showing','', 0)
showing_knob.setVisible(False)
tags_knob = nuke.String_Knob('tags','Tags', tags)
for k in [anchorTab_knob, identifier_knob, title_knob, prev_title_knob, showing_knob, tags_knob]:
n.addKnob(k)
# Buttons
wiredLabel_knob = nuke.Text_Knob('wiredLabel','Wired Stamps', "")
wiredLabel_knob.setFlag(nuke.STARTLINE)
buttonSelectStamps = nuke.PyScript_Knob("selectStamps","select","import stamps; stamps.wiredSelectSimilar(nuke.thisNode().name())")
buttonSelectStamps.setFlag(nuke.STARTLINE)
buttonReconnectStamps = nuke.PyScript_Knob("reconnectStamps","reconnect","import stamps; stamps.anchorReconnectWired()")
buttonZoomNext = nuke.PyScript_Knob("zoomNext","zoom next","import stamps; stamps.wiredZoomNext(nuke.thisNode().name())")
for k in [wiredLabel_knob, buttonSelectStamps, buttonReconnectStamps, buttonZoomNext]:
n.addKnob(k)
# Version (for future update checks)
line_knob = nuke.Text_Knob("line", "", "")
buttonHelp = nuke.PyScript_Knob("buttonHelp","Help","import stamps; stamps.showHelp()")
version_knob = nuke.Text_Knob('version','',version)
version_knob.setTooltip(VERSION_TOOLTIP)
version_knob.clearFlag(nuke.STARTLINE)
for k in [line_knob, buttonHelp, version_knob]:
n.addKnob(k)
n["help"].setValue(STAMPS_HELP)
return n
def wired(anchor):
''' Wired Stamp '''
global Stamps_LastCreated
Stamps_LastCreated = anchor.name()
node_type = nodeType(anchor)
try:
n = nuke.createNode(StampClasses[node_type])
except:
n = nuke.createNode("NoOp")
n["name"].setValue(getAvailableName("Stamp"))
# Set default knob values
defaults = STAMP_DEFAULTS.copy()
defaults.update(WIRED_DEFAULTS)
for i,j in defaults.items():
try:
n.knob(i).setValue(j)
except:
pass
n["onCreate"].setValue(wiredOnCreate_code)
# Main knobs
wiredTab_knob = nuke.Tab_Knob('wired_tab','Wired Stamp')
identifier_knob = nuke.Text_Knob('identifier','identifier', 'wired')
identifier_knob.setVisible(False)
toReconnect_knob = nuke.Boolean_Knob("toReconnect")
toReconnect_knob.setVisible(False)
title_knob = nuke.String_Knob('title','Title', anchor["title"].value())
prev_title_knob = nuke.Text_Knob('prev_title','', anchor["title"].value())
prev_title_knob.setVisible(False)
anchor_knob = nuke.String_Knob('anchor','Anchor', anchor.name())
for k in [wiredTab_knob, identifier_knob, toReconnect_knob, title_knob, prev_title_knob, anchor_knob]:
n.addKnob(k)
wiredTab_knob.setFlag(0) #Open the tab
# Buttons
anchorLabel_knob = nuke.Text_Knob('anchorLabel','Anchor Stamp', "")
anchorLabel_knob.setFlag(nuke.STARTLINE)
buttonShow = nuke.PyScript_Knob("show","show","import stamps; stamps.wiredShowAnchor()")
buttonShow.clearFlag(nuke.STARTLINE)
buttonZoomAnchor = nuke.PyScript_Knob("zoomAnchor","zoom","import stamps; stamps.wiredZoomAnchor()")
buttonReconnect = nuke.PyScript_Knob("reconnect","reconnect",wiredReconnect_code)
wiredLabel_knob = nuke.Text_Knob('wiredLabel','Wired Stamps', "")
wiredLabel_knob.setFlag(nuke.STARTLINE)
buttonSelectSimilar = nuke.PyScript_Knob("selectSimilar","select similar","import stamps; stamps.wiredSelectSimilar()")
buttonSelectSimilar.setFlag(nuke.STARTLINE)
buttonZoomNext = nuke.PyScript_Knob("zoomNext","zoom next","import stamps; stamps.wiredZoomNext()")
buttonReconnectSimilar = nuke.PyScript_Knob("reconnectSimilar","reconnect similar","import stamps; stamps.wiredReconnectSimilar()")
for k in [anchorLabel_knob, buttonShow, buttonZoomAnchor, buttonReconnect, wiredLabel_knob, buttonSelectSimilar, buttonZoomNext, buttonReconnectSimilar]:
n.addKnob(k)
# Version (for future update checks)
line_knob = nuke.Text_Knob("line", "", "")
buttonHelp = nuke.PyScript_Knob("buttonHelp","Help","import stamps; stamps.showHelp()")
version_knob = nuke.Text_Knob('version','',version)
version_knob.clearFlag(nuke.STARTLINE)
version_knob.setTooltip(VERSION_TOOLTIP)
for k in [line_knob, buttonHelp, version_knob]:
n.addKnob(k)
# Hide input while not messing possition
x,y = n.xpos(),n.ypos()
n.setInput(0,anchor)
n["hide_input"].setValue(True)
n["xpos"].setValue(x)
n["ypos"].setValue(y)
n["help"].setValue(STAMPS_HELP)
    return n
def getAvailableName(name = "Untitled"):
''' Returns a node name starting with @name followed by a number, that doesn't exist already '''
i = 1
while True:
available_name = name+str(i)
if not nuke.exists(available_name):
return available_name
i += 1
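# e.g. getAvailableName("Stamp") returns "Stamp1", or "Stamp2" if a node
# named "Stamp1" already exists in the script (illustrative).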
#################################
### ANCHOR SELECTION CLASS
#################################
class AnchorSelector(QtWidgets.QDialog):
'''
Panel to select an anchor, showing the different anchors on dropdowns based on their tags.
'''
def __init__(self):
super(AnchorSelector, self).__init__()
self.setWindowTitle("Select an anchor.")
self.initUI()
self.custom_anchors_lineEdit.setFocus()
def initUI(self):
# Find all anchors and get all tags
self.findAnchorsAndTags() # Generate a dictionary: {"Camera1":["Camera","New","Custom1"],"Read":["2D","New"]}
self.custom_chosen = False # If clicked OK on the custom lineedit
# Layouts
self.master_layout = QtWidgets.QVBoxLayout()
self.grid = QtWidgets.QGridLayout()
for tag_num, tag in enumerate(self._all_tags):
if tag == "":
continue
tag_label = QtWidgets.QLabel(tag+": ")
tag_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
anchors_dropdown = QtWidgets.QComboBox()
for i, cur_name in enumerate(self._all_anchors_names):
cur_title = self._all_anchors_titles[i]
##title_repeated = self._all_anchors_titles.count(cur_title) #TODO: repeated for current tag, not just anywhere (function already started)
title_repeated = self.titleRepeatedForTag(cur_title, tag)
if tag in self._anchors_and_tags[cur_name]:
if title_repeated:
anchors_dropdown.addItem("{0} ({1})".format(cur_title, cur_name), cur_name)
else:
anchors_dropdown.addItem(cur_title, cur_name)
ok_btn = QtWidgets.QPushButton("OK")
ok_btn.clicked.connect(partial(self.okPressed,dropdown=anchors_dropdown))
self.grid.addWidget(tag_label,tag_num,0)
self.grid.addWidget(anchors_dropdown,tag_num,1)
self.grid.addWidget(ok_btn,tag_num,2)
# ALL
tag_num = len(self._all_tags)
all_tag_label = QtWidgets.QLabel("<b>all</b>: ")
all_tag_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.all_anchors_dropdown = QtWidgets.QComboBox()
all_tag_texts = [] # List of all texts of the items (usually equals the "title", or "title (name)")
all_tag_names = [i for i in self._all_anchors_names] # List of all real anchor names of the items.
for i, cur_name in enumerate(self._all_anchors_names):
cur_title = self._all_anchors_titles[i]
title_repeated = self._all_anchors_titles.count(cur_title)
if title_repeated > 1:
all_tag_texts.append("{0} ({1})".format(cur_title, cur_name))
else:
all_tag_texts.append(cur_title)
self.all_tag_sorted = sorted(zip(all_tag_texts,all_tag_names), key=lambda pair: pair[0].lower())
for [text, name] in self.all_tag_sorted:
self.all_anchors_dropdown.addItem(text, name)
all_ok_btn = QtWidgets.QPushButton("OK")
all_ok_btn.clicked.connect(partial(self.okPressed,dropdown=self.all_anchors_dropdown))
self.grid.addWidget(all_tag_label,tag_num,0)
self.grid.addWidget(self.all_anchors_dropdown,tag_num,1)
self.grid.addWidget(all_ok_btn,tag_num,2)
tag_num += 1
# LineEdit with completer
custom_tag_label = QtWidgets.QLabel("<b>by title</b>: ")
custom_tag_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.custom_anchors_lineEdit = QtWidgets.QLineEdit()
self.custom_anchors_completer = QtWidgets.QCompleter([i for i,_ in self.all_tag_sorted], self)
self.custom_anchors_completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.custom_anchors_completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion)
self.custom_anchors_lineEdit.setCompleter(self.custom_anchors_completer)
custom_ok_btn = QtWidgets.QPushButton("OK")
custom_ok_btn.clicked.connect(partial(self.okCustomPressed,dropdown=self.custom_anchors_lineEdit))
self.grid.addWidget(custom_tag_label,tag_num,0)
self.grid.addWidget(self.custom_anchors_lineEdit,tag_num,1)
self.grid.addWidget(custom_ok_btn,tag_num,2)
# Layout shit
self.grid.setColumnStretch(1,1)
self.master_layout.addLayout(self.grid)
self.setLayout(self.master_layout)
def keyPressEvent(self, e):
selectorType = type(self.focusWidget()).__name__ #QComboBox or QLineEdit
        if e.key() == 16777220: # Qt key code for Return/Enter
if selectorType == "QLineEdit":
self.okCustomPressed(dropdown=self.focusWidget())
else:
self.okPressed(dropdown=self.focusWidget())
else:
return QtWidgets.QDialog.keyPressEvent(self, e)
def findAnchorsAndTags(self):
# Lets find anchors
self._all_anchors_titles = []
self._all_anchors_names = []
self._all_tags = set()
self._anchors_and_tags = {} # Name:tags. Not title.
for ni in nuke.allNodes("NoOp"):
if ni.knob("identifier") and ni.knob("identifier").value() == "anchor":
try:
title_value = ni["title"].value()
name_value = ni.name()
tags_value = ni["tags"].value()
tags = re.split(" *, *",tags_value.strip()) # Remove leading/trailing spaces and separate by commas (with or without spaces)
self._all_anchors_titles.append(title_value.strip())
self._all_anchors_names.append(name_value)
self._all_tags.update(tags)
self._anchors_and_tags[name_value] = set(tags)
except:
pass
return self._anchors_and_tags
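        # Illustrative shape of the returned mapping (anchor names -> tag sets):
        # {"Anchor1": {"2D", "plates"}, "Anchor2": {"Camera"}}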
def titleRepeatedForTag(self, title, tag):
        # True if this title appears more than once among anchors carrying this tag.
if self._all_anchors_titles.count(title) <= 1:
return False
# Get list of all names that have that tag, and list of related titles
names_with_tag = []
titles_with_tag = []
for i, name in enumerate(self._all_anchors_names):
if tag in self._anchors_and_tags[name]:
names_with_tag.append(name)
titles_with_tag.append(self._all_anchors_titles[i])
# Count titles repetition
title_repetitions = titles_with_tag.count(title)
return (title_repetitions > 1)
def okPressed(self, dropdown):
''' Runs after an ok button is pressed '''
dropdown_value = dropdown.currentText()
dropdown_index = dropdown.currentIndex()
dropdown_data = dropdown.itemData(dropdown_index)
try:
match_anchor = nuke.toNode(dropdown_data)
except:
pass
self.chosen_value = dropdown_value
self.chosen_anchor_name = dropdown_data
if match_anchor is not None:
self.chosen_anchor = match_anchor
self.accept()
else:
nuke.message("There was a problem selecting a valid anchor.")
def okCustomPressed(self, dropdown):
''' Runs after the custom ok button is pressed '''
global Stamps_LastCreated
written_value = dropdown.text() # This means it's been written down on the lineEdit
written_lower = written_value.lower().strip()
found_data = None
        if written_value == "" and 'Stamps_LastCreated' in globals():
found_data = Stamps_LastCreated
else:
for [text, name] in self.all_tag_sorted:
if written_lower in text.lower():
found_data = name
break
try:
match_anchor = nuke.toNode(found_data)
except:
nuke.message("Please write a valid name.")
return
self.chosen_value = written_value
self.chosen_anchor_name = found_data
if match_anchor is not None:
self.chosen_anchor = match_anchor
self.accept()
else:
nuke.message("There was a problem selecting a valid anchor.")
#################################
### FUNCTIONS
#################################
def getDefaultTitle(node = None):
    if node is None:
return False
title = str(node.name())
if "tx_edit_format" in title:
title = "editRef"
return title
# If cam
if "Camera" in node.Class() and not any([(i.knob("title") and i["title"].value() == "cam") for i in nuke.allNodes("NoOp")]):
title = "cam"
return title
# If filename
try:
file = node['file'].value()
if not (node.knob("read_from_file") and not node["read_from_file"].value()):
if file != "":
rawname = file.rpartition('/')[2].rpartition('.')[0]
if '.' in rawname:
rawname = rawname.rpartition('.')[0]
# 1: beauty?
m = re.match(r"([\w]+)_v[\d]+_beauty", rawname)
if m:
pre_version = m.groups()[0]
title = "_".join(pre_version.split("_")[3:])
return title
# 2: Other
rawname = str(re.split("_v[0-9]*_",rawname)[-1]).replace("_render","")
title = rawname
except:
pass
# If element
try:
        title = node['element'].value()
except:
pass
return title
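# Illustrative examples of the filename-based title parsing above:
#   "proj_seq_shot_layerA_v001_beauty.exr" -> "layerA" (beauty-regex branch)
#   "plate_v002_fire_render.exr"           -> "fire"   (generic branch)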
def stampCreateAnchor(node = None, extra_tags = [], no_default_tag = False):
'''
Create a stamp from any nuke node.
Returns: extra_tags list is success, None if cancelled
'''
ns = nuke.selectedNodes()
for n in ns:
n.setSelected(False)
if node is not None:
node.setSelected(True)
default_title = getDefaultTitle(node)
default_tag = nodeType(node)
node_type = nodeType(node)
window_title = "New Stamp: "+str(node.name())
else:
default_title = ""
default_tag = ""
node_type = "2D"
window_title = "New Stamp"
panel = nuke.Panel(window_title)
panel.setWidth(240)
panel.addSingleLineInput("Title:",default_title)
if no_default_tag:
default_tags = ", ".join(extra_tags)
else:
default_tags = default_tag + ", " + ", ".join(extra_tags)
panel.addSingleLineInput("Tags:",default_tags)
while True:
if panel.show():
anchor_title = panel.value("Title:").strip()
anchor_tags = panel.value("Tags:")
if not titleIsLegal(anchor_title):
nuke.message("Please set a valid title.")
continue
elif len(findAnchorsByTitle(anchor_title)):
if not nuke.ask("There is already a Stamp titled "+anchor_title+".\nDo you still want to use this title?"):
continue
na = anchor(title = anchor_title, tags = anchor_tags, input_node = node, node_type = node_type)
na.setYpos(na.ypos()+20)
stampCreateWired(na)
for n in ns:
n.setSelected(True)
node.setSelected(False)
extra_tags = re.split(" *, *",anchor_tags.strip())
extra_tags = [t for t in extra_tags if t != default_tag]
break
else:
break
return extra_tags
def stampSelectAnchor():
'''
Panel to select a stamp anchor (if there are any)
Returns: selected anchor node, or None.
'''
# 1.Get position where to make the child...
nodeForPos = nuke.createNode("NoOp")
childNodePos = [nodeForPos.xpos(),nodeForPos.ypos()]
nuke.delete(nodeForPos)
# 2.Choose the anchor...
anchorList = [n.name() for n in nuke.allNodes("NoOp") if n.knob("identifier") and n["identifier"].value() == "anchor"]
if not len(anchorList):
nuke.message("Please create some stamps first...")
return None
else:
# REDONE ON THE NEXT LINES
global select_anchor_panel
select_anchor_panel = AnchorSelector()
if select_anchor_panel.exec_(): # If user clicks OK
chosen_anchor = select_anchor_panel.chosen_anchor
#all_anchors_dropdown_list = select_anchor_panel.all_anchors_dropdown_list #TODO
if chosen_anchor:
return chosen_anchor
return None
def stampCreateWired(anchor = ""):
''' Create a wired stamp from an anchor node. '''
global Stamps_LastCreated
if anchor == "":
anchor = stampSelectAnchor()
if anchor == None:
return
nw = wired(anchor = anchor)
nw.setInput(0,anchor)
else:
ns = nuke.selectedNodes()
for n in ns:
n.setSelected(False)
dot = nuke.nodes.Dot()
dot.setXYpos(anchor.xpos(),anchor.ypos())
dot.setInput(0,anchor)
#anchor.setSelected(True)
nw = wired(anchor = anchor)
nw.setXYpos(anchor.xpos(),anchor.ypos()+56)
nuke.delete(dot)
for n in ns:
n.setSelected(True)
anchor.setSelected(False)
return nw
def stampDuplicateWired(wired = ""):
''' Create a duplicate of a wired stamp '''
ns = nuke.selectedNodes()
for n in ns:
n.setSelected(False)
wired.setSelected(True)
clipboard = QtWidgets.QApplication.clipboard()
ctext = clipboard.text()
nuke.nodeCopy("%clipboard%")
wired.setSelected(False)
new_wired = nuke.nodePaste("%clipboard%")
clipboard.setText(ctext)
new_wired.setXYpos(wired.xpos()-110,wired.ypos()+55)
try:
new_wired.setInput(0,wired.input(0))
except:
pass
for n in ns:
n.setSelected(True)
wired.setSelected(False)
def stampType(n = ""):
''' Returns the identifier value if it exists, otherwise False. '''
stamp_id_knob = n.knob("identifier") # Could be anchor, or wired
if isAnchor(n):
return "anchor"
elif isWired(n):
return "wired"
else:
return False
def nodeType(n=""):
'''Returns the node type: Camera, Deep, 3D, Particles, Image or False'''
try:
nodeClass = n.Class()
except:
return False
if nodeClass.startswith("Deep") and nodeClass not in DeepExceptionClasses:
return "Deep"
elif nodeClass.startswith("Particle") and nodeClass not in ParticleExceptionClasses:
return "Particle"
elif nodeClass in ["Camera", "Camera2"]:
return "Camera"
elif (n.knob("render_mode") and n.knob("display")) or nodeClass in ["Axis2"]:
return "3D"
else:
return "2D"
def totalAnchors(selection=""):
if selection == "":
selection = nuke.allNodes("NoOp")
num_anchors = len([a for a in selection if a in nuke.allNodes("NoOp") and a.knob("title") and a.knob("identifier") and a["identifier"].value() == "anchor"])
return num_anchors
def findAnchorsByTitle(title = "", selection=""):
''' Returns list of nodes '''
if title == "":
return None
if selection == "":
found_anchors = [a for a in nuke.allNodes("NoOp")
if a.knob("identifier") and a["identifier"].value() == "anchor"
and a.knob("title") and a.knob("title").value() == title]
else:
found_anchors = [a for a in selection if a in nuke.allNodes("NoOp")
and a.knob("identifier") and a["identifier"].value() == "anchor"
and a.knob("title") and a.knob("title").value() == title]
return found_anchors
def titleIsLegal(title=""):
'''
Convenience function to determine which stamp titles are legal.
titleIsLegal(title) -> True or False
'''
if not title or title == "":
return False
return True
def isAnchor(node=""):
''' True or False '''
return True if all(node.knob(i) for i in ["identifier","title"]) and node["identifier"].value() == "anchor" else False
def isWired(node=""):
''' True or False '''
return True if all(node.knob(i) for i in ["identifier","title"]) and node["identifier"].value() == "wired" else False
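# Both helpers test the "identifier" and "title" knobs that the anchor/wired
# stamp builders add to their nodes; ordinary nodes lack them and return False.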
#################################
### MAIN IMPLEMENTATION
#################################
def goStamp(ns=""):
''' Main stamp function, the one that is called when pressing the main shortcut. '''
if ns=="":
ns = nuke.selectedNodes()
if not len(ns):
if not totalAnchors(): # If no anchors on the script, create an anchor with no input
stampCreateAnchor(no_default_tag = True)
else:
stampCreateWired() # Selection panel in order to create a stamp
return
else:
# Warn if the selection is too big
if len(ns) > 10 and not nuke.ask("You have "+str(len(ns))+" nodes selected.\nDo you want make stamps for all of them?"):
return
# Main loop
extra_tags = []
for n in ns:
if isAnchor(n):
# Make a child to n
stampCreateWired(n)
elif isWired(n):
# Make a copy of n next to it
stampDuplicateWired(n)
else:
if n.knob("stamp_tags"):
stampCreateAnchor(n, extra_tags = n.knob("stamp_tags").value().split(","), no_default_tag = True)
else:
extra_tags = stampCreateAnchor(n, extra_tags = extra_tags) # Create anchor via anchor creation panel
if "Cryptomatte" in n.Class() and n.knob("matteOnly"):
n['matteOnly'].setValue(1)
stShortcut = "shift+F8" # TO CHANGE FOR F8
nuke.menu('Nuke').addCommand('Edit/Stamps/Make Stamp', 'reload(stamps);stamps.goStamp()', stShortcut) | [
"[email protected]"
] | |
98f694263451c4748f27f993918222f436b573c9 | a32ca3544bb5a587e5fd7aaa1c73ac0fb918f11e | /hypha/apply/funds/migrations/0102_add_projectapprovalform_to_fundbase_labbase.py | 882caed4c6e476984fe371711a1304045f239190 | [
"BSD-3-Clause"
] | permissive | jvasile/hypha | 87904bf514e7cf5af63c7146eaaa49d3611fd57f | b5ccad20dd3434f53a2b9d711fac510124c70a6e | refs/heads/main | 2023-07-08T04:10:08.233259 | 2023-06-20T05:35:29 | 2023-06-20T05:35:29 | 354,630,183 | 0 | 0 | BSD-3-Clause | 2021-04-04T19:32:38 | 2021-04-04T19:32:38 | null | UTF-8 | Python | false | false | 2,110 | py | # Generated by Django 3.2.15 on 2022-09-07 12:35
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('application_projects', '0055_alter_project_status_add_pafreviewersrole'),
('funds', '0101_auto_20220722_0844'),
]
operations = [
migrations.RemoveField(
model_name='applicationbase',
name='approval_form',
),
migrations.RemoveField(
model_name='labbase',
name='approval_form',
),
migrations.CreateModel(
name='LabBaseProjectApprovalForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('form', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='application_projects.projectapprovalform')),
('lab', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='approval_forms', to='funds.labbase')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ApplicationBaseProjectApprovalForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('application', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='approval_forms', to='funds.applicationbase')),
('form', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='application_projects.projectapprovalform')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
88bc688e498094503a5fbc932872ebc11eb90b56 | f104705c6bf8679b4990e1c7cbddd88b00bb4526 | /zwave_power_meter_socket_a.py | 0ae8087081cd7efcf67222cfbb1c29be26b50f18 | [
"MIT"
] | permissive | ContinuumBridge/zwave_power_meter_socket | 80e8e108a581333fa770653ba93d238f64781c1e | 78aa6d59bebf1993ec376010b6f8b83a898ed69c | refs/heads/master | 2021-01-10T21:00:39.936142 | 2015-10-13T08:20:42 | 2015-10-13T08:20:42 | 24,646,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,348 | py | #!/usr/bin/env python
# zwave_power_meter_socket.py
# Copyright (C) ContinuumBridge Limited, 2014 - 2015
# Written by Peter Claydon
#
ModuleName = "zwave_power_meter_socket"
INTERVAL = 60 # How often to request sensor values
CHECK_ALIVE_INTERVAL = 120
TIME_CUTOFF = 1800 # Data older than this is considered "stale"
import sys
import time
import os
from cbcommslib import CbAdaptor
from cbconfig import *
from twisted.internet import threads
from twisted.internet import reactor
class Adaptor(CbAdaptor):
def __init__(self, argv):
self.status = "ok"
self.state = "stopped"
self.connected = False
self.switchState = "unknown"
self.apps = {"energy": [],
"power": [],
"voltage": [],
"current": [],
"power_factor": [],
"binary_sensor": [],
"switch": [],
"connected": []}
self.lastEnergyTime = 0
self.lastPowerTime = 0
self.lastVoltageTime = 0
self.lastCurrentTime = 0
self.lastPowerFactorTime = 0
self.lastBinaryTime = 0
# super's __init__ must be called:
#super(Adaptor, self).__init__(argv)
CbAdaptor.__init__(self, argv)
def setState(self, action):
# error is only ever set from the running state, so set back to running if error is cleared
if action == "error":
self.state == "error"
elif action == "clear_error":
self.state = "running"
msg = {"id": self.id,
"status": "state",
"state": self.state}
self.sendManagerMessage(msg)
def sendCharacteristic(self, characteristic, data, timeStamp):
msg = {"id": self.id,
"content": "characteristic",
"characteristic": characteristic,
"data": data,
"timeStamp": timeStamp}
for a in self.apps[characteristic]:
reactor.callFromThread(self.sendMessage, msg, a)
def onStop(self):
# Mainly caters for situation where adaptor is told to stop while it is starting
pass
def pollSensors(self):
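        # Command class 50 (0x32) is the Z-Wave Meter command class; this Get
        # (value left empty) polls the device for fresh meter readings.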
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "50",
"action": "Get",
"value": ""
}
self.sendZwaveMessage(cmd)
reactor.callLater(INTERVAL, self.pollSensors)
def checkConnected(self):
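        # Treat the device as disconnected when no update has arrived within
        # the keep-alive window plus a 60-second grace period.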
if time.time() - self.updateTime > CHECK_ALIVE_INTERVAL + 60:
self.connected = False
else:
self.connected = True
self.sendCharacteristic("connected", self.connected, time.time())
reactor.callLater(INTERVAL, self.checkConnected)
def onZwaveMessage(self, message):
#self.cbLog("debug", "onZwaveMessage, message: " + str(message))
if message["content"] == "init":
self.updateTime = 0
self.lastUpdateTime = time.time()
# Energy - KWh
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "50",
"value": "0"
}
self.sendZwaveMessage(cmd)
# Power - W
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "50",
"value": "2"
}
self.sendZwaveMessage(cmd)
# Voltage
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "50",
"value": "4"
}
self.sendZwaveMessage(cmd)
# Current - A
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "50",
"value": "5"
}
self.sendZwaveMessage(cmd)
# Power Factor
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "50",
"value": "6"
}
self.sendZwaveMessage(cmd)
# Switch state
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "37",
"value": "level"
}
self.sendZwaveMessage(cmd)
# wakeup
cmd = {"id": self.id,
"request": "get",
"address": self.addr,
"instance": "0",
"commandClass": "132",
"value": "lastWakeup"
}
self.sendZwaveMessage(cmd)
reactor.callLater(30, self.pollSensors)
reactor.callLater(INTERVAL, self.checkConnected)
elif message["content"] == "data":
            try:
if message["commandClass"] == "50":
if message["value"] == "0":
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastEnergyTime and time.time() - updateTime < TIME_CUTOFF:
energy = message["data"]["val"]["value"]
self.sendCharacteristic("energy", energy, time.time())
self.lastEnergyTime = updateTime
elif message["value"] == "2":
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastPowerTime and time.time() - updateTime < TIME_CUTOFF:
power = message["data"]["val"]["value"]
if power > 4000:
self.cbLog("debug", "onZwaveMessage, power " + str(power) + " set to 0")
self.cbLog("debug", "onZwaveMessage, power message was: " + str(message))
power = 0
elif power < -1:
self.cbLog("debug", "onZwaveMessage, power " + str(power) + " set to -1")
self.cbLog("debug", "onZwaveMessage, power message was: " + str(message))
power = -1
self.sendCharacteristic("power", power, time.time())
self.lastPowerTime = updateTime
elif message["value"] == "4":
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastVoltageTime and time.time() - updateTime < TIME_CUTOFF:
voltage = message["data"]["val"]["value"]
self.sendCharacteristic("voltage", voltage, time.time())
self.lastVoltageTime = updateTime
elif message["value"] == "5":
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastCurrentTime and time.time() - updateTime < TIME_CUTOFF:
current = message["data"]["val"]["value"]
self.sendCharacteristic("current", current, time.time())
self.lastCurrentTime = updateTime
elif message["value"] == "6":
updateTime = message["data"]["val"]["updateTime"]
if updateTime != self.lastPowerFactorTime and time.time() - updateTime < TIME_CUTOFF:
power_factor = message["data"]["val"]["value"]
self.sendCharacteristic("power_factor", power_factor, time.time())
self.lastPowerFactorTime = updateTime
elif message["commandClass"] == "37":
updateTime = message["data"]["updateTime"]
if updateTime != self.lastBinaryTime and time.time() - updateTime < TIME_CUTOFF:
if message["value"] == "level":
if message["data"]["value"]:
b = "on"
else:
b = "off"
self.switchState = b
self.sendCharacteristic("binary_sensor", b, time.time())
self.lastBinaryTime = updateTime
self.updateTime = message["data"]["updateTime"]
            except Exception:
                self.cbLog("warning", "onZwaveMessage, unexpected message: " + str(message))
def onOff(self, s):
if s == "on":
return "255"
else:
return "0"
def switch(self, onOrOff):
cmd = {"id": self.id,
"request": "post",
"address": self.addr,
"instance": "0",
"commandClass": "0x25",
"action": "Set",
"value": self.onOff(onOrOff)
}
self.sendZwaveMessage(cmd)
def onAppInit(self, message):
self.cbLog("debug", "onAppInit, req = " + str(message))
resp = {"name": self.name,
"id": self.id,
"status": "ok",
"service": [{"characteristic": "energy", "interval": INTERVAL, "type": "switch"},
{"characteristic": "power", "interval": 0, "type": "switch"},
{"characteristic": "voltage", "interval": INTERVAL, "type": "switch"},
{"characteristic": "current", "interval": INTERVAL, "type": "switch"},
{"characteristic": "power_factor", "interval": INTERVAL, "type": "switch"},
{"characteristic": "connected", "interval": INTERVAL, "type": "switch"},
{"characteristic": "binary_sensor", "interval": 0, "type": "switch"},
{"characteristic": "switch", "interval": 0}],
"content": "service"}
self.sendMessage(resp, message["id"])
self.setState("running")
def onAppRequest(self, message):
#self.cbLog("debug", "onAppRequest, message: " + str(message))
# Switch off anything that already exists for this app
for a in self.apps:
if message["id"] in self.apps[a]:
self.apps[a].remove(message["id"])
# Now update details based on the message
for f in message["service"]:
if message["id"] not in self.apps[f["characteristic"]]:
self.apps[f["characteristic"]].append(message["id"])
self.cbLog("debug", "onAppRequest, apps: " + str(self.apps))
def onAppCommand(self, message):
self.cbLog("debug", "onAppCommand, req: " + str(message))
if "data" not in message:
self.cbLog("warning", "app message without data: " + str(message))
elif message["data"] != "on" and message["data"] != "off":
self.cbLog("warning", "This is a sensor. Message not understood: " + str(message))
else:
if message["data"] != self.switchState:
self.switch(message["data"])
def onConfigureMessage(self, config):
"""Config is based on what apps are to be connected.
May be called again if there is a new configuration, which
could be because a new app has been added.
"""
self.cbLog("debug", "onConfigureMessage, config: " + str(config))
self.setState("starting")
if __name__ == '__main__':
Adaptor(sys.argv)
| [
"[email protected]"
] | |
82b262e6528870bd41e0231a81c5d9242080a628 | e567b06c895054d88758366e769de77ee693a568 | /SciComputing with Python/lesson_05-15/asteroids.py | 19db8c21126f49a799c2586684f0c759519408df | [
"MIT"
] | permissive | evtodorov/aerospace | 68986b4ae772e1de8cc7982b4f8497b6423ac8cc | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | refs/heads/main | 2023-01-19T17:52:29.520340 | 2020-11-29T13:23:31 | 2020-11-29T13:23:31 | 315,653,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 15 14:13:36 2014
@author: etodorov
"""
import pygame as pg
import numpy as np
#Loading resources
pg.init()
bg = pg.image.load("background.jpg")
scrWidth, scrHeight = bg.get_width(), bg.get_height()
scr = pg.display.set_mode((scrWidth, scrHeight))
scr.blit(bg,(0,0))
ship = pg.image.load("ship.gif")
shipRect = ship.get_rect()
wShip = ship.get_width()
hShip = ship.get_height()
ast = pg.image.load("ast1.gif")
astRect = ast.get_rect()
wAst = ast.get_width()
hAst = ast.get_height()
def detectCollision(x1,y1,w1,h1,x2,y2,w2,h2):
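    # Simplified AABB test: returns True when either top corner of rect 1
    # ((x1, y1) or (x1 + w1, y1)) lies inside rect 2; rect 1's bottom edge is ignored.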
    if x2 + w2 >= x1 >= x2 and y2 + h2 >= y1 >= y2: return True
    elif x2 + w2 >= x1 + w1 >= x2 and y2 + h2 >= y1 >= y2: return True
    else: return False
print "Resources Loaded. Initializing game."
#initialize game loop
xShip = scrWidth/2
v = 100 #px/s
vAst = 400 #px/s
xAst = np.array([])
yAst = np.array([])
totAst = 0
tAst = .3 # spawn threshold: minimum seconds between new asteroids
t0Ast = 0
running = True
t0 = pg.time.get_ticks()*.001
while running:
pg.event.pump()
#get time
t = pg.time.get_ticks()*.001
dt = t-t0
t0 = t
#events
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
keys = pg.key.get_pressed()
if keys[pg.K_ESCAPE]:
running = False
if keys[pg.K_LEFT]:
xShip += -v*dt
if keys[pg.K_RIGHT]:
xShip += v*dt
#engine
yShip = scrHeight - ship.get_height()
dtAst = pg.time.get_ticks()*.001 - t0Ast
if(dtAst>=tAst):
t0Ast = pg.time.get_ticks()*.001
xAst = np.append(xAst,np.random.random_integers(0,scrWidth-ship.get_width()))
yAst = np.append(yAst,ship.get_height()+1.)
totAst += 1
yAst += vAst*dt
xAst = xAst[yAst<scrHeight]
yAst = yAst[yAst<scrHeight]
#draw
scr.blit(bg,(0,0))
for x,y in zip(xAst,yAst):
scr.blit(ast,(int(x),int(y)))
if(detectCollision(xShip,yShip,wShip,hShip,x,y,wAst,hAst)):
running = False
scr.blit(ship,(int(xShip),int(yShip)))
pg.display.flip()
score = totAst - len(xAst)
print "Your score is", score
pg.quit() | [
"[email protected]"
] | |
d0c0ada98ca93fd50965148c85fac09b8295da92 | 4a28995530e5766675869266704fa3b59e6b9908 | /bravo/tests/infini/test_packets.py | f50d0439be585dfeb0bfd3b52df85d8a54feeb0c | [
"MIT"
] | permissive | CyberFlameGO/bravo | e732dc87309e98e52fb02195d542f3105486b9c8 | 7be5d792871a8447499911fa1502c6a7c1437dc3 | refs/heads/master | 2021-12-04T02:04:00.138404 | 2014-09-09T04:43:18 | 2014-09-09T04:43:18 | 415,181,334 | 0 | 0 | NOASSERTION | 2023-08-16T22:12:36 | 2021-10-09T02:34:28 | null | UTF-8 | Python | false | false | 989 | py | from twisted.trial import unittest
from bravo.infini.packets import packets, parse_packets
class TestInfiniPacketParsing(unittest.TestCase):
def test_ping(self):
raw = "\x00\x01\x00\x00\x00\x06\x00\x10\x00\x4d\x3c\x7d\x7c"
parsed = packets[0].parse(raw)
self.assertEqual(parsed.header.identifier, 0x00)
self.assertEqual(parsed.header.flags, 0x01)
self.assertEqual(parsed.payload.uid, 16)
self.assertEqual(parsed.payload.timestamp, 5061757)
def test_disconnect(self):
raw = "\xff\x00\x00\x00\x00\x19\x00\x17Invalid client version!"
parsed = packets[255].parse(raw)
self.assertEqual(parsed.header.identifier, 0xff)
self.assertEqual(parsed.payload.explanation,
"Invalid client version!")
class TestInfiniPacketStream(unittest.TestCase):
def test_ping_stream(self):
raw = "\x00\x01\x00\x00\x00\x06\x00\x10\x00\x4d\x3c\x7d\x7c"
        parsed, leftovers = parse_packets(raw)  # renamed: ``packets`` would shadow the imported table
        # One complete ping packet in; assuming parse_packets returns
        # (parsed packets, unconsumed bytes), nothing should be left over.
        self.assertEqual(len(parsed), 1)
        self.assertEqual(leftovers, "")
| [
"[email protected]"
] | |
f6ed307305ac991a2a922af645d6169197b603d8 | d2fedd2085cbdbd5e54228abf0633001989787cc | /36.COUNT PALINDROMIC SUBSEQUENCE IN A RANGE.py | 55ab57ce87bc84e1d63299856106adef6cff022f | [] | no_license | KumarAmbuj/dynamic-programing-intermediate | 228d25205d370ebc329eaf6ffbcfbc2853b18abe | 4b40f322f57762e0cf264fb2024ae56d1fa3243b | refs/heads/main | 2023-02-23T20:42:26.865855 | 2021-01-23T11:10:44 | 2021-01-23T11:10:44 | 332,188,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | def findpalindromicsequence(s,l,r):
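    # dp[i][j] is True iff the substring s[i..j] is a palindrome. The table is
    # filled in order of increasing gap g = j - i, then the palindromic
    # substrings whose endpoints both lie inside [l, r] are counted (two
    # equivalent counting loops are shown below). Despite the file name,
    # this counts palindromic substrings in the range, not subsequences.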
n=len(s)
dp=[[False for j in range(len(s))] for i in range(len(s))]
for g in range(len(s)):
i=0
for j in range(g,len(s)):
if g==0:
dp[i][j]=True
elif g==1:
if s[i]==s[j]:
dp[i][j]=True
else:
dp[i][j]=False
else:
if s[i]==s[j] and dp[i+1][j-1]==True:
dp[i][j]=True
else:
dp[i][j]=False
i+=1
count=0
for g in range(r-l+1):
i=l
j=l+g
while(j<r+1):
if dp[i][j]==True:
count+=1
i+=1
j+=1
#method 2
count1=0
for i in range(l,r+1):
for j in range(i,r+1):
if dp[i][j]==True:
count1+=1
print(count1)
print(count)
s='abccbc'
findpalindromicsequence(s,3,5)
s = "xyaabax"
findpalindromicsequence(s,3,5)
s = "xyaabax"
findpalindromicsequence(s,2,3)
| [
"[email protected]"
] | |
eafec4733775b023c1b2b9de75a58d4fa681f74c | 6c53847f9956edc8f31b23c24b1786d1b9789f03 | /gps-server-master/setup.py | d313f9422a4ec8e4b70cb05bc8e2ad640ac49b23 | [] | no_license | kunal1510010/Quikmile | c64a9264798cf834aaf32ecb4653b9b81dab0dd5 | 244d2749eb8438ce858de51c088a52ca3de58992 | refs/heads/master | 2022-12-12T08:03:50.628252 | 2018-11-29T15:24:46 | 2018-11-29T15:24:46 | 159,226,383 | 0 | 0 | null | 2022-12-08T02:27:44 | 2018-11-26T20:10:20 | Python | UTF-8 | Python | false | false | 580 | py | from pip.download import PipSession
from pip.req import parse_requirements
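# NOTE: pip.download and pip.req are pip internals that were removed in pip 10
# (moved under pip._internal), so this setup script assumes pip < 10.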
from setuptools import setup, find_packages
install_reqs = parse_requirements("./requirements.txt", session=PipSession())
install_requires = [str(ir.req).split('==')[0] for ir in install_reqs]
setup(
name='gps-server',
packages=find_packages(exclude=['examples', 'tests']),
version='1.0',
description='GPS Server and Kafka Producer',
author='Abhishek Verma, Chirag',
author_email='[email protected]',
package_data={'': ['*.json']},
install_requires=install_requires
)
| [
"[email protected]"
] | |
49869b77fef23d2b4bef119a349bac2b3c4b8258 | b315056385e631e4b5a250d3c7e1c0ae32c479f0 | /illumiprocessor/__init__.py | bf0b82f71bf8a5dda73842deac3484f6c1ff42aa | [] | no_license | mkweskin/illumiprocessor | e7e20cb0d24a1d7cc89e5519e08c3e435c32a070 | 4e37e037c7b9db8c89bbafe21ca9b59ed7630211 | refs/heads/master | 2021-09-15T16:34:00.846218 | 2018-06-06T19:35:43 | 2018-06-06T19:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
__version__ = "2.0.9"
| [
"[email protected]"
] | |
89e3c5c2c55d178ad61eb93db58ca05320bd60b7 | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /devel/.private/play_motion_msgs/lib/python2.7/dist-packages/play_motion_msgs/msg/_PlayMotionActionGoal.py | a5370afc34256859512ca7341d9f7c63bf486bcb | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,736 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from play_motion_msgs/PlayMotionActionGoal.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import actionlib_msgs.msg
import genpy
import play_motion_msgs.msg
import std_msgs.msg
class PlayMotionActionGoal(genpy.Message):
_md5sum = "ce0017b9347897304a66c70a78c9408b"
_type = "play_motion_msgs/PlayMotionActionGoal"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalID goal_id
PlayMotionGoal goal
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: play_motion_msgs/PlayMotionGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
string motion_name
bool skip_planning
int32 priority
"""
__slots__ = ['header','goal_id','goal']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalID','play_motion_msgs/PlayMotionGoal']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,goal_id,goal
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PlayMotionActionGoal, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = play_motion_msgs.msg.PlayMotionGoal()
else:
self.header = std_msgs.msg.Header()
self.goal_id = actionlib_msgs.msg.GoalID()
self.goal = play_motion_msgs.msg.PlayMotionGoal()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
_x = self.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.goal.motion_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_Bi().pack(_x.goal.skip_planning, _x.goal.priority))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = play_motion_msgs.msg.PlayMotionGoal()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.goal_id.id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal.motion_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.goal.motion_name = str[start:end]
_x = self
start = end
end += 5
(_x.goal.skip_planning, _x.goal.priority,) = _get_struct_Bi().unpack(str[start:end])
self.goal.skip_planning = bool(self.goal.skip_planning)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
_x = self.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.goal.motion_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_Bi().pack(_x.goal.skip_planning, _x.goal.priority))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.goal_id is None:
self.goal_id = actionlib_msgs.msg.GoalID()
if self.goal is None:
self.goal = play_motion_msgs.msg.PlayMotionGoal()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal_id.id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.goal_id.id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.goal.motion_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.goal.motion_name = str[start:end]
_x = self
start = end
end += 5
(_x.goal.skip_planning, _x.goal.priority,) = _get_struct_Bi().unpack(str[start:end])
self.goal.skip_planning = bool(self.goal.skip_planning)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_Bi = None
def _get_struct_Bi():
global _struct_Bi
if _struct_Bi is None:
_struct_Bi = struct.Struct("<Bi")
return _struct_Bi
| [
"[email protected]"
] | |
2131681365efc05ba1105dd453257b1ea60ea71e | 82a9077bcb5a90d88e0a8be7f8627af4f0844434 | /google-cloud-sdk/lib/tests/unit/surface/compute/org_security_policies/associations/create_associations_test.py | d67314583ec45ddf3221e10dda33832b9c249e68 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | piotradamczyk5/gcloud_cli | 1ae2553595e569fad6ce84af62b91a7ee5489017 | 384ece11040caadcd64d51da74e0b8491dd22ca3 | refs/heads/master | 2023-01-01T23:00:27.858583 | 2020-10-21T04:21:23 | 2020-10-21T04:21:23 | 290,238,061 | 0 | 0 | null | 2020-10-19T16:43:36 | 2020-08-25T14:31:00 | Python | UTF-8 | Python | false | false | 4,684 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the organization security policy associations create subcommand."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py.testing import mock
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import base as calliope_base
from tests.lib import cli_test_base
from tests.lib import sdk_test_base
from tests.lib import test_case
from tests.lib.api_lib.util import waiter as waiter_test_base
class OrgSecurityPoliciesAssociationsCreateBetaTest(sdk_test_base.WithFakeAuth,
cli_test_base.CliTestBase,
waiter_test_base.Base):
def SetUp(self):
self.track = calliope_base.ReleaseTrack.BETA
self.api_version = 'beta'
self.messages = core_apis.GetMessagesModule('compute', self.api_version)
self.mock_client = mock.Client(
core_apis.GetClientClass('compute', self.api_version),
real_client=core_apis.GetClientInstance(
'compute', self.api_version, no_http=True))
self.mock_client.Mock()
self.addCleanup(self.mock_client.Unmock)
def CreateTestOrgSecurityPolicyMessage(self, **kwargs):
return self.messages.SecurityPolicy(
description='test-description',
displayName='test-sp',
type=self.messages.SecurityPolicy.TypeValueValuesEnum.FIREWALL)
def _GetOperationMessage(self, operation_name, status, resource_uri=None):
return self.messages.Operation(
name=operation_name,
status=status,
selfLink='https://compute.googleapis.com/compute/{0}/locations/'
'global/operations/{1}'.format(self.api_version, operation_name),
targetLink=resource_uri)
def CreateTestOrgSecurityPolicyAssociationMessage(self, **kwargs):
return self.messages.SecurityPolicyAssociation(
attachmentId='organizations/12345', name='association-name')
def testAssociationsCreateOrgSecurityPolicyWithOrganization(self):
self.mock_client.organizationSecurityPolicies.AddAssociation.Expect(
self.messages.ComputeOrganizationSecurityPoliciesAddAssociationRequest(
securityPolicy='12345678910',
securityPolicyAssociation=self
.CreateTestOrgSecurityPolicyAssociationMessage(),
replaceExistingAssociation=False),
self._GetOperationMessage(
operation_name='org-12345-operation-myop',
status=self.messages.Operation.StatusValueValuesEnum.PENDING))
self.mock_client.globalOrganizationOperations.Get.Expect(
self.messages.ComputeGlobalOrganizationOperationsGetRequest(
parentId='organizations/12345',
operation='org-12345-operation-myop'),
self._GetOperationMessage(
operation_name='org-12345-operation-myop',
status=self.messages.Operation.StatusValueValuesEnum.DONE,
resource_uri='https://compute.googleapis.com/compute/{0}/'
'locations/global/securityPolicies/{1}'.format(
self.api_version, '12345678910')))
self.Run('compute org-security-policies associations create '
'--name association-name '
'--organization 12345 '
'--security-policy 12345678910')
self.AssertOutputEquals('')
self.AssertErrContains(
'Add association of the organization Security Policy.')
class OrgSecurityPoliciesAssociationsCreateAlphaTest(
OrgSecurityPoliciesAssociationsCreateBetaTest):
def SetUp(self):
self.track = calliope_base.ReleaseTrack.ALPHA
self.api_version = 'alpha'
self.messages = core_apis.GetMessagesModule('compute', self.api_version)
self.mock_client = mock.Client(
core_apis.GetClientClass('compute', self.api_version),
real_client=core_apis.GetClientInstance(
'compute', self.api_version, no_http=True))
self.mock_client.Mock()
self.addCleanup(self.mock_client.Unmock)
if __name__ == '__main__':
test_case.main()
| [
"[email protected]"
] | |
526da7b66b7f3a0e2d31d78cfa9e1e610ce48f3b | 15e6385746ccf4b8eb6c6e302aca236021bb8781 | /BinaryTree and Divide and Conquer/le257_binaryTreePaths.py | 04f6722ba0b8cd3e57bdecab9d2bbb33d805064f | [] | no_license | akb46mayu/Data-Structures-and-Algorithms | 11c4bbddc9b4d286e1aeaa9481eb6a620cd54746 | de98494e14fff3e2a468da681c48d60b4d1445a1 | refs/heads/master | 2021-01-12T09:51:32.618362 | 2018-05-16T16:37:18 | 2018-05-16T16:37:18 | 76,279,268 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | """
Given a binary tree, return all root-to-leaf paths.
For example, given the following binary tree:
1
/ \
2 3
\
5
All root-to-leaf paths are:
["1->2->5", "1->3"]
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
if not root:
return []
path = []
paths = []
self.btreeHelper(root, path, paths)
return paths
def btreeHelper(self, root, path, paths):
if not root:
return
path.append(str(root.val))
if not root.left and not root.right:
paths.append('->'.join(path))
            path.pop()  # backtrack: remove the leaf so the shared path is restored for the caller
return
        self.btreeHelper(root.left, path, paths)
self.btreeHelper(root.right, path, paths)
        path.pop()  # backtrack: remove this node after both subtrees have been explored
| [
"[email protected]"
] | |
828d680c2d1538b21c4b8b9195efc0e26cac2b28 | de64b143a346585f51590bd674e8d13bbc672386 | /algorithm/2023/0321_40_Combination_Sum_II/myunghak.py | 8d64f62cc7c6d9a896ef5565521251d1cff3a514 | [] | no_license | ai-kmu/etc | 304ec20f59e4026025abdcbcae21863c80630dcb | 9c29941e19b7dd2a2037b110dd6e16690e9a0cc2 | refs/heads/master | 2023-08-21T16:30:31.149956 | 2023-08-21T16:26:19 | 2023-08-21T16:26:19 | 199,843,899 | 3 | 24 | null | 2023-05-31T09:56:59 | 2019-07-31T11:36:16 | Jupyter Notebook | UTF-8 | Python | false | false | 796 | py | # 답보고 풀었습니다. 풀이 안해주셔도 되요
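# (Korean comment above: "I solved this by looking at the answer, so no need to go over the solution.")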
from collections import deque
from typing import List  # needed for the List[...] hints outside LeetCode's environment
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
def backtrack(start, target):
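            # With candidates sorted, target < candidates[i] lets us prune the
            # rest of the loop, and `i <= start or candidates[i] != candidates[i-1]`
            # skips repeated values at the same depth so each combination
            # appears only once.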
if target == 0:
result.append(list(dq))
return
for i in range(start, len(candidates)):
if target < candidates[i]:
break
elif i <= start or candidates[i] != candidates[i-1]:
dq.append(candidates[i])
backtrack(i+1, target-candidates[i])
dq.pop()
candidates.sort()
dq = deque()
result = []
backtrack(0, target)
return result
| [
"[email protected]"
] | |
09de35b9c9569a3d6a18907aa2a1c17e263a55cb | 32f34baaa620d6ec945a08c842123a8872c7a2a5 | /blog/admin.py | 31122591ac74349107d324ee346326f92261f9a7 | [] | no_license | devArist/bankapp_kivy | e3590723643d30746c3ffce9d70868b3524b6e30 | 701db3a856722a618fbcfa8fbd0b606415265ca9 | refs/heads/main | 2023-07-18T06:38:00.148288 | 2021-08-27T14:05:47 | 2021-08-27T14:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.contrib import admin
from blog import models
from django.utils.safestring import mark_safe
# Register your models here.
@admin.register(models.Blog)
class BlogAdmin(admin.ModelAdmin):
list_display = ('title', 'imageblog')
search_fields = ('title',)
def imageblog(self, obj):
return mark_safe(f"<img src={obj.image.url} style='width:150px; height:90px' ") | [
"[email protected]"
] | |
7316502afc84f5131380f552c1cb161afa993ec6 | dae2e18c9076d91680ef5e9ee9ad7aa155d5071d | /dm_control/locomotion/walkers/rodent.py | b9d29b106f66945117f6a5596d42b2300a243d74 | [
"Apache-2.0"
] | permissive | wangsd01/dm_control | 5c233770524ef2fbb4c358d619934e2d76b990bf | d6f9cb4e4a616d1e1d3bd8944bc89541434f1d49 | refs/heads/master | 2023-08-16T16:08:28.134204 | 2023-07-20T11:24:16 | 2023-07-20T11:25:09 | 293,401,717 | 0 | 0 | Apache-2.0 | 2020-09-07T02:23:31 | 2020-09-07T02:23:30 | null | UTF-8 | Python | false | false | 12,164 | py | # Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Rodent walker."""
import os
import re
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
from dm_control.locomotion.walkers import base
from dm_control.locomotion.walkers import legacy_base
from dm_control.mujoco import wrapper as mj_wrapper
import numpy as np
_XML_PATH = os.path.join(os.path.dirname(__file__),
'assets/rodent.xml')
_RAT_MOCAP_JOINTS = [
'vertebra_1_extend', 'vertebra_2_bend', 'vertebra_3_twist',
'vertebra_4_extend', 'vertebra_5_bend', 'vertebra_6_twist',
'hip_L_supinate', 'hip_L_abduct', 'hip_L_extend', 'knee_L', 'ankle_L',
'toe_L', 'hip_R_supinate', 'hip_R_abduct', 'hip_R_extend', 'knee_R',
'ankle_R', 'toe_R', 'vertebra_C1_extend', 'vertebra_C1_bend',
'vertebra_C2_extend', 'vertebra_C2_bend', 'vertebra_C3_extend',
'vertebra_C3_bend', 'vertebra_C4_extend', 'vertebra_C4_bend',
'vertebra_C5_extend', 'vertebra_C5_bend', 'vertebra_C6_extend',
'vertebra_C6_bend', 'vertebra_C7_extend', 'vertebra_C9_bend',
'vertebra_C11_extend', 'vertebra_C13_bend', 'vertebra_C15_extend',
'vertebra_C17_bend', 'vertebra_C19_extend', 'vertebra_C21_bend',
'vertebra_C23_extend', 'vertebra_C25_bend', 'vertebra_C27_extend',
'vertebra_C29_bend', 'vertebra_cervical_5_extend',
'vertebra_cervical_4_bend', 'vertebra_cervical_3_twist',
'vertebra_cervical_2_extend', 'vertebra_cervical_1_bend',
'vertebra_axis_twist', 'vertebra_atlant_extend', 'atlas', 'mandible',
'scapula_L_supinate', 'scapula_L_abduct', 'scapula_L_extend', 'shoulder_L',
'shoulder_sup_L', 'elbow_L', 'wrist_L', 'finger_L', 'scapula_R_supinate',
'scapula_R_abduct', 'scapula_R_extend', 'shoulder_R', 'shoulder_sup_R',
'elbow_R', 'wrist_R', 'finger_R'
]
_UPRIGHT_POS = (0.0, 0.0, 0.0)
_UPRIGHT_QUAT = (1., 0., 0., 0.)
_TORQUE_THRESHOLD = 60
class Rat(legacy_base.Walker):
"""A position-controlled rat with control range scaled to [-1, 1]."""
def _build(self,
params=None,
name='walker',
torque_actuators=False,
foot_mods=False,
initializer=None):
self.params = params
self._mjcf_root = mjcf.from_path(_XML_PATH)
if name:
self._mjcf_root.model = name
self.body_sites = []
super()._build(initializer=initializer)
# modify actuators
if torque_actuators:
for actuator in self._mjcf_root.find_all('actuator'):
actuator.gainprm = [actuator.forcerange[1]]
del actuator.biastype
del actuator.biasprm
# modify ankle and toe limits
if foot_mods:
self._mjcf_root.find('default', 'ankle').joint.range = [-0.1, 2.]
self._mjcf_root.find('default', 'toe').joint.range = [-0.7, 0.87]
@property
def upright_pose(self):
"""Reset pose to upright position."""
return base.WalkerPose(xpos=_UPRIGHT_POS, xquat=_UPRIGHT_QUAT)
@property
def mjcf_model(self):
"""Return the model root."""
return self._mjcf_root
@composer.cached_property
def actuators(self):
"""Return all actuators."""
return tuple(self._mjcf_root.find_all('actuator'))
@composer.cached_property
def root_body(self):
"""Return the body."""
return self._mjcf_root.find('body', 'torso')
@composer.cached_property
def pelvis_body(self):
"""Return the body."""
return self._mjcf_root.find('body', 'pelvis')
@composer.cached_property
def head(self):
"""Return the head."""
return self._mjcf_root.find('body', 'skull')
@composer.cached_property
def left_arm_root(self):
"""Return the left arm."""
return self._mjcf_root.find('body', 'scapula_L')
@composer.cached_property
def right_arm_root(self):
"""Return the right arm."""
return self._mjcf_root.find('body', 'scapula_R')
@composer.cached_property
def ground_contact_geoms(self):
"""Return ground contact geoms."""
return tuple(
self._mjcf_root.find('body', 'foot_L').find_all('geom') +
self._mjcf_root.find('body', 'foot_R').find_all('geom') +
self._mjcf_root.find('body', 'hand_L').find_all('geom') +
self._mjcf_root.find('body', 'hand_R').find_all('geom') +
self._mjcf_root.find('body', 'vertebra_C1').find_all('geom')
)
@composer.cached_property
def standing_height(self):
"""Return standing height."""
return self.params['_STAND_HEIGHT']
@composer.cached_property
def end_effectors(self):
"""Return end effectors."""
return (self._mjcf_root.find('body', 'lower_arm_R'),
self._mjcf_root.find('body', 'lower_arm_L'),
self._mjcf_root.find('body', 'foot_R'),
self._mjcf_root.find('body', 'foot_L'))
@composer.cached_property
def observable_joints(self):
"""Return observable joints."""
return tuple(actuator.joint
for actuator in self.actuators # This lint is mistaken; pylint: disable=not-an-iterable
if actuator.joint is not None)
@composer.cached_property
def observable_tendons(self):
return self._mjcf_root.find_all('tendon')
@composer.cached_property
def mocap_joints(self):
return tuple(
self._mjcf_root.find('joint', name) for name in _RAT_MOCAP_JOINTS)
@composer.cached_property
def mocap_joint_order(self):
return tuple([jnt.name for jnt in self.mocap_joints]) # This lint is mistaken; pylint: disable=not-an-iterable
@composer.cached_property
def bodies(self):
"""Return all bodies."""
return tuple(self._mjcf_root.find_all('body'))
@composer.cached_property
def mocap_tracking_bodies(self):
"""Return bodies for mocap comparison."""
return tuple(body for body in self._mjcf_root.find_all('body')
if not re.match(r'(vertebra|hand|toe)', body.name))
@composer.cached_property
def primary_joints(self):
"""Return primary (non-vertebra) joints."""
return tuple(jnt for jnt in self._mjcf_root.find_all('joint')
if 'vertebra' not in jnt.name)
@composer.cached_property
def vertebra_joints(self):
"""Return vertebra joints."""
return tuple(jnt for jnt in self._mjcf_root.find_all('joint')
if 'vertebra' in jnt.name)
@composer.cached_property
def primary_joint_order(self):
joint_names = self.mocap_joint_order
primary_names = tuple([jnt.name for jnt in self.primary_joints]) # pylint: disable=not-an-iterable
primary_order = []
for nm in primary_names:
primary_order.append(joint_names.index(nm))
return primary_order
@composer.cached_property
def vertebra_joint_order(self):
joint_names = self.mocap_joint_order
vertebra_names = tuple([jnt.name for jnt in self.vertebra_joints]) # pylint: disable=not-an-iterable
vertebra_order = []
for nm in vertebra_names:
vertebra_order.append(joint_names.index(nm))
return vertebra_order
@composer.cached_property
def egocentric_camera(self):
"""Return the egocentric camera."""
return self._mjcf_root.find('camera', 'egocentric')
@property
def _xml_path(self):
"""Return the path to th model .xml file."""
return self.params['_XML_PATH']
@composer.cached_property
def joint_actuators(self):
"""Return all joint actuators."""
return tuple([act for act in self._mjcf_root.find_all('actuator')
if act.joint])
@composer.cached_property
def joint_actuators_range(self):
act_joint_range = []
for act in self.joint_actuators: # This lint is mistaken; pylint: disable=not-an-iterable
associated_joint = self._mjcf_root.find('joint', act.name)
act_range = associated_joint.dclass.joint.range
act_joint_range.append(act_range)
return act_joint_range
def pose_to_actuation(self, pose):
# holds for joint actuators, find desired torque = 0
# u_ref = [2 q_ref - (r_low + r_up) ]/(r_up - r_low)
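    # This inverts the affine map q = (r_low + r_up)/2 + u*(r_up - r_low)/2
    # used by position actuators scaled to [-1, 1], recovering the normalized
    # control u_ref that holds the joint at q_ref.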
r_lower = np.array([ajr[0] for ajr in self.joint_actuators_range]) # This lint is mistaken; pylint: disable=not-an-iterable
r_upper = np.array([ajr[1] for ajr in self.joint_actuators_range]) # This lint is mistaken; pylint: disable=not-an-iterable
num_tendon_actuators = len(self.actuators) - len(self.joint_actuators)
tendon_actions = np.zeros(num_tendon_actuators)
return np.hstack([tendon_actions, (2*pose[self.joint_actuator_order]-
(r_lower+r_upper))/(r_upper-r_lower)])
@composer.cached_property
def joint_actuator_order(self):
joint_names = self.mocap_joint_order
joint_actuator_names = tuple([act.name for act in self.joint_actuators]) # This lint is mistaken; pylint: disable=not-an-iterable
actuator_order = []
for nm in joint_actuator_names:
actuator_order.append(joint_names.index(nm))
return actuator_order
def _build_observables(self):
return RodentObservables(self)
class RodentObservables(legacy_base.WalkerObservables):
"""Observables for the Rat."""
@composer.observable
def head_height(self):
"""Observe the head height."""
return observable.MJCFFeature('xpos', self._entity.head)[2]
@composer.observable
def sensors_torque(self):
"""Observe the torque sensors."""
return observable.MJCFFeature(
'sensordata',
self._entity.mjcf_model.sensor.torque,
corruptor=lambda v, random_state: np.tanh(2 * v / _TORQUE_THRESHOLD)
)
@composer.observable
def tendons_pos(self):
return observable.MJCFFeature('length', self._entity.observable_tendons)
@composer.observable
def tendons_vel(self):
return observable.MJCFFeature('velocity', self._entity.observable_tendons)
@composer.observable
def actuator_activation(self):
"""Observe the actuator activation."""
model = self._entity.mjcf_model
return observable.MJCFFeature('act', model.find_all('actuator'))
@composer.observable
def appendages_pos(self):
"""Equivalent to `end_effectors_pos` with head's position appended."""
def relative_pos_in_egocentric_frame(physics):
end_effectors_with_head = (
self._entity.end_effectors + (self._entity.head,))
end_effector = physics.bind(end_effectors_with_head).xpos
torso = physics.bind(self._entity.root_body).xpos
xmat = \
np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))
return np.reshape(np.dot(end_effector - torso, xmat), -1)
return observable.Generic(relative_pos_in_egocentric_frame)
@property
def proprioception(self):
"""Return proprioceptive information."""
return [
self.joints_pos, self.joints_vel,
self.tendons_pos, self.tendons_vel,
self.actuator_activation,
self.body_height, self.end_effectors_pos, self.appendages_pos,
self.world_zaxis
] + self._collect_from_attachments('proprioception')
@composer.observable
def egocentric_camera(self):
"""Observable of the egocentric camera."""
if not hasattr(self, '_scene_options'):
# Don't render this walker's geoms.
self._scene_options = mj_wrapper.MjvOption()
collision_geom_group = 2
self._scene_options.geomgroup[collision_geom_group] = 0
cosmetic_geom_group = 1
self._scene_options.geomgroup[cosmetic_geom_group] = 0
return observable.MJCFCamera(self._entity.egocentric_camera,
width=64, height=64,
scene_option=self._scene_options
)
| [
"[email protected]"
] | |
933a002ab324b2adef7d0dbfa19291d879c51704 | 35788f5ca40e5b9e14d4700084e0f3a7fd7f3033 | /basic/conversion-teacher.py | a8f091bdf171ca3f3c8dc193324984e020ad5ccf | [] | no_license | yunnyisgood/django-monaco | 0c097385c0497b2bcbd66571f16a84d53b4913db | b83e9702a064f81d76be35a2e8f4c02c03b5255e | refs/heads/master | 2023-05-23T03:53:26.934348 | 2021-06-14T14:29:43 | 2021-06-14T14:29:43 | 370,548,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | import pandas as pd
class Conversion(object):
df = pd.DataFrame()
# dp = ()
@staticmethod
def dp_create() -> ():
        # self is not used here -> a staticmethod rather than an instance method
return (1, 2, 3, 4, 5, 6, 7, 8, 9)
@staticmethod
def dp_to_list(dp) -> []:
return list(dp)
@staticmethod
    def list_to_float(dp) -> []:  # convert the list to floats
return [float(dp[i]) for i in range(0, len(dp))]
@staticmethod
def float_to_num(dp) -> []:
return [int(dp[i]) for i in range(0, len(dp))]
@staticmethod
def list_to_dictionary(dp) -> {}:
return dict(zip([str(i) for i in dp], [str(i) for i in dp]))
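        # e.g. [1, 2, 3] -> {'1': '1', '2': '2', '3': '3'}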
@staticmethod
    def string_to_tuple(h) -> ():
return tuple(list(h))
@staticmethod
def tuple_to_list(h) -> []:
return list(h)
    @staticmethod
def dictionary_to_dataFrame(dt):
return pd.DataFrame.from_dict(dt, orient='index')
@staticmethod
def main():
c = Conversion()
temp = ''
num = 0
f = 0.0
        ls = []
        dp = ()  # so menu options 2-5 don't raise NameError before option 1 creates the tuple
dt = {}
h = 'hello'
while 1:
            menu = input('0.Exit 1.Create tuple 2.Tuple->list 3.List->float 4.Float->int 5.List->dict 6.Str->tuple 7.Tuple->list 8.Dict->DataFrame ')
if menu == '0':
break
            # 1. Create a tuple holding the elements 1 to 10 (the code actually returns 1..9)
            elif menu == '1':
                dp = c.dp_create()
                print(dp)
            # 2. Convert the tuple to a list
            elif menu == '2':
                dp = c.dp_to_list(dp)
                print(f'dp_to_list:{dp}')
            # 3. Convert the list to a list of floats
            elif menu == '3':
                dp = c.list_to_float(dp)
                print(f'list_to_float:{dp}')
            # 4. Convert the float list back to a list of ints
            elif menu == '4':
                dp = c.float_to_num(dp)
                print(f'float_to_num:{dp}')
            # 5. Convert the step-4 list to a dictionary; convert the keys to str (return)
            elif menu == '5':
                dt = c.list_to_dictionary(dp)
                print(f'list_to_dictionary:{dt}')
            # 6. Convert 'hello' to a tuple
            elif menu == '6':
                h = c.string_to_tuple(h)
                print(h)
            # 7. Convert the step-6 tuple back to a list
            elif menu == '7':
                h = c.tuple_to_list(h)
                print(h)
            # 8. Convert the step-5 dictionary to a DataFrame
            elif menu == '8':
                df = c.dictionary_to_dataFrame(dt)
                print(df)
else:
continue
Conversion.main()
| [
"[email protected]"
] |