Dataset schema (29 columns; one record per source file; ranges are observed min-max):

| column | dtype | observed values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1-132 |

blob_id: 3f3958a62d181d094a638a1d21990e621e83aee4
directory_id: 16e266cf50a712ed29a4097e34504aac0281e6cb
path: /Functions/venv/lib/python3.6/site-packages/_TFL/IV_Number.py
content_id: 727c60959a0c90ead1b7c1e10a1327d21cb2e807
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: felix-ogutu/PYTHON-PROJECTS
snapshot_id: 9dd4fdcfff6957830587b64c5da3b5c3ade3a27e
revision_id: 8c1297dbda495078509d06a46f47dc7ee60b6d4e
branch_name: refs/heads/master
visit_date: 2023-06-05T04:41:36.727376
revision_date: 2021-06-25T20:36:52
committer_date: 2021-06-25T20:36:52
github_id: 380,348,911
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,642
extension: py
content:
# -*- coding: utf-8 -*-
# Copyright (C) 1999-2015 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.IV_Number
#
# Purpose
# Model Interface-Version Number
#
# Revision Dates
# 25-Oct-1999 (CT) Creation
# 2-Nov-1999 (CT) Comment added
# 15-Nov-1999 (CT) `db_extension` added
# 18-Nov-1999 (CT) `producer` added
# 19-Nov-1999 (CT) `producer` convert to list
# 19-Nov-1999 (CT) `consumer` added
# 8-Aug-2000 (MG) Format of `__repr__` changed
# 9-Aug-2000 (CT) `clone` added
# 9-Aug-2000 (MG) `_unnested` added and used in `__repr__`
# 28-Sep-2000 (CT) s/database/data base/g
# 13-Dec-2000 (CT) s/data base/database/g
# 12-Apr-2002 (CT) Use `StandardError` instead of `Exception`
# 15-Apr-2002 (CT) Raise `TypeError` instead of string exception
# (__setattr__)
# 24-Oct-2002 (CT) Esthetics
# 28-Sep-2004 (CT) Use `isinstance` instead of type comparison
# 14-Feb-2006 (CT) Moved into package `TFL`
# 9-Aug-2006 (CT) `__hash__` changed to return `hash (id (self))`
# instead of `id (self)`
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL.pyk import pyk
class Interface_Mismatch (Exception) :
pass
class IV_Number :
"""Model Interface-Version Number.
An `IV_Number` describes the version of a specific interface of a
software product (e.g., a database read or written).
`external_version` is set to the version of the interface when the
program reads information from that interface. The value of
`external_version` can be used to convert from an old to a new format.
`external_version` applies only to two-way interfaces which are
read and written by the same program.
    `external_version` must lie in the closed interval [`comp_min`, `comp_max`].
    If it is set to a value outside that interval, an exception is raised.
The function `compatible` can be used to check the `external_version`
before setting it.
"""
def __init__ \
( self, name, producer, consumer, program_version
, comp_min = None
, comp_max = None
, db_extension = None
) :
if isinstance (producer, pyk.string_types) :
producer = (producer, )
if isinstance (consumer, pyk.string_types) :
consumer = (consumer, )
self.name = name
self.producer = producer
self.consumer = consumer
self.program_version = program_version
self.comp_min = (comp_min, program_version) [comp_min is None]
self.comp_max = (comp_max, program_version) [comp_max is None]
self.db_extension = db_extension
self.reset_external_version ()
# end def __init__
def clone (self, comp_min) :
"""Returns a clone of `self` with changed `comp_min`."""
return self.__class__ \
( self.name
, self.producer
, self.consumer
, self.program_version
, comp_min
, self.comp_max
, self.db_extension
)
# end def clone
def compatible (self, external_version) :
return self.comp_min <= external_version <= self.comp_max
# end def compatible
def restrict (self, comp_min, comp_max) :
"""Restrict compatibility interval to `comp_min` and `comp_max`."""
self.__dict__ ["comp_min"] = max (self.comp_min, comp_min)
self.__dict__ ["comp_max"] = min (self.comp_max, comp_max)
# end def restrict
def reset_external_version (self) :
"""Reset `self.external_version`."""
self.__dict__ ["external_version"] = -1
# end def reset_external_version
def __setattr__ (self, name, value) :
"""Prevent the changing of attributes other than `external_version`.
`external_version` is checked for compatibility with `comp_min`
and `comp_max`.
Once an attribute is set, it cannot be changed to another value.
"""
if hasattr (self, name) and name != "external_version" :
raise TypeError \
( "Attribute %s is readonly. Cannot change value from %s to %s"
% (name, getattr (self, name), value)
)
self.__dict__ [name] = value
if name == "external_version" :
if not self.compatible (value) :
raise Interface_Mismatch (self)
# end def __setattr__
def __str__ (self) :
return "%s = %s" % (self.name, self.program_version)
# end def __str__
def _unnested (self, l) :
if len (l) == 1 :
return l [0]
else :
return l
# end def _unnested
def __repr__ (self) :
return "%s ('%s', %s, %s, %s, %s, %s, '%s')" % \
( self.__class__.__name__, self.name
, repr (self._unnested (self.producer))
, repr (self._unnested (self.consumer))
, self.program_version, self.comp_min, self.comp_max
, self.db_extension or ""
)
# end def __repr__
# end class IV_Number
if __name__ != "__main__" :
TFL._Export ("*")
### __END__ TFL.IV_Number
authors: ["[email protected]"]
author_id:
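
A minimal usage sketch of the IV_Number class above (assuming the _TFL package is importable; the interface name and version numbers are illustrative, not from the original file):

from _TFL.IV_Number import IV_Number

iv = IV_Number("db_format", producer="myapp", consumer="myapp",
               program_version=5, comp_min=3)
print(iv.compatible(4))    # True: 3 <= 4 <= 5
iv.external_version = 4    # accepted; a value outside [3, 5] raises Interface_Mismatch
# iv.name = "other"        # would raise TypeError: attributes are write-once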

blob_id: 93138bc3c1cbcdd2bc2cd63d9d87a7507f988c15
directory_id: f07a42f652f46106dee4749277d41c302e2b7406
path: /Data Set/bug-fixing-5/4ef793f2658bd7c8752e604548e55c2bcdd82d7b-<test_constructor_from_items>-fix.py
content_id: 338f47c225316ff2ab0d0bf23e89689e8cc87cbd
detected_licenses: []
license_type: no_license
repo_name: wsgan001/PyFPattern
snapshot_id: e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
revision_id: cc347e32745f99c0cd95e79a18ddacc4574d7faa
branch_name: refs/heads/main
visit_date: 2023-08-25T23:48:26.112133
revision_date: 2021-10-23T14:11:22
committer_date: 2021-10-23T14:11:22
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,948
extension: py
content:
def test_constructor_from_items(self, float_frame, float_string_frame):
items = [(c, float_frame[c]) for c in float_frame.columns]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, float_frame)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, float_frame.loc[:, ['C', 'B', 'A']])
row_items = [(idx, float_string_frame.xs(idx)) for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(row_items, columns=float_string_frame.columns, orient='index')
tm.assert_frame_equal(recons, float_string_frame)
assert (recons['A'].dtype == np.float64)
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
arr = construct_1d_object_array_from_listlike(([('bar', 'baz')] * len(float_string_frame)))
float_string_frame['foo'] = arr
row_items = [(idx, list(float_string_frame.xs(idx))) for idx in float_string_frame.index]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
recons = DataFrame.from_items(row_items, columns=float_string_frame.columns, orient='index')
tm.assert_frame_equal(recons, float_string_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'], columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
authors: ["[email protected]"]
author_id:
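
The test above exercises the deprecated DataFrame.from_items; the FutureWarnings it asserts point to DataFrame.from_dict as the replacement. A hedged sketch of the equivalent modern calls (pandas 0.23 or later; data values are illustrative):

import pandas as pd
from collections import OrderedDict

items = [('A', [1, 2, 3]), ('B', [4, 5, 6])]
# Column-wise construction, the old from_items(items) default:
df_cols = pd.DataFrame.from_dict(OrderedDict(items))
# Row-wise construction, matching from_items(items, orient='index', columns=...):
df_rows = pd.DataFrame.from_dict(OrderedDict(items), orient='index',
                                 columns=['one', 'two', 'three'])
print(df_rows)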

blob_id: 19ed14adeb53df54f976ec502b271381df4d55ff
directory_id: 7dd7b9a1dfd9d8c6ea08e1386737ba9a5c1b4163
path: /ordasambond/ordasambond/middlewares/deltafetch.py
content_id: c7e77a34393e69669ecde39cdbfdf352956b8cfd
detected_licenses: []
license_type: no_license
repo_name: gogn-in/ordasambond
snapshot_id: f07c402c9af0915841e8dfb4bb6b9250e90480cb
revision_id: ca5f2895a36156a1e9b8f9f28fe6c8f3f9f8b435
branch_name: refs/heads/master
visit_date: 2021-01-10T10:50:48.072071
revision_date: 2016-03-22T22:58:22
committer_date: 2016-03-22T22:58:22
github_id: 54,489,216
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,149
extension: py
content:
import os
import time
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import data_path
from scrapy.exceptions import NotConfigured
from scrapy import signals
import logging
logger = logging.getLogger(__name__)
# Custom version of the DeltaFetch middleware from scrapylib:
# https://github.com/scrapinghub/scrapylib
# Custom in the fact that the latest version of scrapy has deprecated
# scrapy.log. This version uses python logging.
class DeltaFetch(object):
"""This is a spider middleware to ignore requests to pages containing items
seen in previous crawls of the same spider, thus producing a "delta crawl"
containing only new items.
    This also speeds up the crawl by reducing the number of requests that need
    to be crawled and processed (typically, item requests are the most CPU
    intensive).
Supported settings:
* DELTAFETCH_ENABLED - to enable (or disable) this extension
* DELTAFETCH_DIR - directory where to store state
* DELTAFETCH_RESET - reset the state, clearing out all seen requests
Supported spider arguments:
* deltafetch_reset - same effect as DELTAFETCH_RESET setting
Supported request meta keys:
* deltafetch_key - used to define the lookup key for that request. by
default it's the fingerprint, but it can be changed to contain an item
id, for example. This requires support from the spider, but makes the
      extension more efficient for sites that have many URLs for the same item.
"""
def __init__(self, dir, reset=False):
dbmodule = None
try:
dbmodule = __import__('bsddb3').db
except ImportError:
try:
dbmodule = __import__('bsddb').db
except ImportError:
pass
if not dbmodule:
            raise NotConfigured('bsddb or bsddb3 is required')
self.dbmodule = dbmodule
self.dir = dir
self.reset = reset
self.logger = logging.getLogger(__name__)
@classmethod
def from_crawler(cls, crawler):
s = crawler.settings
if not s.getbool('DELTAFETCH_ENABLED'):
raise NotConfigured
dir = data_path(s.get('DELTAFETCH_DIR', 'deltafetch'))
reset = s.getbool('DELTAFETCH_RESET')
o = cls(dir, reset)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
return o
def spider_opened(self, spider):
if not os.path.exists(self.dir):
os.makedirs(self.dir)
dbpath = os.path.join(self.dir, '%s.db' % spider.name)
reset = self.reset or getattr(spider, 'deltafetch_reset', False)
flag = self.dbmodule.DB_TRUNCATE if reset else self.dbmodule.DB_CREATE
try:
self.db = self.dbmodule.DB()
self.db.open(filename=dbpath,
dbtype=self.dbmodule.DB_HASH,
flags=flag)
except Exception:
logger.critical("Failed to open DeltaFetch database at %s, "
"trying to recreate it" % dbpath)
if os.path.exists(dbpath):
os.remove(dbpath)
self.db = self.dbmodule.DB()
self.db.open(filename=dbpath,
dbtype=self.dbmodule.DB_HASH,
flags=self.dbmodule.DB_CREATE)
def spider_closed(self, spider):
self.db.close()
def process_spider_output(self, response, result, spider):
for r in result:
if isinstance(r, Request):
key = self._get_key(r)
if self.db.has_key(key):
self.logger.info("Ignoring already visited: %s" % r)
continue
elif isinstance(r, BaseItem):
key = self._get_key(response.request)
self.db[key] = str(time.time())
yield r
def _get_key(self, request):
return request.meta.get('deltafetch_key') or request_fingerprint(request)
authors: ["[email protected]"]
author_id:
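
A hedged configuration sketch for enabling the middleware above; the DELTAFETCH_* setting names come from its docstring, and the module path assumes the file location shown in this record:

# settings.py
SPIDER_MIDDLEWARES = {
    'ordasambond.middlewares.deltafetch.DeltaFetch': 100,
}
DELTAFETCH_ENABLED = True
DELTAFETCH_DIR = 'deltafetch'   # state directory, resolved via scrapy's data_path
DELTAFETCH_RESET = False        # True (or spider arg deltafetch_reset) clears seen requests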

blob_id: b10177e34d37112453bda54ad58806a828ae33b8
directory_id: b0807e3ad7af88ffd3152c7fc5315604f553f8fc
path: /perceptron/perceptron.py
content_id: 7ab3b594c5df700d4fc76047f5a6939c77a4813e
detected_licenses: []
license_type: no_license
repo_name: jhamrick/sigcse-2016-slides
snapshot_id: f34bfbb93b851efca574a0a2da0d4182c64b979a
revision_id: 44d3fab1c6dea14c00272a7ad7b43703fff7d4d2
branch_name: refs/heads/master
visit_date: 2021-01-10T09:04:20.777687
revision_date: 2016-03-04T15:10:58
committer_date: 2016-03-04T15:10:58
github_id: 53,106,715
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,586
extension: py
content:
import numpy as np
import matplotlib.pyplot as plt
def gen_data(m):
"""Generate m random data points from each of two diferent normal
distributions with unit variance, for a total of 2*m points.
Parameters
----------
m : int
Number of points per class
Returns
-------
x, y : numpy arrays
x is a float array with shape (m, 2)
y is a binary array with shape (m,)
"""
sigma = np.eye(2)
mu = np.array([[0, 2], [0, 0]])
mvrandn = np.random.multivariate_normal
x = np.concatenate([mvrandn(mu[:, 0], sigma, m), mvrandn(mu[:, 1], sigma, m)], axis=0)
y = np.concatenate([np.zeros(m), np.ones(m)], axis=0)
idx = np.arange(2 * m)
np.random.shuffle(idx)
x = x[idx]
y = y[idx]
return x, y
def set_limits(axis, x):
"""Set the axis limits, based on the min and max of the points.
Parameters
----------
axis : matplotlib axis object
x : array with shape (m, 2)
"""
axis.set_xlim(x[:, 0].min() - 0.5, x[:, 0].max() + 0.5)
axis.set_ylim(x[:, 1].min() - 0.5, x[:, 1].max() + 0.5)
def init_plot(x, y, boundary, loops):
"""Initialize the plot with two subplots: one for the training
error, and one for the decision boundary. Returns a function
that can be called with new errors and boundary to update the
plot.
Parameters
----------
x : numpy array with shape (m, 2)
The input data points
y : numpy array with shape (m,)
The true labels of the data
boundary : numpy array with shape (2, 2)
        Essentially, [[xmin, ymin], [xmax, ymax]]
    loops : int
        Number of passes over the data; sets the x-axis length of the error plot
Returns
-------
update_plot : function
This function takes two arguments, the array of errors and
the boundary, and updates the error plot with the new errors
and the boundary on the data plot.
"""
plt.close('all')
fig, (ax1, ax2) = plt.subplots(1, 2)
error_line, = ax1.plot([0], [0], 'k-')
ax1.set_xlim(0, (loops * y.size) - 1)
ax1.set_ylim(0, 15)
ax1.set_xlabel("Iteration")
ax1.set_ylabel("Training error")
colors = np.empty((y.size, 3))
colors[y == 0] = [0, 0, 1]
colors[y == 1] = [1, 0, 0]
ax2.scatter(x[:, 0], x[:, 1], c=colors, s=25)
normal_line, = ax2.plot(boundary[0, 0], boundary[0, 1], 'k-', linewidth=1.5)
set_limits(ax2, x)
plt.draw()
plt.show()
def update_plot(errors, boundary):
error_line.set_xdata(np.arange(errors.size))
error_line.set_ydata(errors)
normal_line.set_xdata(boundary[:, 0])
normal_line.set_ydata(boundary[:, 1])
set_limits(ax2, x)
fig.canvas.draw()
return update_plot
def calc_normal(normal, weights):
"""Calculate the normal vector and decision boundary.
Parameters
----------
normal : numpy array with shape (2,)
The normal vector to the decision boundary
weights : numpy array with shape (3,)
Weights of the perceptron
Returns
-------
new_normal, boundary : numpy arrays
The new_normal array is the updated normal vector. The
boundary array is [[xmin, ymin], [xmax, ymax]] of the
boundary between the points.
"""
new_normal = normal - (np.dot(weights[:2], normal) / np.dot(weights[:2], weights[:2])) * weights[:2]
new_normal = new_normal / np.dot(new_normal, new_normal)
offset = -weights[2] * weights[:2] / np.dot(weights[:2], weights[:2])
normmult = np.array([-1000, 1000])
boundary = (new_normal[None] * normmult[:, None]) + offset[None]
return new_normal, boundary
authors: ["[email protected]"]
author_id:
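
A short usage sketch tying the helpers above together; only gen_data, calc_normal, and init_plot come from the file, while the weight and normal vectors are hypothetical values chosen for illustration:

import numpy as np

x, y = gen_data(100)                  # 200 points, two classes
weights = np.array([0.5, -1.0, 0.1])  # hypothetical [w1, w2, bias]
normal = np.array([1.0, 0.0])         # initial guess for the boundary normal
normal, boundary = calc_normal(normal, weights)
update_plot = init_plot(x, y, boundary, loops=5)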

blob_id: dff5734cf3ac7ce60a11c12aaff326812956ba5c
directory_id: afd11dfdfb07880d0dfe9593662753741627f39d
path: /lane_detection_cnn.py
content_id: e0fba6f2252c1221e1b09386661af826b2762d30
detected_licenses: []
license_type: no_license
repo_name: nghiatdprt/Lane-Detection-Basic
snapshot_id: a12aeabae838c54912e67d95cf20fddecef6374d
revision_id: c1449bf1d3539658f851fd996b2df74c0fdcbd5c
branch_name: refs/heads/master
visit_date: 2020-03-28T16:10:51.485929
revision_date: 2018-09-13T16:42:42
committer_date: 2018-09-13T16:42:42
github_id: 148,667,408
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,101
extension: py
content:
import tensorflow as tf
import numpy as np
import logging
import toulouse_dataset
import cv2
model_params = {
'input_shape': (320, 50, 3),
'batch_size': 100
}
hyper_params = {
'learning_rate': 0.01,
'drop_out': 0.25
}
tf.logging.set_verbosity(tf.logging.INFO)
def conv2d_fn(input_tensor, k_size, n_out):
return tf.layers.conv2d(inputs= input_tensor, \
filters= n_out, \
kernel_size= k_size, \
activation= tf.nn.relu, \
use_bias= True)
def maxpool2d_fn(input_tensor, p_size, strides):
return tf.layers.max_pooling2d(inputs= input_tensor, pool_size= p_size, strides= strides)
def model_fn(features, labels, mode):
features_tensor = tf.cast(features, tf.float32, name="input_tensor")
net = conv2d_fn(features_tensor, 3, 32)
net = maxpool2d_fn(net, 2, 2)
    net = conv2d_fn(net, 3, 64)  # feed the pooled output forward, not the raw input
net = maxpool2d_fn(net, 2, 2)
net = tf.layers.flatten(net)
# net = tf.layers.dense(inputs= features_tensor, units= 512, activation=tf.nn.relu)
# net = tf.layers.dense(inputs= net, units= 256)
out_put = tf.layers.dense(inputs= net, units= 2, name="out_put")
prediction = {
'coordinate' : tf.cast(out_put, tf.int32)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode = mode, predictions = prediction)
labels = tf.cast(labels, tf.int32)
loss = tf.losses.mean_squared_error(labels= labels, predictions= out_put)
tf.summary.scalar('loss', loss)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate= hyper_params['learning_rate'])
train_op = optimizer.minimize(loss = loss, global_step= tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode = mode, loss = loss, train_op = train_op)
rmse = tf.metrics.root_mean_squared_error(labels, prediction['coordinate'])
# Add the rmse to the collection of evaluation metrics.
eval_metrics = {"rmse": rmse}
return tf.estimator.EstimatorSpec(
mode=mode,
# Report sum of error for compatibility with pre-made estimators
loss=loss,
eval_metric_ops=eval_metrics)
def preprocess_data(img_list, width= 320, height=50):
# image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR)
res = img_list
# create a big 1D-array
# for img in img_list:
# # img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)
# # img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# # mask_white = cv2.inRange(img, 140, 255)
# res.append(cv2.resize(img, (int(width/2), int(height/2))))
# res = np.array(res)
# data_shape = res.shape
# res = np.reshape(res, [data_shape[0], data_shape[1], data_shape[2], -1])
# print(res.shape)
# Normalize
res = res / 255. # values in [0, 1]
res -= 0.5 # values in [-0.5, 0.5]
res *= 2 # values in [-1, 1]
return res
# x_train, y_train, x_test, y_test = toulouse_dataset.load_toulouse_dataset()
# x_train = preprocess_data(x_train)
# x_test = preprocess_data(x_test)
model_classifier = tf.estimator.Estimator(
model_fn = model_fn, \
model_dir= 'CheckPoint2')
# print(model_classifier)
# train_input_fn = tf.estimator.inputs.numpy_input_fn(
# x = x_train,
# y = y_train,
# num_epochs= None,
# batch_size= model_params['batch_size'],
# shuffle= True)
# model_classifier.train(
# input_fn = train_input_fn,\
# steps= 2000)
# eval_input_fn = tf.estimator.inputs.numpy_input_fn(
# x = x_train,
# y = y_train,
# num_epochs= 1,
# shuffle= False)
# eval_result = model_classifier.evaluate(input_fn = eval_input_fn)
# print(eval_result)
def serving_input_receiver_fn():
inputs = tf.placeholder(dtype = tf.float32, shape=[None, 50, 320, 3])
return tf.estimator.export.TensorServingInputReceiver(inputs, inputs)
model_classifier.export_savedmodel(export_dir_base="model", serving_input_receiver_fn= serving_input_receiver_fn)
authors: ["[email protected]"]
author_id:
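
A hedged sketch of querying the trained estimator from the script above (TF 1.x API, matching the numpy_input_fn calls commented out in the file; the batch shape follows the serving receiver, [N, 50, 320, 3], and the zero-filled frames are placeholders):

import numpy as np

frames = np.zeros((4, 50, 320, 3), dtype=np.float32)   # placeholder input batch
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x=frames, num_epochs=1, shuffle=False)
for pred in model_classifier.predict(input_fn=pred_input_fn):
    print(pred['coordinate'])   # predicted lane coordinate pair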

blob_id: 6605c15fbea3b939cbdb337a3ca2ab161f3d6946
directory_id: d786da05888e4154456e14caa52b10ea5075c65d
path: /aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20190910/DescribeProtectBlockSummaryRequest.py
content_id: 03b19268e6418f6bfd57ce08fb13c3f1390f40c0
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: yuerl/aliyun-openapi-python-sdk
snapshot_id: c95d35712d21d42c5f9eec1d255375d3d11bd63b
revision_id: 205ab3f0ed32c61c382d7383659e42f3e5300e7b
branch_name: refs/heads/master
visit_date: 2022-07-03T02:27:44.550641
revision_date: 2020-05-12T03:27:46
committer_date: 2020-05-12T03:27:46
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,561
extension: py
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class DescribeProtectBlockSummaryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2019-09-10', 'DescribeProtectBlockSummary','waf')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTimestamp(self):
return self.get_query_params().get('StartTimestamp')
def set_StartTimestamp(self,StartTimestamp):
self.add_query_param('StartTimestamp',StartTimestamp)
def get_EndTimestamp(self):
return self.get_query_params().get('EndTimestamp')
def set_EndTimestamp(self,EndTimestamp):
self.add_query_param('EndTimestamp',EndTimestamp)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
def get_Step(self):
return self.get_query_params().get('Step')
def set_Step(self,Step):
self.add_query_param('Step',Step)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region)
authors: ["[email protected]"]
author_id:
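
A hedged sketch of issuing the request above through the core SDK client; credentials, region, and all parameter values are placeholders:

from aliyunsdkcore.client import AcsClient
from aliyunsdkwaf_openapi.request.v20190910.DescribeProtectBlockSummaryRequest import DescribeProtectBlockSummaryRequest

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = DescribeProtectBlockSummaryRequest()
request.set_InstanceId('<waf-instance-id>')
request.set_Domain('example.com')
request.set_StartTimestamp('<unix-seconds>')
request.set_EndTimestamp('<unix-seconds>')
response = client.do_action_with_exception(request)
print(response)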

blob_id: d4f548f887979fe662fa1a6fae964c21b6ec91cc
directory_id: 7773ea6f465ffecfd4f9821aad56ee1eab90d97a
path: /python/helpers/typeshed/stdlib/3/faulthandler.pyi
content_id: afd739462fc964f6a9a603b810193f2fd07831dd
detected_licenses: ["Apache-2.0", "MIT"]
license_type: permissive
repo_name: aghasyedbilal/intellij-community
snapshot_id: 5fa14a8bb62a037c0d2764fb172e8109a3db471f
revision_id: fa602b2874ea4eb59442f9937b952dcb55910b6e
branch_name: refs/heads/master
visit_date: 2023-04-10T20:55:27.988445
revision_date: 2020-05-03T22:00:26
committer_date: 2020-05-03T22:26:23
github_id: 261,074,802
star_events_count: 2
fork_events_count: 0
gha_license_id: Apache-2.0
gha_event_created_at: 2020-05-04T03:48:36
gha_created_at: 2020-05-04T03:48:35
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 685
extension: pyi
content:
import io
import sys
from typing import Union, Protocol
from _types import FileDescriptorLike
def cancel_dump_traceback_later() -> None: ...
def disable() -> None: ...
def dump_traceback(file: FileDescriptorLike = ..., all_threads: bool = ...) -> None: ...
def dump_traceback_later(timeout: float, repeat: bool = ..., file: FileDescriptorLike = ..., exit: bool = ...) -> None: ...
def enable(file: FileDescriptorLike = ..., all_threads: bool = ...) -> None: ...
def is_enabled() -> bool: ...
if sys.platform != "win32":
def register(signum: int, file: FileDescriptorLike = ..., all_threads: bool = ..., chain: bool = ...) -> None: ...
def unregister(signum: int) -> None: ...
authors: ["[email protected]"]
author_id:
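
The stub above describes the standard-library faulthandler module; a small runnable example of the functions it declares:

import sys
import faulthandler

faulthandler.enable()                # dump tracebacks on fatal signals (SIGSEGV, ...)
print(faulthandler.is_enabled())     # True
faulthandler.dump_traceback(file=sys.stderr, all_threads=True)
faulthandler.disable()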

blob_id: bb6938b5b08f304d80dd221b6c19dcf8965a7305
directory_id: 2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5
path: /Vanadium_cluster_project/anatase_TiO2_101surface_used/V2O5_TiO2_101_zorderimageplot_3by1supercell.py
content_id: 971ec963ae899a6b55cf92ebb34b23675e957e46
detected_licenses: []
license_type: no_license
repo_name: sivachiriki/GOFEE_Pt_V_supported
snapshot_id: 5787d44294262870075f35f2d31c096021b7ce20
revision_id: 6bd700dac1f3e7c58394b758d75246ac6e07eade
branch_name: refs/heads/master
visit_date: 2022-04-08T11:38:13.038455
revision_date: 2020-03-09T10:48:31
committer_date: 2020-03-09T10:48:31
github_id: 226,359,812
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,932
extension: py
content:
from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rc('font',**{'family':'sans-serif',
'sans-serif':['Helvetica'],
'size':14})
matplotlib.rc('text',usetex=True)
matplotlib.rcParams['text.latex.unicode']=True
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{bm}']
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{xfrac}']
matplotlib.rcParams['mathtext.default'] = 'regular'
matplotlib.rcParams['ps.usedistiller'] = 'xpdf'
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
if atoms[ia].symbol == 'Ti':
arad = aradii[atoms[ia].number] #* 0.9 * 0.5
else:
arad = aradii[atoms[ia].number] #* 0.9
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
for i, atom in enumerate(atoms):
if (atom.number ==23):
colors[i] =[76/255, 153/255, 0/255]
if (atom.number ==8 and i >= 648):
colors[i] =[153/255, 0/255, 0/255]
if (atom.number ==1):
colors[i] =[255/255, 255/255, 255/255]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Ti' or a.symbol == 'O':
if a.position[2] < 13.50:
alp[i] = 0.6
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
data=read('anataseTi24O48_101surface_optPBEesben_1by3.traj@:')
for j in range(len(data)):
image = data[j]
#for i,a in enumerate(image):
# if a.position[1] >15.180:
# image.positions[i,1] =0.000
#image = image * (2,2,1)
# Make array of indices for atoms that should be repeated in x and y directions
plt.figure(figsize=(4.0,6.0))
gs = gridspec.GridSpec(2, 1,
height_ratios=[6.32,7.18])
cell = image.get_cell()
# 0 0
ax = plt.subplot(gs[0, 0])
img = image.copy()
plot_conf(ax, img)
print(cell[0,0])
print(cell[1,1])
ax.set_xlim([-1.0, 10.70])
ax.set_ylim([5.50, 16.50])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
# 0 1
ax = plt.subplot(gs[1, 0])
img = image.copy()
plot_conf(ax, img, rot=True)
ax.set_xlim([-1.0, 10.7])
ax.set_ylim([-1.50, 11.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
gs.update(wspace=0.00,hspace=0.00)
plt.tight_layout()
name ='TiO2_101sur_3by1supercell.png'
savefig(name,bbox_inches='tight')
plt.show()
authors: ["[email protected]"]
author_id:

blob_id: b7518824fc43b6c989902289a1fef325022b1247
directory_id: 1bde114a847c629701e3acd004be5788594e0ef1
path: /Examples/Py4Prog/different_returns.py
content_id: 1a92b3a3f7f74d4fb0490267fad9e52e0ae4e56b
detected_licenses: []
license_type: no_license
repo_name: BruceEckel/ThinkingInPython
snapshot_id: 0b234cad088ee144bb8511e1e7db9fd5bba78877
revision_id: 76a1310deaa51e02e9f83ab74520b8269aac6fff
branch_name: refs/heads/master
visit_date: 2022-02-21T23:01:40.544505
revision_date: 2022-02-08T22:26:52
committer_date: 2022-02-08T22:26:52
github_id: 97,673,620
star_events_count: 106
fork_events_count: 33
gha_license_id: null
gha_event_created_at: 2022-02-08T22:26:53
gha_created_at: 2017-07-19T04:43:50
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 200
extension: py
content:
# Py4Prog/different_returns.py
def different_returns(arg):
if arg == 1:
return "one"
if arg == "one":
return True
print(different_returns(1))
print(different_returns("one"))
authors: ["[email protected]"]
author_id:

blob_id: 959d294c8eca3319541da2fd16288d2149cf7a73
directory_id: 61c6f707403307bbf124f85689d0008c5fef7462
path: /removeElements.py
content_id: 949269ae5a03d3f3af57c0ab3e5c55db9ef21a01
detected_licenses: []
license_type: no_license
repo_name: immzz/leetcode_solutions
snapshot_id: 1685bb82abccbcb7e8869e6df61d79241d66d17b
revision_id: baddc09a5e626d919011aa222667a40e2ef4c375
branch_name: refs/heads/master
visit_date: 2016-09-06T06:44:56.971046
revision_date: 2015-10-06T01:35:39
committer_date: 2015-10-06T01:35:39
github_id: 23,483,457
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 933
extension: py
content:
class Solution:
# @param {integer[]} nums
# @param {integer} val
# @return {integer}
def removeElement(self, nums, val):
if (not nums) or len(nums) < 1:
return 0
length = len(nums)
val_ptr = 0
current_ptr = 0
while (val_ptr < len(nums)) and (current_ptr < len(nums)):
while (val_ptr < len(nums)) and (not (nums[val_ptr] == val)):
val_ptr += 1
current_ptr = val_ptr + 1
while (current_ptr < len(nums)) and (nums[current_ptr] == val):
current_ptr += 1
if (current_ptr < len(nums)) and (val_ptr < len(nums)):
temp = nums[current_ptr]
nums[current_ptr] = nums[val_ptr]
nums[val_ptr] = temp
val_ptr += 1
current_ptr += 1
return val_ptr
sol = Solution()
a = [2,3,3]
print(sol.removeElement(a, 2))
print(a)
authors: ["[email protected]"]
author_id:

blob_id: 5fba49f6abf7fd870f403d743c941edc418ac119
directory_id: 350db570521d3fc43f07df645addb9d6e648c17e
path: /1439_Find_the_Kth_Smallest_Sum_of_a_Matrix_With_Sorted_Rows/solution.py
content_id: 410bee6160c17cc3dcfb17136fda39f7cf4af231
detected_licenses: []
license_type: no_license
repo_name: benjaminhuanghuang/ben-leetcode
snapshot_id: 2efcc9185459a1dd881c6e2ded96c42c5715560a
revision_id: a2cd0dc5e098080df87c4fb57d16877d21ca47a3
branch_name: refs/heads/master
visit_date: 2022-12-10T02:30:06.744566
revision_date: 2022-11-27T04:06:52
committer_date: 2022-11-27T04:06:52
github_id: 236,252,145
star_events_count: 1
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 277
extension: py
content:
'''
1439. Find the Kth Smallest Sum of a Matrix With Sorted Rows
Level: Hard
https://leetcode.com/problems/find-the-kth-smallest-sum-of-a-matrix-with-sorted-rows
'''
'''
Solution:
    Fold the rows in one at a time, keeping only the k smallest partial sums;
    anything beyond the k-th smallest can never contribute to the final answer.
'''
from typing import List

class Solution:
    def kthSmallest(self, mat: List[List[int]], k: int) -> int:
        sums = [0]
        for row in mat:
            # Cross every kept partial sum with the next row, then prune to k.
            sums = sorted(s + v for s in sums for v in row)[:k]
        return sums[k - 1]
authors: ["[email protected]"]
author_id:
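
A quick check of the completed solution against LeetCode's first example for this problem:

mat = [[1, 3, 11], [2, 4, 6]]
print(Solution().kthSmallest(mat, k=5))   # 7  (sorted sums start 3, 5, 5, 7, 7, ...)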

blob_id: 4eac8110cbbd1dda68c1f2fefbb625f8e9167722
directory_id: 6fa0d5d3b61fbce01fad5a7dd50258c09298ee00
path: /Algorithm/BOJ/2754.py
content_id: 57138e66826228ebad25635ac3866d1e5de91457
detected_licenses: []
license_type: no_license
repo_name: athletejuan/TIL
snapshot_id: c8e6bd9f7e2c6f999dbac759adcdb6b2959de384
revision_id: 16b854928af2f27d91ba140ebc1aec0007e5eb04
branch_name: refs/heads/master
visit_date: 2023-02-19T13:59:06.495110
revision_date: 2022-03-23T15:08:04
committer_date: 2022-03-23T15:08:04
github_id: 188,750,527
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2023-02-15T22:54:50
gha_created_at: 2019-05-27T01:27:09
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 181
extension: py
content:
C = input()
credit = {'A+': 4.3, 'A0': 4.0, 'A-': 3.7, 'B+': 3.3, 'B0': 3.0, 'B-': 2.7, 'C+': 2.3, 'C0': 2.0, 'C-': 1.7, 'D+': 1.3, 'D0': 1.0, 'D-': 0.7, 'F': 0.0}
print(credit[C])
authors: ["[email protected]"]
author_id:

blob_id: cb31f7b367a251571a54b7e2aab1667b3ba85ac5
directory_id: 51f887286aa3bd2c3dbe4c616ad306ce08976441
path: /pybind/slxos/v17r_1_01a/brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/__init__.py
content_id: fb2366eb8d69bbdc880d19062b68f022dcca95ef
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: b2220333/pybind
snapshot_id: a8c06460fd66a97a78c243bf144488eb88d7732a
revision_id: 44c467e71b2b425be63867aba6e6fa28b2cfe7fb
branch_name: refs/heads/master
visit_date: 2020-03-18T09:09:29.574226
revision_date: 2018-04-03T20:09:50
committer_date: 2018-04-03T20:09:50
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 12,820
extension: py
content:
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import summary
class mpls_ldp_neighbor_brief(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/get-mpls-ldp-neighbor-brief/output/mpls-ldp-neighbor-brief. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__num_link_neighbors','__num_targeted_neighbors','__summary',)
_yang_name = 'mpls-ldp-neighbor-brief'
_rest_name = 'mpls-ldp-neighbor-brief'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__num_link_neighbors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-link-neighbors", rest_name="num-link-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
self.__num_targeted_neighbors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-targeted-neighbors", rest_name="num-targeted-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
self.__summary = YANGDynClass(base=YANGListType("neighbor_transport",summary.summary, yang_name="summary", rest_name="summary", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-transport', extensions=None), is_container='list', yang_name="summary", rest_name="summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'get-mpls-ldp-neighbor-brief', u'output', u'mpls-ldp-neighbor-brief']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-mpls-ldp-neighbor-brief', u'output', u'mpls-ldp-neighbor-brief']
def _get_num_link_neighbors(self):
"""
Getter method for num_link_neighbors, mapped from YANG variable /brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/num_link_neighbors (uint32)
YANG Description: Number of link neighbors
"""
return self.__num_link_neighbors
def _set_num_link_neighbors(self, v, load=False):
"""
Setter method for num_link_neighbors, mapped from YANG variable /brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/num_link_neighbors (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_num_link_neighbors is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_num_link_neighbors() directly.
YANG Description: Number of link neighbors
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-link-neighbors", rest_name="num-link-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """num_link_neighbors must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-link-neighbors", rest_name="num-link-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__num_link_neighbors = t
if hasattr(self, '_set'):
self._set()
def _unset_num_link_neighbors(self):
self.__num_link_neighbors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-link-neighbors", rest_name="num-link-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_num_targeted_neighbors(self):
"""
Getter method for num_targeted_neighbors, mapped from YANG variable /brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/num_targeted_neighbors (uint32)
YANG Description: Number of targeted neighbors
"""
return self.__num_targeted_neighbors
def _set_num_targeted_neighbors(self, v, load=False):
"""
Setter method for num_targeted_neighbors, mapped from YANG variable /brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/num_targeted_neighbors (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_num_targeted_neighbors is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_num_targeted_neighbors() directly.
YANG Description: Number of targeted neighbors
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-targeted-neighbors", rest_name="num-targeted-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """num_targeted_neighbors must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-targeted-neighbors", rest_name="num-targeted-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__num_targeted_neighbors = t
if hasattr(self, '_set'):
self._set()
def _unset_num_targeted_neighbors(self):
self.__num_targeted_neighbors = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="num-targeted-neighbors", rest_name="num-targeted-neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
def _get_summary(self):
"""
Getter method for summary, mapped from YANG variable /brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/summary (list)
"""
return self.__summary
def _set_summary(self, v, load=False):
"""
Setter method for summary, mapped from YANG variable /brocade_mpls_rpc/get_mpls_ldp_neighbor_brief/output/mpls_ldp_neighbor_brief/summary (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_summary is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_summary() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("neighbor_transport",summary.summary, yang_name="summary", rest_name="summary", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-transport', extensions=None), is_container='list', yang_name="summary", rest_name="summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """summary must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("neighbor_transport",summary.summary, yang_name="summary", rest_name="summary", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-transport', extensions=None), is_container='list', yang_name="summary", rest_name="summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__summary = t
if hasattr(self, '_set'):
self._set()
def _unset_summary(self):
self.__summary = YANGDynClass(base=YANGListType("neighbor_transport",summary.summary, yang_name="summary", rest_name="summary", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-transport', extensions=None), is_container='list', yang_name="summary", rest_name="summary", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
num_link_neighbors = __builtin__.property(_get_num_link_neighbors, _set_num_link_neighbors)
num_targeted_neighbors = __builtin__.property(_get_num_targeted_neighbors, _set_num_targeted_neighbors)
summary = __builtin__.property(_get_summary, _set_summary)
_pyangbind_elements = {'num_link_neighbors': num_link_neighbors, 'num_targeted_neighbors': num_targeted_neighbors, 'summary': summary, }
authors: ["[email protected]"]
author_id:

blob_id: 446e368dfaaedf1674c3ed268346ea05f820c598
directory_id: b662fcc29eda65211bccef35fbe42f5a072986c5
path: /pysm/transformation/models/scope.py
content_id: 06d7849413eb18afa2962a0377db2e4826224b1d
detected_licenses: ["MIT"]
license_type: permissive
repo_name: binh-vu/semantic-modeling
snapshot_id: 08edb87ed36724046049e1caf10a6cb3da69ccaf
revision_id: b387584502ba1daa6abd6b7573828416f6426b49
branch_name: refs/heads/master
visit_date: 2022-01-09T10:24:08.840566
revision_date: 2019-05-11T06:09:30
committer_date: 2019-05-11T06:09:30
github_id: 186,094,653
star_events_count: 5
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,359
extension: py
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
from typing import Dict, Tuple, List, Set, Union, Optional
from transformation.models.table_schema import Schema
class Scope:
def __init__(self, path: str):
self.path = path
if path == "":
self.attr_paths = []
else:
self.attr_paths = path.split(Schema.PATH_DELIMITER)
def is_outer_scope_of(self, scope: 'Scope') -> bool:
return scope.path.startswith(self.path) and scope.path != self.path
def is_same_scope(self, scope: 'Scope') -> bool:
return scope.path == self.path
def get_parent(self):
return Scope(Schema.PATH_DELIMITER.join(self.attr_paths[:-1]))
def get_inner_scope(self):
assert len(self.attr_paths) > 0
return Scope(Schema.PATH_DELIMITER.join(self.attr_paths[1:]))
def contain_path(self, path: str):
return path.startswith(self.path)
def get_relative_path(self, path: str):
if self.path == "":
return path
return path[len(self.path)+1:]
def get_relative_path2scope(self, scope: 'Scope'):
"""Return a relative path to another scope"""
return scope.attr_paths[len(self.attr_paths):]
def extract_data(self, global_row: dict):
if self.path == "":
return global_row
return _extract_data(self.attr_paths, global_row)
def __eq__(self, other):
if other is None or not isinstance(other, Scope):
return False
return self.path == other.path
def __lt__(self, other):
if other is None or not isinstance(other, Scope):
raise NotImplementedError()
return other.path.startswith(self.path) and other.path != self.path
def __gt__(self, other):
if other is None or not isinstance(other, Scope):
raise NotImplementedError()
return self.path.startswith(other.path) and other.path != self.path
def __repr__(self):
return self.path
def _extract_data(attr_paths: List[str], local_row: dict):
attr = attr_paths[0]
if len(attr_paths) == 1:
return local_row[attr]
    # Only the first path segment exists at this nesting level; looping over
    # every segment here would look up deeper keys on the wrong level.
    if isinstance(local_row[attr], list):
        return [_extract_data(attr_paths[1:], val) for val in local_row[attr]]
    return _extract_data(attr_paths[1:], local_row[attr])
authors: ["[email protected]"]
author_id:
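
A hedged usage sketch of the Scope class above (it assumes Schema.PATH_DELIMITER is "."; check table_schema for the actual delimiter, and the attribute names are illustrative):

outer = Scope("people")
inner = Scope("people.address")
print(outer.is_outer_scope_of(inner))             # True
print(outer.get_relative_path("people.address"))  # "address"
print(inner.get_parent())                         # people
print(outer.get_relative_path2scope(inner))       # ["address"]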

blob_id: 97f4d4bf6bbeea91ac82ddf267e8c6fc3e00e32a
directory_id: 4fe68a329da64dceef27b4f62dcde538e07ea3e0
path: /109.py
content_id: b697c5c7fc08aa9c5037fc713f60c654bad6da5a
detected_licenses: []
license_type: no_license
repo_name: LiuXPeng/leetcode
snapshot_id: 9bf18a8c86952e12d8ca2032793d2abfe918a730
revision_id: 1d583df4089e1678996f481fa1a903b22ff43182
branch_name: refs/heads/master
visit_date: 2020-03-20T00:52:01.736500
revision_date: 2018-07-26T03:25:04
committer_date: 2018-07-26T03:25:04
github_id: 137,058,642
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 966
extension: py
content:
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedListToBST(self, head):
"""
:type head: ListNode
:rtype: TreeNode
"""
if not head:
return
tag = head
        count = 1  # start at 1 so count ends up one greater than the list length
while tag:
tag = tag.next
count += 1
if count == 2:
return TreeNode(head.val)
if count == 3:
res = TreeNode(head.next.val)
res.left = TreeNode(head.val)
return res
k = count // 2
tag = head
for i in range(0, k - 2):
tag = tag.next
temp = tag.next
res = TreeNode(temp.val)
tag.next = None
res.left = self.sortedListToBST(head)
res.right = self.sortedListToBST(temp.next)
return res
authors: ["[email protected]"]
author_id:

blob_id: c05c576d43957856efff8eecd62f30a51faddc53
directory_id: 3b1053ea38fee9a59d335dd75bb6a6906d298594
path: /virtool/jobs/api.py
content_id: 0167f78f13fd311dd71976150a0e196319ee992b
detected_licenses: ["MIT"]
license_type: permissive
repo_name: tianshengsui/virtool
snapshot_id: 8c59bb36c7e2924586be34fabc6b861e16691b7d
revision_id: eb75637eb6ca9dcba647ad8acad5d316877dd55e
branch_name: refs/heads/master
visit_date: 2023-04-19T16:36:54.894894
revision_date: 2021-04-23T19:09:33
committer_date: 2021-04-23T19:09:33
github_id: 295,793,679
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2020-09-30T23:53:54
gha_created_at: 2020-09-15T16:55:59
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,295
extension: py
content:
import os
import virtool.api.utils
import virtool.http.routes
import virtool.jobs.db
import virtool.resources
import virtool.users.db
import virtool.utils
from virtool.api.response import conflict, json_response, no_content, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/jobs")
async def find(req):
"""
Return a list of job documents.
"""
db = req.app["db"]
term = req.query.get("find")
db_query = dict()
if term:
db_query.update(virtool.api.utils.compose_regex_query(term, ["task", "user.id"]))
data = await virtool.api.utils.paginate(
db.jobs,
db_query,
req.query,
projection=virtool.jobs.db.PROJECTION
)
data["documents"].sort(key=lambda d: d["created_at"])
return json_response(data)
@routes.get("/api/jobs/{job_id}")
async def get(req):
"""
Return the complete document for a given job.
"""
job_id = req.match_info["job_id"]
document = await req.app["db"].jobs.find_one(job_id)
if not document:
return not_found()
return json_response(virtool.utils.base_processor(document))
@routes.put("/api/jobs/{job_id}/cancel", permission="cancel_job")
async def cancel(req):
"""
Cancel a job.
"""
db = req.app["db"]
job_id = req.match_info["job_id"]
document = await db.jobs.find_one(job_id, ["status"])
if not document:
return not_found()
if not virtool.jobs.is_running_or_waiting(document):
return conflict("Not cancellable")
await req.app["jobs"].cancel(job_id)
document = await db.jobs.find_one(job_id)
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/jobs", permission="remove_job")
async def clear(req):
db = req.app["db"]
job_filter = req.query.get("filter")
# Remove jobs that completed successfully.
complete = job_filter in [None, "finished", "complete"]
# Remove jobs that errored or were cancelled.
failed = job_filter in [None, "finished", "failed"]
removed = await virtool.jobs.db.clear(db, complete=complete, failed=failed)
return json_response({
"removed": removed
})
@routes.delete("/api/jobs/{job_id}", permission="remove_job")
async def remove(req):
"""
Remove a job.
"""
db = req.app["db"]
job_id = req.match_info["job_id"]
document = await db.jobs.find_one(job_id)
if not document:
return not_found()
if virtool.jobs.is_running_or_waiting(document):
return conflict("Job is running or waiting and cannot be removed")
    # Remove the document associated with the job id from the database.
await db.jobs.delete_one({"_id": job_id})
try:
# Calculate the log path and remove the log file. If it exists, return True.
path = os.path.join(req.app["settings"]["data_path"], "logs", "jobs", job_id + ".log")
await req.app["run_in_thread"](virtool.utils.rm, path)
except OSError:
pass
return no_content()
@routes.get("/api/resources")
async def get_resources(req):
"""
    Get an object describing compute resource usage on the server.
"""
resources = virtool.resources.get()
req.app["resources"].update(resources)
return json_response(resources)
authors: ["[email protected]"]
author_id:
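
A hedged sketch of exercising the routes above over HTTP; the paths come from the route decorators, but the host, port, and authentication are assumptions (a real deployment requires a valid session or API key):

import requests

base = "http://localhost:9950"
print(requests.get(base + "/api/jobs", params={"find": "pathoscope"}).json())
requests.put(base + "/api/jobs/<job_id>/cancel")          # needs cancel_job permission
requests.delete(base + "/api/jobs", params={"filter": "failed"})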

blob_id: 0a55920724eb79f070f95f641c0239ee7ab5f1e2
directory_id: f9d564f1aa83eca45872dab7fbaa26dd48210d08
path: /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/raw_value.py
content_id: c8334c0dd5a90195b94e73878d1c4f0eef47f204
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: huaweicloud/huaweicloud-sdk-python-v3
snapshot_id: cde6d849ce5b1de05ac5ebfd6153f27803837d84
revision_id: f69344c1dadb79067746ddf9bfde4bddc18d5ecf
branch_name: refs/heads/master
visit_date: 2023-09-01T19:29:43.013318
revision_date: 2023-08-31T08:28:59
committer_date: 2023-08-31T08:28:59
github_id: 262,207,814
star_events_count: 103
fork_events_count: 44
gha_license_id: NOASSERTION
gha_event_created_at: 2023-06-22T14:50:48
gha_created_at: 2020-05-08T02:28:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,922
extension: py
content:
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RawValue:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'property_name': 'str',
'values': 'list[object]'
}
attribute_map = {
'property_name': 'property_name',
'values': 'values'
}
def __init__(self, property_name=None, values=None):
"""RawValue
The model defined in huaweicloud sdk
        :param property_name: Property name
        :type property_name: str
        :param values: Sequence of historical values of the asset property, e.g. [1,2]
:type values: list[object]
"""
self._property_name = None
self._values = None
self.discriminator = None
if property_name is not None:
self.property_name = property_name
if values is not None:
self.values = values
@property
def property_name(self):
"""Gets the property_name of this RawValue.
        Property name
:return: The property_name of this RawValue.
:rtype: str
"""
return self._property_name
@property_name.setter
def property_name(self, property_name):
"""Sets the property_name of this RawValue.
        Property name
:param property_name: The property_name of this RawValue.
:type property_name: str
"""
self._property_name = property_name
@property
def values(self):
"""Gets the values of this RawValue.
        Sequence of historical values of the asset property, e.g. [1,2]
:return: The values of this RawValue.
:rtype: list[object]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this RawValue.
        Sequence of historical values of the asset property, e.g. [1,2]
:param values: The values of this RawValue.
:type values: list[object]
"""
self._values = values
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RawValue):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
authors: ["[email protected]"]
author_id:
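
A minimal usage sketch of the model class above (property name and values are illustrative):

rv = RawValue(property_name="temperature", values=[1, 2])
print(rv.to_dict())   # {'property_name': 'temperature', 'values': [1, 2]}
print(rv == RawValue(property_name="temperature", values=[1, 2]))   # True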

blob_id: 06c2eca239012d6d77d4c43bd14eb9ece6e2e980
directory_id: 5a6937aa6cea8312bc8964f86f77f4595d1b940f
path: /Testing/test_mypoints_plot2.py
content_id: 54c7c5566745e968ea82108d41f131f4c0c5c225
detected_licenses: []
license_type: no_license
repo_name: emonson/MultiScaleSVD
snapshot_id: 39d8059f3f286a1936c6634740d1bd1e87ddbc7f
revision_id: c231859bffe1eb0a7eaf15fd4d1a4c8ca9cfe8ed
branch_name: refs/heads/master
visit_date: 2020-12-24T13:17:53.369613
revision_date: 2012-12-19T18:09:39
committer_date: 2012-12-19T18:09:39
github_id: 699,727
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,206
extension: py
content:
# Translated to Python from [VTK]/Charts/Testing/Cxx/TestLinePlot.cxx
# This version is for testing reworked subclasses 8/13/2010
import vtk
from vtk.util import numpy_support as VN
import numpy as N
import math
import vtkvtg
from data_source import DataSource
data_file = '/Users/emonson/Data/Fodava/EMoGWDataSets/mnist12_1k_20101119.mat'
# DataSource loads .mat file and can generate data from it for other views
ds = DataSource(data_file)
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderWindow().SetSize(400, 300)
# Testing my custom chart class which has image hover tooltips
chart = vtkvtg.vtkMyChartXY()
chart.SetActionToButton(vtk.vtkChart.PAN, 2)
chart.SetActionToButton(vtk.vtkChart.ZOOM, 4)
chart.SetActionToButton(vtk.vtkChart.SELECT, 1)
view.GetScene().AddItem(chart)
# Create a annotation link to access selection in parallel coordinates view
annotationLink = vtk.vtkAnnotationLink()
# If you don't set the FieldType explicitly it ends up as UNKNOWN (as of 21 Feb 2010)
# See vtkSelectionNode doc for field and content type enum values
annotationLink.GetCurrentSelection().GetNode(0).SetFieldType(1) # Point
annotationLink.GetCurrentSelection().GetNode(0).SetContentType(4) # Indices
# Connect the annotation link to the parallel coordinates representation
chart.SetAnnotationLink(annotationLink)
test_id = 3
table = ds.GetNodeOneScaleCoeffTable(test_id)
chart.ClearPlots()
line1 = vtkvtg.vtkMyPlotPoints()
chart.AddPlot(line1) # POINTS
line1.SetInput(table, 0, 1)
line1.SetMarkerStyle(2)
line1.SetColor(0, 0, 0, 255)
# Tooltip image stack will now be owned by the tooltip, so need to do that differently...
id_list = ds.PointsInNet[test_id]
image_stack = ds.GetProjectedImages(id_list)
# DEBUG
writer = vtk.vtkXMLImageDataWriter()
writer.SetFileName('out.vti')
writer.SetInput(image_stack)
writer.Write()
chart.SetTooltipImageStack(image_stack)
chart.SetTooltipShowImage(True)
# chart.SetTooltipImageScalingFactor(2.0)
chart.SetTooltipImageTargetSize(40)
# Set up annotation link which will carry indices to parallel coordinates chart
# for highlighting outside selections (e.g. back from image_flow)
# This needs to carry indices, while image_flow link outputs pedigree ids
# so conversion happens in HighlightSelectionCallback
highlight_link_idxs = vtk.vtkAnnotationLink()
highlight_link_idxs.GetCurrentSelection().GetNode(0).SetFieldType(1) # Point
highlight_link_idxs.GetCurrentSelection().GetNode(0).SetContentType(4) # 2 = PedigreeIds, 4 = Indices
chart.SetHighlightLink(highlight_link_idxs)
# Finally render the scene and compare the image to a reference image
# view.GetRenderWindow().SetMultiSamples(0)
def selectionCallback(caller, event):
annSel = annotationLink.GetCurrentSelection()
if annSel.GetNumberOfNodes() > 0:
idxArr = annSel.GetNode(0).GetSelectionList()
if idxArr.GetNumberOfTuples() > 0:
            print(VN.vtk_to_numpy(idxArr))
annotationLink.AddObserver("AnnotationChangedEvent",selectionCallback)
# view.ResetCamera()
view.Render()
# Fill selection link with dummy IDs
id_array = N.array([0],dtype='int64')
id_list = VN.numpy_to_vtkIdTypeArray(id_array)
highlight_link_idxs.GetCurrentSelection().GetNode(0).SetSelectionList(id_list)
highlight_link_idxs.InvokeEvent("AnnotationChangedEvent")
# Set up annotation link carrying the indices of the data columns being
# plotted (here columns 1 and 2 of the table), consumed by the chart via
# SetDataColumnsLink below
data_col_idxs = vtk.vtkAnnotationLink()
data_col_idxs.GetCurrentSelection().GetNode(0).SetFieldType(1) # Point
data_col_idxs.GetCurrentSelection().GetNode(0).SetContentType(4) # 2 = PedigreeIds, 4 = Indices
chart.SetDataColumnsLink(data_col_idxs)
# Fill selection link with dummy IDs
col_array = N.array([1,2],dtype='int64')
col_list = VN.numpy_to_vtkIdTypeArray(col_array)
data_col_idxs.GetCurrentSelection().GetNode(0).SetSelectionList(col_list)
data_col_idxs.InvokeEvent("AnnotationChangedEvent")
# Start interaction event loop
view.GetInteractor().Start()
|
[
"[email protected]"
] | |
63fcd4d76ce8b2403bd98acdaf30ee839d32b68d
|
8337bfdd69708f4bfbe345240dcccc7b8c7f5718
|
/loglette/parser/loglette/parser.py
|
7e501e1c1f1c862b4151be8d842b3feac3762e3d
|
[
"MIT"
] |
permissive
|
siku2/Loglette
|
8f1c12ceb7f1009b5eab503ab7608b292be98739
|
d69f99c3ead2bb24f2aa491a61a7f82cb9ca8095
|
refs/heads/master
| 2020-03-24T07:10:26.454200 | 2018-08-01T13:01:02 | 2018-08-01T13:01:02 | 142,555,185 | 1 | 0 |
MIT
| 2018-08-01T13:01:03 | 2018-07-27T09:12:51 |
Python
|
UTF-8
|
Python
| false | false | 1,705 |
py
|
from typing import Dict, List, Tuple
from . import pattern
from .. import Parser, parser
@parser("loglette")
class LogletteParser(Parser):
@classmethod
def parse_value(cls, value: str, text_style: str = None) -> str:
if text_style:
if text_style == "|":
value = value.strip()
elif text_style == ">":
value = pattern.WHITESPACE_STRIPPER.sub(" ", value).strip()
else:
raise SyntaxError(f"Unknown text style ({text_style})")
return value
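    # For illustration (assuming pattern.WHITESPACE_STRIPPER matches runs of
    # whitespace): the ">" style folds '  a\n  b ' down to 'a b', while the
    # "|" style only strips leading/trailing whitespace and keeps inner
    # newlines intact.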
def parse_header(self, text: str) -> Dict[str, str]:
headers = {}
for match in pattern.HEADER_PARSER.finditer(text):
key, text_style, value = match.groups(None)
value = self.parse_value(value, text_style)
headers[key] = value
headers["release_date"] = headers.get("release")
return headers
def parse_changes(self, text: str) -> List[Dict[str, str]]:
changes = []
for match in pattern.CHANGES_PARSER.finditer(text):
change_type, priority, text_style, value = match.groups(None)
text = self.parse_value(value, text_style)
change = {
"type": change_type.upper(),
"priority": priority,
"text": text
}
changes.append(change)
return changes
@classmethod
def split_changelog(cls, text: str) -> Tuple[str, str]:
header, changes = pattern.HEADER_SPLITTER.split(text, maxsplit=1)
return header, changes
@classmethod
def split_changelogs(cls, text: str) -> List[str]:
return pattern.CHANGELOG_SPLITTER.split(text)
|
[
"[email protected]"
] | |
84c46959396968ef4f12dd949fddc9e19ebf9cf9
|
41f13d82b46b158c5c2997915122e4f5e8a700fa
|
/falcon/main/views/things.py
|
ba065c6c1f18b0ea2b7e65e393bbb668750a72b1
|
[] |
no_license
|
ashishRay12/server
|
0959e2e5789f886b327f51a83487fd3919593b22
|
bb68c14398390e004148e848df234277f382a50c
|
refs/heads/master
| 2021-01-10T13:58:43.307036 | 2016-03-21T16:10:35 | 2016-03-21T16:10:35 | 54,403,543 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 572 |
py
|
import falcon
class ThingsResource:
    def on_get(self, req, resp, form=None, files=None):  # None avoids shared mutable {} defaults
"""Handles GET requests"""
resp.status = falcon.HTTP_200 # This is the default status
resp.body = ("GET request accepted")
    def on_post(self, req, resp, form=None, files=None):  # None avoids shared mutable {} defaults
        """Handles POST requests"""
resp.status = falcon.HTTP_200 # This is the default status
#resp.content_type = 'application/text'
#print(form["id"]) -> value captured from hook defined in common.py
resp.body = ("POST request accepted")
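# A minimal wiring sketch (app setup and route name are assumptions, not part
# of this module):
#
#   app = falcon.API()
#   app.add_route('/things', ThingsResource())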
|
[
"[email protected]"
] | |
8145c8f75fc9b4722eb29c669bff3aba2ff807e5
|
1ffcad5482a95e8133b0adbd5fb38652c765a7d6
|
/COT/remove_file.py
|
0354f162faa9788f18e5031217ff5a01cce74652
|
[
"MIT"
] |
permissive
|
duanshuaimin/cot
|
71f51d1270de2609c99ac302ed932d4c72e83c77
|
6da4345f57849620765e88d1e406366509070745
|
refs/heads/master
| 2021-01-21T06:38:45.458816 | 2017-02-21T15:13:09 | 2017-02-21T15:13:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,428 |
py
|
#!/usr/bin/env python
#
# remove_file.py - Implements "cot remove-file" command
#
# June 2016, Glenn F. Matthews
# Copyright (c) 2016 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Module for removing files from VM definitions.
.. autosummary::
COTRemoveFile
"""
import logging
from COT.submodule import COTSubmodule
from COT.data_validation import check_for_conflict, match_or_die
from COT.data_validation import InvalidInputError
logger = logging.getLogger(__name__)
class COTRemoveFile(COTSubmodule):
"""Remove a file (such as a README) from the package.
Inherited attributes:
:attr:`~COTGenericSubmodule.ui`,
:attr:`~COTSubmodule.package`,
:attr:`~COTSubmodule.output`
Attributes:
:attr:`file_path`,
:attr:`file_id`
"""
def __init__(self, ui):
"""Instantiate this submodule with the given UI.
Args:
ui (UI): User interface instance.
"""
super(COTRemoveFile, self).__init__(ui)
self.file_path = None
"""File name or path to be removed from the package."""
self.file_id = None
"""File identifier to be removed from the package."""
def ready_to_run(self):
"""Check whether the module is ready to :meth:`run`.
Returns:
tuple: ``(True, ready_message)`` or ``(False, reason_why_not)``
"""
if self.file_path is None and self.file_id is None:
return False, "No file information provided!"
return super(COTRemoveFile, self).ready_to_run()
def run(self):
"""Do the actual work of this submodule.
Raises:
InvalidInputError: if :func:`ready_to_run` reports ``False``
"""
super(COTRemoveFile, self).run()
vm = self.vm
# Find the existing file entry.
# There may also be a disk entry for this file.
# There may also be a disk device that maps this file to a drive.
(file1, disk1, _, disk_dev1) = vm.search_from_filename(self.file_path)
(file2, disk2, _, disk_dev2) = vm.search_from_file_id(self.file_id)
file_obj = check_for_conflict("file to remove", [file1, file2])
disk = check_for_conflict("disk associated with file to remove",
[disk1, disk2])
disk_drive = check_for_conflict("disk drive mapping this file",
[disk_dev1, disk_dev2])
if file_obj is None:
raise InvalidInputError("No such file found")
if self.file_id is None:
self.file_id = vm.get_id_from_file(file_obj)
else:
match_or_die('--file-id', self.file_id,
'file id in OVF', vm.get_id_from_file(file_obj))
if self.file_path is None:
self.file_path = vm.get_path_from_file(file_obj)
else:
match_or_die('--file-path', self.file_path,
'file path in OVF', vm.get_path_from_file(file_obj))
prompt_info = "file '{0}' (ID '{1}')".format(self.file_path,
self.file_id)
if disk is not None:
prompt_info += " and disk '{0}'".format(vm.get_id_from_disk(disk))
if disk_drive is not None:
prompt_info += " and device '{0}'".format(
vm.device_info_str(disk_drive))
self.ui.confirm_or_die("Remove {0}?".format(prompt_info))
vm.remove_file(file_obj, disk=disk,
disk_drive=disk_drive)
def create_subparser(self):
"""Create 'remove-file' CLI subparser."""
p = self.ui.add_subparser(
'remove-file',
aliases=['delete-file'],
add_help=False,
usage=self.ui.fill_usage("remove-file", [
"[-f FILE_PATH] [-i FILE_ID] PACKAGE [-o OUTPUT]",
]),
help="Remove a file from an OVF package",
description="""
Remove a file from the given OVF. Will prompt for confirmation unless
--force is set.""")
group = p.add_argument_group("general options")
group.add_argument('-h', '--help', action='help',
help="""Show this help message and exit""")
group.add_argument('-o', '--output',
help="""Name/path of new OVF/OVA package to """
"""create instead of updating the existing OVF""")
group = p.add_argument_group("file selection options")
group.add_argument('-f', '--file-path',
help="""File name or path within the package""")
group.add_argument('-i', '--file-id',
help="""File ID string within the package""")
p.add_argument('PACKAGE',
help="""Package, OVF descriptor or OVA file to edit""")
p.set_defaults(instance=self)
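# Example invocation matching the usage string defined above (file and
# package names are placeholders):
#
#   cot remove-file -f README.txt package.ova -o new.ova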
|
[
"[email protected]"
] | |
a1e1e36aac226ae6fb4c681d891bb90b5b64a966
|
b9a86fac908ef196537f3e86bbf1776056e0354f
|
/gatifore_snmp/scripts/append_diamond_conf.py
|
0611f0a49c8085d13c79f5258fb328dc33a9e9b4
|
[] |
no_license
|
tamirko/cfyApps
|
a2ff8514a6591f004a0d57cb1feaad5b267e7f75
|
d7f9b751bc7d1c7bf3d07c36e6f9737c83bd1c78
|
refs/heads/master
| 2021-01-17T00:53:14.475503 | 2017-11-28T08:09:34 | 2017-11-28T08:09:34 | 26,909,041 | 1 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,161 |
py
|
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
APPEND_DIAMOND_STR = "append_diamond_conf"
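# This relationship script merges the target node's SNMP settings (host,
# port, community, OIDs) into the source instance's 'snmp_collector_config'
# runtime property under its 'devices' mapping.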
ctx.logger.info("Starting {0} ... ".format(APPEND_DIAMOND_STR))
target_instance = ctx.target.instance
ctx.logger.info("{0} target_instance {1} ... ".format(APPEND_DIAMOND_STR, target_instance))
target_node = ctx.target.node
ctx.logger.info("{0} target_node {1} ... ".format(APPEND_DIAMOND_STR, target_node))
src_instance = ctx.source.instance
ctx.logger.info("{0} src_instance {1} ... ".format(APPEND_DIAMOND_STR, src_instance))
ctx.logger.info("{0} ctx.target.node.name {1} ... ".format(APPEND_DIAMOND_STR, ctx.target.node.name))
config = src_instance.runtime_properties.get('snmp_collector_config', {})
for key, val in config.items():
if isinstance(val, dict):
ctx.logger.info(" {0} config.{1} b4 -> ... ".format(APPEND_DIAMOND_STR, key))
for k, v in val.items():
ctx.logger.info(" {0} config.{1} b4 -> {2}:{3} ... ".format(APPEND_DIAMOND_STR, key, k, v))
else:
ctx.logger.info("{0} config b4 -> {1}:{2} ... ".format(APPEND_DIAMOND_STR, key, str(val)))
devices_conf = config.get('devices', {})
devices_conf[ctx.target.node.name] = device_config = {}
device_config['node_instance_id'] = target_instance.id
device_config['node_id'] = target_node.id
if 'host' in inputs:
device_config['host'] = inputs.host
else:
device_config['host'] = target_instance.host_ip
ctx.logger.info("xxx {0} host is {1} ... yyy".format(APPEND_DIAMOND_STR, device_config['host']))
device_config['port'] = inputs.port
device_config['community'] = inputs.community
device_config['oids'] = inputs.oids
config['devices'] = devices_conf
for key, val in config.items():
if isinstance(val, dict):
ctx.logger.info(" {0} config.{1} after -> ... ".format(APPEND_DIAMOND_STR, key))
for k, v in val.items():
ctx.logger.info(" {0} config.{1} after -> {2}:{3} ... ".format(APPEND_DIAMOND_STR, key, k, v))
else:
ctx.logger.info("{0} config after -> {1}:{2} ... ".format(APPEND_DIAMOND_STR, key, str(val)))
src_instance.runtime_properties['snmp_collector_config'] = config
|
[
"[email protected]"
] | |
25c50028e507c050a5742263271cfb361423e81d
|
c2f6722d51f119b9f588cbea4121d2f8fddafcd2
|
/bdv/code/procdistrib/clnt_processes.py
|
4ba0c9d0a721669c97972e490f425b8b7b113ac3
|
[] |
no_license
|
facundobatista/blog
|
f4c670b48b52b0e651c7a89ad9de702abd16a39c
|
4461457c185ef3949e0d6b1398b0a7feb4a68cde
|
refs/heads/master
| 2023-07-09T09:02:17.503586 | 2023-07-04T00:36:22 | 2023-07-04T00:36:22 | 30,439,511 | 3 | 0 | null | 2019-01-24T15:43:51 | 2015-02-07T00:33:57 |
PHP
|
UTF-8
|
Python
| false | false | 634 |
py
|
import xmlrpclib, time, sys
import reparteThreads
#reparteThreads.debugmode = True
usage = """
Usage: client_processes.py server:port [[server:port] ...]
e.g.: client_processes.py localhost:9000 10.12.33.112:9000 10.12.33.113:9000
"""
if len(sys.argv) < 2:
print usage
sys.exit(-1)
servers = sys.argv[1:]
servers = [xmlrpclib.Server('http://' + x) for x in servers]
repartidor = reparteThreads.Repartidor(servers, "factoriz_sum")
base = 23434252232434
tini = time.time()
for i in range(10):
repartidor.enviar(str(base+i))
resultados = repartidor.terminar()
print "\n".join(resultados)
print "Tiempo:", time.time() - tini
|
[
"[email protected]"
] | |
6143d20eef5d2414ba5d2734ab41250ff4d2ef98
|
477eae855f1f295c7ade594ee64fc3501ac59ef9
|
/recipes/D/test_package/conanfile.py
|
4bb3be2882b9b6c86b50ae84192ed0ea228efe0c
|
[] |
no_license
|
xingao0803/skynet_example
|
b5fa8e2f5dac0066c773bf9624e803105787ed5c
|
f955dde1041ef264dc33e239771190755b43104a
|
refs/heads/master
| 2020-03-31T08:07:30.670256 | 2018-10-13T03:13:29 | 2018-10-13T03:13:29 | 152,046,747 | 0 | 1 | null | 2018-10-08T08:43:38 | 2018-10-08T08:43:38 | null |
UTF-8
|
Python
| false | false | 334 |
py
|
from conans import ConanFile
import os
channel = os.getenv("CONAN_CHANNEL", "stable")
username = os.getenv("CONAN_USERNAME", "lasote")
class BTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "LIB_D/1.0@%s/%s" % (username, channel)
def test(self):
self.output.info("Test OK!")
|
[
"[email protected]"
] | |
d417d2e64005189a9b67e0af6c9b3badc1fb0ef0
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Games/Tetris/modules/gameboard.py
|
d279d17889bfea61c36f40ae050f4df4273e990a
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:9408a572aa27ab62fce8357d7b6c321f2df4cfaf6de7a27be290c97d70f3178a
size 8445
|
[
"[email protected]"
] | |
ea659a93d51874fb9441c1e921808db5c68cdfe4
|
6e601105760f09d3c9f5306e18e4cf085f0bb4a2
|
/1000-9999/1449.py
|
9e9b7eaf20d4ff4f2f80aa9e6ca37c6e2ba89700
|
[] |
no_license
|
WSJI0/BOJ
|
6412f69fddd46c4bcc96377e2b6e013f3bb1b524
|
160d8c13f72d7da835d938686f433e7b245be682
|
refs/heads/master
| 2023-07-06T15:35:50.815021 | 2023-07-04T01:39:48 | 2023-07-04T01:39:48 | 199,650,520 | 2 | 0 | null | 2020-04-20T09:03:03 | 2019-07-30T12:48:37 |
Python
|
UTF-8
|
Python
| false | false | 221 |
py
|
'''
Problem 1449 (BOJ)
Hang-seung the Repairman (수리공 항승)
'''
import sys
input=sys.stdin.readline
n, l=map(int, input().split())
a=list(map(int, input().split()))
a.sort()
L=0
cnt=0
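# Greedy sweep over the sorted leak positions: whenever a leak R is not yet
# covered by the current tape (L < R), start a new piece of tape of length l
# covering [R, R + l - 1].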
for R in a:
if L<R:
L=R+l-1
cnt+=1
print(cnt)
|
[
"[email protected]"
] | |
2ca0d45e9ea3b99ccef5f4796d87ba41c840ec09
|
8a780cb47eac9da046bdb5d6917f97a086887603
|
/problems/knight_probability_in_chessboard/solution.py
|
10dda0383885961f2992842af8d8eaf2428dcd79
|
[] |
no_license
|
dengl11/Leetcode
|
d16315bc98842922569a5526d71b7fd0609ee9fb
|
43a5e436b6ec8950c6952554329ae0314430afea
|
refs/heads/master
| 2022-12-20T03:15:30.993739 | 2020-09-05T01:04:08 | 2020-09-05T01:04:08 | 279,178,665 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 672 |
py
|
class Solution:
def knightProbability(self, N: int, K: int, r: int, c: int) -> float:
cache = {} # {(i, j, k): prob of next being on-board}
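        # query(i, j, K): probability that a knight at (i, j) with K moves
        # remaining stays on the board for all of them, averaging over the
        # 8 knight moves (memoized in `cache`).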
def query(i, j, K):
if i < 0 or i >= N or j < 0 or j >= N: return 0
if K == 0: return 1
if (i, j, K) in cache: return cache[(i, j, K)]
ans = 0
for di, dj in [(1, 2), (1, -2), (2, 1), (2, -1), (-1, 2), (-1, -2), (-2, 1), (-2, -1)]:
ii, jj = i + di, j + dj
ans += query(ii, jj, K-1)
ans = ans / 8
cache[(i, j, K)] = ans
return ans
return query(r, c, K)
|
[
"[email protected]"
] | |
913f92b3adfc7f05a0427c18c2608c21d2b86f48
|
21540ab033e180a3d94b270b7faffac7fe4af68f
|
/wordshop5/Exercise_1-3_page_158/Exercise_2.py
|
2d13b4e75725362eed75c0970cc5a55c08a9b96d
|
[] |
no_license
|
tuan102081/wordshop1.2.3.5
|
eaa344bdb04f565d1354b9476b4d4ecafc5cc7f3
|
70e75b56f48a2e5b1622d956f33831f80e64d368
|
refs/heads/master
| 2023-07-14T23:26:31.089484 | 2021-08-30T18:53:24 | 2021-08-30T18:53:24 | 401,411,439 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 640 |
py
|
"""
Author: Nguyen Duy Tuan
Date: 31/08/2021
Program: Exercise_02.py
Problem:
Assume that the variable data refers to the dictionary {'b':20, 'a':35}. Write the
values of the following expressions:
a. data['a']
b. data.get('c', None)
c. len(data)
d. data.keys()
e. data.values()
f. data.pop('b')
g. data # After the pop above
Solution:
display:
35
None
2
dict_keys(['b', 'a'])
dict_values([20, 35])
20
{'a': 35}
"""
data = {'b': 20, 'a': 35}
print(data['a'])
print(data.get('c', None))
print(len(data))
print(data.keys())
print(data.values())
print(data.pop('b'))
print(data)
|
[
"[email protected]"
] | |
1996b3e9a35ebcabee9dfb1f740e7a9d69fd5760
|
0cf3c46500aed987a4760acd4821957c2877c6c9
|
/tools/upgrade/ast.py
|
6ba70b0ecd977595005abccae9d1a7e02bc69416
|
[
"MIT"
] |
permissive
|
ahmed1231234/pyre-check
|
eca4de113c5346e28c8826a5ff53d3f8ec6c6ddd
|
aa40c5ea9095eb66ec63c361545b2c51df3e14ff
|
refs/heads/master
| 2022-04-23T11:03:25.798965 | 2020-04-18T18:37:38 | 2020-04-18T18:37:38 | 256,826,159 | 1 | 0 | null | 2020-04-18T18:35:55 | 2020-04-18T18:35:55 | null |
UTF-8
|
Python
| false | false | 1,787 |
py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import logging
import pathlib
from logging import Logger
from typing import Callable
from pyre_extensions import ListVariadic
Ts = ListVariadic("Ts")
LOG: Logger = logging.getLogger(__name__)
# pyre-fixme[11]: Annotation `Ts` is not defined as a type.
def verify_stable_ast(file_modifier: Callable[[Ts], None]) -> Callable[[Ts], None]:
# pyre-fixme[2]: Missing parameter annotation for *args
def wrapper(filename: str, *args, **kwargs) -> None:
# AST before changes
path = pathlib.Path(filename)
try:
text = path.read_text()
ast_before = ast.parse(text)
# AST after changes
file_modifier(filename, *args, **kwargs)
new_text = path.read_text()
try:
ast_after = ast.parse(new_text)
# Undo changes if AST does not match
if not ast.dump(ast_before) == ast.dump(ast_after):
LOG.warning(
"Attempted file changes modified the AST in %s. Undoing.",
filename,
)
path.write_text(text)
except Exception as e:
LOG.warning("Could not parse file %s. Undoing.", filename)
LOG.warning(e)
path.write_text(text)
except FileNotFoundError:
LOG.warning("File %s cannot be found, skipping.", filename)
return
return wrapper
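# A minimal usage sketch (the decorated function below is hypothetical, not
# part of this module):
#
#   @verify_stable_ast
#   def append_noop_comment(filename: str) -> None:
#       path = pathlib.Path(filename)
#       path.write_text(path.read_text() + "\n# no-op\n")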
|
[
"[email protected]"
] | |
f205428b32562f728bb76ab16080f526548226ee
|
7cd6950ab3034cb0cf403ee1b8410bf475360a8d
|
/venv/bin/schema-salad-tool
|
e251adb51c1dd93871748bd902611e801bb09b8e
|
[] |
no_license
|
mr-c/george_murray
|
ef6d5f77a4f4c0b64cbc64534ce23d7546a3cee0
|
612c68c6b27ed2d8097f1309820ccdbb05530176
|
refs/heads/master
| 2022-09-20T11:12:58.582547 | 2019-08-15T19:32:34 | 2019-08-15T19:32:34 | 268,844,811 | 0 | 0 | null | 2020-06-02T15:55:27 | 2020-06-02T15:55:26 | null |
UTF-8
|
Python
| false | false | 265 |
#!/Users/George1/Documents/GitHub/george_murray/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from schema_salad.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
fa72589e55f4fe20a47d28a533d5439e1b6ff87c
|
2c5b25d0b5d6ba66d013251f93ebf4c642fd787b
|
/accepted_codes/ZigZag_Conversion/ZigZag Conversion_278768995.py
|
fb8f43a76f31a8429e8a1102bb46a1a61c7b0a46
|
[] |
no_license
|
abhinay-b/Leetcode-Submissions
|
da8099ac54b5d36ae23db42580064d0f9d9bc63b
|
d034705813f3f908f555f1d1677b827af751bf42
|
refs/heads/master
| 2022-10-15T22:09:36.328967 | 2020-06-14T15:39:17 | 2020-06-14T15:39:17 | 259,984,100 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 475 |
py
|
class Solution:
def convert(self, s: str, rows: int) -> str:
if rows == 1 or len(s) == 0:
return s
substrLists = ["" for i in range(rows)]
pos = 0
step = -1
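        # Walk the rows top to bottom and bounce at row 0 and row rows - 1;
        # each character is appended to the row the zigzag currently visits.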
for char in s:
substrLists[pos] += char
if pos == 0 or pos == rows - 1:
step *= -1
pos += step
result = ""
for subStr in substrLists:
result += subStr
return(result)
|
[
"[email protected]"
] | |
0568628bcad505745ba6e87004b2f72c7bf18c6a
|
0556754cd4765d05a1d831c48933c5f299bb095d
|
/Oct-16-2020/assignment 216.py
|
f4928b698ad2e9638ab966f814421984d0e075f7
|
[] |
no_license
|
rohitbhatghare/python
|
4fa5e5883743023ced841892a13a9798b7686f39
|
248d265e02ecbc1270a87081af26537eb401e535
|
refs/heads/main
| 2023-02-03T04:32:15.716805 | 2020-12-21T11:33:27 | 2020-12-21T11:33:27 | 302,831,244 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 102 |
py
|
for i in range(0,5):
for j in range(69-i,64,-1):
print(chr(j), end=" ")
print()
|
[
"[email protected]"
] | |
ad0fdfb641c6e94049ce76fdcc83f75b4d70cc21
|
c8a04384030c3af88a8e16de4cedc4ef8aebfae5
|
/stubs/pandas/tests/series/test_internals.pyi
|
f4ba1fc107185d5351a176a5bf578b51da1198f9
|
[
"MIT"
] |
permissive
|
Accern/accern-xyme
|
f61fce4b426262b4f67c722e563bb4297cfc4235
|
6ed6c52671d02745efabe7e6b8bdf0ad21f8762c
|
refs/heads/master
| 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 |
MIT
| 2023-07-19T02:13:18 | 2019-12-09T20:21:59 |
Python
|
UTF-8
|
Python
| false | false | 1,056 |
pyi
|
# Stubs for pandas.tests.series.test_internals (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level,line-too-long,arguments-differ
# pylint: disable=no-member,too-few-public-methods,keyword-arg-before-vararg
# pylint: disable=super-init-not-called,abstract-method,redefined-builtin
class TestSeriesInternals:
def test_convert(self) -> None:
...
def test_convert_no_arg_error(self) -> None:
...
def test_convert_preserve_bool(self) -> None:
...
def test_convert_preserve_all_bool(self) -> None:
...
def test_constructor_no_pandas_array(self) -> None:
...
def test_astype_no_pandas_dtype(self) -> None:
...
def test_from_array(self) -> None:
...
def test_from_list_dtype(self) -> None:
...
def test_hasnans_unchached_for_series() -> None:
...
def test_put_deprecated() -> None:
...
|
[
"[email protected]"
] | |
583c3151de04b103a70ef84b46909cbd243bdf38
|
9a8fe99c7316dfce343be81d2c3c1a6c4f22572c
|
/set89.py
|
98de78eac2b9ee81cbd42fa33eff655c83fe0589
|
[] |
no_license
|
Srija-U/codekatabeginner
|
5e4d540484529dbafada04d3eac96eab7f98a693
|
8d088e04de1d48d9befb975697e9121f06bb164a
|
refs/heads/master
| 2020-04-30T00:58:51.445394 | 2019-07-01T15:43:05 | 2019-07-01T15:43:05 | 176,516,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
import math
l=[int(i) for i in input().split()]
p=l[0]*l[1]
r=math.sqrt(p)
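# p is a perfect square iff sqrt(p) has no fractional part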
if(r-math.floor(r)==0):
print("yes")
else:
print("no")
|
[
"[email protected]"
] | |
c40fdba0ebc728e600f6cb077840adff8ec86a16
|
5dfed5b8fbcb2a62af3aab3beee299a8405ad50b
|
/ch05-视频/5.VideoCapture.py
|
0be5f3af2bce0deef2d782be92053f349c6a26f6
|
[
"MIT"
] |
permissive
|
zgle-fork/OpenCV-Python-Tutorial
|
7de5a7eda667401b3c7ac0e9306c0b0650bb459f
|
5a42b32de208a7f11ec9d04880f4b00e8986a0e5
|
refs/heads/master
| 2023-03-02T11:01:04.984257 | 2021-02-11T19:10:14 | 2021-02-11T19:10:14 | 268,380,833 | 0 | 0 |
MIT
| 2020-05-31T23:18:58 | 2020-05-31T23:18:58 | null |
UTF-8
|
Python
| false | false | 2,326 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 21:06:22 2014
@author: duan
"""
'''
Note: if your program raises an error, first check whether your camera works
in other programs (e.g. Cheese on Linux).
'''
import numpy as np
import cv2
cap = cv2.VideoCapture(0)  # Most laptops have a built-in camera, so the index is 0; pass 1 or another index to select a different camera
'''
You can query capture properties with cap.get(propId), where propId is any
integer from 0 to 18.
Some of these values can be changed with cap.set(propId, value), where value
is the new value you want to set.
For example, cap.get(3) (cv2.CAP_PROP_FRAME_WIDTH) and cap.get(4)
(cv2.CAP_PROP_FRAME_HEIGHT) return the width and height of each frame.
By default this is 640x480, but ret = cap.set(3, 320) and
ret = cap.set(4, 240) change the size to 320x240.
'''
# ret=cap.set(3,320)
# ret=cap.set(4,240)
# ret = cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)  # limit the resolution to keep per-frame computation small
# ret = cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 270)
# proportional scaling (keep the aspect ratio)
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # propId 4, e.g. 720
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # propId 3, e.g. 1280
frame_height = int(480 / frame_width * frame_height)  # e.g. 270
ret = cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)  # height
ret = cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
# while (True):
while cap.isOpened():  # check that the capture initialized successfully; otherwise call cap.open()
# Capture frame-by-frame
    ret, frame = cap.read()  # ret is a boolean True/False
# print('frame shape:',frame.shape)#(720, 1280, 3)
    frame = cv2.flip(frame, flipCode=1)  # mirror left-right; mainly useful with a laptop webcam.
    # flipCode: 1 = horizontal flip; 0 = vertical flip; -1 = both
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame', gray)
cv2.setWindowTitle('frame', 'COLOR_BGR2GRAY')
    # Property = cv2.getWindowProperty('frame', 0)  # has no effect here
    # if cv2.waitKey(1) & 0xFF == ord('q'):  # does not work reliably here
# break
key = cv2.waitKey(delay=10)
if key == ord("q"):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
292ab22fc8c91f9f3a53b729047e4651abdbac4f
|
4ad809420a3cd82199b31fcb6033ad6b28c5ac60
|
/rustici_engine/models/xapi_interaction_component.py
|
85c0f932ab8c6ed4102f496b23b51da596ac6ce0
|
[] |
no_license
|
Myagi/python-rustici-engine-api
|
2e4eb21f01b156551a1f4d747aea466dec22f30c
|
20684845817cb9790b3bfc9be3db515f7ad5b0ee
|
refs/heads/master
| 2022-03-30T12:26:44.825580 | 2020-02-03T06:34:12 | 2020-02-03T06:34:12 | 237,883,063 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,874 |
py
|
# coding: utf-8
"""
Rustici Engine API
Rustici Engine API # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class XapiInteractionComponent(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'description': 'dict(str, str)'
}
attribute_map = {
'id': 'id',
'description': 'description'
}
def __init__(self, id=None, description=None): # noqa: E501
"""XapiInteractionComponent - a model defined in Swagger""" # noqa: E501
self._id = None
self._description = None
self.discriminator = None
self.id = id
if description is not None:
self.description = description
@property
def id(self):
"""Gets the id of this XapiInteractionComponent. # noqa: E501
:return: The id of this XapiInteractionComponent. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this XapiInteractionComponent.
:param id: The id of this XapiInteractionComponent. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def description(self):
"""Gets the description of this XapiInteractionComponent. # noqa: E501
:return: The description of this XapiInteractionComponent. # noqa: E501
:rtype: dict(str, str)
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this XapiInteractionComponent.
:param description: The description of this XapiInteractionComponent. # noqa: E501
:type: dict(str, str)
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(XapiInteractionComponent, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XapiInteractionComponent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
b124e445d79efd72ada13bc4b47a29bb1c1c7c9b
|
6c677098c78b3f410019ac26f116cd8539949d35
|
/snakeeyes/blueprints/admin/__init__.py
|
155ddeb8aded069a0c4eb38612f26529e8ea5df9
|
[
"MIT"
] |
permissive
|
Pythonian/bsawf
|
eb05dcf7eeb3fab10dad269f9018fc3aa56c967e
|
3e422a81cfb1b157119473c20b94a9a01f8b9672
|
refs/heads/master
| 2023-05-27T20:32:25.965703 | 2022-03-16T14:57:26 | 2022-03-16T14:57:26 | 253,907,876 | 0 | 0 |
MIT
| 2023-05-02T20:53:12 | 2020-04-07T20:44:53 |
Python
|
UTF-8
|
Python
| false | false | 51 |
py
|
from snakeeyes.blueprints.admin.views import admin
|
[
"[email protected]"
] | |
312a91bd56204674559e4af96b69bcf70afeae26
|
492e956cbc3f2d9af13b2b437760fba0451c3333
|
/setup.py
|
95956c2a83a15ec25d433e0f06201a46b22caaa7
|
[
"BSD-3-Clause"
] |
permissive
|
Ademan/markupsafe
|
40c6deb4b3035df61b65ce293f300d6b8433c045
|
6620b980d299b00d337e998f6cd13a800d51bcf9
|
refs/heads/master
| 2021-01-18T13:22:21.023640 | 2011-02-17T21:56:32 | 2011-02-17T21:56:32 | 1,380,064 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,104 |
py
|
import os
import sys
from setuptools import setup, Extension, Feature
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
# fail safe compilation shamelessly stolen from the simplejson
# setup.py file. Original author: Bob Ippolito
speedups = Feature(
'optional C speed-enhancement module',
standard=True,
ext_modules = [
Extension('markupsafe._speedups', ['markupsafe/_speedups.c']),
],
)
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors += (IOError,)
extra = {}
if sys.version_info >= (3, 0):
extra['use_2to3'] = True
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
"""This class allows C extension building to fail."""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
def echo(msg=''):
sys.stdout.write(msg + '\n')
readme = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
def run_setup(with_binary):
features = {}
if with_binary:
features['speedups'] = speedups
setup(
name='MarkupSafe',
version='0.10',
url='http://dev.pocoo.org/',
license='BSD',
author='Armin Ronacher',
author_email='[email protected]',
        description='Implements an XML/HTML/XHTML Markup safe string for Python',
long_description=readme,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
],
packages=['markupsafe'],
test_suite='markupsafe.tests.suite',
include_package_data=True,
cmdclass={'build_ext': ve_build_ext},
features=features,
**extra
)
try:
run_setup(True)
except BuildFailed:
LINE = '=' * 74
BUILD_EXT_WARNING = 'WARNING: The C extension could not be compiled, speedups are not enabled.'
echo(LINE)
echo(BUILD_EXT_WARNING)
echo('Failure information, if any, is above.')
echo('Retrying the build without the C extension now.')
echo()
run_setup(False)
echo(LINE)
echo(BUILD_EXT_WARNING)
echo('Plain-Python installation succeeded.')
echo(LINE)
|
[
"[email protected]"
] | |
00930b3ccfaefe10510cceee4ac31f23ac6bd4b4
|
f0932f59d37adfbba9307ee31e6f78ce3c256c4a
|
/notebook/20170329_test_multiple_spacers/pybeeswarm.py
|
aa5f458c6ce868ee71e82153fde9fd35c1cddda0
|
[] |
no_license
|
kalekundert/ligrna
|
3785a1e5fb8ed6d07839a5314029f3fc882d4471
|
843963973c34c4976f5adfbd4d03f5f1d0344423
|
refs/heads/master
| 2020-04-12T12:52:32.828100 | 2020-02-22T00:59:57 | 2020-02-22T00:59:57 | 162,505,099 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,563 |
py
|
"""
The MIT License (MIT)
Copyright (c) 2014 Melissa Gymrek <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import math
import matplotlib.pyplot
import numpy
import pandas
import sys
def beeswarm(values, positions=None, method="swarm",
ax=None, s=20, col="black", xlim=None, ylim=None,
labels=None, labelrotation="vertical", **kwargs):
"""
beeswarm(values, positions=None, method="swarm",
ax=None, s=20, col="black", xlim=None, ylim=None,
labels=None)
Inputs:
* values: an array of a sequence of vectors
* positions: sets the horizontal positions of the swarms.
Ticks and labels are set to match the positions.
If none, set positions to range(len(values))
Default: None
* method: how to jitter the x coordinates. Choose from
"swarm", "hex", "center", "square"
Default: swarm
* ax: use this axis for plotting. If none supplied, make a new one
Default: None
* s: size of points in points^2 (assuming 72 points/inch).
        Default: 20
* col: color of points. Can be:
- a single string: color all points that color
- a vector of strings length len(values): gives color for each group
- a vector of strings length sum([len(values[i]) for i in range(len(values))])
gives color for each point
- a vector of strings any other length: cycle through the list of colors.
(really pretty if not useful)
Default: "black"
        Note: colors can also be RGB tuples instead of strings.
    * xlim: tuple giving (xmin, xmax). If not specified, either get
        from the supplied ax or recalculate
    * ylim: tuple giving (ymin, ymax). If not specified, either get
        from the supplied ax or recalculate
* labels: list of labels for each group.
Default: range(len(values))
* labelrotation: rotation of x label.
Default: "vertical"
Returns:
* bs: pandas.DataFrame with columns: xorig, yorig, xnew, ynew, color
* ax: the axis used for plotting
"""
# Check things before we go on
if method not in ["swarm", "hex", "center", "square"]:
sys.stderr.write("ERROR: Invalid method.\n")
return
if len(values) == 0: return None
if not hasattr(values[0], "__len__"): values = [values]
if positions is None:
positions = range(len(values))
else:
if len(positions) != len(values):
sys.stderr.write("ERROR: number of positions must match number of groups\n")
return None
yvals = list(itertools.chain.from_iterable(values))
xvals = list(itertools.chain.from_iterable([[positions[i]]*len(values[i]) for i in range(len(values))]))
# Get color vector
if type(col) == str:
colors = [[col]*len(values[i]) for i in range(len(values))]
elif type(col) == list:
if len(col) == len(positions):
colors = []
for i in range(len(col)):
colors.append([col[i]]*len(values[i]))
elif len(col) == len(yvals):
colors = []
sofar = 0
for i in range(len(values)):
colors.append(col[sofar:(sofar+len(values[i]))])
sofar = sofar + len(values[i])
else:
cx = col*(len(yvals)/len(col)) # hope for the best
if len(cx) < len(yvals):
cx.extend(col[0:(len(yvals)-len(cx))])
colors = []
sofar = 0
for i in range(len(values)):
colors.append(cx[sofar:(sofar+len(values[i]))])
sofar = sofar + len(values[i])
else:
sys.stderr.write("ERROR: Invalid argument for col\n")
return
# Get axis limits
if ax is None:
fig = matplotlib.pyplot.figure()
ax = fig.add_subplot(111)
if xlim is not None:
ax.set_xlim(left=xlim[0], right=xlim[1])
else:
xx = max(positions) - min(positions) + 1
xmin = min(positions)-0.1*xx
xmax = max(positions)+0.1*xx
ax.set_xlim(left=xmin, right=xmax)
if ylim is not None:
ax.set_ylim(bottom=ylim[0], top=ylim[1])
else:
yy = max(yvals) - min(yvals)
ymin = min(yvals)-.05*yy
ymax = max(yvals)+0.05*yy
ax.set_ylim(bottom=ymin, top=ymax)
# Determine dot size
figw, figh = ax.get_figure().get_size_inches()
w = (ax.get_position().xmax-ax.get_position().xmin)*figw
h = (ax.get_position().ymax-ax.get_position().ymin)*figh
xran = ax.get_xlim()[1]-ax.get_xlim()[0]
yran = ax.get_ylim()[1]-ax.get_ylim()[0]
xsize=math.sqrt(s)*1.0/72*xran*1.0/(w*0.8)
ysize=math.sqrt(s)*1.0/72*yran*1.0/(h*0.8)
# Get new arrangements
if method == "swarm":
bs = _beeswarm(positions, values, xsize=xsize, ysize=ysize, method="swarm", colors=colors)
else:
bs = _beeswarm(positions, values, ylim=ax.get_ylim(), xsize=xsize, ysize=ysize, method=method, colors=colors)
# plot
ax.scatter(bs["xnew"], bs["ynew"], c=list(bs["color"]), **kwargs)
ax.set_xticks(positions)
if labels is not None:
ax.set_xticklabels(labels, rotation=labelrotation)
return bs, ax
def unsplit(x,f):
"""
same as R's unsplit function
Read of the values specified in f from x to a vector
Inputs:
x: dictionary of value->[items]
f: vector specifying values to be read off to the vector
"""
y = pandas.DataFrame({"y":[None]*len(f)})
f = pandas.Series(f)
for item in set(f):
y.ix[f==item,"y"] = x[item]
return y["y"]
def grid(x, ylim, xsize=0, ysize=0, method="hex", colors="black"):
"""
Implement the non-swarm arrangement methods
"""
size_d = ysize
if method == "hex": size_d = size_d*math.sqrt(3)/2
size_g = xsize
breaks = numpy.arange(ylim[0], ylim[1]+size_d, size_d)
mids = (pandas.Series(breaks[:-1]) + pandas.Series(breaks[1:]))*1.0/2
d_index = pandas.Series(pandas.cut(pandas.Series(x), bins=breaks, labels=False))
d_pos = d_index.apply(lambda x: mids[x])
v_s = {}
for item in set(d_index):
odd_row = (item%2)==1
vals = range(list(d_index).count(item))
if method == "center":
v_s[item] = list(map(lambda a: a - numpy.mean(vals), vals))
elif method == "square":
v_s[item] = list(map(lambda a: a - math.floor(numpy.mean(vals)), vals))
elif method == "hex":
if odd_row:
v_s[item] = list(map(lambda a: a - math.floor(numpy.mean(vals)) - 0.25, vals))
else:
v_s[item] = list(map(lambda a: a - math.ceil(numpy.mean(vals)) + 0.25, vals))
else:
sys.stderr.write("ERROR: this block should never execute.\n")
return
x_index = unsplit(v_s, d_index)
if type(colors) == str: colors = [colors]*len(x_index)
return x_index.apply(lambda x: x*size_g), d_pos, colors
def swarm(x, xsize=0, ysize=0, colors="black"):
"""
Implement the swarm arrangement method
"""
gsize = xsize
dsize = ysize
out = pandas.DataFrame({"x": [item*1.0/dsize for item in x], "y": [0]*len(x), "color": colors, "order": range(len(x))})
out.sort_index(by='x', inplace=True)
if out.shape[0] > 1:
for i in range(1, out.shape[0]):
xi = out["x"].values[i]
yi = out["y"].values[i]
pre = out[0:i] # previous points
wh = (abs(xi-pre["x"]) < 1) # which are potentially overlapping
if any(wh):
pre = pre[wh]
poty_off = pre["x"].apply(lambda x: math.sqrt(1-(xi-x)**2)) # potential y offset
poty = pandas.Series([0] + (pre["y"] + poty_off).tolist() + (pre["y"]-poty_off).tolist()) # potential y values
poty_bad = []
for y in poty:
dists = (xi-pre["x"])**2 + (y-pre["y"])**2
if any([item < 0.999 for item in dists]): poty_bad.append(True)
else: poty_bad.append(False)
poty[poty_bad] = numpy.infty
abs_poty = [abs(item) for item in poty]
newoffset = poty[abs_poty.index(min(abs_poty))]
out.loc[i,"y"] = newoffset
else:
out.loc[i,"y"] = 0
out.ix[numpy.isnan(out["x"]), "y"] = numpy.nan
# Sort to maintain original order
out.sort_index(by="order", inplace=True)
return out["y"]*gsize, out["color"]
def _beeswarm(positions, values, xsize=0, ysize=0, ylim=None, method="swarm", colors="black"):
"""
Call the appropriate arrangement method
"""
xnew = []
ynew = []
xorig = []
yorig = []
newcolors = []
# group y by X
for i in range(len(positions)):
xval = positions[i]
ys = values[i]
cs = colors[i]
if method == "swarm":
g_offset, ncs = swarm(ys, xsize=xsize, ysize=ysize, colors=cs)
ynew.extend(ys)
else:
g_offset, new_values, ncs = grid(ys, xsize=xsize, ysize=ysize, ylim=ylim, method=method, colors=cs)
ynew.extend(new_values)
xnew.extend([xval+item for item in g_offset])
yorig.extend(ys)
xorig.extend([xval]*len(ys))
newcolors.extend(ncs)
out = pandas.DataFrame({"xnew":xnew, "yorig": yorig, "xorig":xorig, "ynew": ynew, "color": newcolors})
return out
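# A minimal usage sketch (the data values below are made up for illustration):
if __name__ == "__main__":
    demo_values = [[1, 2, 2, 3, 3, 3], [2, 3, 3, 4, 4, 5]]
    bs, ax = beeswarm(demo_values, labels=["group A", "group B"])
    matplotlib.pyplot.show()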
|
[
"[email protected]"
] | |
8bd88de91b37e75f8e517c0a73942c18f57c5290
|
8d1309e560216fb84aee6b5fac4f7d8fac07f5d6
|
/liuxuheng/test.py
|
51b323da6666957cd711af2810efb513338f87b4
|
[] |
no_license
|
NWU-SMART/2019TEAMNEW-TrainCodes
|
2dfb4d160d6419d96538f033b1332c2cb171e3e0
|
91a6c8b9395fff72079f21c4a26401179f3ad8a6
|
refs/heads/master
| 2022-12-23T22:03:39.030888 | 2020-10-05T07:06:00 | 2020-10-05T07:06:00 | 270,465,529 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20 |
py
|
a=3
b=4
print(a+b)
|
[
"[email protected]"
] | |
2be1f29cb247b9a78bccf284d46cc677e921eb76
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/missing_data/test_missing_data_air_passengers_None_Median.py
|
2a181eddd4b0f9ad5e0f81313d275c09db13c348
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 128 |
py
|
import tests.missing_data.test_missing_data_air_passengers_generic as gen
gen.test_air_passengers_missing_data(None, 'Median')
|
[
"[email protected]"
] | |
3a9d64a9ea107918ee80b2d18620eca72ba779b1
|
76c3262a1ed4e6cbbf5008e8fc79c917035fe181
|
/src/mysite/posts/views.py
|
b12464ced838be501e90822580b6b41435a34213
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"bzip2-1.0.6",
"Python-2.0",
"TCL",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
aiegoo/django
|
ba6ee6be02b4e0fdf2fce007cb3ef739974ade35
|
2f508a318edd26403509a61eb44e99fda8b7ed64
|
refs/heads/master
| 2023-01-12T21:56:16.202262 | 2021-06-08T09:27:44 | 2021-06-08T09:27:44 | 211,546,261 | 0 | 0 |
MIT
| 2022-12-26T20:15:56 | 2019-09-28T18:41:35 |
Tcl
|
UTF-8
|
Python
| false | false | 164 |
py
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def post_home(request):
    return HttpResponse("<h1>hello</h1>")
|
[
"[email protected]"
] | |
ae88ca665498f4b7533b6dac23ecbf987436a17f
|
52b79e4cd1e26969a3ebb3bca8620519071bea98
|
/answers/17_serialization/task_17_3.py
|
f5210b54e2ac9f160565ac6b077cafd54537708e
|
[] |
no_license
|
hariram32/pyneng-answers-en
|
631bc149b8a219a2de86de82681ffba3d1ff30ee
|
84b7240b00d3a4ab9011952db662f716d1cd31b8
|
refs/heads/main
| 2023-03-16T00:12:38.954431 | 2021-03-09T15:40:10 | 2021-03-09T15:40:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,543 |
py
|
# -*- coding: utf-8 -*-
"""
Task 17.3
Create a function parse_sh_cdp_neighbors that processes the output of
the show cdp neighbors command.
The function expects, as an argument, the output of the command
as a single string (not a filename).
The function should return a dictionary that describes the connections between devices.
For example, if the following output was passed as an argument:
R4>show cdp neighbors
Device ID Local Intrfce Holdtme Capability Platform Port ID
R5 Fa 0/1 122 R S I 2811 Fa 0/1
R6 Fa 0/2 143 R S I 2811 Fa 0/0
The function should return a dictionary like this:
{'R4': {'Fa 0/1': {'R5': 'Fa 0/1'},
'Fa 0/2': {'R6': 'Fa 0/0'}}}
Interfaces must be written with a space: that is, Fa 0/0, not Fa0/0.
Check the function on the contents of the sh_cdp_n_sw1.txt file
"""
import re
def parse_sh_cdp_neighbors(command_output):
regex = re.compile(
r"(?P<r_dev>\w+) +(?P<l_intf>\S+ \S+)"
r" +\d+ +[\w ]+ +\S+ +(?P<r_intf>\S+ \S+)"
)
connect_dict = {}
l_dev = re.search(r"(\S+)[>#]", command_output).group(1)
connect_dict[l_dev] = {}
for match in regex.finditer(command_output):
r_dev, l_intf, r_intf = match.group("r_dev", "l_intf", "r_intf")
connect_dict[l_dev][l_intf] = {r_dev: r_intf}
return connect_dict
if __name__ == "__main__":
with open("sh_cdp_n_sw1.txt") as f:
print(parse_sh_cdp_neighbors(f.read()))
|
[
"[email protected]"
] | |
23e96be12a0904803e10a2ada162a77caaea4993
|
1850d2222b504bfa3c2390dc6fc186b3260334d0
|
/src/pbhla/dictionary.py
|
32ab51481614f77e383c8f8ee6393b79722894c4
|
[] |
no_license
|
la0hu2006/HlaTools
|
2f6964645615c6c35517f064111b362407701dea
|
082e45cd1cbdad941f0df59d71a0ca59bfdabf0c
|
refs/heads/master
| 2021-01-18T21:42:31.258878 | 2016-08-29T18:50:11 | 2016-08-29T18:50:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,290 |
py
|
import os, re, csv, logging
from pbcore.io.FastaIO import FastaReader
from pbhla.io.BlasrIO import BlasrReader, record_to_string
from pbhla.io.SamIO import SamReader
from pbhla.utils import get_base_sequence_name
log = logging.getLogger()
def create_amp_assem_reference( m1_file, reference=None ):
log.info('Parsing Blasr M1 results from "{0}"'.format( m1_file ))
results = {}
for record in BlasrReader( m1_file ):
qname = get_base_sequence_name( record.qname )
locus = qname.split('_')[1]
if qname in results:
msg = 'Duplicate sequence ids found! "{0}"'.format( qname )
log.info( msg )
raise KeyError( msg )
if reference:
results[qname] = reference[locus]
else:
results[qname] = locus
log.info('Finished reading Blasr results')
return results
def create_m1_reference( m1_file, reference=None ):
log.info('Parsing Blasr M1 results from "{0}"'.format( m1_file ))
results = {}
for record in BlasrReader( m1_file ):
qname = get_base_sequence_name( record.qname )
tname = get_base_sequence_name( record.tname )
if qname in results:
msg = 'Duplicate sequence ids found! "{0}"'.format( qname )
log.info( msg )
raise KeyError( msg )
if reference:
results[qname] = reference[tname]
else:
results[qname] = tname
log.info('Finished reading Blasr results')
return results
def create_m5_reference( m5_file ):
log.info('Parsing Blasr M5 results from "{0}"'.format( m5_file ))
results = {}
diffs = {}
for record in BlasrReader( m5_file ):
qname = get_base_sequence_name( record.qname )
tname = get_base_sequence_name( record.tname )
diff_count = int(record.nmis) + int(record.nins) + int(record.ndel)
if qname not in diffs:
results[qname] = tname
diffs[qname] = diff_count
elif diffs[qname] > diff_count:
results[qname] = tname
diffs[qname] = diff_count
log.info('Finished reading Blasr results')
return results
def create_sam_reference( sam_file, reference=None ):
log.info('Parsing SAM alignments from "{0}"'.format(sam_file))
results = {}
for record in SamReader(sam_file):
name = get_base_sequence_name( record.rname )
if record.qname in results:
msg = 'Duplicate sequence ids found! "{0}"'.format( record.qname )
log.info( msg )
raise KeyError( msg )
if reference:
results[record.qname] = reference[name]
else:
results[record.qname] = name
log.info('Finished reading SAM file results')
return results
def create_phased_reference( phased_fofn ):
log.info('Parsing Phased FOFN alignments from "{0}"'.format(phased_fofn))
results = {}
with open(phased_fofn, 'r') as handle:
for line in handle:
fasta_path = line.strip()
fasta_file = os.path.basename( fasta_path )
contig_name = fasta_file.split('.')[0] + '_cns'
for record in FastaReader(fasta_path):
name = record.name.split()[0]
results[name] = contig_name
log.info('Finished reading phased FOFN results')
return results
def filter_m5_file( m5_file, filtered_file ):
"""
Filter an M5 alignment file to contain only the alignments with the fewest diffs
"""
log.info('Filtering Blasr M5 results from "{0}"'.format( m5_file ))
selected = {}
diffs = {}
count = 0
for record in BlasrReader( m5_file ):
count += 1
diff_count = int(record.nmis) + int(record.nins) + int(record.ndel)
if record.qname not in diffs:
selected[record.qname] = record
diffs[record.qname] = diff_count
elif diffs[record.qname] > diff_count:
selected[record.qname] = record
diffs[record.qname] = diff_count
    log.info('Selected %s records from %s alignments' % (len(selected), count))
with open( filtered_file, 'w' ) as output:
for record in selected.itervalues():
output.write('%s\n' % record_to_string( record ))
log.info('Finished filtering Blasr results')
|
[
"[email protected]"
] | |
b36a85f206bf2bb3cab6829401349d38f3b51bd1
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D98B/SAFHAZD98BUN.py
|
a4eef18c6de9d10ed241bd2e4d7eeacdbcf968ba
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null |
UTF-8
|
Python
| false | false | 2,765 |
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD98BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'NAD', MIN: 0, MAX: 10, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'CTA', MIN: 0, MAX: 10, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'DOC', MIN: 1, MAX: 1000, LEVEL: [
{ID: 'IMD', MIN: 0, MAX: 999},
{ID: 'PIA', MIN: 0, MAX: 10},
{ID: 'MEA', MIN: 0, MAX: 10},
{ID: 'RCS', MIN: 0, MAX: 10},
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'NAD', MIN: 0, MAX: 10, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'CTA', MIN: 0, MAX: 10, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'SFI', MIN: 0, MAX: 20, LEVEL: [
{ID: 'PIA', MIN: 0, MAX: 10},
{ID: 'EQD', MIN: 0, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'NAD', MIN: 0, MAX: 10, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'CTA', MIN: 0, MAX: 10, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'HAN', MIN: 0, MAX: 10, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 99},
]},
{ID: 'IMD', MIN: 0, MAX: 999, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 10},
{ID: 'PCD', MIN: 0, MAX: 10},
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
]},
{ID: 'DGS', MIN: 0, MAX: 10, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'PAC', MIN: 0, MAX: 10, LEVEL: [
{ID: 'PCI', MIN: 0, MAX: 10},
]},
]},
{ID: 'CCI', MIN: 0, MAX: 999, LEVEL: [
{ID: 'IMD', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'CAV', MIN: 0, MAX: 10},
{ID: 'RFF', MIN: 0, MAX: 10, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 10},
]},
{ID: 'MEA', MIN: 0, MAX: 10, LEVEL: [
{ID: 'TEM', MIN: 0, MAX: 10},
{ID: 'DTM', MIN: 0, MAX: 10},
{ID: 'RFF', MIN: 0, MAX: 10},
{ID: 'FTX', MIN: 0, MAX: 99},
]},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"[email protected]"
] | |
3857b3e7f9b4eeb9078b49a3de87dabf2611ef10
|
bb300c03d353e7752d20909995d2b0e4818d785d
|
/app1/admin.py
|
ca0089e5fcc6dd1e454695c7fe3e0900521046e2
|
[] |
no_license
|
ksuvarna85/django_unicode
|
3fe3340b63e92812c17a379a934fc8d1d3fc91bc
|
79d833a3009acf034a194a8daa71b6e8a209f748
|
refs/heads/master
| 2022-12-13T00:10:59.156715 | 2020-09-12T14:35:53 | 2020-09-12T14:35:53 | 292,186,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,179 |
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from django.contrib.auth.forms import UserCreationForm
from .models import User, Student, Teacher
# Register your models here.
class BaseUserAdmin(UserAdmin):
list_display = ['email', 'is_admin', ]
search_fields = ("email", 'sap_id', 'is_admin', )
readonly_fields = (
'date_joined',
'last_login',
)
    filter_horizontal = ()
list_filter = ()
fieldsets = ()
class StudentAdmin(UserAdmin):
list_display = ('email','sap_id', 'year')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class TeacherAdmin(UserAdmin):
list_display = ('email', 'qualification', )
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class StudentDisplayAdmin(UserAdmin):
list_display = ['email', 'sap_id', ]
ordering = ['email']
search_fields = ['email']
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.unregister(Group)
admin.site.register(User, BaseUserAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Teacher, TeacherAdmin)
|
[
"[email protected]"
] | |
17d9b195670d4321d9dce54ad7d23f17981f2e8a
|
45b64f620e474ac6d6b2c04fbad2730f67a62b8e
|
/Varsity-Final-Project-by-Django-master/.history/project/project/urls_20210223211934.py
|
4c004fae9ae39239424d3a2f1db73d5e353e5265
|
[] |
no_license
|
ashimmitra/Final-Project
|
99de00b691960e25b1ad05c2c680015a439277e0
|
a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003
|
refs/heads/master
| 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 127 |
py
|
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"[email protected]"
] | |
b3ae963aeaacd1244bd96e68dca519a28aa5a5d2
|
525c6a69bcf924f0309b69f1d3aff341b06feb8e
|
/sunyata/backend/pytorch/layer/dot/conv.py
|
3c37be58bd4b04de352c0ba95e80282519e3fdcb
|
[] |
no_license
|
knighton/sunyata_2017
|
ba3af4f17184d92f6277d428a81802ac12ef50a4
|
4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8
|
refs/heads/master
| 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,550 |
py
|
from torch.nn import functional as F
from ....base.layer.dot.conv import BaseConvAPI
class PyTorchConvAPI(BaseConvAPI):
def __init__(self):
BaseConvAPI.__init__(self)
self._ndim2conv = {
1: self.conv1d,
2: self.conv2d,
3: self.conv3d,
}
def conv(self, x, kernel, bias, stride, pad, dilation):
ndim = x.dim() - 2
return self._ndim2conv[ndim](x, kernel, bias, stride, pad, dilation)
def _conv(self, func_name, x, kernel, bias, stride, pad, dilation):
ndim = self.ndim(x) - 2
face = self.shape(kernel)[2:]
stride = self.to_shape(stride, ndim)
dilation = self.to_shape(dilation, ndim)
pre_pad, conv_singles_pad = \
self.unpack_conv_pad_to_singles(face, pad, dilation)
func = getattr(F, func_name)
if ndim == 1:
stride, = stride
conv_singles_pad, = conv_singles_pad
dilation, = dilation
if pre_pad is not None:
x = self.constant_pad(x, pre_pad, 0)
return func(x, kernel, bias, stride, conv_singles_pad, dilation)
def conv1d(self, x, kernel, bias, stride, pad, dilation):
return self._conv('conv1d', x, kernel, bias, stride, pad, dilation)
def conv2d(self, x, kernel, bias, stride, pad, dilation):
return self._conv('conv2d', x, kernel, bias, stride, pad, dilation)
def conv3d(self, x, kernel, bias, stride, pad, dilation):
return self._conv('conv3d', x, kernel, bias, stride, pad, dilation)
|
[
"[email protected]"
] | |
cea44ccf0270fc4975dcc281347d17019d96aa29
|
2a68ce2f0f47370e2f57b9279cc8e1aab85e26da
|
/trojsten/results/migrations/0001_squashed_0003_auto_20160608_1143.py
|
b621f0ef934d8daff3195390d0dd3e897f207a6d
|
[
"MIT"
] |
permissive
|
trojsten/web
|
52007c3d575b21603bf205c1e7294a482eedbf85
|
97b7b3ae3ac46be786bde9c49a2cae6609dbf50f
|
refs/heads/master
| 2023-08-17T23:30:16.857469 | 2023-07-30T16:31:34 | 2023-07-30T16:31:34 | 10,618,952 | 6 | 10 |
MIT
| 2023-09-04T19:09:09 | 2013-06-11T10:04:10 |
Python
|
UTF-8
|
Python
| false | false | 5,589 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-08 10:11
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("people", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("contests", "0003_category_task"),
("schools", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="FrozenPoints",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"description_points",
models.CharField(max_length=10, verbose_name="body za popis"),
),
("source_points", models.CharField(max_length=10, verbose_name="body za program")),
("sum", models.CharField(max_length=10, verbose_name="body")),
(
"task",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="contests.Task",
verbose_name="\xfaloha",
),
),
],
options={
"verbose_name": "Zmrazen\xe9 body za \xfalohu",
"verbose_name_plural": "Zmrazen\xe9 body za \xfalohy",
},
),
migrations.CreateModel(
name="FrozenResults",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"is_single_round",
models.BooleanField(verbose_name="vynecha\u0165 predo\u0161l\xe9 kol\xe1"),
),
(
"has_previous_results",
models.BooleanField(
default=False, verbose_name="zah\u0155\u0148a predo\u0161l\xe9 kol\xe1"
),
),
("time", models.DateTimeField(auto_now_add=True, verbose_name="\u010das")),
(
"category",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="contests.Category",
verbose_name="kateg\xf3ria",
),
),
(
"round",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="contests.Round",
verbose_name="kolo",
),
),
],
options={
"verbose_name": "Zmrazen\xe1 v\xfdsledkovka",
"verbose_name_plural": "Zmrazen\xe9 v\xfdsledkovky",
},
),
migrations.CreateModel(
name="FrozenUserResult",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("rank", models.IntegerField(verbose_name="poradie")),
("prev_rank", models.IntegerField(blank=True, null=True, verbose_name="poradie")),
("fullname", models.CharField(max_length=500, verbose_name="pln\xe9 meno")),
("school_year", models.IntegerField(verbose_name="ro\u010dn\xedk")),
(
"previous_points",
models.CharField(
max_length=10, verbose_name="body z predo\u0161l\xfdch k\xf4l"
),
),
("sum", models.CharField(max_length=10, verbose_name="suma")),
(
"frozenresults",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="results.FrozenResults",
verbose_name="v\xfdsledkovka",
),
),
(
"original_user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
verbose_name="p\xf4vodn\xfd pou\u017e\xedvate\u013e",
),
),
(
"school",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="schools.School",
verbose_name="\u0161kola",
),
),
(
"task_points",
models.ManyToManyField(
to="results.FrozenPoints", verbose_name="body za \xfalohy"
),
),
],
options={
"verbose_name": "Zmrazen\xfd v\xfdsledok",
"verbose_name_plural": "Zmrazen\xe9 v\xfdsledky",
},
),
]
|
[
"[email protected]"
] | |
ef337ec276cc312703505f50f3e225a52769011e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02233/s579323796.py
|
2066403e585a327111189ca533edc9056ec5aec9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
from functools import lru_cache
n = int(input())
@lru_cache(maxsize=None)
def fib(n):
if n==0 or n==1:
return 1
else:
return fib(n-1)+fib(n-2)
print(fib(n))
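# Note (added illustration): with the base cases fib(0) == fib(1) == 1 used above,
# fib(5) evaluates to 8. lru_cache memoizes each fib(k), so the naive recursion
# runs in O(n) calls instead of exponential time.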
|
[
"[email protected]"
] | |
24191139d0b94f103b412d4ed31f6b47eb63484e
|
df4a7c46c46d1eca6570493b9707bdf64e54f8d3
|
/py/35.search-insert-position.py
|
33dd984473f03e45395deae0415f989a0023fd70
|
[] |
no_license
|
CharmSun/my-leetcode
|
52a39bf719c507fb7032ed424fe857ba7340aea3
|
5325a56ba8c40d74d9fef2b19bac63a4e2c44a38
|
refs/heads/master
| 2023-03-29T06:39:49.614264 | 2021-03-28T16:33:52 | 2021-03-28T16:33:52 | 261,364,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 503 |
py
|
#
# @lc app=leetcode id=35 lang=python3
#
# [35] Search Insert Position
#
# @lc code=start
from typing import List
## Binary search: return the leftmost index at which target can be inserted
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
left = 0
right = len(nums) - 1
while left <= right:
mid = (left + right) // 2
if target <= nums[mid]:
right = mid - 1
else:
left = mid + 1
return left
# @lc code=end
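# Quick sanity check (hypothetical usage, outside the LeetCode harness):
# Solution().searchInsert([1, 3, 5, 6], 5) -> 2, and with target 2 -> 1,
# since the loop converges on the leftmost index where target could be inserted.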
|
[
"[email protected]"
] | |
6a9004fa89106410d1390d87e6956d939af9fc41
|
e6d4a87dcf98e93bab92faa03f1b16253b728ac9
|
/algorithms/python/imageSmoother/imageSmoother.py
|
710c11b1f7cfe46788763934ba78cc33142fdb3e
|
[] |
no_license
|
MichelleZ/leetcode
|
b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f
|
a390adeeb71e997b3c1a56c479825d4adda07ef9
|
refs/heads/main
| 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 783 |
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/image-smoother/
# Author: Miao Zhang
# Date: 2021-02-26
from typing import List
class Solution:
def imageSmoother(self, M: List[List[int]]) -> List[List[int]]:
m = len(M)
n = len(M[0])
res = [[0 for _ in range(n)] for _ in range(m)]
dirs = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1), (0, 0)]
for i in range(m):
for j in range(n):
cnt = 0
for d in dirs:
x = i + d[0]
y = j + d[1]
if 0 <= x < m and 0 <= y < n:
res[i][j] += M[x][y]
cnt += 1
res[i][j] //= cnt
return res
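# Worked example (the classic one for this problem): for
# M = [[1,1,1],[1,0,1],[1,1,1]] every neighborhood sum floor-divided by its
# cell count is 0, so the result is [[0,0,0],[0,0,0],[0,0,0]].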
|
[
"[email protected]"
] | |
bb049e65d59a06ac07b31316aaa604e7231f451e
|
0f9a97d48a9f0179bcf1e3d80c08340096eb561e
|
/ДЗ-10. Словари/E. Самое частое слово.py
|
5ab4d26f6bf5e6e5c8e4cc2561b81c51baac99e4
|
[] |
no_license
|
dmitryokh/python
|
96d8ec8c3f2d3428b90d510a1003aecf102b13d0
|
8efe761412779bed9a7516832d3152843088fa43
|
refs/heads/master
| 2020-04-24T03:08:42.865813 | 2019-02-20T11:41:52 | 2019-02-20T11:41:52 | 171,661,235 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 459 |
py
|
input = open('input.txt', 'r')
text = input.readline()
wordcount = {}
maxword = 0
while text != "":
text = text.split()
for word in text:
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
if wordcount[word] > maxword:
maxword = wordcount[word]
text = input.readline()
for word in sorted(wordcount):
if wordcount[word] == maxword:
print(word)
break
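# Hypothetical trace: for an input.txt containing "b a b a a", wordcount becomes
# {'b': 2, 'a': 3}, maxword is 3, and the alphabetically first word with that
# count ('a') is printed.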
|
[
"[email protected]"
] | |
5080faaeaef838a6382729f2b935ab97aebfee4a
|
6efc62bc1aa82e09eb1740cb795ddb622d9069a1
|
/config/migrations/0002_auto_20181224_2241.py
|
740333528a41b62ca7887d99c9f83b889f1f49cc
|
[] |
no_license
|
EruDev/typeidea
|
c1147743a2d062cb2b21c7bf98db9377345809ef
|
8c8889ac75bd09298b93528d618fdffdae93ea03
|
refs/heads/master
| 2020-04-12T04:20:06.123801 | 2018-12-28T01:11:27 | 2018-12-28T01:11:27 | 147,651,297 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 457 |
py
|
# Generated by Django 2.0.4 on 2018-12-24 22:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('config', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sidebar',
name='display_type',
field=models.PositiveIntegerField(choices=[(1, '展示'), (0, '隐藏')], default=1, verbose_name='展示类型'),
),
]
|
[
"[email protected]"
] | |
dca16d10dcceb0ac60b9e637079684ea037c63f1
|
a9510540f25112a13a7b10772d8b12df4f80fcf8
|
/edsys_sequence/ir_sequence.py
|
e2b4ddb1b45cec29ac47cbe95b79b227cfb1cc46
|
[] |
no_license
|
babarlhr/edsys10
|
c291b7eae643bbd25c961e829beca9f5b108845e
|
84b43d0ed19145c88fa142e6cf1fa691fa9fedce
|
refs/heads/master
| 2022-03-15T03:03:27.479767 | 2019-11-20T21:22:50 | 2019-11-20T21:22:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,634 |
py
|
from eagle import models, fields, api, _
class ir_sequence(models.Model):
_inherit = 'ir.sequence'
model_id = fields.Many2one("ir.model", 'Model')
#field_id = fields.Many2one('ir.model.fields', 'Field', domain="[('model_id', '=', model_id), ('ttype', '=', 'integer')]")
field_id = fields.Many2one('ir.model.fields', 'Field', domain="[('model_id', '=', model_id)]")
@api.multi
def generate_sequence_button(self):
if self.model_id.model == 'registration' :
reg_ids = self.env['registration'].search([('state','!=', 'done')])
for reg_id in reg_ids :
number_seq = {self.field_id.name : self._next()}
reg_id.write(number_seq)
if self.model_id.model == 'account.voucher' :
            voucher_ids = self.env['account.voucher'].search([('state','=', 'draft')])
            for voucher_id in voucher_ids :
                number_seq = {self.field_id.name : self._next()}
                voucher_id.write(number_seq)
if self.model_id.model == 'account.invoice' :
invoice_ids = self.env['account.invoice'].search([('state','not in', ('paid', 'cancel'))])
for invoice_id in invoice_ids :
number_seq = {self.field_id.name : self._next()}
invoice_id.write(number_seq)
if self.model_id.model == 're.reg.waiting.responce.parents' :
re_reg_ids = self.env['re.reg.waiting.responce.parents'].search([('state','not in', ('re_registration_confirmed', 'tc_expected'))])
for re_reg_id in re_reg_ids :
number_seq = {self.field_id.name : self._next()}
re_reg_id.write(number_seq)
if self.model_id.model == 'trensfer.certificate' :
tc_ids = self.env['trensfer.certificate'].search([('state','not in', ('tc_complete', 'tc_cancel'))])
for tc_id in tc_ids :
number_seq = {self.field_id.name : self._next()}
tc_id.write(number_seq)
if self.model_id.model == 'hr.employee' :
emp_ids = self.env['hr.employee'].search([('employee_state','in', ('probation', 'employee'))])
for emp_id in emp_ids :
#if emp_id.employee_code :
# emp_id.biometric_id = emp_id.employee_code
#number_seq = {self.field_id.name : self._next()}
#emp_id.write(number_seq)
if not emp_id.employee_code :
number_seq = {self.field_id.name : self._next()}
emp_id.write(number_seq)
|
[
"[email protected]"
] | |
f83fcd905c084cce98fc955ccf1e309f691d2eec
|
fb78fd824e904705fb1ee09db8b3c20cc3902805
|
/python-scripts/points.py
|
b8ee9310c52b85f1fc806c4acc7ef631e09df80e
|
[] |
no_license
|
Roderich25/mac
|
8469833821ac49c539a744db29db5a41d755ad55
|
4f7fe281c88f0199b85d0ac99ce41ffb643d6e82
|
refs/heads/master
| 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,073 |
py
|
#!/usr/bin/env python3
from random import randint
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import cm
def set_direction():
while True:
x = randint(-1, 1)
y = randint(-1, 1)
if x != 0 or y != 0:
break
return x, y
class Point:
def __init__(self, x, y, m, color):
self.x = x
self.y = y
self.n = 0
self.m = m
self.color = color
self.dir_x, self.dir_y = set_direction()
self.array = [[self.x], [self.y], [self.color]]
def move(self):
self.x += self.dir_x
self.y += self.dir_y
self.n += 1
if self.x == 6 or self.x == -6:
self.dir_x = self.dir_x * -1
if self.y == 3 or self.y == -3:
self.dir_y = self.dir_y * -1
self.array[0].append(self.x)
self.array[1].append(self.y)
if self.n >= self.m:
self.array[2].append("blue")
else:
self.array[2].append(self.color)
return self
def move_n(self, n):
for _ in range(0, n):
self.move()
return self
def __str__(self):
return f"Point({self.x},{self.y}) #{self.n}"
sim = []
for i in range(10):
    p = Point(randint(-5, 5), randint(-2, 2), i * 3, (5 * ["red", "green"])[i])
s = p.move_n(30).array
sim.append(s)
data = np.array(sim)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlim(-6, 6)
plt.ylim(-3, 3)
plt.tick_params(
axis="both",
which="both",
bottom=False,
top=False,
labelbottom=False,
right=False,
left=False,
labelleft=False,
)
ims = []
for i, num in enumerate(range(0, 30)):
x, y, c = [], [], []
for point in data[:, :3, num]:
x.append(int(point[0]))
y.append(int(point[1]))
c.append(point[2])
print(i, x, y, c)
scat = ax.scatter(x=x, y=y, s=50, c=c)
ims.append([scat])
im_ani = animation.ArtistAnimation(fig, ims, interval=800, repeat_delay=300, blit=True)
plt.show()
|
[
"[email protected]"
] | |
a9d5499f5da02efa73b4381ff413faa0cdab877e
|
98a03799f45384e1bc88674d4fc1a7b14b997673
|
/cart/migrations/0005_auto_20210525_1933.py
|
e3bde4f7811053bc4e318bc7d665d7bc1b2b5638
|
[] |
no_license
|
munyuaDeveloper/07Ecommerce-backend
|
cda868260b56b044bdecf595b25492f5a3711e5d
|
cc73f4d33a7d6cdb823e9a719375f4c57ac49872
|
refs/heads/main
| 2023-08-11T19:42:42.169703 | 2021-09-21T22:58:27 | 2021-09-21T22:58:27 | 366,691,377 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 532 |
py
|
# Generated by Django 3.2.3 on 2021-05-25 19:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0004_auto_20210525_1218'),
]
operations = [
migrations.AlterModelOptions(
name='shoppingcartitem',
options={'verbose_name': 'shopping cart Items', 'verbose_name_plural': 'shopping cart Items'},
),
migrations.RemoveField(
model_name='orderinfo',
name='order_description',
),
]
|
[
"[email protected]"
] | |
f1ce3d0ba8ae07110c346dda91edaa7356e11913
|
d2df82be0a37c9fde9a1ecee34fbf311fd8b2840
|
/awesome_gans/sagan/sagan_train.py
|
3fc74c4963e2f4e90b5b14d0b029222310ce5c31
|
[
"MIT"
] |
permissive
|
qqyouhappy/Awesome-GANs
|
a13ded69a043bc257966fcd3e71dc7a87b3f524f
|
0f01852abbac0497baa8cc309a580ba720c0478f
|
refs/heads/master
| 2022-12-22T14:42:56.765953 | 2020-10-05T14:53:01 | 2020-10-05T14:53:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,134 |
py
|
import os
import time
import numpy as np
import tensorflow as tf
import awesome_gans.image_utils as iu
import awesome_gans.sagan.sagan_model as sagan
from awesome_gans.config import parse_args
from awesome_gans.datasets import CelebADataSet as DataSet
from awesome_gans.datasets import DataIterator
cfg = parse_args()
train_step = {
'epochs': 11,
'batch_size': 64,
'global_step': 10001,
'logging_interval': 500,
}
def main():
start_time = time.time() # Clocking start
height, width, channel = 128, 128, 3
# loading CelebA DataSet # from 'raw images' or 'h5'
use_h5 = True
if not use_h5:
ds = DataSet(
height=height,
            width=width,
channel=channel,
# ds_image_path="D:\\DataSet/CelebA/CelebA-%d.h5" % height,
ds_label_path=os.path.join(cfg.celeba_path, "Anno/list_attr_celeba.txt"),
ds_image_path=os.path.join(cfg.celeba_path, "Img/img_align_celeba/"),
ds_type="CelebA",
use_save=True,
save_file_name=os.path.join(cfg.celeba_path, "CelebA-%d.h5" % height),
save_type="to_h5",
use_img_scale=False,
)
else:
ds = DataSet(
height=height,
            width=width,
channel=channel,
ds_image_path=os.path.join(cfg.celeba_path, "CelebA-%d.h5" % height),
ds_label_path=os.path.join(cfg.celeba_path, "Anno/list_attr_celeba.txt"),
# ds_image_path=os.path.join(cfg.celeba, "Img/img_align_celeba/"),
ds_type="CelebA",
use_save=False,
# save_file_name=os.path.join(cfg.celeba, "CelebA-%d.h5" % height),
# save_type="to_h5",
use_img_scale=False,
)
num_images = ds.num_images
# saving sample images
test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, height, width, channel))
iu.save_images(test_images, size=[4, 4], image_path=os.path.join(cfg.output_path, "sample.png"), inv_type='127')
ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True)
del ds
# GPU configure
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
# SAGAN Model
model = sagan.SAGAN(
s,
height=height,
width=width,
channel=channel,
batch_size=train_step['batch_size'],
use_gp=False,
use_hinge_loss=True,
)
# Initializing
s.run(tf.global_variables_initializer())
print("[*] Reading checkpoints...")
saved_global_step = 0
ckpt = tf.train.get_checkpoint_state(cfg.model_path)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %d" % saved_global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
global_step = saved_global_step
start_epoch = global_step // (num_images // model.batch_size) # recover n_epoch
ds_iter.pointer = saved_global_step % (num_images // model.batch_size) # recover n_iter
for epoch in range(start_epoch, train_step['epochs']):
for batch_x in ds_iter.iterate():
batch_x = iu.transform(batch_x, inv_type='127')
batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
# Update D network
_, d_loss = s.run([model.d_op, model.d_loss], feed_dict={model.x: batch_x, model.z: batch_z, })
# Update G network
_, g_loss = s.run([model.g_op, model.g_loss], feed_dict={model.x: batch_x, model.z: batch_z, })
if global_step % train_step['logging_interval'] == 0:
summary = s.run(model.merged, feed_dict={model.x: batch_x, model.z: batch_z, })
# Training G model with sample image and noise
sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
samples = s.run(model.g_test, feed_dict={model.z_test: sample_z, })
# is_mean, is_std = t.inception_score(iu.inverse_transform(samples, inv_type='127'))
# fid_score = t.fid_score(real_img=batch_x, fake_img=samples[:model.batch_size])
# Print loss
print(
"[+] Epoch %04d Step %08d => " % (epoch, global_step),
" D loss : {:.8f}".format(d_loss),
" G loss : {:.8f}".format(g_loss),
# " Inception Score : {:.2f} (±{:.2f})".format(is_mean, is_std),
# " FID Score : {:.2f}".format(fid_score)
)
# Summary saver
model.writer.add_summary(summary, global_step)
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
                    sample_dir = os.path.join(cfg.output_path, 'train_{:08d}.png'.format(global_step))
# Generated image save
iu.save_images(
samples, size=[sample_image_height, sample_image_width], image_path=sample_dir, inv_type='127'
)
# Model save
model.saver.save(s, os.path.join(cfg.model_path, "SAGAN.ckpt"), global_step)
global_step += 1
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close tf.Session
s.close()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
eacad1766cda661fcba77d56519716da6ba9aea3
|
0c325cf7a68ef51067ed8db566d525a20de5b635
|
/other/panda365/panda365/pd/conf/tests/test_api.py
|
bce8a29c452848ac5f2021879686f02beb3367f5
|
[] |
no_license
|
alinzel/NOTES
|
2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241
|
3e0594641a605580e920d0b08a251fbc99f34e2f
|
refs/heads/master
| 2023-01-08T22:48:30.762625 | 2020-01-17T09:14:47 | 2020-01-17T09:14:47 | 175,339,492 | 0 | 0 | null | 2022-12-27T15:01:19 | 2019-03-13T03:28:08 |
HTML
|
UTF-8
|
Python
| false | false | 535 |
py
|
from pd.test_utils import assert_dict_like
from pd.conf.factory import ConfFactory
def test_get(client, db_session):
conf = ConfFactory()
url_tpl = '/v1/conf/{}'
resp = client.get(url_tpl.format(conf.name))
assert resp.status_code == 200
assert_dict_like(resp.json, {
'name': conf.name,
'min_version': conf.min_version,
'latest_version': conf.latest_version,
'description': conf.description,
})
resp = client.get(url_tpl.format('blah'))
assert resp.status_code == 404
|
[
"[email protected]"
] | |
dc177bf4ac10bb80f0ea2fae215f680358e4ea80
|
1da3173e935cb6d32ec3d9da7bf01ee91e6c3199
|
/sbt/utils/enumerators.py
|
02d6047c598a13a4f10128b8ac8b4229579a8cdb
|
[
"Apache-2.0"
] |
permissive
|
PgBiel/sbt
|
492b99c254bda9b404a89c438b3e3f0f13f358b7
|
dfbca913751b13a251e335e271bee0e443d02afe
|
refs/heads/master
| 2022-01-23T09:59:19.289276 | 2019-06-06T00:32:20 | 2019-06-06T00:32:20 | 197,852,145 | 0 | 0 | null | 2019-07-19T22:54:27 | 2019-07-19T22:54:27 | null |
UTF-8
|
Python
| false | false | 1,030 |
py
|
"""
/utils/enumeration.py
Copyright (c) 2019 ShineyDev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__authors__ = [("shineydev", "[email protected]")]
__maintainers__ = [("shineydev", "[email protected]")]
__version_info__ = (2, 0, 0, "alpha", 0)
__version__ = "{0}.{1}.{2}{3}{4}".format(*[str(n)[0] if (i == 3) else str(n) for (i, n) in enumerate(__version_info__)])
__all__ = {
"RPS",
}
class RPS():
r = "\N{MOYAI}"
p = "\N{PAGE FACING UP}"
s = "\N{BLACK SCISSORS}"
|
[
"[email protected]"
] | |
d3d27b0c4ef0f3a4e0890dfd6b88de514f40610e
|
4dc4345cca9c5f452bf4b87263505ee6b4e960af
|
/text_processing_exercise/letters_change_numbers.py
|
82fa25b63154fba7ad44fd25241f820d90ab2dd8
|
[] |
no_license
|
ivan-yosifov88/python_fundamentals
|
88c7eb5167bbe6692b95051d1551496a84893524
|
1cfe6d18453362fc26be984f6cb871b9d7dec63d
|
refs/heads/master
| 2023-03-29T16:46:55.363035 | 2021-04-07T10:39:44 | 2021-04-07T10:39:44 | 341,604,297 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 779 |
py
|
alphabet_dict = {}
for index in range(1, 27):
letter = 96 + index
alphabet_dict[chr(letter)] = int(index)
words_to_manipulate = input().split()
total_sum = 0
for word in words_to_manipulate:
first_letter = word[:1]
second_letter = word[-1:]
number = int(word[1:-1])
if first_letter.isupper():
divider = alphabet_dict[first_letter.lower()]
total_sum += number / divider
elif first_letter.islower():
multiplier = alphabet_dict[first_letter]
total_sum += number * multiplier
if second_letter.isupper():
subtract = alphabet_dict[second_letter.lower()]
total_sum -= subtract
elif second_letter.islower():
add = alphabet_dict[second_letter]
total_sum += add
print(f"{total_sum:.2f}")
|
[
"ivan.yosifov88gmail.com"
] |
ivan.yosifov88gmail.com
|
f20c7d5f767aae7d5913374a5a2ba3591d11f2dd
|
0e5aa2f88c770457e91289aa886c24d7faca0677
|
/viewport.py
|
601c4c33f5bc469663ed2cc056d465161ccebe44
|
[] |
no_license
|
su8/pygobject-examples
|
54ea6dbb894ef735f87703ce60a6b5bdbad8c3e9
|
27c3f94b21a731628dac13bb0ad573474864839b
|
refs/heads/master
| 2021-04-03T09:39:05.614517 | 2016-11-12T20:55:12 | 2016-11-12T20:55:12 | 124,675,038 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 693 |
py
|
#!/usr/bin/env python3
from gi.repository import Gtk
window = Gtk.Window()
window.connect("destroy", lambda q: Gtk.main_quit())
grid = Gtk.Grid()
window.add(grid)
viewport = Gtk.Viewport()
viewport.set_size_request(200, 200)
grid.attach(viewport, 0, 0, 1, 1)
vadjustment = viewport.get_vadjustment()
hadjustment = viewport.get_hadjustment()
# Wire the scrollbars to the viewport's adjustments so they actually scroll it
vscrollbar = Gtk.VScrollbar(adjustment=vadjustment)
grid.attach(vscrollbar, 1, 0, 1, 1)
hscrollbar = Gtk.HScrollbar(adjustment=hadjustment)
grid.attach(hscrollbar, 0, 1, 1, 1)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
viewport.add(box)
for i in range(0, 15):
button = Gtk.Button(label="Button %s" % i)
box.pack_start(button, True, True, 0)
window.show_all()
Gtk.main()
|
[
"[email protected]"
] | |
f220eafb22d10d6f90076ad75da2189397169500
|
2f8f8171b3b996b0c866ede72367ec26f64eae39
|
/sampleproject/www/Project_Euler/problem001_050/problem024.py
|
4002e5d9acde3b0609b412965436d2994b511a5a
|
[] |
no_license
|
kabaksh0507/exercise_python_it-1
|
da46edce09301b03a5351ee1885fb01eb69d8240
|
2b6c80a79494c9981e51bd03696c3aa19d6625ec
|
refs/heads/main
| 2023-03-04T03:12:44.188468 | 2021-02-08T08:55:36 | 2021-02-08T08:55:36 | 337,014,697 | 0 | 0 | null | 2021-02-08T08:57:30 | 2021-02-08T08:57:30 | null |
UTF-8
|
Python
| false | false | 2,561 |
py
|
'''Project Euler Problem 24'''
def problem_24() -> str:
'''Lexicographic permutations'''
target_num = 1000000
pattern_list = list(range(10))
ret = search_pattern(target_num, pattern_list)
return ret
def search_pattern(target_num: int, select_list: list) -> str:
    '''
    Return the target_num-th number, in lexicographic order, among the numbers
    formed by using each item of select_list exactly once.
    A digit, once used, cannot be reused. Because the order is lexicographic,
    the higher-order digits are filled with the smaller values first.
    Find the point at which the number of selection patterns for X digits exceeds target_num.
    ex)
    There are 10! arrangements of 10 values across 10 digits.
    The combinations over the last 9 digits number 9!, over the last 8 digits 8!, ...,
    over the last 2 digits 2!, and over 1 digit 1!.
    Because of the lexicographic order, consider what happens when the smaller
    values are placed in the higher-order digits.
    The value of digit X is determined by how many times the pattern count of
    the remaining X-1 digits fits into target_num.
    ex) The 4th number among the combinations of the list [0, 1, 2]:
    the 2-digit suffix has 2 patterns (2!), which fits into 4 twice,
    so the 3rd (leftmost) digit becomes the 2nd value in the list, i.e. 1.
    Repeating the above recursively yields the target_num-th value.
    Note added after writing:
    if target_num is 0 or less, it behaves the same as 1.
    '''
if len(select_list) == 0:
        # Recursive function: terminate once there are no choices left
return ''
if target_num > number_kai(len(select_list)):
        # There is no target_num-th pattern (a value beyond the total number of combinations was requested)
return 'out of range : target number'
select_list.sort()
next_keta_pattern_num = number_kai(len(select_list)-1)
select_num = ''
for i in range(len(select_list)):
if target_num <= next_keta_pattern_num*(i+1):
select_num = str(select_list.pop(i))
return select_num + search_pattern(target_num - next_keta_pattern_num*i, select_list)
return 'Error'
def number_kai(num: int) -> int:
'''
n! = n * (n-1) * (n-2) * ... * 1
'''
ret = 1
for i in range(1, num+1):
ret *= i
return ret
if __name__ == '__main__':
print(problem_24())
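# Sanity check of the docstring's worked example (hypothetical usage):
# search_pattern(4, [0, 1, 2]) returns '120', the 4th permutation of 0, 1, 2
# in lexicographic order (012, 021, 102, 120, ...).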
|
[
"[email protected]"
] | |
bafe1e52daa3fcf78f8cf84a110dea1f2ed01737
|
3dcfa266c4b7321a4c3a224b98f9ca0dff891e47
|
/archives/prioque.py
|
869bdbf0ed3ff28d32db9ff399a170e2b25b858f
|
[] |
no_license
|
CandyTt20/Notes
|
a2ef681d123c5219a29334e99aeb900b74bf1834
|
ec092f881122ebdd91ef9764ec7ce4d9cc4723ae
|
refs/heads/master
| 2022-08-21T18:08:33.204223 | 2020-05-19T23:55:49 | 2020-05-19T23:55:49 | 255,209,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
class PrioQue(object):
    #! Implemented with a (sorted) linear list
def __init__(self, elist=[]):
self._elist = list(elist)
self._elist.sort(reverse=True)
def insert_que(self, elem):
i = len(self._elist) - 1
while i >= 0 and self._elist[i] < elem:
i -= 1
self._elist.insert(i + 1, elem)
    def is_empty(self):
        return len(self._elist) == 0
    def peek(self):
        if self.is_empty():
            return None
        return self._elist[-1]
def pop_que(self):
if self.is_empty():
return None
else:
return self._elist.pop()
x = PrioQue([5, 1, 2, 6, 3])
x.insert_que(4)
while x._elist:
print(x.pop_que())
print(x._elist)
|
[
"[email protected]"
] | |
b971c99a94802b5ef22c261ffe180a7af8d278a1
|
8924bd3df018bdee62a5e5d99069171771d9f459
|
/Important/aws-tutorial-code/lambda/lambda_read_pdf_s3_trigger.py
|
8e80689c37c5b36f7db72cf78b0876ed8c6100cf
|
[
"MIT"
] |
permissive
|
abhi15sep/Lambda-Final
|
f5985dc1d2aef4965764ec452a5e2949d78a202b
|
fed8be8ddeb7325e594fb426bfcd0f388f3d0f67
|
refs/heads/master
| 2023-04-19T02:06:09.808966 | 2021-05-02T21:23:22 | 2021-05-02T21:23:22 | 336,569,860 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,195 |
py
|
#-*- coding: utf-8 -*-
__author__ = "Chirag Rathod (Srce Cde)"
__license__ = "MIT"
__email__ = "[email protected]"
__maintainer__ = "Chirag Rathod (Srce Cde)"
import json
import boto3
import fitz
def lambda_handler(event, context):
"""Read file from s3 on trigger."""
# boto3 client
s3 = boto3.client("s3")
if event:
file_obj = event["Records"][0]
# fetching bucket name from event
bucketname = str(file_obj['s3']['bucket']['name'])
# fetching file name from event
filename = str(file_obj['s3']['object']['key'])
# retrieving object from S3
fileObj = s3.get_object(Bucket=bucketname, Key=filename)
# reading botocore stream
file_content = fileObj["Body"].read()
# loading pdf from memory/stream
with fitz.open(stream=file_content, filetype="pdf") as doc:
text = ""
# iterating through pdf file pages
for page in doc:
# fetching & appending text to text variable of each page
text += page.getText()
print(text)
return {
'statusCode': 200,
'body': json.dumps('Thanks from Srce Cde!')
}
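# A minimal local invocation sketch (hypothetical bucket and key, not part of the
# original handler): AWS delivers S3 trigger events in this Records shape, which
# is exactly what the handler reads above.
# lambda_handler({"Records": [{"s3": {"bucket": {"name": "my-bucket"},
#                                     "object": {"key": "sample.pdf"}}}]}, None)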
|
[
"[email protected]"
] | |
b0c708b3b1fb2ed265dffef8b360333c76f2466a
|
36b75aac4236e928e22552e8812abd45d32aecf1
|
/modules/dbnd/test_dbnd/tracking/callable_tracking/test_no_side_affects.py
|
22f25cdb1e1c99a40b646c2ce2536d287438fd2d
|
[
"Apache-2.0"
] |
permissive
|
reloadbrain/dbnd
|
7793aa1864f678005de626068b0ac9361d637d65
|
ec0076f9a142b20e2f7afd886ed1a18683c553ec
|
refs/heads/master
| 2023-09-01T08:04:09.486666 | 2021-10-14T16:43:00 | 2021-10-14T16:43:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,701 |
py
|
import pickle
from pytest import fixture
from dbnd import config, task
from dbnd._core.configuration.environ_config import get_max_calls_per_func
@task
def task_pass_through_result_param(result):
assert isinstance(result, str)
return str(result)
@task
def task_pass_through_args_kwargs(*args, **kwargs):
return {
"args": args,
"kwargs": kwargs,
}
@task
def task_in_conf(param1="default_value", param2=None):
return param1, param2
class TestNoSideAffectsOnTracking(object):
@fixture(autouse=True)
def _tracking_context(self, set_tracking_context):
pass
def test_tracking_pass_through_result_param(self, pandas_data_frame_on_disk):
df, df_file = pandas_data_frame_on_disk
assert task_pass_through_result_param(result=str(df_file)) == str(df_file)
def test_tracking_pass_through_args_kwargs(self, pandas_data_frame_on_disk):
df, df_file = pandas_data_frame_on_disk
res = task_pass_through_args_kwargs(str(df_file), data=df, result=str(df_file))
assert res["args"] == (str(df_file),)
assert len(res["kwargs"]) == 2
assert res["kwargs"]["data"] is df
assert res["kwargs"]["result"] == str(df_file)
def test_partial_params(self):
param1, param2 = task_in_conf(param2="param2_value")
assert param1 == "default_value"
assert param2 == "param2_value"
def test_task_in_conf(self):
# in_conf - shouldn't affect anything
with config(
{"task_in_conf": {"param1": "conf_value", "param2": "conf_value"}},
source="test_source",
):
param1, param2 = task_in_conf(param2="param2_value")
assert param1 == "default_value"
assert param2 == "param2_value"
def test_pickle(self):
pickled = pickle.dumps(task_pass_through_args_kwargs)
assert task_pass_through_args_kwargs == pickle.loads(pickled)
def test_tracking_limit(self, mock_channel_tracker):
@task
def inc_task(x):
return x + 1
max_calls_allowed = get_max_calls_per_func()
extra_func_calls = 10
n = 0
for i in range(max_calls_allowed + extra_func_calls):
n = inc_task(n)
# ensure that function was actually invoked all the times (max_calls_allowed + extra_func_calls)
assert max_calls_allowed + extra_func_calls == n
# check that there was only max_calls_allowed "tracked" calls
track_call = [
x
for x in mock_channel_tracker.call_args_list
if x.args[0].__name__ == "log_targets"
]
assert max_calls_allowed == len(track_call)
|
[
"[email protected]"
] | |
40354a15c13bfb900f01fee589091789eb0e071f
|
6c58da2c54a3d35273e7984313d181f1da9981fc
|
/Users/djangoEnv/bin/easy_install-2.7
|
d2fc7d4f26da40fa01f2c42ae4b7550560e15d0e
|
[
"MIT-0"
] |
permissive
|
py1-10-2017/rgero215_PY1-10-2017
|
e582cb12cc63f84b1c0c14d09a922cb6cb228016
|
f455b335ec9c8c850571f3a75dcd95759b4cfdad
|
refs/heads/master
| 2021-09-04T03:23:48.062326 | 2018-01-14T21:07:26 | 2018-01-14T21:07:26 | 105,612,652 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
7
|
#!/Users/RGero13/Desktop/rgero215_PY1-10-2017/Users/djangoEnv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | |
a57fceade629c698b6dbdd102db127049d794fb7
|
2db1a0038d26ccb6adc572b536cb5cd401fd7498
|
/tryTen/Lib/encodings/cp1257.py
|
32d59e2193388a7f2d6fec34bc4f71f8defb7941
|
[] |
no_license
|
syurk/labpin
|
e795c557e7d7bcd4ff449cb9a3de32959a8c4968
|
04070dd5ce6c0a32c9ed03765f4f2e39039db411
|
refs/heads/master
| 2022-12-12T02:23:54.975797 | 2018-11-29T16:03:26 | 2018-11-29T16:03:26 | 159,692,630 | 0 | 1 | null | 2022-11-19T12:15:55 | 2018-11-29T16:04:20 |
Python
|
UTF-8
|
Python
| false | false | 13,374 |
py
|
""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1257',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\xa8' # 0x8D -> DIAERESIS
'\u02c7' # 0x8E -> CARON
'\xb8' # 0x8F -> CEDILLA
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\xaf' # 0x9D -> MACRON
'\u02db' # 0x9E -> OGONEK
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\ufffe' # 0xA1 -> UNDEFINED
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\ufffe' # 0xA5 -> UNDEFINED
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xe6' # 0xBF -> LATIN SMALL LETTER AE
'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
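# Usage sketch (assumes the codec is used directly rather than via the registry):
# Codec().encode('Ž') -> (b'\xde', 1) and Codec().decode(b'\xde') -> ('Ž', 1),
# matching the 0xDE <-> LATIN CAPITAL LETTER Z WITH CARON mapping above.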
|
[
"[email protected]"
] | |
9b3c07180d0aae51da6beadf9de05ef72f4b3789
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-live/aliyunsdklive/request/v20161101/DeleteLiveStreamRecordIndexFilesRequest.py
|
b699537542cfd3d96cc4b06bcf2359fa98da73b4
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 |
NOASSERTION
| 2023-09-14T08:51:06 | 2015-07-23T09:39:45 |
Python
|
UTF-8
|
Python
| false | false | 2,480 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class DeleteLiveStreamRecordIndexFilesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'DeleteLiveStreamRecordIndexFiles','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RemoveFile(self): # String
return self.get_query_params().get('RemoveFile')
def set_RemoveFile(self, RemoveFile): # String
self.add_query_param('RemoveFile', RemoveFile)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_StreamName(self): # String
return self.get_query_params().get('StreamName')
def set_StreamName(self, StreamName): # String
self.add_query_param('StreamName', StreamName)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_RecordIds(self): # RepeatList
return self.get_query_params().get('RecordId')
def set_RecordIds(self, RecordId): # RepeatList
for depth1 in range(len(RecordId)):
self.add_query_param('RecordId.' + str(depth1 + 1), RecordId[depth1])
|
[
"[email protected]"
] | |
a632af37ea6591032897d65fc5beb33456c14888
|
6b7f11270232000042d51f97f616c1a19c047389
|
/elepay/model/location_dto.pyi
|
a1dccde82442a223f0ad9e4dac8562e6718ab2f1
|
[] |
no_license
|
elestyle/elepay-python-sdk
|
07e6bc8d0a42b5217a4144ab5632b0cc548aef58
|
36dd8502047df4c6f8eaba53e216bae25843c3c5
|
refs/heads/master
| 2023-02-04T08:47:15.287825 | 2023-01-24T02:28:45 | 2023-01-24T02:28:45 | 206,721,107 | 2 | 0 | null | 2023-01-24T02:15:52 | 2019-09-06T05:41:33 |
Python
|
UTF-8
|
Python
| false | false | 4,612 |
pyi
|
# coding: utf-8
"""
    elepay API Reference
    The elepay API is a payment API built on REST. It lets you handle a wide range of payment operations, such as charges and refunds.  # noqa: E501
The version of the OpenAPI document: 1.2.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from elepay import schemas # noqa: F401
class LocationDto(
schemas.DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
    Location object
"""
class MetaOapg:
class properties:
id = schemas.StrSchema
name = schemas.StrSchema
country = schemas.StrSchema
description = schemas.StrSchema
logoUrl = schemas.StrSchema
__annotations__ = {
"id": id,
"name": name,
"country": country,
"description": description,
"logoUrl": logoUrl,
}
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["country"]) -> MetaOapg.properties.country: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ...
@typing.overload
def __getitem__(self, name: typing_extensions.Literal["logoUrl"]) -> MetaOapg.properties.logoUrl: ...
@typing.overload
def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...
def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "country", "description", "logoUrl", ], str]):
# dict_instance[name] accessor
return super().__getitem__(name)
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> typing.Union[MetaOapg.properties.id, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["country"]) -> typing.Union[MetaOapg.properties.country, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: typing_extensions.Literal["logoUrl"]) -> typing.Union[MetaOapg.properties.logoUrl, schemas.Unset]: ...
@typing.overload
def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...
def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "country", "description", "logoUrl", ], str]):
return super().get_item_oapg(name)
def __new__(
cls,
*args: typing.Union[dict, frozendict.frozendict, ],
id: typing.Union[MetaOapg.properties.id, str, schemas.Unset] = schemas.unset,
name: typing.Union[MetaOapg.properties.name, str, schemas.Unset] = schemas.unset,
country: typing.Union[MetaOapg.properties.country, str, schemas.Unset] = schemas.unset,
description: typing.Union[MetaOapg.properties.description, str, schemas.Unset] = schemas.unset,
logoUrl: typing.Union[MetaOapg.properties.logoUrl, str, schemas.Unset] = schemas.unset,
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'LocationDto':
return super().__new__(
cls,
*args,
id=id,
name=name,
country=country,
description=description,
logoUrl=logoUrl,
_configuration=_configuration,
**kwargs,
)
|
[
"[email protected]"
] | |
79a9ca1ff80bd0b65d9154b7ec175969fbb27012
|
bcabd9b183bc011e1ccf7e367fbed0dcaa03eee6
|
/1 PYTHON/1 EDUREKA/EDUREKA OLD/15 Flow Control.py
|
5477c0e3805419ff7305edd254f4bc304944ea76
|
[] |
no_license
|
rajeshsvv/Lenovo_Back
|
287fe4da2c696aa248ec57a4c45c4f234f6ca9ed
|
7e49e38aaf934c65f9992a78404d2b81a4cd0204
|
refs/heads/master
| 2022-12-23T16:44:41.488128 | 2019-08-29T10:00:10 | 2019-08-29T10:00:10 | 204,859,914 | 0 | 1 | null | 2022-12-10T11:50:31 | 2019-08-28T06:05:35 |
Python
|
UTF-8
|
Python
| false | false | 543 |
py
|
'''
marks=20
if(marks>80) and (marks<=100):
print("GRADE A")
elif(marks>60) and (marks<=80):
print("GRADE B")
elif(marks>40) and (marks<=60):
print("GRADE C")
elif marks>=20 and marks<=40:
print("GRADE D")
else:
print("Please Enter Marks in between range 0 to 100")
'''
# While loop: add the numbers from the given number down to one, e.g. for 5: 5+4+3+2+1 = 15
num=int(input("Enter the value of n="))
if (num<=0):
print("Enter a valid value")
else:
sum=0
while (num>0):
sum+=num
num-=1
print(sum)
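# Equivalent closed form for the same sum: n*(n+1)//2, e.g. 5*6//2 == 15.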
|
[
"[email protected]"
] | |
c27112ab8dbb9ade9e181d280ddd8534a8a5ca75
|
78d0d278d72afb500fc68ee3a45c39b80ccf193c
|
/utils/logging.py
|
795095f1c4e80a53d50aa23ec714fb3b77caa422
|
[] |
no_license
|
mzq308734881/P2Net.pytorch
|
b56e501e2257b8b017f96cc70f8ba724cb8ee83f
|
5e40745d0cba647dc02a9f6ea114e326e26e3a0a
|
refs/heads/master
| 2022-12-25T04:28:26.198875 | 2020-10-07T16:42:35 | 2020-10-07T16:42:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 969 |
py
|
from __future__ import absolute_import
import os
import errno
import sys
class Logger(object):
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
try:
os.makedirs(os.path.dirname(fpath))
except OSError as e:
if e.errno != errno.EEXIST:
raise
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
        # Do not close the console stream (sys.stdout); closing it would break later prints
if self.file is not None:
self.file.close()
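# Typical usage sketch (hypothetical path): redirect stdout so every print goes
# to both the console and a log file:
#   sys.stdout = Logger('logs/run.log')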
|
[
"[email protected]"
] | |
c671abade50acf9f857984a8cb57729e163f7ba9
|
0d9ba36b3efd76c369e7e64c995730d51bc62c0c
|
/Evaluation/TrainPOW2.py
|
a57c5fbb18ebbdce9bfb6a7bb4cf27a2a9ff2e58
|
[
"MIT"
] |
permissive
|
sagieppel/Generator-evaluator-selector-net-a-modular-approach-for-panoptic-segmentation
|
5b797a16d5a351a628f990276cede44fd356295a
|
5d482ed471054e85d6a63b7357b2de404ffa6ec6
|
refs/heads/master
| 2023-04-30T08:14:24.804209 | 2023-04-26T01:25:00 | 2023-04-26T01:25:00 | 204,122,232 | 15 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,131 |
py
|
# Train the net (in train.py)
# 1) Download COCO panoptic dataset and train images from [here](http://cocodataset.org/#download)
# 2) Set the path to COCO train images folder in the ImageDir parameter
# 3) Set the path to COCO panoptic train annotations folder in the AnnotationDir parameter
# 4) Set the path to COCO panoptic data .json file in the DataFile parameter
#...............Other training paramters..............................................................................
# 5) Run script.
# Trained model weight and data will appear in the path given by the TrainedModelWeightDir parameter
#...............................Imports..................................................................
import os
import torch
import numpy as np
import Reader
import NetModel as NET_FCN # The net Class
import scipy.misc as misc
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
TrainedModelWeightDir="logs/" # Folder where trained model weight and information will be stored"
if not os.path.exists(TrainedModelWeightDir): os.mkdir(TrainedModelWeightDir)
Trained_model_path="" # Path of trained model weights If you want to return to trained model, else should be =""
##################################Input parameters#########################################################################################
ImageDir="/scratch/gobi1/seppel/DataSets/COCO_PANOPTIC/PanopticFull/train2017/"
ClassEqualDataDirs=[
"/scratch/gobi2/seppel/GeneratedPredictions/101MultiTestClassEquivalent/Pred/"
,
"/scratch/gobi2/seppel/GeneratedPredictions/101AddClassEquivalent/Pred/" ,
"/scratch/gobi2/seppel/GeneratedPredictions/51MultiTestClassEquivalent/Pred/" ,
"/scratch/gobi2/seppel/GeneratedPredictions/51AddClassEquivalent/Pred/" ,
]
AllFilesDataDirs=[
"/scratch/gobi2/seppel/GeneratedPredictions/101MultiTestAllFiles//Pred/" ,
"/scratch/gobi2/seppel/GeneratedPredictions/51MultiTestAllFiles//Pred/" ,
"/scratch/gobi2/seppel/GeneratedPredictions/51AddAllfiles//Pred/"
]
NumClasses=205
Learning_Rate_Init=1e-5 # Initial learning rate
Learning_Rate=1e-5 # learning rate
#Learning_Rate_Decay=Learning_Rate[0]/40 # Used for the standard schedule
Learning_Rate_Decay=Learning_Rate/20
StartLRDecayAfterSteps=100000
MaxBatchSize=7 # Max images in batch
MinSize=250 # Min image height/width
MaxSize=1000 # Max image height/width
MaxPixels=340000*4 # Max pixels a batch can contain (to avoid out-of-memory problems); larger images will be resized
TrainLossTxtFile=TrainedModelWeightDir+"TrainLoss.txt" # Where train losses will be written
Weight_Decay=1e-5 # Weight for the weight decay loss function
MAX_ITERATION = int(10000000010) # Max number of training iterations
InitStep=0
MinPrecision=0.0
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
ClassEqualReader=Reader.Reader(ImageDir=ImageDir,MaskDirs=ClassEqualDataDirs,NumClasses=NumClasses,ClassBalance=True,MinSize=MinSize,MaxSize=MaxSize,MaxPixels=MaxPixels,MinPrecision=MinPrecision,MaxBatchSize=MaxBatchSize,AugmentImage=False,ReadRatio=1.1)
AllFileReader=Reader.Reader(ImageDir=ImageDir,MaskDirs=AllFilesDataDirs,NumClasses=NumClasses,ClassBalance=False,MinSize=MinSize,MaxSize=MaxSize,MaxPixels=MaxPixels,MinPrecision=MinPrecision,MaxBatchSize=MaxBatchSize,AugmentImage=False,ReadRatio=1.1)
#=========================Load Paramters====================================================================================================================
if os.path.exists(TrainedModelWeightDir + "/Defult.torch"):
Trained_model_path=TrainedModelWeightDir + "/Defult.torch"
if os.path.exists(TrainedModelWeightDir+"/Learning_Rate.npy"): Learning_Rate=np.load(TrainedModelWeightDir+"/Learning_Rate.npy")
if os.path.exists(TrainedModelWeightDir+"/Learning_Rate_Init.npy"): Learning_Rate_Init=np.load(TrainedModelWeightDir+"/Learning_Rate_Init.npy")
if os.path.exists(TrainedModelWeightDir+"/itr.npy"): InitStep=int(np.load(TrainedModelWeightDir+"/itr.npy"))
#---------------------Create and initiate net and create optimizer------------------------------------------------------------------------------------
Net=NET_FCN.Net() # Create net and load pretrained encoder path
if Trained_model_path!="": # Optional initiate full net by loading a pretrained net
Net.load_state_dict(torch.load(Trained_model_path))
Net=Net.cuda()
#optimizer=torch.optim.SGD(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay,momentum=0.5)
optimizer=torch.optim.Adam(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay) # Create adam optimizer
#--------------------------- Create log files for saving loss during training----------------------------------------------------------------------------------------------------------
if not os.path.exists(TrainedModelWeightDir): os.makedirs(TrainedModelWeightDir) # Create folder for trained weight
f = open(TrainLossTxtFile, "w+")# Training loss log file
f.write("Iteration\tloss\t Learning Rate=")
f.close()
#..............Start Training loop: Main Training....................................................................
AVGLoss=-1# running average loss
print("Start Training")
for itr in range(InitStep,MAX_ITERATION): # Main training loop
if np.random.rand()<0.5:
Images, SegmentMask, GtIOU = ClassEqualReader.LoadBatch()
else:
Images, SegmentMask, GtIOU = AllFileReader.LoadBatch()
# for oo in range(SegmentMask.shape[0]):
# # misc.imshow(Imgs[oo])
# # Imgs[oo,:,:,0] *=1 - PredMask[oo,:,:]
# im= Images[oo].copy()
# im[:,:,0] *= 1 - SegmentMask[oo,:,:]
# print(GtIOU[oo])
# # misc.imshow((PredMask[oo,:,:]*0+GTMask[oo,:,:]))
# misc.imshow(np.concatenate([Images[oo],im],axis=0))
    # **************************Run training cycle***************************************************************************************
PredIOU = Net.forward(Images, SegmentMask, TrainMode=True) # Run net inference and get prediction
Net.zero_grad()
TorchGtIOU = torch.autograd.Variable(torch.from_numpy(GtIOU.astype(np.float32)).cuda(), requires_grad=False)
Loss = torch.pow(PredIOU - TorchGtIOU, 2).mean()
#Loss = torch.abs(PredIOU - TorchGtIOU, 2).mean()
# -torch.mean((OneHotLabels * torch.log(Prob + 0.0000001))) # Calculate cross entropy loss
if AVGLoss == -1:
        AVGLoss = float(Loss.data.cpu().numpy())  # initialize the running average loss for display
else:
AVGLoss = AVGLoss * 0.999 + 0.001 * float(Loss.data.cpu().numpy())
    Loss.backward()  # Backpropagate the loss
    optimizer.step()  # Apply a gradient descent step to update the weights
# torch.cuda.empty_cache()
# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------
    if itr % 2000 == 0:# and itr>0: # Save model weights once every 2k iterations
print("Saving Model to file in "+TrainedModelWeightDir+"/Defult.torch")
torch.save(Net.state_dict(), TrainedModelWeightDir + "/Defult.torch")
torch.save(Net.state_dict(), TrainedModelWeightDir + "/DefultBack.torch")
print("model saved")
np.save(TrainedModelWeightDir+"/Learning_Rate.npy",Learning_Rate)
np.save(TrainedModelWeightDir+"/Learning_Rate_Init.npy",Learning_Rate_Init)
np.save(TrainedModelWeightDir+"/itr.npy",itr)
    if itr % 40000 == 0 and itr>0: # Save a numbered checkpoint once every 40k iterations
print("Saving Model to file in "+TrainedModelWeightDir+"/"+ str(itr) + ".torch")
torch.save(Net.state_dict(), TrainedModelWeightDir + "/" + str(itr) + ".torch")
print("model saved")
#......................Write and display train loss..........................................................................
if itr % 50==0: # Display train loss
txt="\n"+str(itr)+"\t"+str(float(Loss.data.cpu().numpy()))+"\t"+str(AVGLoss)+"\t"+str(Learning_Rate)+" Init_LR"+str(Learning_Rate_Init)
print(txt)
#Write train loss to file
        with open(TrainLossTxtFile, "a") as f:
            f.write(txt)  # the with-block closes the file automatically
    #----------------Update learning rate in a "fractal" (decay-and-reset) manner-------------------------------------------------------------------------------
if itr%10000==0 and itr>=StartLRDecayAfterSteps:
Learning_Rate-= Learning_Rate_Decay
if Learning_Rate<=1e-7:
Learning_Rate_Init-=2e-6
if Learning_Rate_Init<1e-6: Learning_Rate_Init=1e-6
Learning_Rate=Learning_Rate_Init*1.00001
Learning_Rate_Decay=Learning_Rate/20
print("Learning Rate="+str(Learning_Rate)+" Learning_Rate_Init="+str(Learning_Rate_Init))
print("======================================================================================================================")
optimizer = torch.optim.Adam(params=Net.parameters(), lr=Learning_Rate,weight_decay=Weight_Decay) # Create adam optimizer
torch.cuda.empty_cache() # Empty cuda memory to avoid memory leaks
# if itr%200000==0:
# ClassEqualReader = Reader.Reader(ImageDir=ImageDir, MaskDirs=ClassEqualDataDirs, NumClasses=NumClasses,
# ClassBalance=True, MinSize=MinSize, MaxSize=MaxSize, MaxPixels=MaxPixels,
# MinPrecision=MinPrecision, MaxBatchSize=MaxBatchSize, AugmentImage=True)
# AllFileReader = Reader.Reader(ImageDir=ImageDir, MaskDirs=AllFilesDataDirs, NumClasses=NumClasses,
# ClassBalance=False, MinSize=MinSize, MaxSize=MaxSize, MaxPixels=MaxPixels,
# MinPrecision=MinPrecision, MaxBatchSize=MaxBatchSize, AugmentImage=True)
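
# Illustrative sketch (not part of the original script): the "fractal" learning
# rate schedule used above, factored into a standalone function. The constants
# mirror the values in the loop; the function name is hypothetical.
def fractal_lr_schedule(lr, lr_init, lr_decay):
    """Decay lr linearly; when it bottoms out, restart from a lowered init value."""
    lr -= lr_decay
    if lr <= 1e-7:
        lr_init = max(lr_init - 2e-6, 1e-6)
        lr = lr_init * 1.00001
        lr_decay = lr / 20
    return lr, lr_init, lr_decay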
|
[
"[email protected]"
] | |
421316f64e6767eb54d7e1b4351d921a48b5a002
|
e57d7785276053332c633b57f6925c90ad660580
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/aio/operations/_protection_policy_operation_results_operations.py
|
7f2085e9859ca31d51d469884b033b61357d8c8b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
adriananeci/azure-sdk-for-python
|
0d560308497616a563b6afecbb494a88535da4c5
|
b2bdfe659210998d6d479e73b133b6c51eb2c009
|
refs/heads/main
| 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 |
MIT
| 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null |
UTF-8
|
Python
| false | false | 5,236 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ProtectionPolicyOperationResultsOperations:
"""ProtectionPolicyOperationResultsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
vault_name: str,
resource_group_name: str,
policy_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.ProtectionPolicyResource":
"""Provides the result of an operation.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param policy_name: Backup policy name whose operation's result needs to be fetched.
:type policy_name: str
:param operation_id: Operation ID which represents the operation whose result needs to be
fetched.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectionPolicyResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.models.ProtectionPolicyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProtectionPolicyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProtectionPolicyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}'} # type: ignore
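
# Illustrative usage sketch (not part of the generated file): calling this
# operation through the async management client; the vault, resource group,
# policy names, and operation id below are hypothetical placeholders.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.recoveryservicesbackup.aio import RecoveryServicesBackupClient
#
#     async def fetch_result():
#         async with RecoveryServicesBackupClient(
#             DefaultAzureCredential(), "<subscription-id>"
#         ) as client:
#             policy = await client.protection_policy_operation_results.get(
#                 vault_name="myVault",
#                 resource_group_name="myResourceGroup",
#                 policy_name="myPolicy",
#                 operation_id="<operation-id>",
#             )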
|
[
"[email protected]"
] | |
2a5e83c5e9b26ae41173ccc7e6577802969b9966
|
3db89b4f5ada46de1534e65e1ddf38d5fb10202a
|
/cookiespostform/testapp/views.py
|
ad081fc8a6ad25b3cd0f4929773c837725a95232
|
[] |
no_license
|
abhinav375/DjangoProjects
|
42a5744527a5cf170daa2af3369e5652a5e87591
|
9d7de14631f8e2b57c66b517da240e36872b11c3
|
refs/heads/master
| 2022-11-12T14:05:51.983610 | 2020-07-11T08:32:59 | 2020-07-11T08:32:59 | 278,821,227 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,389 |
py
|
from django.shortcuts import render
from . import forms
# Create your views here.
'''COOKIES CODE'''
def index(request):
    form = forms.studentRegustration()
    # Render the response up front so cookies can be attached to it below.
    response = render(request, 'testapp/wish.html', {'form': form})
    if request.method == 'POST':
        print('inside post')
        form = forms.studentRegustration(request.POST)
        if form.is_valid():
            print("inside 2nd post")
            # name = request.POST['name']
            # roll_no = request.POST['roll_No']
            name = form.cleaned_data['name']
            roll_no = form.cleaned_data['roll_No']
            # Attach the validated values to the response as cookies.
            response.set_cookie('name', name)
            response.set_cookie('roll_no', roll_no)
            print(name, roll_no)
    return response
'''SESSION CODE'''
'''def index(request):
form=forms.studentRegustration()
if request.method=='POST':
print('inside post')
form=forms.studentRegustration(request.POST)
if(form.is_valid()):
print("inside 2nd post")
#name=request.POST['name']
#roll_no=request.POST['roll_No']
name=form.cleaned_data['name']
roll_no=form.cleaned_data['roll_No']
request.session['name']=name
request.session['roll_no']=roll_no
print(name,roll_no)
return render(request,'testapp/wish.html',{'form':form})'''
def show(request):
return render(request,'testapp/index.html')
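
# Illustrative sketch (not part of the original file): reading the cookies back
# in another view; the 'guest' default is a hypothetical fallback.
def show_with_cookies(request):
    name = request.COOKIES.get('name', 'guest')
    roll_no = request.COOKIES.get('roll_no')
    return render(request, 'testapp/index.html', {'name': name, 'roll_no': roll_no})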
|
[
"[email protected]"
] | |
eaf5393d03c4bfada9933355ce396fc33623620d
|
584e9c42e6240b9facc866703a6f26b06773df94
|
/Oreilly/index_power.py
|
16e65372a6b847c25ea55fe8a59c26a35c99ad56
|
[] |
no_license
|
anton-dovnar/checkio
|
48fbaf84c244b0fca7bed5cf7f34179cf850adf9
|
10aed757ec36f182871a03ed8c9e73319cc8824a
|
refs/heads/master
| 2023-03-24T16:23:39.524060 | 2021-03-12T13:07:04 | 2021-03-12T13:07:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,487 |
py
|
#!/home/fode4cun/.local/share/virtualenvs/checkio-ufRDicT7/bin/checkio --domain=py run index-power
# You are given an array with positive numbers and a number N. You should find the N-th power of the element in the array with the index N. If N is outside of the array, then return -1. Don't forget that the first element has the index 0.
#
# Let's look at a few examples:
# - array = [1, 2, 3, 4] and N = 2, then the result is 3**2 == 9;
# - array = [1, 2, 3] and N = 3, but N is outside of the array, so the result is -1.
#
# Input:Two arguments. An array as a list of integers and a number as a integer.
#
# Output:The result as an integer.
#
# Precondition:0 < len(array) ≤ 10
# 0 ≤ N
# all(0 ≤ x ≤ 100 for x in array)
#
#
#
# END_DESC
def index_power(array: list, n: int) -> int:
    """
    Find the N-th power of the element with index N.
    """
    try:
        # Integer exponentiation avoids the float precision loss of math.pow
        # for large results (e.g. 99 ** 9 does not fit exactly in a float).
        return array[n] ** n
    except IndexError:
        # n is outside of the array
        return -1
if __name__ == '__main__':
print('Example:')
print(index_power([1, 2, 3, 4], 2))
#These "asserts" using only for self-checking and not necessary for auto-testing
assert index_power([1, 2, 3, 4], 2) == 9, "Square"
assert index_power([1, 3, 10, 100], 3) == 1000000, "Cube"
assert index_power([0, 1], 0) == 1, "Zero power"
assert index_power([1, 2], 3) == -1, "IndexError"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
|
[
"[email protected]"
] | |
62ed34b4b13a2835dc9d930611c8f042e571d23f
|
fc023464f57fd32d404682ed80d49ac70236db3b
|
/denovo_SDA.smk
|
920e585fc85abf2015e8333ac5d371ad58e6c29d
|
[
"MIT"
] |
permissive
|
tw7649116/SDA
|
d64dfd78c6dd82e0b6ce29d9f1d7c23af4d3242a
|
1ef2f89a1ca277ffc2c85b5251a5038fc09786e8
|
refs/heads/master
| 2020-11-28T17:59:05.820059 | 2019-12-19T19:34:27 | 2019-12-19T19:34:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,114 |
smk
|
import os
import sys
import tempfile
import numpy as np
import re
import pysam
from collections import Counter
from Bio import SeqIO
import pandas as pd
import tempfile
snake_dir = os.path.dirname(workflow.snakefile) + "/"
CWD = os.getcwd()
python3 = f"cd {snake_dir} && source env_sda.sh && cd {snake_dir}envs/ && source env_python3.sh && cd {CWD}"
shell.executable("/bin/bash")
shell.prefix(f"{python3} && set -eo pipefail; ")
base = snake_dir + "scripts/"
TRF = snake_dir + "/externalRepos/trf-v4.09/bin/trf"
#
# Get tmp dir
#
if "TMPDIR" in os.environ:
TMPDIR = os.environ['TMPDIR']
elif "TMPDIR" in config:
TMPDIR = config['TMPDIR']
else:
TMPDIR = tempfile.gettempdir()
#
# script locations and configurations
#
FOFN = config["fofn"]
PLAT = config["platform"].upper()
REF = os.path.abspath( config["ref"] )
MINALN = config["minaln"]
BANDWIDTH = config["bandwidth"]
PRE = config["prefix"]
DIR = config["dir"]
LRT = config["lrt"]
MINNUMSHARED = config["minNumShared"]
MAXPOSREP = config["maxPosRep"]
MINCUTSIZE = config["minCutSize"]
MINCUTLEN = config["minCutLen"]
ITERATIONS = config["iterations"]
RM_DB = config["species"]
ASSEMBLERS = config["assemblers"]
DEBUG = config["debug"]
MAX_TIME="300m"
# window size for calculating coverage
WINDOW = 1000
# minimum size for a collapse
MINCOLLEN = 15000
# maximum percentage of a collapse that can be common repeat
MAXCR = 75
READS = [ line.strip() for line in open(FOFN).readlines() ]
IDS = list(range(len(READS ) ) )
#
# getting ready to run TRF and RM by splitting the genome into multiple parts to run separately
#
splitSize = 60  # number of chunks to split the reference into
FAI = REF + ".fai"
recs = open(FAI).readlines()
if(splitSize > len(recs)):
splitSize = len(recs)
FRACS = list(range(splitSize))
#
# function to remove temp files
#
def tempd(File):
if(DEBUG):
return(File)
return(temp(File))
wildcard_constraints:
PRE = PRE,
DIR = DIR,
ID = "\d+",
FRAC = "\d+",
onsuccess:
sys.stderr.write("SDA DENOVO FINISHED\n")
onerror:
sys.stderr.write("SDA DENOVO FAILED\n")
rule all:
input:
hcr = f"{DIR}/coverage/{PRE}.collapses.bed",
final = f"{DIR}/{PRE}.done",
bam=f"{DIR}/{PRE}.reads.bam",
bai=f"{DIR}/{PRE}.reads.bam.bai",
###################################################################################
# #
# ALIGN READS TO THE INPUT REFERENCE #
# #
###################################################################################
def get_reads(wildcards):
ID = int(str(wildcards.ID))
return(READS[ID])
if(PLAT in ["CCS", "SUBREAD"] ):
rule pbmm2:
input:
reads = get_reads,
ref = REF,
output:
tempd("{DIR}/{PRE}.{ID}.reads.bam")
resources:
mem=6
threads: 8
shell: """
    # the SUBREAD preset gives better alignments for CCS reads than the CCS preset
pbmm2 align -j {threads} \
--preset SUBREAD -N 50 --min-length {MINALN} -r {BANDWIDTH} \
--sample FAKE_SAMPLE \
{input.ref} {input.reads} | \
samtools view -u -F 2308 - | \
samtools sort -@ {threads} -m 4G -T {TMPDIR}/pbmm2 -o {output}
"""
elif(PLAT in ["ONT"] ):
rule minimap2:
input:
reads = get_reads,
ref = REF,
output:
tempd("{DIR}/{PRE}.{ID}.reads.bam")
resources:
mem=6
threads: 8
shell:"""
samtools fasta {input.reads} | \
minimap2 \
-ax map-ont \
--eqx -L \
-R '@RG\\tID:MINIMAP\\tSM:FAKE_SAMPLE\\tPL:ONT' \
-t {threads} \
-m {MINALN} -r {BANDWIDTH} \
{input.ref} /dev/stdin | \
samtools view -u -F 2308 - | \
samtools sort -@ {threads} -m 4G -T {TMPDIR}/pbmm2 -o {output}
"""
else:
sys.stderr.write("Platform {} not recongnized!\n".format(PLAT))
rule merge_bam:
input:
bams = expand("{{DIR}}/{{PRE}}.{ID}.reads.bam", ID=IDS),
output:
bam = "{DIR}/{PRE}.reads.bam",
resources:
mem=4
threads: 12
shell:"""
samtools merge -@ {threads} {output.bam} {input.bams}
"""
rule index_bam:
input:
bam=rules.merge_bam.output.bam,
output:
bai="{DIR}/{PRE}.reads.bam.bai"
resources:
mem=16
threads: 1
shell:"""
samtools index {input}
"""
#
# this rule creats a bed file that is incremented by 1000 for every contig
# these will be the feautes upon which we calculate depth wtih bedtools
#
rule fai_to_bed:
input:
asmfai= REF + ".fai",
output:
regions=tempd("{DIR}/coverage/{PRE}.regions.bed"),
resources:
mem=16
threads: 1
run:
fai = open(input["asmfai"])
window = WINDOW
out = ""
for line in fai:
token = line.strip().split("\t")
length = int(token[1])
contig = token[0]
for start in range(0, length, window):
end = start + window -1
if(end > length):
end = length
out += "{}\t{}\t{}\n".format(contig, start, end)
open(output["regions"], "w+").write(out)
rule bam_to_coverage:
input:
bam=rules.index_bam.input.bam,
#bai=rules.index_bam.output.bai,
regions=rules.fai_to_bed.output.regions,
output:
cov=tempd("{DIR}/coverage/{PRE}.coverage.bed"),
resources:
mem=16
threads: 1
shell:"""
# get coverage and then sort by contig and then pos
bedtools coverage -bed -mean -sorted -a {input.regions} -b {input.bam} | \
sort -k 1,1 -k2,2n > {output.cov}
"""
###################################################################################
# #
# TRF AND REPEATMASKER ON THE INPUT REFERENCE #
# #
###################################################################################
MAX_RM_LEN = 48
rule split_ref:
input:
ref=REF,
output:
split = tempd(expand("{{DIR}}/common_repeats/{{PRE}}.ref.{FRAC}.fasta", FRAC=FRACS) ),
resources:
mem=16
threads: 1
run:
seqs = list(SeqIO.parse(input["ref"], "fasta"))
toWrite = {}
count = 0
for idx, seq in enumerate(seqs):
if(count not in toWrite):
toWrite[count] = []
seq.id = str(idx)
seq.name = str(idx)
seq.description = str(idx)
toWrite[count].append(seq)
count += 1
if(count == splitSize):
count = 0
for key in toWrite:
print(key, len(toWrite[key]))
SeqIO.write(toWrite[key], output["split"][key], "fasta")
#
# Run RepeatMasker
#
rule RepeatMasker:
input:
split = "{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta",
output:
out = tempd("{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta.out"),
cat = tempd("{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta.cat"),
ref = tempd("{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta.ref"),
tbl = tempd("{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta.tbl"),
msk = tempd("{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta.masked"),
resources:
mem=8,
threads:4
run:
rmdir = os.path.dirname(input["split"])
shell("""
RepeatMasker \
-species {RM_DB} \
-e wublast \
-dir {rmdir} \
-pa {threads} \
{input.split}
""")
#
# Run TRF
#
rule TRF:
input:
split = "{DIR}/common_repeats/{PRE}.ref.{FRAC}.fasta",
output:
trf = tempd("{DIR}/common_repeats/{PRE}.ref.{FRAC}.trf.dat"),
resources:
mem=16,
threads:1
run:
pre = os.path.basename(input["split"])
fasta = os.path.abspath(input["split"])
dat = os.path.abspath(output["trf"])
trfdir = os.path.dirname(input["split"])
param = ["2", "7", "7", "80", "10", "50", "2000"]
trfparam = " ".join(param)
trfout = ".".join(param)
shell("""{TRF} {fasta} {trfparam} -h -ngs > {output.trf} """ )
rule merge_trf_rm:
input:
rm = expand(f"{DIR}/common_repeats/{PRE}.ref.{{FRAC}}.fasta.out", FRAC = FRACS),
asmfai= REF + ".fai",
trf = expand(f"{DIR}/common_repeats/{PRE}.ref.{{FRAC}}.trf.dat", FRAC = FRACS),
output:
crtmp = temp(f"{DIR}/common_repeats/{PRE}.common_repeats.bed"),
cr = f"{DIR}/common_repeats/{PRE}.common_repeats.sort.merge.bed",
rm = f"{DIR}/common_repeats/{PRE}.rm.all.tbl",
trf = f"{DIR}/common_repeats/{PRE}.trf.all.tbl",
resources:
mem=32,
threads:1
run:
fai = pd.read_csv(input["asmfai"], sep="\t", names=["contig", "len", "x", "y", "z"] )
convert = { idx:contig for idx, contig in enumerate(fai["contig"]) }
#
# PARSE TRF
#
trfnames = 'contig start end PeriodSize CopyNumber ConsensusSize PercentMatches PercentIndels Score A C G T Entropy Motif Sequence'.split()
trf= []
for ftrf in input["trf"]:
chrom = None
sys.stderr.write(ftrf + "\n" )
with open(ftrf, 'r') as dat:
for line in dat:
splitline = line.split()
if( line.startswith("Sequence:") ):
chrom = int(line.split()[1].strip())
#sys.stderr.write(chrom + "\n")
elif( line.startswith("@") ):
chrom = int(splitline[0][1:].strip()) # grab everything after the @ in the first word
#sys.stderr.write(chrom + "\n")
else:
# Catch index errors when line is blank
try:
# Check if in header sequence (all non-header lines start with an int: start pos)
try:
int(splitline[0])
except ValueError:
continue
trf.append([chrom] + splitline[ 0: (len(trfnames)-1) ] )
except IndexError:
pass
trf = pd.DataFrame(trf, columns=trfnames)
print(trf.shape )
trf["start"] = trf["start"].astype(int)
trf.sort_values(by=["contig", "start"], inplace=True)
print("done sorting trf")
# insert original contig names
#trf["contig"].replace(convert, inplace=True) # This function is very slow for no good reason.
trf["contig"] = trf["contig"].map(convert.get)
print("done converting trf")
trf.to_csv(output["trf"], sep="\t", index=False)
print("done writing trf")
#
# PARSE REPEAT MASKER
#
rms = []
for frm in input["rm"]:
sys.stderr.write(frm + "\n" )
rms.append( pd.read_csv(frm, delim_whitespace=True, header=None, skiprows=[0,1,2], comment="*",
names = ["score", "div", "del", "ins", "contig", "start", "end",
"q_left", "strand", "repeat", "class", "r_st", "r_en", "r_left", "id"]) )
rm = pd.concat(rms, ignore_index=True)
print(rm.shape )
rm.sort_values(by=["contig", "start"], inplace=True)
print("done sorting rm")
# insert original contig names
#rm["contig"].replace(convert, inplace=True)
rm["contig"] = rm["contig"].map(convert.get)
print("done converting rm")
rm.to_csv(output["rm"], sep="\t", index=False)
print("done writing rm")
#
# WRITE TO BED
#
bed = ["contig", "start", "end"]
cm = pd.concat([rm[bed], trf[bed]], ignore_index=True)
cm.to_csv(output["crtmp"], sep="\t", header=False, index=False)
shell("bedtools sort -i {output.crtmp} | bedtools merge -i - > {output.cr}")
###################################################################################
# #
# FIND HIGH COVERAGE REGIONS #
# #
###################################################################################
rule count_cm_per_window:
input:
cr = rules.merge_trf_rm.output.cr,
cov = rules.bam_to_coverage.output.cov,
output:
cov = "{DIR}/coverage/{PRE}.coverage.repeat_counted.bed",
resources:
mem=16,
threads:1
shell:"""
    # count number of overlapping bases with common repeats | drop extra columns | merge overlapping entries and calculate the sum
bedtools intersect -a {input.cov} -b {input.cr} -wao | cut -f 1,2,3,4,8 | bedtools merge -c 4,5 -o mean,sum > {output.cov}
# final output:
# contig\tstart\tend\tcoverage\tcommon repeat bases
"""
rule high_coverage_regions:
input:
cov=rules.count_cm_per_window.output.cov,
asmfai= REF + ".fai",
output:
stats = "{DIR}/coverage/{PRE}.coverage.stats",
hcr = "{DIR}/coverage/{PRE}.collapses.bed",
hcr_cm = "{DIR}/coverage/{PRE}.collapses.with.cm.bed",
resources:
mem=16,
threads:1
run:
bed = pd.read_csv( input["cov"], sep = "\t", header=None, names=['contig', 'start', 'end',"coverage", "cr"])
        # eliminate the really low or really high coverage windows, because they are
        # probably not assembled correctly, before assessing the mean and standard deviation
top = bed.coverage.quantile(.90)
bot = bed.coverage.quantile(.10)
# save stats like mean coverage
stats = bed["coverage"][ (bed.coverage < top) & ( bed.coverage > bot) ].describe()
out = "mean_coverage\tstd_coverage\n{}\t{}\n".format(stats["mean"], stats["std"])
open(output["stats"], "w+").write(out)
        # filter for high coverage regions
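        # (mean + 3*sqrt(mean) reads like a Poisson-style cutoff: for Poisson counts the std is sqrt(mean))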
MINCOV = stats["mean"] + 3 * np.sqrt(stats["mean"])
shell("""
awk '{{ if ($4 > {MINCOV}) print;}}' {input.cov} \
| bedtools merge -d 10 -c 4,4,5 -o mean,median,sum \
| awk '{{ if ($3-$2 > {MINCOLLEN}) {{ print $0"\t"$3-$2;}} }}' > {output.hcr_cm} """)
shell("""
awk '{{ if ($6/$7*100 <= {MAXCR}) {{ print $0}} }}' {output.hcr_cm} > {output.hcr} """)
###################################################################################
# #
# RUN SDA ON HIGH COVERAGE REGIONS #
# #
###################################################################################
COL_DIR = "{DIR}/{PRE}.LocalAssemblies/region_{LA_ID}"
COL_RGN_FMT = os.path.join( COL_DIR , "rgn.bed")
COL_REF_FMT = os.path.join( COL_DIR , "ref.fasta")
COL_BAM_FMT = os.path.join( COL_DIR , "reads.orig.bam")
COL_SDA_FMT = os.path.join( COL_DIR , "region_{LA_ID}.done")
checkpoint local_asm_dirs:
input:
ref = REF,
hcr = rules.high_coverage_regions.output.hcr,
output:
LAs = directory("{DIR}/{PRE}.LocalAssemblies/")
resources:
mem=16,
threads:1
run:
for LA_ID, line in enumerate(open(input["hcr"]).readlines()):
rgn = COL_RGN_FMT.format(DIR=DIR, PRE=PRE, LA_ID=LA_ID)
shell("mkdir -p " + os.path.dirname(rgn) )
open(rgn, "w+").write(line)
#
# helper functions for getting inputs/params for SDA local assemblies
#
def get_ids(wildcards):
checkpoint_output = checkpoints.local_asm_dirs.get(**wildcards).output.LAs
PRE = wildcards.PRE
DIR = wildcards.DIR
TMPS = glob_wildcards( os.path.join(checkpoint_output, "region_{LA_ID}/rgn.bed" ) ).LA_ID
LA_IDs = []
# filter for only IDs that are \d+ and convert to ints
for LA_ID in TMPS:
if( re.match("\d+", LA_ID)):
LA_IDs.append(int(LA_ID))
    # sort cuts to guarantee the same ordering each time
LA_IDs = sorted(LA_IDs)
    # assert that all expected cut IDs are present (no numbers are skipped)
for idx, val in enumerate(LA_IDs):
assert idx == val
return(LA_IDs)
def get_rgn(wildcards):
LA_ID = int(str(wildcards.LA_ID))
token = open(f"{DIR}/coverage/{PRE}.collapses.bed").readlines()[LA_ID].strip().split()
return("{}:{}-{}".format(token[0], token[1], token[2]))
def get_dir(wildcards):
LA_ID = int(str(wildcards.LA_ID))
return(COL_DIR.format(DIR=DIR, PRE=PRE, LA_ID=LA_ID))
def get_pre(wildcards):
LA_ID = int(str(wildcards.LA_ID))
return(f"region_{LA_ID}")
def get_cov(wildcards):
stats = pd.read_csv(f"{DIR}/coverage/{PRE}.coverage.stats", sep="\t")
mean = stats["mean_coverage"][0]
return(mean)
#
# rules to gather data for and run SDA
#
rule la_ref:
input:
ref = REF,
hcr = rules.high_coverage_regions.output.hcr,
output:
ref = COL_REF_FMT,
fai = COL_REF_FMT + ".fai",
params:
rgn = get_rgn,
resources:
mem=16,
threads:1
shell:"""
samtools faidx {input.ref} '{params.rgn}' > {output.ref}
samtools faidx {output.ref}
"""
def get_refs(wildcards):
return( [ COL_REF_FMT.format(DIR=DIR, PRE=PRE, LA_ID=LA_ID) for LA_ID in get_ids(wildcards) ] )
rule la_bam:
input:
hcr = rules.high_coverage_regions.output.hcr,
bam=rules.index_bam.input.bam,
bai=rules.index_bam.output.bai,
output:
bam = COL_BAM_FMT,
params:
rgn = get_rgn,
resources:
mem=16,
threads:1
shell:"""
samtools view -b {input.bam} '{params.rgn}' > {output.bam}
"""
def get_bams(wildcards):
return( [ COL_BAM_FMT.format(DIR=DIR, PRE=PRE, LA_ID=LA_ID) for LA_ID in get_ids(wildcards) ] )
rule la_sda:
input:
bam = COL_BAM_FMT,
ref = COL_REF_FMT,
fai = COL_REF_FMT + ".fai",
output:
sda = COL_SDA_FMT,
out = COL_SDA_FMT + ".log",
params:
sda_dir = get_dir,
pre = get_pre,
cov = get_cov,
resources:
mem=4,
threads:8
run:
SDA_BAM = os.path.abspath(input["bam"])
SDA_REF = os.path.abspath(input["ref"])
SDA_DIR = os.path.abspath(params["sda_dir"])
SDA_SDA = os.path.abspath(output["sda"])
SDA_OUT = os.path.abspath(output["out"])
cmd = """
# clear any previous runs
rm -rf {params.sda_dir}/{params.pre}*
rm -rf {params.sda_dir}/*/{params.pre}*
# move to execution dir
pushd {params.sda_dir}
{snake_dir}SDA collapse --ref {SDA_REF} --reads {SDA_BAM} --coverage {params.cov} \
-d {SDA_DIR} -p {params.pre} -t {threads} \
--platform {PLAT} --minaln {MINALN} --bandwidth {BANDWIDTH} --iterations {ITERATIONS} \
--assemblers {ASSEMBLERS} --lrt {LRT} --minNumShared {MINNUMSHARED} --maxPosRep {MAXPOSREP} \
--minCutSize {MINCUTSIZE} --minCutLen {MINCUTLEN} --unlock &> \
/dev/null || echo "Already unlocked."
timeout {MAX_TIME} {snake_dir}SDA collapse --ref {SDA_REF} --reads {SDA_BAM} --coverage {params.cov} \
-d {SDA_DIR} -p {params.pre} -t {threads} \
--platform {PLAT} --minaln {MINALN} --bandwidth {BANDWIDTH} --iterations {ITERATIONS} \
--assemblers {ASSEMBLERS} --lrt {LRT} --minNumShared {MINNUMSHARED} --maxPosRep {MAXPOSREP} \
--minCutSize {MINCUTSIZE} --minCutLen {MINCUTLEN} &> \
{SDA_OUT} || echo "SDA failed on this collapse" && touch {SDA_SDA}
popd
"""
shell(cmd)
def get_sda(wildcards):
return( [ COL_SDA_FMT.format(DIR=DIR, PRE=PRE, LA_ID=LA_ID) for LA_ID in get_ids(wildcards) ] )
rule merge_results:
input:
get_sda,
output:
fasta = "{DIR}/{PRE}.assemblies.fasta",
preads = "{DIR}/{PRE}.phased.readids",
summary = "{DIR}/{PRE}.summary.txt",
psvs = "{DIR}/{PRE}.psv.tbl",
resources:
mem=4,
threads: 1
run:
preads = []
summary = []
for LA_ID, sda_log in enumerate(input):
print(LA_ID, sda_log)
sda_dir = os.path.dirname(sda_log)
fasta_path = os.path.join(sda_dir, f"region_{LA_ID}.assemblies.fasta")
if(os.path.exists(fasta_path)):
shell("cat {fasta_path} >> {output.fasta}")
preads_path = os.path.join(sda_dir, f"region_{LA_ID}.phased.readids")
if(os.path.exists(preads_path)):
preads.append(pd.read_csv(preads_path, sep = "\t"))
summary_path = os.path.join(sda_dir, f"region_{LA_ID}.summary.txt")
if(os.path.exists(summary_path)):
summary.append(pd.read_csv(summary_path, sep = "\t"))
psv_path = os.path.join(sda_dir, f"region_{LA_ID}.psv.tbl")
if(os.path.exists(psv_path)):
shell("cat {psv_path} >> {output.psvs}")
pd.concat(preads, ignore_index=True).to_csv(output["preads"], sep="\t", index=False)
pd.concat(summary, ignore_index=True).to_csv(output["summary"], sep="\t", index=False)
rule summary_plots:
input:
fasta = "{DIR}/{PRE}.assemblies.fasta",
preads = "{DIR}/{PRE}.phased.readids",
summary = "{DIR}/{PRE}.summary.txt",
psvs = "{DIR}/{PRE}.psv.tbl",
output:
pair = "{DIR}/summary_plots/{PRE}.paired.pdf",
length = "{DIR}/summary_plots/{PRE}.assembled_lengths.pdf",
collapse = "{DIR}/summary_plots/{PRE}.collapse_vs_assemblies.pdf",
bar = "{DIR}/summary_plots/{PRE}.assembled_mbp.pdf",
resources:
mem=16,
threads: 1
shell:"""
{snake_dir}scripts/SummarySDAPlots.py {input.summary} \
--dir {DIR} \
--prefix {PRE} \
--pair {output.pair} \
--length {output.length} \
--collapse {output.collapse} \
--bar {output.bar}
"""
rule final:
input:
fasta = "{DIR}/{PRE}.assemblies.fasta",
preads = "{DIR}/{PRE}.phased.readids",
summary = "{DIR}/{PRE}.summary.txt",
psvs = "{DIR}/{PRE}.psv.tbl",
pair = rules.summary_plots.output.pair,
length = rules.summary_plots.output.length,
collapse = rules.summary_plots.output.collapse,
bar = rules.summary_plots.output.bar,
output:
final = "{DIR}/{PRE}.done",
resources:
mem=16,
threads:1
shell:"""
touch {output.final}
"""
|
[
"[email protected]"
] | |
d368191c4b07a6fe52a4920118299fd10d73bf23
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_09_01/aio/operations/_snapshots_operations.py
|
5b62b8146db604663959850f0fb0470a54a72c31
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 |
MIT
| 2023-09-08T08:38:48 | 2019-11-18T07:09:24 |
Python
|
UTF-8
|
Python
| false | false | 27,220 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._snapshots_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_request,
build_update_tags_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SnapshotsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2021_09_01.aio.ContainerServiceClient`'s
:attr:`snapshots` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]:
"""Gets a list of snapshots in the specified subscription.
Gets a list of snapshots in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Snapshot or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_09_01.models.Snapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/snapshots"}
@distributed_trace
def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]:
"""Lists snapshots in the specified subscription and resource group.
Lists snapshots in the specified subscription and resource group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Snapshot or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_09_01.models.Snapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
cls: ClsType[_models.SnapshotListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots"
}
@distributed_trace_async
async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.Snapshot:
"""Gets a snapshot.
Gets a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Snapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}"
}
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: _models.Snapshot,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Snapshot:
"""Creates or updates a snapshot.
Creates or updates a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The snapshot to create or update. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Snapshot:
"""Creates or updates a snapshot.
Creates or updates a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The snapshot to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self, resource_group_name: str, resource_name: str, parameters: Union[_models.Snapshot, IO], **kwargs: Any
) -> _models.Snapshot:
"""Creates or updates a snapshot.
Creates or updates a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
        :param parameters: The snapshot to create or update. Is either a Snapshot type or an IO type.
Required.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Snapshot")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Snapshot", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Snapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}"
}
@overload
async def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: _models.TagsObject,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Snapshot:
"""Updates tags on a snapshot.
Updates tags on a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update snapshot Tags operation. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.TagsObject
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Snapshot:
"""Updates tags on a snapshot.
Updates tags on a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update snapshot Tags operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update_tags(
self, resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO], **kwargs: Any
) -> _models.Snapshot:
"""Updates tags on a snapshot.
Updates tags on a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update snapshot Tags operation. Is either a
        TagsObject type or an IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.TagsObject or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Snapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.Snapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "TagsObject")
request = build_update_tags_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update_tags.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Snapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
"""Deletes a snapshot.
Deletes a snapshot.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/snapshots/{resourceName}"
}
----------------------------------------------------------------------------
repo: BartoszRudnik/GK (branch: main)
path: /Lab/venv/lib/python3.8/site-packages/OpenGL/GL/ARB/separate_shader_objects.py
language: Python | encoding: UTF-8 | license: none | size: 11,289 bytes
----------------------------------------------------------------------------
'''OpenGL extension ARB.separate_shader_objects
This module customises the behaviour of the
OpenGL.raw.GL.ARB.separate_shader_objects to provide a more
Python-friendly API
Overview (from the spec)
Conventional GLSL requires multiple shader stages (vertex,
fragment, geometry, tessellation control, and tessellation
evaluation) to be linked into a single monolithic program object to
specify a GLSL shader for each stage.
While GLSL's monolithic approach has some advantages for
optimizing shaders as a unit that span multiple stages, all
existing GPU hardware supports the more flexible mix-and-match
approach.
Shaders written for HLSL9, Cg, the prior OpenGL assembly program
extensions, and game consoles favor a more flexible "mix-and-match"
approach to specifying shaders independently for these different
shader stages. Many developers build their shader content around
the mix-and-match approach where they can use a single vertex shader
with multiple fragment shaders (or vice versa).
This extension adopts a "mix-and-match" shader stage model for GLSL
allowing multiple different GLSL program objects to be bound at once
each to an individual rendering pipeline stage independently of
other stage bindings. This allows program objects to contain only
the shader stages that best suit the application's needs.
This extension introduces the program pipeline object that serves as
a container for the program bound to any particular rendering stage.
It can be bound, unbound, and rebound to simply save and restore the
complete shader stage to program object bindings. Like framebuffer
and vertex array objects, program pipeline objects are "container"
objects that are not shared between contexts.
To bind a program object to a specific shader stage or set of
stages, UseProgramStages is used. The VERTEX_SHADER_BIT,
GEOMETRY_SHADER_BIT, FRAGMENT_SHADER_BIT, TESS_CONTROL_SHADER_BIT,
and TESS_EVALUATION_SHADER_BIT tokens refer to the conventional
vertex, geometry, fragment, tessellation control and tessellation
evaluation stages respectively. ActiveShaderProgram specifies the
program that Uniform* commands will update.
While ActiveShaderProgram allows the use of conventional Uniform*
commands to update uniform variable values for separable program
objects, this extension provides a preferable interface in a set
of ProgramUniform* commands that update the same uniform variables
but take a parameter indicating the program object to be updated,
rather than updating the currently active program object. These
commands mirror those introduced in EXT_direct_state_access.
While glActiveShaderProgram provides a selector for setting and
querying uniform values of a program object, the glProgramUniform*
commands provide a selector-free way to modify uniforms of a GLSL
program object without an explicit bind. This selector-free model
reduces API overhead and provides a cleaner interface for
applications.
Separate linking creates the possibility that certain output varyings
of a shader may go unread by the subsequent shader stage that inputs
varyings. In this case, the output varyings are simply ignored. It is
also possible that input varyings of a shader may not be written as
output varyings of the preceding shader. In this case, the unwritten
input varying values are undefined.
This extension builds on the proof-of-concept provided by
EXT_separate_shader_objects which demonstrated that separate
shader objects can work for GLSL. EXT_separate_shader_objects
was a response to repeated requests for this functionality from
3D developers.
This ARB version addresses several "loose ends" in the prior
EXT extension. In particular, it allows user-defined varyings
with explicitly defined locations or implicitly assigned locations.
This ARB extension extends the GLSL language's use of layout
qualifiers to provide cross-stage interfacing.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/separate_shader_objects.txt
'''
from OpenGL import wrapper
from OpenGL.raw.GL import _glgets
from OpenGL.raw.GL.ARB.separate_shader_objects import *
from OpenGL.raw.GL.ARB.separate_shader_objects import _EXTENSION_NAME
def glInitSeparateShaderObjectsARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension(_EXTENSION_NAME)
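# Illustrative sketch of the mix-and-match model this extension enables
# (comments only: the calls need a live GL context, and the names follow the
# standard GL API; PyOpenGL's wrappers may take array sizes slightly
# differently):
#
#     vs_prog = glCreateShaderProgramv(GL_VERTEX_SHADER, [vs_source])
#     fs_prog = glCreateShaderProgramv(GL_FRAGMENT_SHADER, [fs_source])
#     pipeline = glGenProgramPipelines(1)
#     glUseProgramStages(pipeline, GL_VERTEX_SHADER_BIT, vs_prog)
#     glUseProgramStages(pipeline, GL_FRAGMENT_SHADER_BIT, fs_prog)
#     glBindProgramPipeline(pipeline)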
# INPUT glCreateShaderProgramv.strings size not checked against count
glCreateShaderProgramv = wrapper.wrapper(glCreateShaderProgramv).setInputArraySize(
'strings', None
)
# INPUT glDeleteProgramPipelines.pipelines size not checked against n
glDeleteProgramPipelines=wrapper.wrapper(glDeleteProgramPipelines).setInputArraySize(
'pipelines', None
)
glGenProgramPipelines=wrapper.wrapper(glGenProgramPipelines).setOutput(
'pipelines',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetProgramPipelineiv=wrapper.wrapper(glGetProgramPipelineiv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glProgramUniform1iv.value size not checked against count
glProgramUniform1iv=wrapper.wrapper(glProgramUniform1iv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1fv.value size not checked against count
glProgramUniform1fv=wrapper.wrapper(glProgramUniform1fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1dv.value size not checked against count
glProgramUniform1dv=wrapper.wrapper(glProgramUniform1dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1uiv.value size not checked against count
glProgramUniform1uiv=wrapper.wrapper(glProgramUniform1uiv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2iv.value size not checked against count*2
glProgramUniform2iv=wrapper.wrapper(glProgramUniform2iv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2fv.value size not checked against count*2
glProgramUniform2fv=wrapper.wrapper(glProgramUniform2fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2dv.value size not checked against count*2
glProgramUniform2dv=wrapper.wrapper(glProgramUniform2dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2uiv.value size not checked against count*2
glProgramUniform2uiv=wrapper.wrapper(glProgramUniform2uiv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3iv.value size not checked against count*3
glProgramUniform3iv=wrapper.wrapper(glProgramUniform3iv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3fv.value size not checked against count*3
glProgramUniform3fv=wrapper.wrapper(glProgramUniform3fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3dv.value size not checked against count*3
glProgramUniform3dv=wrapper.wrapper(glProgramUniform3dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3uiv.value size not checked against count*3
glProgramUniform3uiv=wrapper.wrapper(glProgramUniform3uiv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4iv.value size not checked against count*4
glProgramUniform4iv=wrapper.wrapper(glProgramUniform4iv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4fv.value size not checked against count*4
glProgramUniform4fv=wrapper.wrapper(glProgramUniform4fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4dv.value size not checked against count*4
glProgramUniform4dv=wrapper.wrapper(glProgramUniform4dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4uiv.value size not checked against count*4
glProgramUniform4uiv=wrapper.wrapper(glProgramUniform4uiv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2fv.value size not checked against count*4
glProgramUniformMatrix2fv=wrapper.wrapper(glProgramUniformMatrix2fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3fv.value size not checked against count*9
glProgramUniformMatrix3fv=wrapper.wrapper(glProgramUniformMatrix3fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4fv.value size not checked against count*16
glProgramUniformMatrix4fv=wrapper.wrapper(glProgramUniformMatrix4fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2dv.value size not checked against count*4
glProgramUniformMatrix2dv=wrapper.wrapper(glProgramUniformMatrix2dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3dv.value size not checked against count*9
glProgramUniformMatrix3dv=wrapper.wrapper(glProgramUniformMatrix3dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4dv.value size not checked against count*16
glProgramUniformMatrix4dv=wrapper.wrapper(glProgramUniformMatrix4dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x3fv.value size not checked against count*6
glProgramUniformMatrix2x3fv=wrapper.wrapper(glProgramUniformMatrix2x3fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x2fv.value size not checked against count*6
glProgramUniformMatrix3x2fv=wrapper.wrapper(glProgramUniformMatrix3x2fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x4fv.value size not checked against count*8
glProgramUniformMatrix2x4fv=wrapper.wrapper(glProgramUniformMatrix2x4fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x2fv.value size not checked against count*8
glProgramUniformMatrix4x2fv=wrapper.wrapper(glProgramUniformMatrix4x2fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x4fv.value size not checked against count*12
glProgramUniformMatrix3x4fv=wrapper.wrapper(glProgramUniformMatrix3x4fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x3fv.value size not checked against count*12
glProgramUniformMatrix4x3fv=wrapper.wrapper(glProgramUniformMatrix4x3fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x3dv.value size not checked against count*6
glProgramUniformMatrix2x3dv=wrapper.wrapper(glProgramUniformMatrix2x3dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x2dv.value size not checked against count*6
glProgramUniformMatrix3x2dv=wrapper.wrapper(glProgramUniformMatrix3x2dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x4dv.value size not checked against count*8
glProgramUniformMatrix2x4dv=wrapper.wrapper(glProgramUniformMatrix2x4dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x2dv.value size not checked against count*8
glProgramUniformMatrix4x2dv=wrapper.wrapper(glProgramUniformMatrix4x2dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x4dv.value size not checked against count*12
glProgramUniformMatrix3x4dv=wrapper.wrapper(glProgramUniformMatrix3x4dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x3dv.value size not checked against count*12
glProgramUniformMatrix4x3dv=wrapper.wrapper(glProgramUniformMatrix4x3dv).setInputArraySize(
'value', None
)
glGetProgramPipelineInfoLog=wrapper.wrapper(glGetProgramPipelineInfoLog).setOutput(
'infoLog',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setOutput(
'length',size=(1,),orPassIn=True
)
### END AUTOGENERATED SECTION
----------------------------------------------------------------------------
repo: glormph/kantele (branch: master)
path: /src/backend/datasets/migrations/0015_proj_exp_run_unique.py
language: Python | encoding: UTF-8 | license: none | size: 696 bytes
----------------------------------------------------------------------------
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datasets', '0014_merge_duplicate_projects'),
]
operations = [
migrations.AlterField(
model_name='project',
name='name',
field=models.TextField(unique=True),
),
migrations.AddConstraint(
model_name='experiment',
constraint=models.UniqueConstraint(fields=('name', 'project'), name='uni_expproj'),
),
migrations.AddConstraint(
model_name='runname',
constraint=models.UniqueConstraint(fields=('name', 'experiment'), name='uni_runexp'),
),
]
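# Roughly the DDL these operations emit on PostgreSQL (illustrative only;
# actual table/column names depend on the models in the 'datasets' app):
#
#     ALTER TABLE datasets_experiment
#         ADD CONSTRAINT uni_expproj UNIQUE (name, project_id);
#     ALTER TABLE datasets_runname
#         ADD CONSTRAINT uni_runexp UNIQUE (name, experiment_id);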
----------------------------------------------------------------------------
repo: Gr4cchus/Learn-Python-3-The-Hard-Way (branch: master)
path: /.history/ex45-test_20190608175151.py
language: Python | encoding: UTF-8 | license: none | size: 423 bytes
----------------------------------------------------------------------------
class Room1():
    def enter(self):
        print("You enter room 1")

class Room2():
    def enter(self):
        print("You enter room 2")

class Map():
    def __init__(self, starting_room):
        self.starting_room = starting_room
        # Store room instances so they can be entered directly.
        self.locations = {
            'room1': Room1(),
            'room2': Room2()
        }

class Engine():
    def __init__(self, map):
        self.map = map

    def play(self):
        # Look up the starting room on the map and enter it.
        room = self.map.locations[self.map.starting_room]
        room.enter()
[
"[email protected]"
] | |
4ac036ed6281c7ea308ec337312bb37f199de5f4
|
a3d058c6a80d4068fa4d3185ddd2dec91abc82d7
|
/merge_sort.py
|
ec31be8a8d73eaaca1cf87b074ff8b196a0dcbc5
|
[] |
no_license
|
guard1000/Everyday-coding
|
d6f496654b635738a4284270f6c5d285116a760e
|
7755f99cdb512d623392af82282bf17b47cb77f2
|
refs/heads/master
| 2021-08-18T22:26:04.322162 | 2021-07-21T14:53:28 | 2021-07-21T14:53:28 | 161,440,626 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,160 |
py
|
def mergeSort(alist):
    if len(alist) > 1:
        mid = len(alist)//2          # split around the midpoint
        lefthalf = alist[:mid]       # divide via list slicing
        righthalf = alist[mid:]
        mergeSort(lefthalf)          # recursive calls
        mergeSort(righthalf)
        i = 0
        j = 0
        k = 0
        while i < len(lefthalf) and j < len(righthalf):  # merge the two sorted halves
            if lefthalf[i] < righthalf[j]:
                alist[k] = lefthalf[i]
                i = i+1
            else:
                alist[k] = righthalf[j]
                j = j+1
            k = k+1
        while i < len(lefthalf):     # copy any leftovers from the left half
            alist[k] = lefthalf[i]
            i = i+1
            k = k+1
        while j < len(righthalf):    # copy any leftovers from the right half
            alist[k] = righthalf[j]
            j = j+1
            k = k+1

alist = []
with open('data.txt') as f:          # read the input data
    lines = f.read().split()         # read the values in data.txt token by token
    for line in lines:               # and append them to the list
        alist.append(int(line))
print('before merge sort')
print(alist)
mergeSort(alist)
print('after merge sort')
print(alist)
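# Illustrative check (hypothetical input, independent of data.txt):
#     data = [5, 2, 9, 1]
#     mergeSort(data)    # data is now [1, 2, 5, 9]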
----------------------------------------------------------------------------
repo: cool2528/miniblink49 (branch: master)
path: /third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/base.py
language: Python | encoding: UTF-8 | size: 78,674 bytes
license_type: permissive (BSD-3-Clause, BSD-2-Clause, LGPL-2.0/2.1,
GPL-1.0/2.0, Apache-2.0, and related scancode tags)
----------------------------------------------------------------------------
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract base class of Port-specific entry points for the layout tests
test infrastructure (the Port and Driver classes)."""
import cgi
import difflib
import errno
import itertools
import json
import logging
import os
import operator
import optparse
import re
import sys
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
from webkitpy.common import find_files
from webkitpy.common import read_checksum_from_png
from webkitpy.common.memoized import memoized
from webkitpy.common.system import path
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.path import cygpath
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import config as port_config
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import server_process
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.servers import apache_http
from webkitpy.layout_tests.servers import pywebsocket
from webkitpy.layout_tests.servers import wptserve
_log = logging.getLogger(__name__)
# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package."""
# Subclasses override this. This should indicate the basic implementation
# part of the port name, e.g., 'mac', 'win', 'gtk'; there is probably (?)
# one unique value per class.
# FIXME: We should probably rename this to something like 'implementation_name'.
port_name = None
# Test names resemble unix relative paths, and use '/' as a directory separator.
TEST_PATH_SEPARATOR = '/'
ALL_BUILD_TYPES = ('debug', 'release')
CONTENT_SHELL_NAME = 'content_shell'
    # True if the port has aac and mp3 codecs built in.
PORT_HAS_AUDIO_CODECS_BUILT_IN = False
ALL_SYSTEMS = (
('snowleopard', 'x86'),
('lion', 'x86'),
# FIXME: We treat Retina (High-DPI) devices as if they are running
# a different operating system version. This isn't accurate, but will work until
# we need to test and support baselines across multiple O/S versions.
('retina', 'x86'),
('mountainlion', 'x86'),
('mavericks', 'x86'),
('yosemite', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'),
# FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
# If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
('icecreamsandwich', 'x86'),
)
ALL_BASELINE_VARIANTS = [
'mac-yosemite', 'mac-mavericks', 'mac-retina', 'mac-mountainlion', 'mac-lion', 'mac-snowleopard',
'win-win7', 'win-xp',
'linux-x86_64', 'linux-x86',
]
CONFIGURATION_SPECIFIER_MACROS = {
'mac': ['snowleopard', 'lion', 'mountainlion', 'retina', 'mavericks', 'yosemite'],
'win': ['xp', 'win7'],
'linux': ['lucid'],
'android': ['icecreamsandwich'],
}
DEFAULT_BUILD_DIRECTORIES = ('out',)
# overridden in subclasses.
FALLBACK_PATHS = {}
SUPPORTED_VERSIONS = []
# URL to the build requirements page.
BUILD_REQUIREMENTS_URL = ''
@classmethod
def latest_platform_fallback_path(cls):
return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
@classmethod
def _static_build_path(cls, filesystem, build_directory, chromium_base, configuration, comps):
if build_directory:
return filesystem.join(build_directory, configuration, *comps)
hits = []
for directory in cls.DEFAULT_BUILD_DIRECTORIES:
base_dir = filesystem.join(chromium_base, directory, configuration)
path = filesystem.join(base_dir, *comps)
if filesystem.exists(path):
hits.append((filesystem.mtime(path), path))
if hits:
hits.sort(reverse=True)
return hits[0][1] # Return the newest file found.
# We have to default to something, so pick the last one.
return filesystem.join(base_dir, *comps)
@classmethod
def determine_full_port_name(cls, host, options, port_name):
"""Return a fully-specified port name that can be used to construct objects."""
# Subclasses will usually override this.
assert port_name.startswith(cls.port_name)
return port_name
def __init__(self, host, port_name, options=None, **kwargs):
# This value may be different from cls.port_name by having version modifiers
# and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
self._name = port_name
# These are default values that should be overridden in a subclasses.
self._version = ''
self._architecture = 'x86'
# FIXME: Ideally we'd have a package-wide way to get a
# well-formed options object that had all of the necessary
# options defined on it.
self._options = options or optparse.Values()
self.host = host
self._executive = host.executive
self._filesystem = host.filesystem
self._webkit_finder = WebKitFinder(host.filesystem)
self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
self._helper = None
self._http_server = None
self._websocket_server = None
self._is_wpt_enabled = hasattr(options, 'enable_wptserve') and options.enable_wptserve
self._wpt_server = None
self._image_differ = None
self._server_process_constructor = server_process.ServerProcess # overridable for testing
self._http_lock = None # FIXME: Why does this live on the port object?
self._dump_reader = None
# Python's Popen has a bug that causes any pipes opened to a
# process that can't be executed to be leaked. Since this
# code is specifically designed to tolerate exec failures
# to gracefully handle cases where wdiff is not installed,
# the bug results in a massive file descriptor leak. As a
# workaround, if an exec failure is ever experienced for
# wdiff, assume it's not available. This will leak one
# file descriptor but that's better than leaking each time
# wdiff would be run.
#
# http://mail.python.org/pipermail/python-list/
# 2008-August/505753.html
# http://bugs.python.org/issue3210
self._wdiff_available = None
# FIXME: prettypatch.py knows this path, why is it copied here?
self._pretty_patch_path = self.path_from_webkit_base("Tools", "Scripts", "webkitruby", "PrettyPatch", "prettify.rb")
self._pretty_patch_available = None
if not hasattr(options, 'configuration') or not options.configuration:
self.set_option_default('configuration', self.default_configuration())
self._test_configuration = None
self._reftest_list = {}
self._results_directory = None
self._virtual_test_suites = None
def buildbot_archives_baselines(self):
return True
def additional_driver_flag(self):
if self.driver_name() == self.CONTENT_SHELL_NAME:
return ['--run-layout-test']
return []
def supports_per_test_timeout(self):
return False
def default_pixel_tests(self):
return True
def default_smoke_test_only(self):
return False
def default_timeout_ms(self):
timeout_ms = 6 * 1000
if self.get_option('configuration') == 'Debug':
# Debug is usually 2x-3x slower than Release.
return 3 * timeout_ms
return timeout_ms
def driver_stop_timeout(self):
""" Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
# We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
# well (for things like ASAN, Valgrind, etc.)
return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
def wdiff_available(self):
if self._wdiff_available is None:
self._wdiff_available = self.check_wdiff(logging=False)
return self._wdiff_available
def pretty_patch_available(self):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
return self._pretty_patch_available
def default_batch_size(self):
"""Return the default batch size to use for this port."""
if self.get_option('enable_sanitizer'):
# ASAN/MSAN/TSAN use more memory than regular content_shell. Their
# memory usage may also grow over time, up to a certain point.
# Relaunching the driver periodically helps keep it under control.
return 40
        # The default is an infinite batch size.
return None
def default_child_processes(self):
"""Return the number of child processes to use for this port."""
return self._executive.cpu_count()
def max_drivers_per_process(self):
"""The maximum number of drivers a child process can use for this port."""
return 2
def default_max_locked_shards(self):
"""Return the number of "locked" shards to run in parallel (like the http tests)."""
max_locked_shards = int(self.default_child_processes()) / 4
if not max_locked_shards:
return 1
return max_locked_shards
def baseline_path(self):
"""Return the absolute path to the directory to store new baselines in for this port."""
# FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
return self.baseline_version_dir()
def baseline_platform_dir(self):
"""Return the absolute path to the default (version-independent) platform-specific results."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
def baseline_version_dir(self):
"""Return the absolute path to the platform-and-version-specific results."""
baseline_search_paths = self.baseline_search_path()
return baseline_search_paths[0]
def virtual_baseline_search_path(self, test_name):
suite = self.lookup_virtual_suite(test_name)
if not suite:
return None
return [self._filesystem.join(path, suite.name) for path in self.default_baseline_search_path()]
def baseline_search_path(self):
return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
def default_baseline_search_path(self):
"""Return a list of absolute paths to directories to search under for
baselines. The directories are searched in order."""
return map(self._webkit_baseline_path, self.FALLBACK_PATHS[self.version()])
@memoized
def _compare_baseline(self):
factory = PortFactory(self.host)
target_port = self.get_option('compare_port')
if target_port:
return factory.get(target_port).default_baseline_search_path()
return []
def _check_file_exists(self, path_to_file, file_description,
override_step=None, logging=True):
"""Verify the file is present where expected or log an error.
Args:
            file_description: The (human friendly) name or description of the file
you're looking for (e.g., "HTTP Server"). Used for error logging.
override_step: An optional string to be logged if the check fails.
logging: Whether or not log the error messages."""
if not self._filesystem.exists(path_to_file):
if logging:
_log.error('Unable to find %s' % file_description)
_log.error(' at %s' % path_to_file)
if override_step:
_log.error(' %s' % override_step)
_log.error('')
return False
return True
def check_build(self, needs_http, printer):
result = True
dump_render_tree_binary_path = self._path_to_driver()
result = self._check_file_exists(dump_render_tree_binary_path,
'test driver') and result
if not result and self.get_option('build'):
result = self._check_driver_build_up_to_date(
self.get_option('configuration'))
else:
_log.error('')
helper_path = self._path_to_helper()
if helper_path:
result = self._check_file_exists(helper_path,
'layout test helper') and result
if self.get_option('pixel_tests'):
result = self.check_image_diff(
'To override, invoke with --no-pixel-tests') and result
# It's okay if pretty patch and wdiff aren't available, but we will at least log messages.
self._pretty_patch_available = self.check_pretty_patch()
self._wdiff_available = self.check_wdiff()
if self._dump_reader:
result = self._dump_reader.check_is_functional() and result
if needs_http:
result = self.check_httpd() and result
return test_run_results.OK_EXIT_STATUS if result else test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
def _check_driver(self):
driver_path = self._path_to_driver()
if not self._filesystem.exists(driver_path):
_log.error("%s was not found at %s" % (self.driver_name(), driver_path))
return False
return True
def _check_port_build(self):
# Ports can override this method to do additional checks.
return True
def check_sys_deps(self, needs_http):
"""If the port needs to do some runtime checks to ensure that the
tests can be run successfully, it should override this routine.
This step can be skipped with --nocheck-sys-deps.
Returns whether the system is properly configured."""
cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
local_error = ScriptError()
def error_handler(script_error):
local_error.exit_code = script_error.exit_code
output = self._executive.run_command(cmd, error_handler=error_handler)
if local_error.exit_code:
_log.error('System dependencies check failed.')
_log.error('To override, invoke with --nocheck-sys-deps')
_log.error('')
_log.error(output)
            if self.BUILD_REQUIREMENTS_URL:
_log.error('')
_log.error('For complete build requirements, please see:')
_log.error(self.BUILD_REQUIREMENTS_URL)
return test_run_results.SYS_DEPS_EXIT_STATUS
return test_run_results.OK_EXIT_STATUS
def check_image_diff(self, override_step=None, logging=True):
"""This routine is used to check whether image_diff binary exists."""
image_diff_path = self._path_to_image_diff()
if not self._filesystem.exists(image_diff_path):
_log.error("image_diff was not found at %s" % image_diff_path)
return False
return True
def check_pretty_patch(self, logging=True):
"""Checks whether we can use the PrettyPatch ruby script."""
try:
_ = self._executive.run_command(['ruby', '--version'])
except OSError, e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
if logging:
_log.warning("Ruby is not installed; can't generate pretty patches.")
_log.warning('')
return False
if not self._filesystem.exists(self._pretty_patch_path):
if logging:
_log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
_log.warning('')
return False
return True
def check_wdiff(self, logging=True):
if not self._path_to_wdiff():
# Don't need to log here since this is the port choosing not to use wdiff.
return False
try:
_ = self._executive.run_command([self._path_to_wdiff(), '--help'])
except OSError:
if logging:
message = self._wdiff_missing_message()
if message:
for line in message.splitlines():
_log.warning(' ' + line)
_log.warning('')
return False
return True
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install it to generate word-by-word diffs.'
def check_httpd(self):
httpd_path = self.path_to_apache()
try:
server_name = self._filesystem.basename(httpd_path)
env = self.setup_environ_for_server(server_name)
if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
_log.error("httpd seems broken. Cannot run http tests.")
return False
return True
except OSError:
_log.error("No httpd found. Cannot run http tests.")
return False
def do_text_results_differ(self, expected_text, actual_text):
return expected_text != actual_text
def do_audio_results_differ(self, expected_audio, actual_audio):
return expected_audio != actual_audio
def diff_image(self, expected_contents, actual_contents):
"""Compare two images and return a tuple of an image diff, and an error string.
If an error occurs (like image_diff isn't found, or crashes, we log an error and return True (for a diff).
"""
# If only one of them exists, return that one.
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents:
return (expected_contents, None)
if not expected_contents:
return (actual_contents, None)
tempdir = self._filesystem.mkdtemp()
expected_filename = self._filesystem.join(str(tempdir), "expected.png")
self._filesystem.write_binary_file(expected_filename, expected_contents)
actual_filename = self._filesystem.join(str(tempdir), "actual.png")
self._filesystem.write_binary_file(actual_filename, actual_contents)
diff_filename = self._filesystem.join(str(tempdir), "diff.png")
# image_diff needs native win paths as arguments, so we need to convert them if running under cygwin.
native_expected_filename = self._convert_path(expected_filename)
native_actual_filename = self._convert_path(actual_filename)
native_diff_filename = self._convert_path(diff_filename)
executable = self._path_to_image_diff()
# Note that although we are handed 'old', 'new', image_diff wants 'new', 'old'.
        command = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
result = None
err_str = None
try:
            exit_code = self._executive.run_command(command, return_exit_code=True)
if exit_code == 0:
# The images are the same.
result = None
elif exit_code == 1:
result = self._filesystem.read_binary_file(native_diff_filename)
else:
err_str = "Image diff returned an exit code of %s. See http://crbug.com/278596" % exit_code
except OSError, e:
err_str = 'error running image diff: %s' % str(e)
finally:
self._filesystem.rmtree(str(tempdir))
return (result, err_str or None)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
"""Returns a string containing the diff of the two text strings
in 'unified diff' format."""
# The filenames show up in the diff output, make sure they're
# raw bytes and not unicode, so that they don't trigger join()
# trying to decode the input.
def to_raw_bytes(string_value):
if isinstance(string_value, unicode):
return string_value.encode('utf-8')
return string_value
expected_filename = to_raw_bytes(expected_filename)
actual_filename = to_raw_bytes(actual_filename)
diff = difflib.unified_diff(expected_text.splitlines(True),
actual_text.splitlines(True),
expected_filename,
actual_filename)
# The diff generated by the difflib is incorrect if one of the files
# does not have a newline at the end of the file and it is present in
# the diff. Relevant Python issue: http://bugs.python.org/issue2142
def diff_fixup(diff):
for line in diff:
yield line
if not line.endswith('\n'):
yield '\n\ No newline at end of file\n'
return ''.join(diff_fixup(diff))
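    # Illustrative effect of diff_fixup above (hypothetical diff): a hunk line
    # lacking a trailing newline is followed by the conventional marker
    #
    #     \ No newline at end of file
    #
    # so the output reads like GNU diff's unified format.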
def driver_name(self):
if self.get_option('driver_name'):
return self.get_option('driver_name')
return self.CONTENT_SHELL_NAME
def expected_baselines_by_extension(self, test_name):
"""Returns a dict mapping baseline suffix to relative path for each baseline in
a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
# FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
# We should probably rename them both.
baseline_dict = {}
reference_files = self.reference_files(test_name)
if reference_files:
# FIXME: How should this handle more than one type of reftest?
baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
for extension in self.baseline_extensions():
path = self.expected_filename(test_name, extension, return_default=False)
baseline_dict[extension] = self.relative_test_filename(path) if path else path
return baseline_dict
def baseline_extensions(self):
"""Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
return ('.wav', '.txt', '.png')
def expected_baselines(self, test_name, suffix, all_baselines=False):
"""Given a test name, finds where the baseline results are located.
Args:
test_name: name of test file (usually a relative path under LayoutTests/)
suffix: file suffix of the expected results, including dot; e.g.
'.txt' or '.png'. This should not be None, but may be an empty
string.
all_baselines: If True, return an ordered list of all baseline paths
for the given platform. If False, return only the first one.
Returns
a list of ( platform_dir, results_filename ), where
platform_dir - abs path to the top of the results tree (or test
tree)
results_filename - relative path from top of tree to the results
file
(port.join() of the two gives you the full path to the file,
unless None was returned.)
Return values will be in the format appropriate for the current
platform (e.g., "\\" for path separators on Windows). If the results
file is not found, then None will be returned for the directory,
but the expected relative pathname will still be returned.
This routine is generic but lives here since it is used in
conjunction with the other baseline and filename routines that are
platform specific.
"""
baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
baseline_search_path = self.baseline_search_path()
baselines = []
for platform_dir in baseline_search_path:
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if not all_baselines and baselines:
return baselines
# If it wasn't found in a platform directory, return the expected
# result in the test directory, even if no such file actually exists.
platform_dir = self.layout_tests_dir()
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if baselines:
return baselines
return [(None, baseline_filename)]
def expected_filename(self, test_name, suffix, return_default=True):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories,
the directory in which the test itself is located will be returned.
The return value is in the format appropriate for the platform
(e.g., "\\" for path separators on windows).
Args:
test_name: name of test file (usually a relative path under LayoutTests/)
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
platform: the most-specific directory name to use to build the
search list of directories, e.g., 'win', or
'chromium-cg-mac-leopard' (we follow the WebKit format)
return_default: if True, returns the path to the generic expectation if nothing
else is found; if False, returns None.
This routine is generic but is implemented here to live alongside
the other baseline and filename manipulation routines.
"""
# FIXME: The [0] here is very mysterious, as is the destructured return.
platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
if platform_dir:
return self._filesystem.join(platform_dir, baseline_filename)
actual_test_name = self.lookup_virtual_test_base(test_name)
if actual_test_name:
return self.expected_filename(actual_test_name, suffix)
if return_default:
return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
return None
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
png_path = self.expected_filename(test_name, '.png')
if self._filesystem.exists(png_path):
with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
return read_checksum_from_png.read_checksum(filehandle)
return None
def expected_image(self, test_name):
"""Returns the image we expect the test to produce."""
baseline_path = self.expected_filename(test_name, '.png')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_audio(self, test_name):
baseline_path = self.expected_filename(test_name, '.wav')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_text(self, test_name):
"""Returns the text output we expect the test to produce, or None
if we don't expect there to be any text output.
End-of-line characters are normalized to '\n'."""
# FIXME: DRT output is actually utf-8, but since we don't decode the
# output from DRT (instead treating it as a binary string), we read the
# baselines as a binary string, too.
baseline_path = self.expected_filename(test_name, '.txt')
if not self._filesystem.exists(baseline_path):
return None
text = self._filesystem.read_binary_file(baseline_path)
return text.replace("\r\n", "\n")
def _get_reftest_list(self, test_name):
dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
if dirname not in self._reftest_list:
self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
return self._reftest_list[dirname]
@staticmethod
def _parse_reftest_list(filesystem, test_dirpath):
reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
if not filesystem.isfile(reftest_list_path):
return None
reftest_list_file = filesystem.read_text_file(reftest_list_path)
parsed_list = {}
for line in reftest_list_file.split('\n'):
line = re.sub('#.+$', '', line)
split_line = line.split()
if len(split_line) == 4:
# FIXME: Probably one of mozilla's extensions in the reftest.list format. Do we need to support this?
_log.warning("unsupported reftest.list line '%s' in %s" % (line, reftest_list_path))
continue
if len(split_line) < 3:
continue
expectation_type, test_file, ref_file = split_line
parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
return parsed_list
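    # Illustrative reftest.list contents accepted by the parser above
    # (hypothetical file names; text after '#' is stripped as a comment):
    #
    #     == blur.html blur-expected.html       # match reference
    #     != blur.html blur-mismatch.html       # mismatch reference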
def reference_files(self, test_name):
"""Return a list of expectation (== or !=) and filename pairs"""
reftest_list = self._get_reftest_list(test_name)
if not reftest_list:
reftest_list = []
for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
            for extension in Port._supported_file_extensions:
                path = self.expected_filename(test_name, prefix + extension)
if self._filesystem.exists(path):
reftest_list.append((expectation, path))
return reftest_list
return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable=E1103
def tests(self, paths):
"""Return the list of tests found matching paths."""
tests = self._real_tests(paths)
suites = self.virtual_test_suites()
if paths:
tests.extend(self._virtual_tests_matching_paths(paths, suites))
else:
tests.extend(self._all_virtual_tests(suites))
return tests
def _real_tests(self, paths):
# When collecting test cases, skip these directories
skipped_directories = set(['.svn', '_svn', 'platform', 'resources', 'support', 'script-tests', 'reference', 'reftest'])
files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port.is_test_file, self.test_key)
return [self.relative_test_filename(f) for f in files]
# When collecting test cases, we include any file with these extensions.
_supported_file_extensions = set(['.html', '.xml', '.xhtml', '.xht', '.pl',
'.htm', '.php', '.svg', '.mht', '.pdf'])
@staticmethod
# If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
def is_reference_html_file(filesystem, dirname, filename):
if filename.startswith('ref-') or filename.startswith('notref-'):
return True
        filename_without_ext, unused = filesystem.splitext(filename)
        for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
            if filename_without_ext.endswith(suffix):
return True
return False
@staticmethod
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a test on."""
extension = filesystem.splitext(filename)[1]
return extension in Port._supported_file_extensions
@staticmethod
def is_test_file(filesystem, dirname, filename):
return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
ALL_TEST_TYPES = ['audio', 'harness', 'pixel', 'ref', 'text', 'unknown']
def test_type(self, test_name):
fs = self._filesystem
if fs.exists(self.expected_filename(test_name, '.png')):
return 'pixel'
if fs.exists(self.expected_filename(test_name, '.wav')):
return 'audio'
if self.reference_files(test_name):
return 'ref'
txt = self.expected_text(test_name)
if txt:
if 'layer at (0,0) size 800x600' in txt:
return 'pixel'
for line in txt.splitlines():
if line.startswith('FAIL') or line.startswith('TIMEOUT') or line.startswith('PASS'):
return 'harness'
return 'text'
return 'unknown'
def test_key(self, test_name):
"""Turns a test name into a list with two sublists, the natural key of the
dirname, and the natural key of the basename.
        This can be used when sorting paths so that files in a directory are
        kept together rather than being mixed in with files in
        subdirectories."""
dirname, basename = self.split_test(test_name)
return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
def _natural_sort_key(self, string_to_split):
""" Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
This can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
        return [tryint(chunk) for chunk in re.split(r'(\d+)', string_to_split)]
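    # Illustrative behavior (hypothetical values):
    #     _natural_sort_key("test10.html") -> ["test", 10, ".html"]
    # so "test2.html" sorts before "test10.html" when used as a sort key.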
def test_dirs(self):
"""Returns the list of top-level test directories."""
layout_tests_dir = self.layout_tests_dir()
return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
self._filesystem.listdir(layout_tests_dir))
@memoized
def test_isfile(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isfile(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isfile(self.abspath_for_test(base))
@memoized
def test_isdir(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isdir(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isdir(self.abspath_for_test(base))
@memoized
def test_exists(self, test_name):
"""Return True if the test name refers to an existing test or baseline."""
# Used by test_expectations.py to determine if an entry refers to a
# valid test and by printing.py to determine if baselines exist.
return self.test_isfile(test_name) or self.test_isdir(test_name)
def split_test(self, test_name):
"""Splits a test name into the 'directory' part and the 'basename' part."""
index = test_name.rfind(self.TEST_PATH_SEPARATOR)
if index < 1:
return ('', test_name)
return (test_name[0:index], test_name[index:])
def normalize_test_name(self, test_name):
"""Returns a normalized version of the test name or test directory."""
if test_name.endswith('/'):
return test_name
if self.test_isdir(test_name):
return test_name + '/'
return test_name
def driver_cmd_line(self):
"""Prints the DRT command line that will be used."""
driver = self.create_driver(0)
return driver.cmd_line(self.get_option('pixel_tests'), [])
def update_baseline(self, baseline_path, data):
"""Updates the baseline for a test.
Args:
baseline_path: the actual path to use for baseline, not the path to
the test. This function is used to update either generic or
platform-specific baselines, but we can't infer which here.
data: contents of the baseline.
"""
self._filesystem.write_binary_file(baseline_path, data)
# FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
def webkit_base(self):
return self._webkit_finder.webkit_base()
def path_from_webkit_base(self, *comps):
return self._webkit_finder.path_from_webkit_base(*comps)
def path_from_chromium_base(self, *comps):
return self._webkit_finder.path_from_chromium_base(*comps)
def path_to_script(self, script_name):
return self._webkit_finder.path_to_script(script_name)
def layout_tests_dir(self):
return self._webkit_finder.layout_tests_dir()
def perf_tests_dir(self):
return self._webkit_finder.perf_tests_dir()
def skipped_layout_tests(self, test_list):
"""Returns tests skipped outside of the TestExpectations files."""
tests = set(self._skipped_tests_for_unsupported_features(test_list))
# We explicitly skip any tests in LayoutTests/w3c if need be to avoid running any tests
# left over from the old DEPS-pulled repos.
# We also will warn at the end of the test run if these directories still exist.
#
# TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
# Remove the check in controllers/manager.py as well.
if self._filesystem.isdir(self._filesystem.join(self.layout_tests_dir(), 'w3c')):
tests.add('w3c')
return tests
def _tests_from_skipped_file_contents(self, skipped_file_contents):
tests_to_skip = []
for line in skipped_file_contents.split('\n'):
line = line.strip()
line = line.rstrip('/') # Best to normalize directory names to not include the trailing slash.
if line.startswith('#') or not len(line):
continue
tests_to_skip.append(line)
return tests_to_skip
def _expectations_from_skipped_files(self, skipped_file_paths):
tests_to_skip = []
for search_path in skipped_file_paths:
filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
if not self._filesystem.exists(filename):
_log.debug("Skipped does not exist: %s" % filename)
continue
_log.debug("Using Skipped file: %s" % filename)
skipped_file_contents = self._filesystem.read_text_file(filename)
tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
return tests_to_skip
@memoized
def skipped_perf_tests(self):
return self._expectations_from_skipped_files([self.perf_tests_dir()])
def skips_perf_test(self, test_name):
for test_or_category in self.skipped_perf_tests():
if test_or_category == test_name:
return True
category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
return True
return False
def is_chromium(self):
return True
def name(self):
"""Returns a name that uniquely identifies this particular type of port
(e.g., "mac-snowleopard" or "linux-x86_x64" and can be passed
to factory.get() to instantiate the port."""
return self._name
def operating_system(self):
# Subclasses should override this default implementation.
return 'mac'
def version(self):
"""Returns a string indicating the version of a given platform, e.g.
'leopard' or 'xp'.
This is used to help identify the exact port when parsing test
expectations, determining search paths, and logging information."""
return self._version
def architecture(self):
return self._architecture
def get_option(self, name, default_value=None):
return getattr(self._options, name, default_value)
def set_option_default(self, name, default_value):
return self._options.ensure_value(name, default_value)
@memoized
def path_to_generic_test_expectations_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')
def relative_test_filename(self, filename):
"""Returns a test_name a relative unix-style path for a filename under the LayoutTests
directory. Ports may legitimately return abspaths here if no relpath makes sense."""
# Ports that run on windows need to override this method to deal with
# filenames with backslashes in them.
if filename.startswith(self.layout_tests_dir()):
return self.host.filesystem.relpath(filename, self.layout_tests_dir())
else:
return self.host.filesystem.abspath(filename)
@memoized
def abspath_for_test(self, test_name):
"""Returns the full path to the file for a given test name. This is the
inverse of relative_test_filename()."""
return self._filesystem.join(self.layout_tests_dir(), test_name)
def results_directory(self):
"""Absolute path to the place to store the test results (uses --results-directory)."""
if not self._results_directory:
option_val = self.get_option('results_directory') or self.default_results_directory()
self._results_directory = self._filesystem.abspath(option_val)
return self._results_directory
def perf_results_directory(self):
return self._build_path()
def inspector_build_directory(self):
return self._build_path('resources', 'inspector')
def default_results_directory(self):
"""Absolute path to the default place to store the test results."""
try:
return self.path_from_chromium_base('webkit', self.get_option('configuration'), 'layout-test-results')
except AssertionError:
return self._build_path('layout-test-results')
def setup_test_run(self):
"""Perform port-specific work at the beginning of a test run."""
# Delete the disk cache if any to ensure a clean test run.
dump_render_tree_binary_path = self._path_to_driver()
cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
cachedir = self._filesystem.join(cachedir, "cache")
if self._filesystem.exists(cachedir):
self._filesystem.rmtree(cachedir)
if self._dump_reader:
self._filesystem.maybe_make_directory(self._dump_reader.crash_dumps_directory())
def num_workers(self, requested_num_workers):
"""Returns the number of available workers (possibly less than the number requested)."""
return requested_num_workers
def clean_up_test_run(self):
"""Perform port-specific work at the end of a test run."""
if self._image_differ:
self._image_differ.stop()
self._image_differ = None
# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
if name in os.environ:
return os.environ[name]
return default
def _copy_value_from_environ_if_set(self, clean_env, name):
if name in os.environ:
clean_env[name] = os.environ[name]
def setup_environ_for_server(self, server_name=None):
# We intentionally copy only a subset of os.environ when
# launching subprocesses to ensure consistent test results.
clean_env = {
'LOCAL_RESOURCE_ROOT': self.layout_tests_dir(), # FIXME: Is this used?
}
variables_to_copy = [
'WEBKIT_TESTFONTS', # FIXME: Is this still used?
'WEBKITOUTPUTDIR', # FIXME: Is this still used?
'CHROME_DEVEL_SANDBOX',
'CHROME_IPC_LOGGING',
'ASAN_OPTIONS',
'TSAN_OPTIONS',
'MSAN_OPTIONS',
'LSAN_OPTIONS',
'UBSAN_OPTIONS',
'VALGRIND_LIB',
'VALGRIND_LIB_INNER',
]
if self.host.platform.is_linux() or self.host.platform.is_freebsd():
variables_to_copy += [
'XAUTHORITY',
'HOME',
'LANG',
'LD_LIBRARY_PATH',
'DBUS_SESSION_BUS_ADDRESS',
'XDG_DATA_DIRS',
]
clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
if self.host.platform.is_mac():
clean_env['DYLD_LIBRARY_PATH'] = self._build_path()
clean_env['DYLD_FRAMEWORK_PATH'] = self._build_path()
variables_to_copy += [
'HOME',
]
if self.host.platform.is_win():
variables_to_copy += [
'PATH',
'GYP_DEFINES', # Required to locate win sdk.
]
if self.host.platform.is_cygwin():
variables_to_copy += [
'HOMEDRIVE',
'HOMEPATH',
'_NT_SYMBOL_PATH',
]
for variable in variables_to_copy:
self._copy_value_from_environ_if_set(clean_env, variable)
for string_variable in self.get_option('additional_env_var', []):
[name, value] = string_variable.split('=', 1)
clean_env[name] = value
return clean_env
def show_results_html_file(self, results_filename):
"""This routine should display the HTML file pointed at by
results_filename in a users' browser."""
return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))
def create_driver(self, worker_number, no_timeout=False):
"""Return a newly created Driver subclass for starting/stopping the test driver."""
return self._driver_class()(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
def start_helper(self):
"""If a port needs to reconfigure graphics settings or do other
things to ensure a known test configuration, it should override this
method."""
helper_path = self._path_to_helper()
if helper_path:
_log.debug("Starting layout helper %s" % helper_path)
# Note: Not thread safe: http://bugs.python.org/issue2320
self._helper = self._executive.popen([helper_path],
stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
is_ready = self._helper.stdout.readline()
if not is_ready.startswith('ready'):
_log.error("layout_test_helper failed to be ready")
def requires_http_server(self):
"""Does the port require an HTTP server for running tests? This could
be the case when the tests aren't run on the host platform."""
return False
def start_http_server(self, additional_dirs, number_of_drivers):
"""Start a web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a web server to be running."""
assert not self._http_server, 'Already running an http server.'
server = apache_http.ApacheHTTP(self, self.results_directory(),
additional_dirs=additional_dirs,
number_of_servers=(number_of_drivers * 4))
server.start()
self._http_server = server
def start_websocket_server(self):
"""Start a web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a websocket server to be running."""
assert not self._websocket_server, 'Already running a websocket server.'
server = pywebsocket.PyWebSocket(self, self.results_directory())
server.start()
self._websocket_server = server
def is_wpt_enabled(self):
"""Used as feature flag for WPT Serve feature."""
return self._is_wpt_enabled
def is_wpt_test(self, test):
"""Whether this test is part of a web-platform-tests which require wptserve servers."""
return "web-platform-tests" in test
def start_wptserve(self):
"""Start a WPT web server. Raise an error if it can't start or is already running.
Ports can stub this out if they don't need a WPT web server to be running."""
        assert not self._wpt_server, 'Already running a WPT server.'
assert self.is_wpt_enabled(), 'Cannot start server if WPT is not enabled.'
# We currently don't support any output mechanism for the WPT server.
server = wptserve.WPTServe(self, self.results_directory())
server.start()
self._wpt_server = server
def stop_wptserve(self):
"""Shut down the WPT server if it is running. Do nothing if it isn't."""
if self._wpt_server:
self._wpt_server.stop()
self._wpt_server = None
def http_server_supports_ipv6(self):
# Apache < 2.4 on win32 does not support IPv6, nor does cygwin apache.
if self.host.platform.is_cygwin() or self.host.platform.is_win():
return False
return True
def stop_helper(self):
"""Shut down the test helper if it is running. Do nothing if
it isn't, or it isn't available. If a port overrides start_helper()
it must override this routine as well."""
if self._helper:
_log.debug("Stopping layout test helper")
try:
self._helper.stdin.write("x\n")
self._helper.stdin.close()
self._helper.wait()
            except IOError:
pass
finally:
self._helper = None
def stop_http_server(self):
"""Shut down the http server if it is running. Do nothing if it isn't."""
if self._http_server:
self._http_server.stop()
self._http_server = None
def stop_websocket_server(self):
"""Shut down the websocket server if it is running. Do nothing if it isn't."""
if self._websocket_server:
self._websocket_server.stop()
self._websocket_server = None
#
# TEST EXPECTATION-RELATED METHODS
#
def test_configuration(self):
"""Returns the current TestConfiguration for the port."""
if not self._test_configuration:
self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
return self._test_configuration
# FIXME: Belongs on a Platform object.
@memoized
def all_test_configurations(self):
"""Returns a list of TestConfiguration instances, representing all available
test configurations for this port."""
return self._generate_all_test_configurations()
# FIXME: Belongs on a Platform object.
def configuration_specifier_macros(self):
"""Ports may provide a way to abbreviate configuration specifiers to conveniently
refer to them as one term or alias specific values to more generic ones. For example:
(xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
(lucid) -> linux # Change specific name of the Linux distro to a more generic term.
Returns a dictionary, each key representing a macro term ('win', for example),
and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
return self.CONFIGURATION_SPECIFIER_MACROS
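    # Illustrative shape of the macros mapping (values taken from the docstring
    # above; the real data lives in each port's CONFIGURATION_SPECIFIER_MACROS):
    #   {'win': ['xp', 'vista', 'win7'], 'linux': ['lucid']}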
def all_baseline_variants(self):
"""Returns a list of platform names sufficient to cover all the baselines.
The list should be sorted so that a later platform will reuse
an earlier platform's baselines if they are the same (e.g.,
'snowleopard' should precede 'leopard')."""
return self.ALL_BASELINE_VARIANTS
def _generate_all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self.ALL_SYSTEMS:
for build_type in self.ALL_BUILD_TYPES:
test_configurations.append(TestConfiguration(version, architecture, build_type))
return test_configurations
try_builder_names = frozenset([
'linux_layout',
'mac_layout',
'win_layout',
'linux_layout_rel',
'mac_layout_rel',
'win_layout_rel',
])
def warn_if_bug_missing_in_test_expectations(self):
return True
def _port_specific_expectations_files(self):
paths = []
paths.append(self.path_from_chromium_base('skia', 'skia_test_expectations.txt'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'NeverFixTests'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'StaleTestExpectations'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'SlowTests'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'FlakyTests'))
return paths
def expectations_dict(self):
"""Returns an OrderedDict of name -> expectations strings.
The names are expected to be (but not required to be) paths in the filesystem.
    If a name is a filesystem path, the file can be considered updatable for things like
    rebaselining, so don't use path-like names for entries that aren't actually files.
Generally speaking the ordering should be files in the filesystem in cascade order
(TestExpectations followed by Skipped, if the port honors both formats),
then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
# FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
expectations = OrderedDict()
for path in self.expectations_files():
if self._filesystem.exists(path):
expectations[path] = self._filesystem.read_text_file(path)
for path in self.get_option('additional_expectations', []):
expanded_path = self._filesystem.expanduser(path)
if self._filesystem.exists(expanded_path):
_log.debug("reading additional_expectations from path '%s'" % path)
expectations[path] = self._filesystem.read_text_file(expanded_path)
else:
_log.warning("additional_expectations path '%s' does not exist" % path)
return expectations
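    # Shape sketch of the returned mapping (hypothetical paths):
    #   OrderedDict([('.../LayoutTests/TestExpectations', '<file contents>'),
    #                ('.../LayoutTests/SlowTests', '<file contents>')])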
def bot_expectations(self):
if not self.get_option('ignore_flaky_tests'):
return {}
full_port_name = self.determine_full_port_name(self.host, self._options, self.port_name)
builder_category = self.get_option('ignore_builder_category', 'layout')
factory = BotTestExpectationsFactory()
        # FIXME: This only grabs the release builder's flakiness data. If we're
        # running debug, we should grab the debug builder's data instead.
expectations = factory.expectations_for_port(full_port_name, builder_category)
if not expectations:
return {}
ignore_mode = self.get_option('ignore_flaky_tests')
if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
return expectations.flakes_by_path(ignore_mode == 'very-flaky')
if ignore_mode == 'unexpected':
return expectations.unexpected_results_by_path()
_log.warning("Unexpected ignore mode: '%s'." % ignore_mode)
return {}
def expectations_files(self):
return [self.path_to_generic_test_expectations_file()] + self._port_specific_expectations_files()
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base."""
return [('blink', self.layout_tests_dir()),
('chromium', self.path_from_chromium_base('build'))]
_WDIFF_DEL = '##WDIFF_DEL##'
_WDIFF_ADD = '##WDIFF_ADD##'
_WDIFF_END = '##WDIFF_END##'
def _format_wdiff_output_as_html(self, wdiff):
wdiff = cgi.escape(wdiff)
wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
wdiff = wdiff.replace(self._WDIFF_END, "</span>")
html = "<head><style>.del { background: #faa; } "
html += ".add { background: #afa; }</style></head>"
html += "<pre>%s</pre>" % wdiff
return html
def _wdiff_command(self, actual_filename, expected_filename):
executable = self._path_to_wdiff()
return [executable,
"--start-delete=%s" % self._WDIFF_DEL,
"--end-delete=%s" % self._WDIFF_END,
"--start-insert=%s" % self._WDIFF_ADD,
"--end-insert=%s" % self._WDIFF_END,
actual_filename,
expected_filename]
@staticmethod
def _handle_wdiff_error(script_error):
# Exit 1 means the files differed, any other exit code is an error.
if script_error.exit_code != 1:
raise script_error
def _run_wdiff(self, actual_filename, expected_filename):
"""Runs wdiff and may throw exceptions.
This is mostly a hook for unit testing."""
# Diffs are treated as binary as they may include multiple files
# with conflicting encodings. Thus we do not decode the output.
command = self._wdiff_command(actual_filename, expected_filename)
wdiff = self._executive.run_command(command, decode_output=False,
error_handler=self._handle_wdiff_error)
return self._format_wdiff_output_as_html(wdiff)
_wdiff_error_html = "Failed to run wdiff, see error log."
def wdiff_text(self, actual_filename, expected_filename):
"""Returns a string of HTML indicating the word-level diff of the
contents of the two filenames. Returns an empty string if word-level
diffing isn't available."""
if not self.wdiff_available():
return ""
try:
            # It's possible to raise a ScriptError if we pass wdiff invalid paths.
return self._run_wdiff(actual_filename, expected_filename)
except OSError as e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
# Silently ignore cases where wdiff is missing.
self._wdiff_available = False
return ""
raise
except ScriptError as e:
_log.error("Failed to run wdiff: %s" % e)
self._wdiff_available = False
return self._wdiff_error_html
# This is a class variable so we can test error output easily.
_pretty_patch_error_html = "Failed to run PrettyPatch, see error log."
def pretty_patch_text(self, diff_path):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
if not self._pretty_patch_available:
return self._pretty_patch_error_html
command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
self._pretty_patch_path, diff_path)
try:
# Diffs are treated as binary (we pass decode_output=False) as they
# may contain multiple files of conflicting encodings.
return self._executive.run_command(command, decode_output=False)
        except OSError as e:
# If the system is missing ruby log the error and stop trying.
self._pretty_patch_available = False
_log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
return self._pretty_patch_error_html
        except ScriptError as e:
# If ruby failed to run for some reason, log the command
# output and stop trying.
self._pretty_patch_available = False
_log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
return self._pretty_patch_error_html
def default_configuration(self):
return self._config.default_configuration()
def clobber_old_port_specific_results(self):
pass
# FIXME: This does not belong on the port object.
@memoized
def path_to_apache(self):
"""Returns the full path to the apache binary.
This is needed only by ports that use the apache_http_server module."""
raise NotImplementedError('Port.path_to_apache')
def path_to_apache_config_file(self):
"""Returns the full path to the apache configuration file.
If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
contents will be used instead.
This is needed only by ports that use the apache_http_server module."""
config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
if config_file_from_env:
if not self._filesystem.exists(config_file_from_env):
raise IOError('%s was not found on the system' % config_file_from_env)
return config_file_from_env
config_file_name = self._apache_config_file_name_for_platform()
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
#
# PROTECTED ROUTINES
#
# The routines below should only be called by routines in this class
# or any of its subclasses.
#
def _apache_version(self):
config = self._executive.run_command([self.path_to_apache(), '-v'])
return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
def _apache_config_file_name_for_platform(self):
if self.host.platform.is_cygwin():
return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
if self.host.platform.is_linux():
distribution = self.host.platform.linux_distribution()
custom_configuration_distributions = ['arch', 'debian', 'redhat']
if distribution in custom_configuration_distributions:
return "%s-httpd-%s.conf" % (distribution, self._apache_version())
return 'apache2-httpd-' + self._apache_version() + '.conf'
def _path_to_driver(self, configuration=None):
"""Returns the full path to the test driver."""
return self._build_path(self.driver_name())
def _path_to_webcore_library(self):
"""Returns the full path to a built copy of WebCore."""
return None
def _path_to_helper(self):
"""Returns the full path to the layout_test_helper binary, which
is used to help configure the system for the test run, or None
if no helper is needed.
This is likely only used by start/stop_helper()."""
return None
def _path_to_image_diff(self):
"""Returns the full path to the image_diff binary, or None if it is not available.
This is likely used only by diff_image()"""
return self._build_path('image_diff')
@memoized
def _path_to_wdiff(self):
"""Returns the full path to the wdiff binary, or None if it is not available.
This is likely used only by wdiff_text()"""
for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
if self._filesystem.exists(path):
return path
return None
def _webkit_baseline_path(self, platform):
"""Return the full path to the top of the baseline tree for a
given platform."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
def _driver_class(self):
"""Returns the port's driver implementation."""
return driver.Driver
def output_contains_sanitizer_messages(self, output):
if not output:
return None
if 'AddressSanitizer' in output:
return 'AddressSanitizer'
if 'MemorySanitizer' in output:
return 'MemorySanitizer'
return None
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
if self.output_contains_sanitizer_messages(stderr):
# Running the symbolizer script can take a lot of memory, so we need to
# serialize access to it across all the concurrently running drivers.
llvm_symbolizer_path = self.path_from_chromium_base('third_party', 'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
if self._filesystem.exists(llvm_symbolizer_path):
env = os.environ.copy()
env['LLVM_SYMBOLIZER_PATH'] = llvm_symbolizer_path
else:
env = None
sanitizer_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
sanitizer_strip_path_prefix = 'Release/../../'
if self._filesystem.exists(sanitizer_filter_path):
stderr = self._executive.run_command(['flock', sys.executable, sanitizer_filter_path, sanitizer_strip_path_prefix], input=stderr, decode_output=False, env=env)
name_str = name or '<unknown process name>'
pid_str = str(pid or '<unknown>')
# We require stdout and stderr to be bytestrings, not character strings.
if stdout:
assert isinstance(stdout, str)
stdout_lines = stdout.decode('utf8', 'replace').splitlines()
else:
stdout_lines = [u'<empty>']
if stderr:
assert isinstance(stderr, str)
stderr_lines = stderr.decode('utf8', 'replace').splitlines()
else:
stderr_lines = [u'<empty>']
return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
'\n'.join(('STDOUT: ' + l) for l in stdout_lines),
'\n'.join(('STDERR: ' + l) for l in stderr_lines)))
def look_for_new_crash_logs(self, crashed_processes, start_time):
pass
def look_for_new_samples(self, unresponsive_processes, start_time):
pass
def sample_process(self, name, pid):
pass
def physical_test_suites(self):
return [
# For example, to turn on force-compositing-mode in the svg/ directory:
# PhysicalTestSuite('svg', ['--force-compositing-mode']),
]
def virtual_test_suites(self):
if self._virtual_test_suites is None:
path_to_virtual_test_suites = self._filesystem.join(self.layout_tests_dir(), 'VirtualTestSuites')
assert self._filesystem.exists(path_to_virtual_test_suites), 'LayoutTests/VirtualTestSuites not found'
try:
test_suite_json = json.loads(self._filesystem.read_text_file(path_to_virtual_test_suites))
self._virtual_test_suites = [VirtualTestSuite(**d) for d in test_suite_json]
except ValueError as e:
raise ValueError("LayoutTests/VirtualTestSuites is not a valid JSON file: %s" % str(e))
return self._virtual_test_suites
def _all_virtual_tests(self, suites):
tests = []
for suite in suites:
self._populate_virtual_suite(suite)
tests.extend(suite.tests.keys())
return tests
def _virtual_tests_matching_paths(self, paths, suites):
tests = []
for suite in suites:
if any(p.startswith(suite.name) for p in paths):
self._populate_virtual_suite(suite)
for test in suite.tests:
if any(test.startswith(p) for p in paths):
tests.append(test)
return tests
def _populate_virtual_suite(self, suite):
if not suite.tests:
base_tests = self._real_tests([suite.base])
suite.tests = {}
for test in base_tests:
suite.tests[test.replace(suite.base, suite.name, 1)] = test
def is_virtual_test(self, test_name):
return bool(self.lookup_virtual_suite(test_name))
def lookup_virtual_suite(self, test_name):
for suite in self.virtual_test_suites():
if test_name.startswith(suite.name):
return suite
return None
def lookup_virtual_test_base(self, test_name):
suite = self.lookup_virtual_suite(test_name)
if not suite:
return None
return test_name.replace(suite.name, suite.base, 1)
def lookup_virtual_test_args(self, test_name):
for suite in self.virtual_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def lookup_virtual_reference_args(self, test_name):
for suite in self.virtual_test_suites():
if test_name.startswith(suite.name):
return suite.reference_args
return []
def lookup_physical_test_args(self, test_name):
for suite in self.physical_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def lookup_physical_reference_args(self, test_name):
for suite in self.physical_test_suites():
if test_name.startswith(suite.name):
return suite.reference_args
return []
def should_run_as_pixel_test(self, test_input):
if not self._options.pixel_tests:
return False
if self._options.pixel_test_directories:
return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
# TODO(burnik): Make sure this is the right way to do it.
if self.is_wpt_enabled() and self.is_wpt_test(test_input.test_name):
return False
return True
def _modules_to_search_for_symbols(self):
path = self._path_to_webcore_library()
if path:
return [path]
return []
def _symbols_string(self):
symbols = ''
for path_to_module in self._modules_to_search_for_symbols():
try:
symbols += self._executive.run_command(['nm', path_to_module], error_handler=self._executive.ignore_error)
            except OSError as e:
_log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
return symbols
# Ports which use compile-time feature detection should define this method and return
# a dictionary mapping from symbol substrings to possibly disabled test directories.
# When the symbol substrings are not matched, the directories will be skipped.
# If ports don't ever enable certain features, then those directories can just be
# in the Skipped list instead of compile-time-checked here.
def _missing_symbol_to_skipped_tests(self):
if self.PORT_HAS_AUDIO_CODECS_BUILT_IN:
return {}
else:
return {
"ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
"ff_aac_decoder": ["webaudio/codec-tests/aac"],
}
def _has_test_in_directories(self, directory_lists, test_list):
if not test_list:
return False
directories = itertools.chain.from_iterable(directory_lists)
for directory, test in itertools.product(directories, test_list):
if test.startswith(directory):
return True
return False
def _skipped_tests_for_unsupported_features(self, test_list):
        # Only check the symbols if there are tests in the test_list that might get skipped.
        # This is a performance optimization to avoid calling nm.
# Runtime feature detection not supported, fallback to static detection:
# Disable any tests for symbols missing from the executable or libraries.
if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
symbols_string = self._symbols_string()
if symbols_string is not None:
return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
return []
def _convert_path(self, path):
"""Handles filename conversion for subprocess command line args."""
# See note above in diff_image() for why we need this.
if sys.platform == 'cygwin':
return cygpath(path)
return path
def _build_path(self, *comps):
return self._build_path_with_configuration(None, *comps)
def _build_path_with_configuration(self, configuration, *comps):
# Note that we don't do the option caching that the
# base class does, because finding the right directory is relatively
# fast.
configuration = configuration or self.get_option('configuration')
return self._static_build_path(self._filesystem, self.get_option('build_directory'),
self.path_from_chromium_base(), configuration, comps)
def _check_driver_build_up_to_date(self, configuration):
if configuration in ('Debug', 'Release'):
try:
debug_path = self._path_to_driver('Debug')
release_path = self._path_to_driver('Release')
debug_mtime = self._filesystem.mtime(debug_path)
release_mtime = self._filesystem.mtime(release_path)
if (debug_mtime > release_mtime and configuration == 'Release' or
release_mtime > debug_mtime and configuration == 'Debug'):
most_recent_binary = 'Release' if configuration == 'Debug' else 'Debug'
_log.warning('You are running the %s binary. However the %s binary appears to be more recent. '
'Please pass --%s.', configuration, most_recent_binary, most_recent_binary.lower())
_log.warning('')
# This will fail if we don't have both a debug and release binary.
# That's fine because, in this case, we must already be running the
# most up-to-date one.
except OSError:
pass
return True
def _chromium_baseline_path(self, platform):
if platform is None:
platform = self.name()
return self.path_from_webkit_base('LayoutTests', 'platform', platform)
class VirtualTestSuite(object):
def __init__(self, prefix=None, base=None, args=None, references_use_default_args=False):
assert base
assert args
        assert prefix.find('/') == -1, "Virtual test suite prefixes cannot contain /'s: %s" % prefix
self.name = 'virtual/' + prefix + '/' + base
self.base = base
self.args = args
self.reference_args = [] if references_use_default_args else args
self.tests = {}
def __repr__(self):
return "VirtualTestSuite('%s', '%s', %s, %s)" % (self.name, self.base, self.args, self.reference_args)
class PhysicalTestSuite(object):
def __init__(self, base, args, reference_args=None):
self.name = base
self.base = base
self.args = args
self.reference_args = args if reference_args is None else reference_args
self.tests = set()
def __repr__(self):
return "PhysicalTestSuite('%s', '%s', %s, %s)" % (self.name, self.base, self.args, self.reference_args)
|
[
"[email protected]"
] | |
0c66d8a9ced9f7ec361a037765805fa6792abdb6
|
072e68a2edddd98e3d534207169e9bbd0dda86d1
|
/math_/math_floor.py
|
4765869282478340137655799f4efff8588d714f
|
[] |
no_license
|
raul-jr3/PyTuts
|
22f6171476f707acdb5beb80fc7974202c765717
|
546d92676ce5790a5865349ff11adc35b245bb09
|
refs/heads/master
| 2020-03-07T09:34:18.240237 | 2018-03-30T12:43:34 | 2018-03-30T12:43:34 | 127,411,063 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
import math
# FLOOR
# the floor function returns the largest integer which is less than or equal to the
# number on which floor is applied
a = 23.66
# so when floor is applied to 23.66, the greatest integer <= 23.66 is 23
result = math.floor(a)
# so this prints 23 (math.floor returns an int in Python 3; Python 2 gives 23.0)
print(result)
# and if we have a negative value
b = -23.77
# then I apply floor on it
output = math.floor(b)
# the output will be -24 because -24 is the largest integer <= -23.77
print(output)
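# for negative numbers floor differs from int(), which truncates toward zero:
print(int(b))         # -23 (truncation toward zero)
print(math.floor(b))  # -24 (always rounds down)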
|
[
"[email protected]"
] | |
aff6565707f7b4e3ab9e9b6d44ff5ca4a8df9e0f
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/1227.airplane-seat-assignment-probability.py
|
5ea53e78f549aa1ca11296ef89c68e5f4bb6a8f1
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,084 |
py
|
#
# @lc app=leetcode id=1227 lang=python3
#
# [1227] Airplane Seat Assignment Probability
#
# https://leetcode.com/problems/airplane-seat-assignment-probability/description/
#
# algorithms
# Medium (61.16%)
# Total Accepted: 2.6K
# Total Submissions: 4.2K
# Testcase Example: '1'
#
# n passengers board an airplane with exactly n seats. The first passenger has
# lost the ticket and picks a seat randomly. But after that, the rest of
# passengers will:
#
#
# Take their own seat if it is still available,
# Pick other seats randomly when they find their seat occupied
#
#
# What is the probability that the n-th person can get his own seat?
#
#
# Example 1:
#
#
# Input: n = 1
# Output: 1.00000
# Explanation: The first person can only get the first seat.
#
# Example 2:
#
#
# Input: n = 2
# Output: 0.50000
# Explanation: The second person has a probability of 0.5 to get the second
# seat (when first person gets the first seat).
#
#
#
# Constraints:
#
#
# 1 <= n <= 10^5
#
#
class Solution:
    def nthPersonGetsNthSeat(self, n: int) -> float:
        # Classic result: for the first passenger, picking seat 1 and
        # picking seat n are symmetric outcomes, and any other pick just
        # restarts the same problem, so the answer is 1/2 for every n >= 2.
        return 1.0 if n == 1 else 0.5
|
[
"[email protected]"
] | |
f0f8a062872fffa9fc34360a0ee4a31e40c80774
|
e5453b6a4b84a32ccca7281d438b7a7fa1853f58
|
/src/hmm/checks/huawei_hmm_mezz28_check.py
|
96fe9b7fb699bad7f1d4588465e23c627a7e1565
|
[
"MIT"
] |
permissive
|
Huawei/Server_Management_Plugin_Check_MK
|
88445d9da581c347c5e82cf590453c4cb2c3d53c
|
88398c7c8affe0b2064f418de931d69e36afde67
|
refs/heads/master
| 2021-05-11T11:40:55.302518 | 2021-01-27T09:53:17 | 2021-01-27T09:53:17 | 117,641,709 | 1 | 4 | null | 2018-01-31T05:38:01 | 2018-01-16T06:30:39 | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
def inventory_huawei_mezz_health(info):
return [("blade 28 mezz status", None)]
def scan(oid):
return (oid(".1.3.6.1.4.1.2011.2.82.1.82.4.28.6.0") == '1')
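# check_huawei_mezz_health is not defined in this file; it is expected to come
# from the shared include "huawei_hmm_util.include" listed under 'includes' below.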
check_info["huawei_hmm_mezz28_check"] = {
"inventory_function": inventory_huawei_mezz_health,
"check_function": check_huawei_mezz_health,
"service_description": "%s",
'includes': ["huawei_hmm_util.include", ],
"snmp_info": (".1.3.6.1.4.1.2011.2.82.1.82.4.28.2008.1", ["4", "5", "2", ]),
'snmp_scan_function': scan,
}
|
[
"[email protected]"
] | |
62627307352f8f7f4837cc9fa705a66e90b039fa
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/0070.0_Climbing_Stairs.py
|
2f2e3545798d3174f99fdd72378d905d9ded0543
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
'''
DP
Runtime: 48 ms, beating 10.11% of Python3 submissions
Memory: 14.9 MB, beating 59.27% of Python3 submissions
Test cases passed: 45 / 45
'''
# imports needed by the decorated solutions below
# (functools.cache requires Python 3.9+)
import functools
from functools import cache
class Solution:
def climbStairs(self, n: int) -> int:
a = b = 1
for _ in range(n - 1):
a, b = b, a + b
return b
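# quick sanity check for the iterative DP above:
#   Solution().climbStairs(5) == 8   (sequence: 1, 2, 3, 5, 8, ...)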
'''
DFS + memo
Runtime: 40 ms, beating 10.11% of Python3 submissions
Memory: 14.9 MB, beating 71.85% of Python3 submissions
Test cases passed: 45 / 45
'''
class Solution:
@functools.lru_cache(50)
def climbStairs(self, n: int) -> int:
return self.climbStairs(n - 1) + self.climbStairs(n - 2) if n > 2 else n
'''
DFS
Runtime: 40 ms, beating 32.24% of Python3 submissions
Memory: 15 MB, beating 9.49% of Python3 submissions
Test cases passed: 45 / 45
'''
class Solution:
@cache
def climbStairs(self, n: int) -> int:
if n in [1, 2]:
return n
return self.climbStairs(n - 1) + self.climbStairs(n - 2)
|
[
"[email protected]"
] | |
0218c00f09c5f1dc0f0d683789c457b31b05923c
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnbulwark.py
|
75a4fc3fcf6cc2f77b80571218d82f60eea06c2a
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 571 |
py
|
ii = [('CookGHP3.py', 2), ('KembFFF.py', 2), ('GodwWSL2.py', 1), ('WilbRLW4.py', 1), ('AubePRP2.py', 1), ('MartHSI2.py', 1), ('WilkJMC3.py', 3), ('LeakWTI3.py', 1), ('MarrFDI3.py', 2), ('WilbRLW2.py', 1), ('ClarGE2.py', 1), ('CarlTFR.py', 2), ('TalfTAC.py', 1), ('CookGHP2.py', 2), ('BailJD1.py', 1), ('LyelCPG.py', 1), ('LandWPA2.py', 1), ('NewmJLP.py', 1), ('BackGNE.py', 7), ('MedwTAI2.py', 1), ('MereHHB3.py', 1), ('WilkJMC.py', 3), ('RoscTTI.py', 1), ('StorJCC.py', 4), ('MereHHB2.py', 1), ('BrewDTO.py', 4), ('FitzRNS2.py', 7), ('DwigTHH.py', 2), ('NortSTC.py', 1)]
|
[
"[email protected]"
] | |
7a4d0807aba6f1a6a45123141383320c7e47457b
|
76e6d4f93078327fef8672133fc75a6f12abc240
|
/ABC115/B.py
|
a7a98c56a06aeac07943f556983967b981e0dbbc
|
[] |
no_license
|
adusa1019/atcoder
|
1e8f33253f6f80a91d069b2f3b568ce7a2964940
|
f7dbdfc021425160a072f4ce4e324953a376133a
|
refs/heads/master
| 2021-08-08T04:41:36.098678 | 2021-02-01T07:34:34 | 2021-02-01T07:34:34 | 89,038,783 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 267 |
py
|
def solve(string):
ins = list(map(int, string.split("\n")))[1:]
ins.sort()
ins[-1] //= 2
return str(sum(ins))
if __name__ == '__main__':
n = int(input())
ins = [input() for _ in range(n)]
print(solve("{}\n{}".format(n, "\n".join(ins))))
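# worked example (values made up for illustration): for input "2\n100\n50",
# ins becomes [50, 100] after sorting, the most expensive item is halved to
# 50, and solve returns "100".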
|
[
"[email protected]"
] | |
540f50617caf6c1021261198e47bb8183bc6dc47
|
9879c8a1f1ac5884d9220e51c6256bb651fc800e
|
/pyot/utils/locks.py
|
685040cb08256a4e767a743a6490ad93cd67e174
|
[
"MIT"
] |
permissive
|
rasmusdoh/Pyot
|
d260ee37b59cca026c9edd3e9be85f2197604df6
|
de5065c55f171bb39691ddc76da99c5f16da94d9
|
refs/heads/master
| 2023-08-05T15:56:17.726457 | 2021-09-15T20:59:39 | 2021-09-15T20:59:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,887 |
py
|
from threading import Lock
import asyncio
try: # Delay exception if aioredlock is not installed
from aioredlock import Aioredlock
except ModuleNotFoundError as e:
Aioredlock = e
from .eventloop import LoopSensitiveManager
class SealLock:
'''
An asynchronous threading Lock. The event loop won't be blocked when acquiring the lock.
'''
def __init__(self):
self._lock = Lock()
async def __aenter__(self, *args):
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, self._lock.acquire)
return self
async def __aexit__(self, *args):
self._lock.release()
async def acquire(self):
'''Acquire the lock without locking the loop'''
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, self._lock.acquire)
return self
def release(self):
'''Release the lock, this is not async for the sake of easier cleanup (e.g. registering `atexit`)'''
self._lock.release()
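# usage sketch for SealLock (assuming it is used from within a coroutine):
#
#   seal = SealLock()
#   async with seal:
#       ...  # critical section; the event loop keeps running while we wait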
class RedisLock:
'''
An asynchronous redis Lock. The event loop won't be blocked when acquiring the lock.
'''
def __init__(self, host: str, port: int, db: int, retry_count: int, retry_delay_min: float, retry_delay_max: float):
if isinstance(Aioredlock, Exception):
raise Aioredlock
self._connections = [f"redis://{host}:{port}/{db}"]
self.retry_count = retry_count
self.retry_delay_min = retry_delay_min
self.retry_delay_max = retry_delay_max
self._lock_managers = LoopSensitiveManager(self._new_lock_manager)
def _new_lock_manager(self):
return Aioredlock(self._connections, self.retry_count, self.retry_delay_min, self.retry_delay_max)
async def __call__(self, name: str, timeout: int = 10):
return await (await self._lock_managers.get()).lock(name, lock_timeout=timeout)
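# usage sketch for RedisLock (hypothetical connection values; recent
# aioredlock versions let the returned lock act as an async context manager):
#
#   redis_lock = RedisLock('localhost', 6379, 0, 3, 0.1, 0.3)
#   async with await redis_lock('resource-name', timeout=10):
#       ...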
|
[
"[email protected]"
] | |
2eccd7e9659193d54269e649310e0f205b2c5d00
|
777b5c266360b29b6d4af916726abd5d364b74a1
|
/mypy_stubs/django/core/files/uploadhandler.pyi
|
3950a128c4d65f66db341a6e76f3740782aca4ee
|
[] |
no_license
|
uryyyyyyy/django-graphql
|
44d08afc3e44514270d1d5c183caa9d1c1cf3f88
|
f3d6513d2325a8e675e47500cc71d8ef56c01537
|
refs/heads/master
| 2021-06-10T11:11:45.110271 | 2019-02-28T07:39:54 | 2019-02-28T07:39:54 | 172,325,424 | 0 | 0 | null | 2021-04-20T17:56:57 | 2019-02-24T10:44:31 |
Python
|
UTF-8
|
Python
| false | false | 1,972 |
pyi
|
# Stubs for django.core.files.uploadhandler (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
class UploadFileException(Exception): ...
class StopUpload(UploadFileException):
connection_reset: Any = ...
def __init__(self, connection_reset: bool = ...) -> None: ...
class SkipFile(UploadFileException): ...
class StopFutureHandlers(UploadFileException): ...
class FileUploadHandler:
chunk_size: Any = ...
file_name: Any = ...
content_type: Any = ...
content_length: Any = ...
charset: Any = ...
content_type_extra: Any = ...
request: Any = ...
def __init__(self, request: Optional[Any] = ...) -> None: ...
def handle_raw_input(self, input_data: Any, META: Any, content_length: Any, boundary: Any, encoding: Optional[Any] = ...) -> None: ...
field_name: Any = ...
def new_file(self, field_name: Any, file_name: Any, content_type: Any, content_length: Any, charset: Optional[Any] = ..., content_type_extra: Optional[Any] = ...) -> None: ...
def receive_data_chunk(self, raw_data: Any, start: Any) -> None: ...
def file_complete(self, file_size: Any) -> None: ...
def upload_complete(self) -> None: ...
class TemporaryFileUploadHandler(FileUploadHandler):
file: Any = ...
def new_file(self, *args: Any, **kwargs: Any) -> None: ...
def receive_data_chunk(self, raw_data: Any, start: Any) -> None: ...
def file_complete(self, file_size: Any): ...
class MemoryFileUploadHandler(FileUploadHandler):
activated: Any = ...
def handle_raw_input(self, input_data: Any, META: Any, content_length: Any, boundary: Any, encoding: Optional[Any] = ...) -> None: ...
file: Any = ...
def new_file(self, *args: Any, **kwargs: Any) -> None: ...
def receive_data_chunk(self, raw_data: Any, start: Any): ...
def file_complete(self, file_size: Any): ...
def load_handler(path: Any, *args: Any, **kwargs: Any): ...
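# Sketch of a concrete handler built on these stubs (illustrative only, not
# part of the generated stub): returning the chunk from receive_data_chunk
# passes it to later handlers; returning None from file_complete defers file
# creation to them.
#
#   class CountingUploadHandler(FileUploadHandler):
#       bytes_seen = 0
#       def receive_data_chunk(self, raw_data, start):
#           self.bytes_seen += len(raw_data)
#           return raw_data
#       def file_complete(self, file_size):
#           return None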
|
[
"[email protected]"
] | |
5fa8d2290732c1766a64213648e7761131dda078
|
060c409f2b6282e1bf08a2101eb09be5f0927c4e
|
/pp_medialist.py
|
a16ac2bfa38225e93c76b54987c80beda8c3c65f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-mit-taylor-variant"
] |
permissive
|
KenT2/pipresents-gapless
|
cf850951e1c5911524c8e984d72df1621867c556
|
31a347bb8b45898a3fe08b1daf765e31d47b7a87
|
refs/heads/master
| 2022-01-30T06:41:41.601213 | 2022-01-08T17:22:24 | 2022-01-08T17:22:24 | 22,849,661 | 219 | 47 |
NOASSERTION
| 2021-09-20T09:09:31 | 2014-08-11T18:23:30 |
Python
|
UTF-8
|
Python
| false | false | 11,281 |
py
|
import json
import copy
import string
import random
from pp_utils import Monitor
"""
31/12/2016 - fixed crash if mediashow is shuffled and there is only one track - thanks Drew Keller
"""
# *************************************
# MEDIALIST CLASS
# ************************************
class MediaList(object):
"""
manages a media list of tracks and the track selected from the medialist
"""
def __init__(self,sequence):
self.clear()
self.mon=Monitor()
self.sequence=sequence
# Functions for the editor dealing with complete list
def clear(self):
self._tracks = [] #MediaList, stored as a list of dicts
self._num_tracks=0
self._selected_track_index=-1 # index of currently selected track
def print_list(self):
print '\n'
print self._tracks
def first(self):
self._selected_track_index=-1
        self.next(self.sequence) #let this do the work of randomising or advancing to 0
def length(self):
return self._num_tracks
def append(self, track_dict):
# print '\ntrack dict',track_dict
"""appends a track dictionary to the end of the medialist store"""
self._tracks.append(copy.deepcopy(track_dict))
self._num_tracks+=1
def update(self,index,values):
self._tracks[index].update(values)
def remove(self,index):
self._tracks.pop(index)
self._num_tracks-=1
# deselect any track, saves worrying about whether index needs changing
self._selected_track_index=-1
def move_up(self):
if self._selected_track_index != 0:
self._tracks.insert(self._selected_track_index-1, self._tracks.pop(self._selected_track_index))
self.select(self._selected_track_index-1)
def move_down(self):
if self._selected_track_index != self._num_tracks-1:
self._tracks.insert(self._selected_track_index+1, self._tracks.pop(self._selected_track_index))
self.select(self._selected_track_index+1)
def copy(self):
self._tracks.insert(self._selected_track_index+1, copy.deepcopy(self._tracks[self._selected_track_index]))
self._num_tracks+=1
self.select(self._selected_track_index+1)
def replace(self,index,replacement):
self._tracks[index]= replacement
# Common functions work for anything
def track_is_selected(self):
if self._selected_track_index>=0:
return True
else:
return False
def selected_track_index(self):
return self._selected_track_index
def track(self,index):
return self._tracks[index]
def selected_track(self):
"""returns a dictionary containing all fields in the selected track """
return self._selected_track
def select(self,index):
"""does housekeeping necessary when a track is selected"""
if self._num_tracks>0 and index>=0 and index< self._num_tracks:
self._selected_track_index=index
self._selected_track = self._tracks[index]
return True
else:
return False
# Dealing with anonymous tracks for use and display
def at_end(self):
        # true if the selected track is the last anonymous track
index=self._num_tracks-1
while index>=0:
if self._tracks[index] ['track-ref'] =="":
end=index
if self._selected_track_index==end:
return True
else:
return False
index -=1
return False
def index_of_end(self):
if self.anon_length()==0:
return False
index=self._num_tracks-1
while index >= 0:
if self._tracks[index] ['track-ref'] =="":
return index
index -=1
return -1
def at_start(self):
if self.anon_length()==0:
return False
index=0
while index<self._num_tracks:
if self._tracks[index] ['track-ref'] =="":
start = index
if self._selected_track_index==start:
return True
else:
return False
index +=1
return False
def index_of_start(self):
if self.anon_length()==0:
return False
index=0
while index<self._num_tracks:
if self._tracks[index] ['track-ref'] =="":
return index
index +=1
return False
def anon_length(self):
# number of anonymous tracks
count=0
index=0
while index<self._num_tracks:
if self._tracks[index] ['track-ref'] =="":
count+=1
index +=1
return count
def start(self):
if self.anon_length()==0:
return False
# select first anonymous track in the list
if self.sequence == 'ordered':
index=0
while index<self._num_tracks:
if self._tracks[index] ['track-ref'] =="":
self.select(index)
return True
index +=1
return False
else:
match=random.randint(0,self.anon_length()-1)
# print 'match',match
index=0
while index<self._num_tracks:
if self._tracks[index] ['track-ref'] =="" and index==match:
self.select(index)
# print index
return index
index +=1
def finish(self):
if self.anon_length()==0:
return False
if self.sequence == 'ordered':
            # select the last anonymous track in the list
index=self._num_tracks-1
while index>=0:
if self._tracks[index] ['track-ref'] =="":
self.select(index)
return True
index -=1
return False
else:
match=random.randint(0,self.anon_length()-1)
# print 'match',match
index=0
while index<self._num_tracks:
if self._tracks[index] ['track-ref'] =="" and index==match:
self.select(index)
# print index
return index
index +=1
def select_anon_by_index(self,wanted):
if self.anon_length()==0:
return False
index=0
anon_index=0
while index != self._num_tracks:
# print index,self._tracks[index] ['track-ref'],wanted
if self._tracks[index] ['track-ref'] =="":
if anon_index==wanted:
# print 'match\n'
self.select(index)
return True
anon_index+=1
index= index+1
return False
def next(self,sequence):
if self.anon_length()==0:
return False
if sequence=='ordered':
if self._selected_track_index== self._num_tracks-1:
index=0
else:
index= self._selected_track_index+1
end=self._selected_track_index
else:
index=random.randint(0,self.anon_length()-1)
if index==0:
end=self._num_tracks-1
else:
end=index-1
# search for next anonymous track
# print 'index', index, 'end',end
while index != end:
if self._tracks[index] ['track-ref'] =="":
self.select(index)
return True
if index== self._num_tracks-1:
index=0
else:
index= index+1
return False
def previous(self,sequence):
if self.anon_length()==0:
return False
if sequence=='ordered':
if self._selected_track_index == 0:
index=self._num_tracks-1
else:
index= self._selected_track_index-1
end = self._selected_track_index
else:
index=random.randint(0,self.anon_length()-1)
if index==self._num_tracks-1:
end=0
else:
end=index+1
# print 'index', index, 'end',end
# search for previous anonymous track
while index != end :
if self._tracks[index] ['track-ref'] =="":
self.select(index)
return True
if index == 0:
index=self._num_tracks-1
else:
index= index-1
return False
# Lookup for labelled tracks
def index_of_track(self,wanted_track):
index = 0
for track in self._tracks:
if track['track-ref']==wanted_track:
return index
index +=1
return -1
# open and save
def open_list(self,filename,profile_version):
"""
opens a saved medialist
medialists are stored as json arrays.
"""
ifile = open(filename, 'rb')
mdict = json.load(ifile)
ifile.close()
self._tracks = mdict['tracks']
if 'issue' in mdict:
self.medialist_version_string= mdict['issue']
else:
self.medialist_version_string="1.0"
if self.medialist_version()==profile_version:
self._num_tracks=len(self._tracks)
self.last_num_tracks=self._num_tracks
self._selected_track_index=-1
return True
else:
return False
def medialist_version(self):
vitems=self.medialist_version_string.split('.')
if len(vitems)==2:
# cope with 2 digit version numbers before 1.3.2
return 1000*int(vitems[0])+100*int(vitems[1])
else:
return 1000*int(vitems[0])+100*int(vitems[1])+int(vitems[2])
    # dummy for medialist; in livelist the list is created from the live_track directories
def use_new_livelist(self):
pass
def create_new_livelist(self):
pass
def new_length(self):
return self.length()
# for medialist the content of the list never changes so return False
def livelist_changed(self):
return False
def save_list(self,filename):
""" save a medialist """
if filename=="":
return False
dic={'issue':self.medialist_version_string,'tracks':self._tracks}
filename=str(filename)
filename = string.replace(filename,'\\','/')
tries = 1
while tries<=10:
# print "save medialist ",filename
try:
ofile = open(filename, "wb")
json.dump(dic,ofile,sort_keys=True,indent=1)
ofile.close()
self.mon.log(self,"Saved medialist "+ filename)
break
except IOError:
self.mon.err(self,"failed to save medialist, trying again " + str(tries))
tries+=1
return
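# minimal usage sketch (hypothetical track dicts; a 'track-ref' of "" marks
# an anonymous track, which is what the selection methods above operate on):
#
#   ml = MediaList('ordered')
#   ml.append({'track-ref': '', 'location': 'a.mp4'})
#   ml.append({'track-ref': 'intro', 'location': 'b.mp4'})
#   ml.start()                # selects the first anonymous track
#   print ml.selected_track()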
|
[
"[email protected]"
] | |
7db42065296d3a546b8f9ca6e08852751979f507
|
b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a
|
/examples/pwr_run/checkpointing/final/high_overhead/job74.py
|
220089b171b01acea0550fae22e4fd603da2fcd0
|
[
"MIT"
] |
permissive
|
boringlee24/keras_old
|
3bf7e3ef455dd4262e41248f13c04c071039270e
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
refs/heads/master
| 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,348 |
py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.nasnet import NASNetMobile
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'mnasnet'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'
total_epochs = 20
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
time.sleep(100)
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
base_model = NASNetMobile(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[
"[email protected]"
] | |
f808a718f5ebd272a9b31cf79d0915365ebb039c
|
e90c47a620bceb24403728853caf60e97368dcb8
|
/xnr_0429/xnr/facebook/fb_operate.py
|
0c64a67ba8793aadb0cf8148eaf28970786eb5f7
|
[] |
no_license
|
yuanhr/xnr1
|
d31af93fcdeb8c24752c6cdf7cce819bd4c4cbf8
|
a8211c9bb299e063c2e5aeaca927ed0bc97d82ab
|
refs/heads/master
| 2021-07-10T03:23:45.812991 | 2018-05-07T07:14:38 | 2018-05-07T07:14:38 | 96,008,450 | 0 | 0 | null | 2017-07-02T07:21:04 | 2017-07-02T07:21:04 | null |
UTF-8
|
Python
| false | false | 15,539 |
py
|
#!/usr/bin/env python
# encoding: utf-8
import requests
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import json
from lxml import etree
import re
from pybloom import BloomFilter
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import *
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from launcher import Launcher
class Operation():
def __init__(self, username, password):
print '11'
self.launcher = Launcher(username, password)
print '22'
self.driver = self.launcher.login()
print '33'
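    # usage sketch (hypothetical credentials and uid):
    #   op = Operation('[email protected]', 'secret')
    #   op.publish('hello world')
    #   op.follow('100001234567890')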
def publish(self, text):
try:
            # dismiss the notification popup before interacting with the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
try:
self.driver.find_element_by_xpath('//textarea[@title="分享新鲜事"]').click()
self.driver.find_element_by_xpath('//textarea[@title="分享新鲜事"]').send_keys(text)
except:
try:
self.driver.find_element_by_xpath('//textarea[@class="_3en1 _480e navigationFocus"]').click()
self.driver.find_element_by_xpath('//textarea[@class="_3en1 _480e navigationFocus"]').send_keys(text)
except:
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _1mwq _4c_p _5bu_ _34nd _21mu _5yk1"]').click()
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _1mwq _4c_p _5bu_ _34nd _21mu _5yk1"]').send_keys(text)
try:
self.driver.find_element_by_xpath('//button[@class="_1mf7 _4jy0 _4jy3 _4jy1 _51sy selected _42ft"]').click()
except:
try:
self.driver.find_element_by_xpath('//button[@class="_42ft _4jy0 _ej1 _4jy3 _4jy1 selected _51sy"]').click()
except:
self.driver.find_element_by_xpath('//button[@data-testid="react-composer-post-button"]').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
self.driver.quit()
self.launcher.display.popen.kill()
def mention(self, username, text):
try:
            # dismiss the notification popup before interacting with the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
try:
self.driver.find_element_by_xpath('//textarea[@title="分享新鲜事"]').click()
self.driver.find_element_by_xpath('//textarea[@title="分享新鲜事"]').send_keys(text)
except:
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _1mwq _4c_p _5bu_ _34nd _21mu _5yk1"]').click()
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _1mwq _4c_p _5bu_ _34nd _21mu _5yk1"]').send_keys(text)
time.sleep(2)
try:
self.driver.find_element_by_xpath('//table[@class="uiGrid _51mz _5f0n"]/tbody/tr[3]/td[1]//a/div').click()
except:
self.driver.find_element_by_xpath('//table[@class="uiGrid _51mz _5f0n"]/tbody/tr[2]/td[2]//a/div').click()
time.sleep(1)
self.driver.find_element_by_xpath('//input[@aria-label="你和谁一起?"]').send_keys(username)
self.driver.find_element_by_xpath('//input[@aria-label="你和谁一起?"]').send_keys(Keys.ENTER)
time.sleep(1)
try:
self.driver.find_element_by_xpath('//button[@class="_1mf7 _4jy0 _4jy3 _4jy1 _51sy selected _42ft"]').click()
except:
self.driver.find_element_by_xpath('//button[@data-testid="react-composer-post-button"]').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
self.driver.quit()
self.launcher.display.popen.kill()
def follow(self, uid):
try:
driver = self.launcher.target_page(uid)
            # dismiss the notification popup to reach the page
            time.sleep(1)
            try:
                driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
            except:
                pass
try:
driver.find_element_by_xpath('//button[@data-testid="page_profile_follow_button_test_id"]').click()
except:
driver.find_element_by_xpath('//div[@id="pagelet_timeline_profile_actions"]/div[2]/a[1]').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
driver.quit()
self.launcher.display.popen.kill()
def not_follow(self, uid):
try:
driver = self.launcher.target_page(uid)
            # dismiss the notification popup to reach the page
            time.sleep(1)
            try:
                driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
            except:
                pass
chain = ActionChains(driver)
try:
implement = driver.find_element_by_xpath('//div[@id="pagelet_timeline_profile_actions"]/div[2]/div[1]/div[1]')
chain.move_to_element(implement).perform()
time.sleep(2)
implement = driver.find_element_by_xpath('//div[@id="pagelet_timeline_profile_actions"]/div[2]/div[1]/div[1]')
chain.move_to_element(implement).perform()
time.sleep(2)
driver.find_element_by_xpath('//a[@ajaxify="/ajax/follow/unfollow_profile.php?profile_id=%s&location=1"]'%uid).click()
except:
try:
implement = driver.find_element_by_xpath('//button[@data-testid="page_profile_follow_button_test_id"]')
chain.move_to_element(implement).perform()
time.sleep(2)
implement = driver.find_element_by_xpath('//button[@data-testid="page_profile_follow_button_test_id"]')
chain.move_to_element(implement).perform()
time.sleep(2)
driver.find_element_by_xpath('//a[@ajaxify="/ajax/follow/unfollow_profile.php?profile_id=%s&location=1"]'%uid).click()
except:
implement = driver.find_element_by_xpath('//button[@class="_42ft _4jy0 _3oz- _52-0 _4jy4 _517h _51sy"]')
chain.move_to_element(implement).perform()
time.sleep(2)
implement = driver.find_element_by_xpath('//button[@class="_42ft _4jy0 _3oz- _52-0 _4jy4 _517h _51sy"]')
chain.move_to_element(implement).perform()
time.sleep(2)
driver.find_element_by_xpath('//a[@ajaxify="/ajax/follow/unfollow_profile.php?profile_id=%s&location=1"]'%uid).click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
driver.quit()
self.launcher.display.popen.kill()
    # direct message (to a user not yet followed)
def send_message(self, uid, text):
        # send to a user we have not followed
try:
driver = self.launcher.target_page(uid)
message_url = 'https://www.facebook.com/messages/t/' + uid
driver.get(message_url)
time.sleep(5)
            # dismiss the notification popup to reach the page
            time.sleep(1)
            try:
                driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
            except:
                pass
driver.find_element_by_xpath('//div[@aria-label="输入消息..."]').send_keys(text)
driver.find_element_by_xpath('//div[@aria-label="输入消息..."]').send_keys(Keys.ENTER)
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
driver.quit()
self.launcher.display.popen.kill()
    # direct message (to a user already followed)
    # def send_message2(self, uid, text):
    #     # send to a user we already follow
# try:
# driver = self.launcher.target_page(uid)
# url = driver.find_element_by_xpath('//a[@class="_51xa _2yfv _3y89"]/a[1]').get_attribute('href')
# driver.get('https://www.facebook.com' + url)
# time.sleep(4)
# driver.find_element_by_xpath('//div[@class="_1mf _1mj"]').click()
# driver.find_element_by_xpath('//div[@class="_1mf _1mj"]').send_keys(text)
# driver.find_element_by_xpath('//div[@class="_1mf _1mj"]').send_keys(Keys.ENTER)
# finally:
# driver.quit()
    # like a post
def like(self, uid, fid):
try:
post_url = 'https://www.facebook.com/' + uid + '/posts/' + fid
video_url = 'https://www.facebook.com/' + uid + '/videos/' + fid
self.driver.get(post_url)
time.sleep(3)
try:
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
self.driver.find_element_by_xpath('//div[@aria-label="Facebook 照片剧场模式"]')
self.driver.get(video_url)
time.sleep(2)
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
for each in self.driver.find_elements_by_xpath('//a[@data-testid="fb-ufi-likelink"]'):
try:
each.click()
except:
pass
except:
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
for each in self.driver.find_elements_by_xpath('//a[@data-testid="fb-ufi-likelink"]'):
try:
each.click()
except:
pass
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
self.driver.quit()
self.launcher.display.popen.kill()
    # comment on a post
def comment(self, uid, fid, text):
try:
post_url = 'https://www.facebook.com/' + uid + '/posts/' + fid
video_url = 'https://www.facebook.com/' + uid + '/videos/' + fid
self.driver.get(post_url)
time.sleep(3)
try:
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
self.driver.find_element_by_xpath('//div[@aria-label="Facebook 照片剧场模式"]')
self.driver.get(video_url)
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
time.sleep(3)
self.driver.find_element_by_xpath('//div[@class="UFICommentContainer"]/div/div').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').send_keys(text)
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').send_keys(keys.ENTER)
except:
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
time.sleep(3)
self.driver.find_element_by_xpath('//div[@class="UFICommentContainer"]/div/div').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').send_keys(text)
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').send_keys(keys.ENTER)
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
time.sleep(3)
self.driver.quit()
self.launcher.display.popen.kill()
    # share a post
def share(self, uid, fid, text):
try:
print 'uid, fid, text...',uid, fid, text
post_url = 'https://www.facebook.com/' + uid + '/posts/' + fid
video_url = 'https://www.facebook.com/' + uid + '/videos/' + fid
self.driver.get(post_url)
time.sleep(3)
try:
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
self.driver.find_element_by_xpath('//div[@aria-label="Facebook 照片剧场模式"]')
self.driver.get(video_url)
time.sleep(1)
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
self.driver.find_element_by_xpath('//a[@title="发送给好友或发布到你的时间线上。"]').click()
self.driver.find_element_by_xpath('//a[@title="发送给好友或发布到你的时间线上。"]').click()
time.sleep(3)
self.driver.find_element_by_xpath('//ul[@class="_54nf"]/li[2]').click()
time.sleep(3)
try:
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').send_keys(text)
time.sleep(1)
except:
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _21mu _5yk1"]/div').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _21mu _5yk1"]/div').send_keys(text)
time.sleep(1)
self.driver.find_element_by_xpath('//button[@data-testid="react_share_dialog_post_button"]').click()
except:
                # dismiss the notification popup to reach the page
time.sleep(1)
try:
self.driver.find_element_by_xpath('//div[@class="_n8 _3qx uiLayer _3qw"]').click()
except:
pass
self.driver.find_element_by_xpath('//a[@title="发送给好友或发布到你的时间线上。"]').click()
self.driver.find_element_by_xpath('//a[@title="发送给好友或发布到你的时间线上。"]').click()
time.sleep(5)
self.driver.find_element_by_xpath('//ul[@class="_54nf"]/li[2]').click()
time.sleep(5)
try:
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="notranslate _5rpu"]').send_keys(text)
time.sleep(1)
except:
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _21mu _5yk1"]/div').click()
time.sleep(1)
self.driver.find_element_by_xpath('//div[@class="_1mwp navigationFocus _395 _21mu _5yk1"]/div').send_keys(text)
time.sleep(1)
self.driver.find_element_by_xpath('//button[@data-testid="react_share_dialog_post_button"]').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
self.driver.quit()
self.launcher.display.popen.kill()
    # add friend
def add_friend(self, uid):
try:
driver = self.launcher.target_page(uid)
driver.find_element_by_xpath('//button[@class="_42ft _4jy0 FriendRequestAdd addButton _4jy4 _517h _9c6"]').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
driver.quit()
self.launcher.display.popen.kill()
    # confirm a friend request
def confirm(self, uid):
try:
driver = self.launcher.target_page(uid)
time.sleep(5)
driver.find_element_by_xpath('//div[@class="incomingButton"]/button').click()
time.sleep(1)
driver.find_element_by_xpath('//li[@data-label="确认"]/a').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
driver.quit()
self.launcher.display.popen.kill()
    # delete friend
def delete_friend(self, uid):
try:
driver = self.launcher.target_page(uid)
time.sleep(1)
driver.find_element_by_xpath('//div[@id="pagelet_timeline_profile_actions"]/div/a').click()
time.sleep(2)
driver.find_element_by_xpath('//li[@data-label="删除好友"]/a').click()
time.sleep(5)
return [True, '']
except Exception as e:
return [False, e]
finally:
driver.quit()
self.launcher.display.popen.kill()
if __name__ == '__main__':
operation = Operation('13041233988','han8528520258')
time.sleep(1)
    result = operation.publish(u'四月十日')
    print(result)
#operation.mention('xxx','4.9')
#operation.not_follow('100023080760480')
#operation.send_message('100023080760480', 'hello')
#operation.like('183774741715570','1487409108018787')
#operation.comment('100012258524129','418205591931388','emmmm')
#operation.share('183774741715570','1487409108018787','emmmm')
#operation.add_friend('183774741715570')
authors: ["[email protected]"]

blob_id: dae1a76cf051ae977abd68756071be26c70941dd
directory_id: 0db19410e9751790af8ce4a0a9332293e379c02f
path: /configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py
content_id: 5a83e7a97b9478031f7ca4dcc4dccba0350d432d
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: open-mmlab/mmpose
snapshot_id: 2c9986521d35eee35d822fb255e8e68486026d94
revision_id: 537bd8e543ab463fb55120d5caaa1ae22d6aaf06
branch_name: refs/heads/main
visit_date: 2023-08-30T19:44:21.349410
revision_date: 2023-07-04T13:18:22
committer_date: 2023-07-04T13:18:22
github_id: 278,003,645
star_events_count: 4,037
fork_events_count: 1,171
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T09:44:55
gha_created_at: 2020-07-08T06:02:55
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,070
extension: py
content:
_base_ = ['../../../_base_/default_runtime.py']
# runtime
train_cfg = dict(max_epochs=300, val_interval=10)
# optimizer
optim_wrapper = dict(optimizer=dict(
type='AdamW',
lr=5e-4,
))
# learning policy
param_scheduler = [
dict(
type='LinearLR', begin=0, end=500, start_factor=0.001,
by_epoch=False), # warm-up
dict(
type='MultiStepLR',
begin=0,
end=210,
milestones=[170, 200],
gamma=0.1,
by_epoch=True)
]
# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)
# hooks
default_hooks = dict(checkpoint=dict(save_best='PCK', rule='greater'))
# codec settings
codec = dict(
type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
# model settings
model = dict(
type='TopdownPoseEstimator',
data_preprocessor=dict(
type='PoseDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained',
checkpoint='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth'),
),
head=dict(
type='HeatmapHead',
in_channels=32,
out_channels=23,
deconv_out_channels=None,
loss=dict(type='KeypointMSELoss', use_target_weight=True),
decoder=codec),
test_cfg=dict(
flip_test=True,
flip_mode='heatmap',
shift_heatmap=True,
))
# base dataset settings
dataset_type = 'AnimalKingdomDataset'
data_mode = 'topdown'
data_root = 'data/ak/'
# pipelines
train_pipeline = [
dict(type='LoadImage'),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
dict(type='RandomHalfBody'),
dict(type='RandomBBoxTransform'),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='GenerateTarget', encoder=codec),
dict(type='PackPoseInputs')
]
val_pipeline = [
dict(type='LoadImage'),
dict(type='GetBBoxCenterScale'),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='PackPoseInputs')
]
# data loaders
train_dataloader = dict(
batch_size=32,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_mode=data_mode,
ann_file='annotations/ak_P3_amphibian/train.json',
data_prefix=dict(img='images/'),
pipeline=train_pipeline,
))
val_dataloader = dict(
batch_size=24,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_mode=data_mode,
ann_file='annotations/ak_P3_amphibian/test.json',
data_prefix=dict(img='images/'),
test_mode=True,
pipeline=val_pipeline,
))
test_dataloader = val_dataloader
# evaluators
val_evaluator = [dict(type='PCKAccuracy', thr=0.05), dict(type='AUC')]
test_evaluator = val_evaluator
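# Usage sketch (an assumption about the local layout, not part of this config):
# in a standard mmpose checkout, configs like this one are usually launched
# through the repo's generic training entry point, e.g.
#   python tools/train.py configs/animal_2d_keypoint/topdown_heatmap/ak/td-hm_hrnet-w32_8xb32-300e_animalkingdom_P3_amphibian-256x256.py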
authors: ["[email protected]"]

blob_id: 385fe72cda2198ecc51b116e6215cc8c0d8e7955
directory_id: 161dcb4b1f3939231728e91a8129a2571842d23a
path: /unit_09/4.py
content_id: 8932eb0bf69d1f4f441ffe3adeef6786a7aeed3f
detected_licenses: []
license_type: no_license
repo_name: bm1120836/21-python
snapshot_id: 3162896e1b9e41d57c4249ea5f3bcaf06eef0361
revision_id: 8924f9b53e68b08f9203f48b215ea5b3a420d075
branch_name: refs/heads/master
visit_date: 2023-05-03T16:11:42.864607
revision_date: 2015-10-01T13:26:29
committer_date: 2015-10-01T13:26:29
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 618
extension: py
content:
filename = 'examples/portfolio.csv'
a_file = open(filename)
a_str = a_file.read()
print(a_str)
print(a_file.name)
print(a_file.encoding)
print(a_file.mode)
print('file seek')
print(a_file.seek(0))
print('file read(16)')
print(a_file.read(16))
print('file read(1)')
print(a_file.read(1))
print('file tell')
print(a_file.tell())
print('file read(1)')
print(a_file.read(1))
print('file tell')
print(a_file.tell())
line_number = 1
with open('examples/favorite-people.txt', encoding='utf-8') as a_file:
for a_line in a_file:
print('{:>4} {}'.format(line_number, a_line.rstrip()))
line_number += 1
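# A minimal sketch of the same read done with a context manager (the first
# handle opened above is never closed explicitly; `with` releases it
# automatically when the block exits):
#
# with open(filename) as f:
#     print(f.read(16))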
authors: ["[email protected]"]

blob_id: a0c52d2269793b3e8ea7cec09cd1a740d775da41
directory_id: a80874300e561174068bf510608465bb318a35f2
path: /guhaisong/edu_information/edu_information/spiders/news_eastday_com_gd2008_world_62.py
content_id: 0940c07215ded42feab0cfe34bd4e13ab71c2176
detected_licenses: []
license_type: no_license
repo_name: lemonbiz/guhaisong
snapshot_id: effa8af4b679511e4fa8017d71fe26ab2ce51392
revision_id: 029890f8e3c6954efdefb184fa077f2ce646d1df
branch_name: refs/heads/master
visit_date: 2022-12-13T08:21:37.911535
revision_date: 2020-09-15T16:15:10
committer_date: 2020-09-15T16:15:10
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,061
extension: py
content:
# -*- coding: utf-8 -*-
import datetime
import scrapy
from urllib.parse import urljoin
from edu_information.commom.commom_method import summay_slice,title_slice,keyman_slice,writer_slice,news_source_slice,requests_detail_page
import re,time
from edu_information.commom.custom_settings import *
from edu_information.commom.bloomfilter import BloomFilter,BL
from edu_information.commom.filter import contentfilter
from scrapy.selector import Selector
from ..items import EduInformationItem
class XueqianSpider(scrapy.Spider):
name = "news_eastday_com_gd2008_world_62"
allowed_domains = ["news.eastday.com"]
start_urls = ["http://news.eastday.com/gd2008/world/index.html","http://news.eastday.com/eastday/13news/auto/news/world/index_K32.html"]
custom_settings = {"DOWNLOAD_DELAY": 0.2}
class_id = 62
num = 1
items = EduInformationItem()
flags = True
bf = BloomFilter()
next_index = ""
    header = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        # "Connection": "keep-alive",
        "Host": "news.eastday.com",
        "Pragma": "no-cache",
        "Referer": "http://news.eastday.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
    }
def parse(self, response):
node_obj = response.xpath('''//div[@id="left"]/ul/li|//div[@class="leftsection"]/ul/li''')
if not node_obj:
print("error_spider",self.name)
for detail in node_obj:
url = detail.xpath('a/@href').extract_first()
time_node = detail.xpath('span[@class="hui12"]/text()|span[@class="black12 fr text4"]/text()').extract_first(default="").strip()
url = urljoin(response.url, url)
            if url is None or url == "":
pass
else:
if BL:
                    if self.bf.isContains(url):  # check whether this url has been seen before
print('url exists!')
else:
self.bf.insert(url)
print("请求详情页:",url)
yield scrapy.Request(url,callback=self.parse_detail,headers=self.header,meta={"time_node":time_node})
else:
yield scrapy.Request(url, callback=self.parse_detail, headers=self.header,
meta={"time_node": time_node})
        # # # pagination (multi-page crawl)
        # next_node = response.xpath('''//div[@class="plist"]/div/a[contains(text(),"下一页")]/@href''').extract_first()
        # if next_node != None:
        #     next_page = urljoin(response.url,next_node)
        #     print("requesting next page:",next_page)
        #     yield scrapy.Request(next_page, callback=self.parse)
def parse_detail(self,response):
        # title
title = response.xpath('//div[@id="biaoti"]/text()').extract_first(default="")
title = title.strip()
title = title_slice(title)
        # keywords (keyman)
keyman = response.xpath('''//meta[@name="keywords"]/@content''').extract_first(default="")
if keyman:
keyman = keyman_slice(keyman)
else:
keyman = ""
if title:
            # summary
try:
summary = response.xpath('//meta[@name="description"]/@content').extract_first(default="").strip()
summary = summary.replace("东方网-东方新闻-", "")
except Exception as e:
summary = ""
summary = summay_slice(summary)
index_node = response.xpath('string(//div[@class="time grey12a fc lh22"]/p[last()])').extract_first()
try:
time_node = response.meta.get("time_node","")
time_node = time_node.replace("/","-")
news_time = datetime.datetime.strptime(str(time_node).strip(),"%Y-%m-%d %H:%M:%S")
news_time = int(time.mktime(news_time.timetuple()))
except Exception as e:
print(e,"time")
news_time = None
# '来源:新华社 作者:胡浩 林晖 朱基钗 史竞男 选稿:刘晓晶 '
            # writer (author)
try:
writer = re.search(r".*?作者:(.*?)选稿:.*?", index_node,re.S).group(1)
writer = writer.strip()
except Exception as e:
print(e,"writer")
writer = writer_defined
writer = writer_slice(writer)
            # news source (news_source)
try:
source = re.search(r".*?来源:(.*?)作者:.*?", index_node,re.S).group(1)
source = source.strip()
except Exception as e:
try:
source = re.search(r".*?来源:(.*?)选稿:.*?", index_node, re.S).group(1)
source = source.strip()
except Exception as e:
try:
source = re.search(r".*?来源:(.*)", index_node, re.S).group(1)
source = source.strip()
except Exception as e:
print(e,"source")
source = news_source_defined
news_source = news_source_slice(source)
            # news content
content = response.xpath('//div[@id="zw"]').extract_first()
content = content.replace(" ", "")
content = content.replace(" ", "")
content = content.replace("    ", "")
content = content.replace("&", "")
content = content.replace("nbsp", "")
content = content.replace("&nbsp", "")
content = contentfilter(content)
self.items["news_keyman"] = keyman
self.items["title"] = title
self.items["content"] = content
self.items['content_summary'] = summary
self.items['click_num'] = click_num
self.items['news_time'] = news_time
self.items['news_source'] = news_source
self.items['writer'] = writer
#
#
self.items["class_id"] = self.class_id
self.items["user_id"] = user_id
self.items["istop"] = istop
self.items["ismember"] = ismember
self.items["userfen"] = userfen
self.items["isgood"] = isgood
self.items["user_name"] = "admin"
self.items["group_id"] = group_id
self.items["plnum"] = plnum
self.items["first_title"] = first_title
self.items["is_qf"] = is_qf
self.items["totaldown"] = totaldown
self.items["have_html"] = have_html
self.items["last_dotime"] = int(time.time())
self.items["diggtop"] = diggtop
self.items["stb"] = stb
self.items["ttid"] = ttid
self.items["ispic"] = ispic
self.items["isurl"] = isurl
self.items["fstb"] = fstb
self.items["restb"] = restb
self.items["news_tem_pid"] = news_tem_pid
self.items["dokey"] = dokey
self.items["closepl"] = closepl
self.items["haveaddfen"] = haveaddfen
self.items["infotags"] = keyman
self.items["checked"] = checked
self.items["keyid"] = keyid
self.items["news_path"] = news_path
self.items["titlepic"] = titlepic
self.items["ftitle"] = ftitle
#
#
self.items['filename'] = filename
self.items['titlefont'] = titlefont
self.items['title_url_z'] = title_url_z
self.items['originalurl'] = response.url
#
yield self.items
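# Run sketch (assumes a standard Scrapy project containing this spider; the
# spider name is the `name` attribute defined above):
#   scrapy crawl news_eastday_com_gd2008_world_62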
authors: ["[email protected]"]

blob_id: b35f1a4ab850ed9cdbf6edf18e90c57f3efa4b87
directory_id: 14028bea18dcd4f89fca2306bf51dcbf6acabb44
path: /apps/accounts/migrations/0032_auto_20170519_1322.py
content_id: b8a64807200ec938e87b45d1deac6b6a1e3a2b96
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"]
license_type: permissive
repo_name: CMSgov/bluebutton-web-server
snapshot_id: 5694c7149d9f2f6efed9a2814c928d8a7539e4cb
revision_id: fb0904c0b9d77dfb00523fe6ce69b946b640441e
branch_name: refs/heads/master
visit_date: 2023-09-01T18:17:31.088628
revision_date: 2023-08-25T20:43:14
committer_date: 2023-08-25T20:43:14
github_id: 50,062,960
star_events_count: 30
fork_events_count: 33
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T10:24:34
gha_created_at: 2016-01-20T21:52:00
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,783
extension: py
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-05-19 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0031_auto_20170517_1350'),
]
operations = [
migrations.AddField(
model_name='requestinvite',
name='user_type',
field=models.CharField(choices=[('BEN', 'Beneficiary'), ('DEV', 'Developer')], default='', max_length=3),
),
migrations.AlterField(
model_name='requestinvite',
name='organization',
field=models.CharField(blank=True, default='', max_length=150),
),
migrations.AlterField(
model_name='userprofile',
name='aal',
field=models.CharField(blank=True, choices=[('', 'Undefined'), ('1', 'AAL1'), ('2', 'AAL2'), ('3', 'AAL3')], default='1', help_text='See NIST SP 800 63 B for definitions.', max_length=1, verbose_name='Authenticator Assurance Level'),
),
migrations.AlterField(
model_name='userprofile',
name='ial',
field=models.CharField(blank=True, choices=[('', 'Undefined'), ('1', 'IAL1'), ('2', 'IAL2'), ('3', 'IAL3')], default='', help_text='See NIST SP 800 63 A for definitions.', max_length=1, verbose_name='Identity Assurance Level'),
),
migrations.AlterField(
model_name='userprofile',
name='loa',
field=models.CharField(blank=True, choices=[('', 'Undefined'), ('1', 'LOA-1'), ('2', 'LOA-2'), ('3', 'LOA-3'), ('4', 'LOA-4')], default='', help_text='Legacy and Deprecated. Using IAL AAL is recommended.', max_length=1, verbose_name='Level of Assurance'),
),
]
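# Applying this migration follows the usual Django flow (a sketch, assuming a
# project with the `accounts` app installed and migration 0031 already applied):
#   python manage.py migrate accounts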
authors: ["[email protected]"]

blob_id: 7f1badc4cc8eeef844cccd8d948e1135d2df5301
directory_id: 50cce7441685fdc5b9d1bd2b80272078f637e7c9
path: /SingleTop/test/macros/ZjetSF_2.py
content_id: e7b64f1dc42fe10dff576632df9ae006aecf38c1
detected_licenses: []
license_type: no_license
repo_name: dnoonan08/tWAnalysisCode
snapshot_id: 4b622177f9401007cf873a295d71b1cee4140396
revision_id: 34a3ed1cc92ff317bbebc6c54cb634d32572999a
branch_name: refs/heads/master
visit_date: 2021-01-19T20:18:42.857885
revision_date: 2014-12-31T04:20:47
committer_date: 2014-12-31T04:20:47
github_id: 22,884,401
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,169
extension: py
content:
#!/usr/bin/env python
###SF from the Zpeak0jets region
metbins = [ 10, 20, 30, 40, 50, 60, 9999]
sf = [[0.8938539148917144, 0.94500843235182508, 1.0442753286019268, 1.1831266443479298, 1.3636037004840249, 1.5927083462800735, 1.9687105145585893],
[0.82970539332200888, 0.87674221693579291, 0.96776313658464252, 1.0935264684057753, 1.2617201102857636, 1.4802939663303316, 1.8092727571765537],
[0.95800243646141992, 1.0132746477678571, 1.1207875206192113, 1.2727268202900843, 1.4654872906822862, 1.7051227262298156, 2.1281482719406246]]
# note: this second table supersedes the one above; mode-0 scale factors are reset to 1.
sf = [[1., 1., 1., 1., 1., 1., 1. ],
[0.82970539332200888, 0.87674221693579291, 0.96776313658464252, 1.0935264684057753, 1.2617201102857636, 1.4802939663303316, 1.8092727571765537],
[0.95800243646141992, 1.0132746477678571, 1.1207875206192113, 1.2727268202900843, 1.4654872906822862, 1.7051227262298156, 2.1281482719406246]]
def ZjetSF(met,mode):
for i in range(len(metbins)):
if met < metbins[i]:
return sf[mode][i]
return 1.
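# Worked example (illustrative values only): met = 25. falls below metbins[2] == 30,
# so ZjetSF(25., 1) returns sf[1][2]; a met beyond the last bin edge falls through
# to the default return value of 1.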
authors: ["[email protected]"]

blob_id: 7090d09db83db6c9378b938bdb72d854637a8cf9
directory_id: c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
path: /cases/synthetic/sieve-big-7012.py
content_id: 91416980ccf25d9abc1e69bb70203ae9d9d13bf2
detected_licenses: []
license_type: no_license
repo_name: Virtlink/ccbench-chocopy
snapshot_id: c3f7f6af6349aff6503196f727ef89f210a1eac8
revision_id: c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
branch_name: refs/heads/main
visit_date: 2023-04-07T15:07:12.464038
revision_date: 2022-02-03T15:42:39
committer_date: 2022-02-03T15:42:39
github_id: 451,969,776
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 31,749
extension: py
content:
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
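# Expected behaviour sketch: vrange(2, n) holds 2..n-1, and sieve() removes every
# element divisible by an earlier surviving element, so the loop above prints the
# primes below 50, one per line.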
authors: ["[email protected]"]