Dataset schema (one record per source file; ranges and class counts as reported by the dataset viewer):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
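The rows below follow this schema, one record per source file. As a minimal illustrative sketch (not part of the dataset card), records with these columns could be streamed with the Hugging Face `datasets` library; the path "org/dataset-name" is a placeholder, since the actual dataset name is not given here:

from datasets import load_dataset

# Placeholder path: substitute the real dataset name.
ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    # Each record carries the columns listed in the schema above.
    if row["license_type"] == "permissive":
        print(row["repo_name"], row["path"], row["length_bytes"])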
blob_id: 1ec1082c420c57632e1d8fbdbff3c24e3f426d14 | directory_id: ae7ba9c83692cfcb39e95483d84610715930fe9e | content_id: a3cd3aa934f2d8f5f06832cbe4c94dceea41b641
path: /yubinbai/pcuva-problems/UVa 11262 - Weird Fence/EdmondsKarp.py
repo_name: xenron/sandbox-github-clone | snapshot_id: 364721769ea0784fb82827b07196eaa32190126b | revision_id: 5eccdd8631f8bad78eb88bb89144972dbabc109c | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-05-01T21:18:43.101664 | revision_date: 2016-09-12T12:38:32 | committer_date: 2016-09-12T12:38:32
github_id: 65,951,766 | star_events_count: 5 | fork_events_count: 7 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,846 | extension: py
'''
Created on 2013-6-24
@author: Yubin Bai
'''
from collections import deque
INF = 1 << 32
def edmondsKarp(graph, s, t):
def augmentPath(v, minEdge):
if (v == s): # managed to get back to source
f[0] = minEdge # minEdge of the path
return
elif (v in p): # augment if there is a path
# we need AdjMat for fast lookup here
augmentPath(p[v], min(minEdge, graph[p[v]][v]))
graph[p[v]][v] -= f[0] # forward edges -> decrease
graph[v][p[v]] += f[0] # backward edges -> increase
p = {} # parent map to reconstruct path
f = [0] # global variables, use list as mutable
max_flow = 0
while True: # this will be run max O(VE) times
f[0] = 0
q = deque()
dist = {s: 0} # O(E) BFS and record path p
q.append(s)
while q:
u = q.popleft() # queue: layer by layer!
if (u == t):
break # modification 1: reach sink t, stop BFS
for v in graph[u]: # for each neighbors of u
# modification 2: also check AdjMat as edges may disappear
if graph[u][v] > 0 and v not in dist:
dist[v] = dist[u] + 1 # then v is reachable from u
q.append(v) # enqueue v for next steps
p[v] = u # modification 3: parent of v->first is u
augmentPath(t, INF) # path augmentation in O(V)
if (f[0] == 0):
break # seems that we cannot pass any more flow
max_flow += f[0]
return max_flow
if __name__ == '__main__':
graph = {1: {1: 0, 2: 0, 3: 70, 4: 30}, 3: {1: 0, 2: 25, 3: 0, 4: 5},
4: {1: 0, 2: 70, 3: 0, 4: 0}, 2: {1: 0, 2: 0, 3: 0, 4: 0}}
max_flow = edmondsKarp(graph, 1, 2)
print("Max flow = %d\n" % max_flow)
authors: ["[email protected]"] | author_id:
blob_id: 2b0ced7fa82699bf40379314a33e83ddcdf35160 | directory_id: 7e9c0243c48bbf0ddca9779ef03fc13bb9ac0496 | content_id: ef7ed0eabce07b078b04bab06a40c9c69cbbb75e
path: /candle.py
repo_name: suchismitarout/tt | snapshot_id: c47f1f59659d2678392e2f0c3aaee8cfaa147ff4 | revision_id: 54a5b625a82dab854b679050d67e340e74d71edd | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-09-16T20:25:34.146741 | revision_date: 2019-11-25T06:52:07 | committer_date: 2019-11-25T06:52:07
github_id: 223,880,569 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 399 | extension: py
def birthdayCakeCandles(ar):
max_ele = ar[0]
count = 0
for i in range(len(ar)):
if ar[i] > max_ele:
max_ele = ar[i]
for j in ar:
if j == max_ele:
count +=1
# for j in ar:
# if j == max_ele:
# count +=1
return count
candle = birthdayCakeCandles([44,53,31,27,77,60,66,77,26,36])
print(candle)
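# Added note (not in the original file): the same count can be written
# idiomatically as ar.count(max(ar)); for the input above the maximum 77
# occurs twice, so the script prints 2.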
authors: ["[email protected]"] | author_id:
blob_id: 3630fd00235c8d64e6fa8c41cb6b0031acc8d051 | directory_id: 996967405d3ee07e011ee0f0404d03b6d04d3492 | content_id: e4e45d53306e8d53996618fd3de1138d855286eb
path: /dataloader/get_coco/select_image.py
repo_name: wyyy04/MyRepository | snapshot_id: 797936fc757a2eee4793d5b1b47ebf8b57216ab8 | revision_id: 91f1a7ff969e91d9649b96796c5827c9910a8183 | branch_name: refs/heads/main
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-02-22T09:56:21.926013 | revision_date: 2021-01-27T15:34:00 | committer_date: 2021-01-27T15:34:00
github_id: 315,524,193 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2020-11-24T07:30:05 | gha_created_at: 2020-11-24T05:05:28 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 504 | extension: py
from readtxt import loadDataset
import os
import shutil
# Select from the COCO training set all of the images used for training and
# testing in motivations_clean
rdir = r'D:\download\train2014'  # source directory
odir = r'D:\data'  # destination directory
data = loadDataset()
data = data[:,0]
print(data)
for im_name in data:
    print(im_name)  # file name
    r = os.path.join(rdir, im_name)
    o = os.path.join(odir, im_name)  # full source & destination paths
    print(r, o)
    shutil.copy(r, o)  # copy the file to the destination (use shutil.move to move instead)
authors: ["[email protected]"] | author_id:
blob_id: 716119ca0680e969a5c9b15d2f93c196e377873b | directory_id: 7b4e9342d42be2b55af5dc23a8abedd672d68e99 | content_id: 83d4c6d1bad5eef4658ff26f41ebc08452999a87
path: /MobileApps/libs/flows/web/jweb/eventing_plugin.py
repo_name: Amal548/QAMA | snapshot_id: af5bb335c92a90b461f1ee9a3870435d83d46802 | revision_id: b5230c51d3bc7bb04b3448d1a1fe5a076d8898d5 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-07-12T09:17:04.624677 | revision_date: 2021-08-06T08:01:11 | committer_date: 2021-08-06T08:01:11
github_id: 389,595,655 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,065 | extension: py
from MobileApps.libs.flows.web.jweb.jweb_flow import JwebFlow
import json
class EventingPlugin(JwebFlow):
flow_name = "eventing_plugin"
########################################################################################################################
# #
# ACTION FLOWS #
# #
########################################################################################################################
def select_eventing_dispatch_open(self):
"""
clicks the eventing dispatch open item
:return:
"""
self.driver.click("eventing_dispatch_open_item")
    def select_eventing_dispatch_close(self):
"""
clicks the eventing dispatch close item
:return:
"""
self.driver.click("eventing_dispatch_close_item")
def select_eventing_plugin_test(self):
"""
clicks the eventing plugin test button
:return:
"""
self.driver.swipe(direction="up")
self.driver.click("eventing_test_button")
def eventing_test_result(self):
"""
:return: eventing test result text
"""
return self.driver.wait_for_object("eventing_test_result_txt").text
def add_listener_multiple_event_results(self):
"""
:return: add multiple event result text
"""
return self.driver.wait_for_object("multiple_event_result_text").text
def add_listener_event_result(self):
"""
:return: add listener test result
"""
return json.loads(self.driver.get_attribute(obj_name="add_listener_test_result_txt", attribute="value"))
def add_listener_test_result(self):
"""
:return: add listener test result text
"""
self.driver.swipe(direction="down")
return self.driver.wait_for_object("add_listener_test_result_text").text
def select_add_listener_pop_up_close_btn(self):
"""
clicks the add listener pop up close btn
:return:
"""
self.driver.click("add_listener_pop_up_close_btn")
def get_add_listener_pop_up_toast_text(self):
"""
:return: main and sub text found from the toast pop up notification
"""
pop_up_toast_text = {}
pop_up_toast_text['main_text'] = self.driver.wait_for_object("pop_up_toast_text", index=0).text
pop_up_toast_text['sub_text'] = self.driver.wait_for_object("pop_up_toast_text", index=1).text
return pop_up_toast_text
def select_add_listener_test_btn(self):
"""
clicks the add listener test btn
:return:
"""
self.driver.click("eventing_add_listener_btn")
def enter_add_listener_event(self, option):
"""
sends name of event listener in Eventing.addListener() tab
:param option:
:return:
"""
self.driver.send_keys("eventing_native_element_listener_field", option)
def enter_name_field(self,option):
"""
sends the name field
:param option:
:return:
"""
self.driver.send_keys("eventing_name_field", option)
def enter_data_field(self,option):
"""
sends the data field
:param option:
:return:
"""
self.driver.send_keys("eventing_data_field", option)
def select_jarvis_event_option_test(self):
"""
clicks the send jarvis event test btn
:return:
"""
self.driver.click("eventing_send_jarvis_test_btn")
def jarvis_event_option_test_result(self):
"""
:return: text after clicking jarvis event option test btn
"""
return self.driver.find_object("eventing_jarvis_options_test_result").text
authors: ["[email protected]"] | author_id:
blob_id: 5effb4f8168c2ae2b22c3d5bdf47fbc2371234a7 | directory_id: 08c7f146d82da572731f6ad0fd7d96bd4553f3d8 | content_id: 440dca6d8ada9cc66236256b5fe96e07ed38d97b
path: /backend/wispy_bread_26347/settings.py
repo_name: crowdbotics-apps/wispy-bread-26347 | snapshot_id: 9c7b081b280e709f6eb5dccd3d38e7be306c18a8 | revision_id: 04532cb6c4ac227bd104c2210e9997cdc5ff530d | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-05-01T09:20:01.995863 | revision_date: 2021-05-07T19:06:03 | committer_date: 2021-05-07T19:06:03
github_id: 365,329,281 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,117 | extension: py
"""
Django settings for wispy_bread_26347 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wispy_bread_26347.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wispy_bread_26347.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
authors: ["[email protected]"] | author_id:
blob_id: 9902ebd2e00cc805ec5bdc9703e6ca797ea372dc | directory_id: 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | content_id: 25adb86a956a71e443321f8a2ef6661d3e2d6833
path: /suvari/gtk2chain/reverses/xcb-util/actions.py
repo_name: pisilinux/playground | snapshot_id: a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | revision_id: e4e12fff8a847ba210befc8db7e2af8556c3adf7 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-08-12T23:03:27.609506 | revision_date: 2022-08-11T18:28:19 | committer_date: 2022-08-11T18:28:19
github_id: 8,429,459 | star_events_count: 16 | fork_events_count: 22 | gha_event_created_at: 2022-08-11T18:28:20 | gha_created_at: 2013-02-26T09:37:11 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 572 | extension: py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-vif")
autotools.configure("--disable-static \
--with-pic")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("README")
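# Added note (not in the original file): in the pisi actions API,
# rawInstall() runs `make install` with the given arguments, so the
# DESTDIR=%s argument stages the files into the directory returned by
# get.installDIR() for packaging.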
authors: ["[email protected]"] | author_id:
blob_id: 878a8d6f13a4d962da19b20180204a0a90f19306 | directory_id: 74c368b2511fd62cb4f71db64bd728d0354d7191 | content_id: 6ea166c7a35a6e2ea5c30236b9881e9fa3bc3e65
path: /refinenet/datasets.py
repo_name: nocotan/RefineNet | snapshot_id: 318e8867eca263127e573323f0225934adcf77b8 | revision_id: 05e5a465807016b913f1f2d58a14c0fdad72beed | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-04-03T06:07:40.295234 | revision_date: 2018-03-20T14:48:44 | committer_date: 2018-03-20T14:48:44
github_id: 124,654,926 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,754 | extension: py
# -*- coding: utf-8 -*-
import os
import random
import cv2
import numpy as np
import PIL.Image
from chainer.dataset import dataset_mixin
class ImageDataset(dataset_mixin.DatasetMixin):
def __init__(self, data_dir, data_list, crop_size=(300, 300)):
self.data_dir = data_dir
self.data_list = os.path.join(self.data_dir, data_list)
self.crop_size = crop_size
self.crop_h = self.crop_size[0]
self.crop_w = self.crop_size[1]
self.img_ids = [i_id.strip() for i_id in open(self.data_list)]
self.files = []
for name in self.img_ids:
img_file = os.path.join(self.data_dir, "images/%s.jpg" % name)
label_file = os.path.join(self.data_dir, "labels/%s.png" % name)
self.files.append({
"image": img_file,
"label": label_file,
"name": name,
})
def __len__(self):
return len(self.files)
def generate_scale_label(self, image, label):
f_scale = 0.5 + random.randint(0, 11) / 10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale,
interpolation=cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale,
interpolation=cv2.INTER_NEAREST)
return image, label
def get_example(self, i):
datafiles = self.files[i]
image = cv2.imread(datafiles["image"], cv2.IMREAD_COLOR)
label = np.asarray(PIL.Image.open(datafiles["label"]), dtype=np.int32)
image, label = self.generate_scale_label(image, label)
image = np.asarray(image, np.int32)
image -= (128, 128, 128)
img_h, img_w = label.shape
pad_h = max(self.crop_size[0] - img_h, 0)
pad_w = max(self.crop_size[1] - img_w, 0)
if pad_h > 0 or pad_w > 0:
img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(0.0, 0.0, 0.0))
label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
pad_w, cv2.BORDER_CONSTANT,
value=(255,))
else:
img_pad, label_pad = image, label
img_h, img_w = label_pad.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
image = image.transpose((2, 0, 1))
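        # Added note (not in the original file): np.random.choice(2) * 2 - 1
        # yields +1 or -1, so the slices below either keep the arrays as-is
        # (step +1) or mirror them horizontally (step -1).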
flip = np.random.choice(2) * 2 - 1
image = image[:, :, ::flip]
label = label[:, ::flip]
return image.copy(), label.copy()
authors: ["[email protected]"] | author_id:
blob_id: 9935830816782ca4bbe14f5537a51ca72ff16bc6 | directory_id: b109001ec3ca8aa4b2cfc4d4520d8644c58ad5e0 | content_id: e6b134df0a24b3ea97c7ed69c07d70c972f65cf3
path: /navigation/Mappers.py
repo_name: Chandanpanda/navigation-benchmark | snapshot_id: b3e25e3672150413299a3d2566ad601156317acf | revision_id: d83431d6648ac1147f53056ed32ce2caae4f702d | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-10-24T04:42:56.436909 | revision_date: 2019-01-31T12:43:48 | committer_date: 2019-01-31T12:43:48
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,626 | extension: py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil,floor
import math
from .Reprojection import getMapSizeInCells, project2dPClIntoWorldMap, ReprojectLocal2Global
def DepthToLocal3D(depth, fx, fy, cx, cy):
r"""Projects depth map to 3d point cloud
with origin in the camera focus
"""
device = depth.device
h,w = depth.squeeze().size()
npts = h*w
x = torch.linspace(0, w-1, w).to(device)
y = torch.linspace(0, h-1, h).to(device)
xv, yv = torch.meshgrid([x, y])
dfl = depth.t().flatten()
return torch.cat([(dfl *(xv.flatten() - cx) / fx).unsqueeze(-1), #x
(dfl *(yv.flatten() - cy) / fy).unsqueeze(-1), #y
dfl.unsqueeze(-1)], dim = 1) #z
def pointCloud2ObstaclesNonDifferentiable(pts3D,
map_size = 40,
cell_size = 0.2):
r"""Counts number of 3d points in 2d map cell
height is sum-pooled.
"""
device = pts3D.device
map_size_in_cells = getMapSizeInCells(map_size,cell_size) - 1
init_map = torch.zeros((map_size_in_cells,map_size_in_cells), device = device)
if len(pts3D) <= 1:
return init_map
num_pts,dim = pts3D.size()
pts2D = torch.cat([pts3D[:,2:3],pts3D[:,0:1]], dim = 1)
data_idxs = torch.round(project2dPClIntoWorldMap(pts2D, map_size, cell_size))
if len(data_idxs) > 10:
u, counts = np.unique(data_idxs.detach().cpu().numpy(), axis=0, return_counts = True)
init_map[u[:,0],u[:,1] ] = torch.from_numpy(counts).to(dtype=torch.float32, device=device)
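        # Added note (not in the original file): np.unique with
        # return_counts=True lists each occupied cell once together with its
        # point count, which is what fills the occupancy map above.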
return init_map
class DirectDepthMapper(nn.Module):
r"""Estimates obstacle map given the depth image
ToDo: replace numpy histogram counting with differentiable
pytorch soft count like in
https://papers.nips.cc/paper/7545-unsupervised-learning-of-shape-and-pose-with-differentiable-point-clouds.pdf
"""
def __init__(self,
#fx = 0,
#fy = 0,
#cx = 0,
#cy = 0,
camera_height = 0,
near_th = 0.1, far_th = 4.0, h_min = 0.0, h_max = 1.0,
map_size = 40, map_cell_size = 0.1,
device = torch.device('cpu'),
**kwargs):
super(DirectDepthMapper, self).__init__()
self.device = device
#self.fx = fx
#self.fy = fy
#self.cx = cx
#self.cy = cy
self.near_th = near_th
self.far_th = far_th
self.h_min_th = h_min
self.h_max_th = h_max
self.camera_height = camera_height
self.map_size_meters = map_size
self.map_cell_size = map_cell_size
return
def forward(self, depth, pose = torch.eye(4).float()):
self.device = depth.device
#Works for FOV = 45 degrees in minos/sensors.yml. Should be adjusted, if FOV changed
self.fx = float(depth.size(1))# / 2.0
self.fy = float(depth.size(0))# / 2.0
self.cx = int(self.fx)//2 - 1
self.cy = int(self.fy)//2 - 1
pose = pose.to(self.device)
local_3d_pcl = DepthToLocal3D(depth, self.fx, self.fy, self.cx, self.cy)
idxs = (torch.abs(local_3d_pcl[:,2]) < self.far_th) * (torch.abs(local_3d_pcl[:,2]) >= self.near_th)
survived_points = local_3d_pcl[idxs]
if len(survived_points) < 20:
map_size_in_cells = getMapSizeInCells(self.map_size_meters,self.map_cell_size) - 1
init_map = torch.zeros((map_size_in_cells,map_size_in_cells), device = self.device)
return init_map
global_3d_pcl = ReprojectLocal2Global(survived_points, pose)[:,:3]
#Because originally y looks down and from agent camera height
global_3d_pcl[:,1] = -global_3d_pcl[:,1] + self.camera_height
idxs = (global_3d_pcl[:,1] > self.h_min_th) * (global_3d_pcl[:,1] < self.h_max_th)
global_3d_pcl = global_3d_pcl[idxs]
obstacle_map = pointCloud2ObstaclesNonDifferentiable(
global_3d_pcl,
self.map_size_meters,
self.map_cell_size)
return obstacle_map
class SparseDepthMapper(nn.Module):
r"""Estimates obstacle map given the 3d points from ORBSLAM
Does not work well.
"""
def __init__(self,
fx = 0,
fy = 0,
cx = 0,
cy = 0,
camera_height = 0,
near_th = 0.1, far_th = 4.0, h_min = 0.0, h_max = 1.0,
map_size = 40, map_cell_size = 0.1,
device = torch.device('cpu'),
**kwargs):
super(SparseDepthMapper, self).__init__()
self.device = device
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
self.near_th = near_th
self.far_th = far_th
self.h_min_th = h_min
self.h_max_th = h_max
self.camera_height = camera_height
self.map_size_meters = map_size
self.map_cell_size = map_cell_size
return
def forward(self, sparse_depth, pose = torch.eye(4).float()):
global_3d_pcl = sparse_depth
#Because originally y looks down and from agent camera height
global_3d_pcl[:,1] = -global_3d_pcl[:,1]# + self.camera_height
idxs = (global_3d_pcl[:,1] > self.h_min_th) * (global_3d_pcl[:,1] < self.h_max_th)
global_3d_pcl = global_3d_pcl[idxs]
obstacle_map = pointCloud2ObstaclesNonDifferentiable(
global_3d_pcl,
self.map_size_meters,
self.map_cell_size)
return obstacle_map
authors: ["[email protected]"] | author_id:
blob_id: d399b2d3a8ff12446dacbf96a4e46f7b8f5d2e92 | directory_id: 52555a17cdb6058565696585c978c9012b0bfad7 | content_id: b8d64549b8c4f770d2f4fd70d7fcabdc1ba4bee4
path: /examples/synthetic/park2_4/park2_4_mf.py
repo_name: kirthevasank/dragonfly | snapshot_id: 8685d6aff272bd262d9b47c455fc1f1dc77a42aa | revision_id: 8e09d5ba602d14922455bf09bdd4ca0fa09ef3ee | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2020-05-02T00:38:35.252889 | revision_date: 2019-05-17T03:40:23 | committer_date: 2019-05-17T03:40:23
github_id: 177,675,339 | star_events_count: 3 | fork_events_count: 0 | gha_event_created_at: 2019-05-06T04:07:41 | gha_created_at: 2019-03-25T22:39:37 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 489 | extension: py
"""
Parkd function with multi-fidelity.
-- [email protected]
"""
# pylint: disable=invalid-name
from park2_4 import park2_4_z
# Write a function like this called 'obj'.
def park2_4_mf(z, x):
""" Computes the Parkd function. """
return park2_4_z(z[0], x)
def objective(z, x):
""" Objective. """
return park2_4_mf(z, x)
def cost(z):
""" Cost function. """
return 0.05 + 0.95 * z[0]**1.5
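# Added note (not in the original file): the fidelity cost above ranges from
# 0.05 at z[0] = 0 to 1.0 at z[0] = 1.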
def main(z, x):
""" main function. """
return park2_4_mf(z, x), cost(z)
authors: ["[email protected]"] | author_id:
blob_id: 1a2a244f5a7ffd2c4a3c4534e593dc75e9823e55 | directory_id: 49b827bb587d50c5092837749a7d5b88c024e854 | content_id: 722867db0ee2e86786f2b64806e22f0365deda70
path: /experiments/ACOSlite/HDF5_to_GeoJSON.py
repo_name: SpaceAppsXploration/oco-2-data-network | snapshot_id: 7d836bf77cf79a5aac1cd22b02c75af316432b56 | revision_id: 7d1fd709c7c219c83b7ea9f8075f7df46b460f23 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-12-11T05:43:45.979066 | revision_date: 2015-07-18T08:56:29 | committer_date: 2015-07-18T08:56:29
github_id: 34,137,221 | star_events_count: 1 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,793 | extension: py
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 15:23:47 2015
@author: jacopo
"""
import json
from pprint import pprint
import h5py
#
# TO DOs
#
# 1. Add the reference to Sensors ontology
# ACOS LITE file in the same directory
f = h5py.File('ACOSv3.4r02_L3_20100101_000000_20130515_000000.h5', libver='earliest')
xco2 = f['xco2']
lon = f['lon']
lat = f['lat']
lon_bnds = f['lon_bnds']
lat_bnds = f['lat_bnds']
xco2_set = xco2[0,0,0,:]
geo = {"type" : "FeatureCollection",
"features" : [
{
"type" : "Feature",
"geometry" : {"type": "Point",
"coordinates" : [lat[0], lon[0]]
}
},
{
"type" : "Feature",
"geometry" : {
"type" : "polygon",
"coordinates" : [
[
lon_bnds[0,0],
lat_bnds[0,0]
],
[
lon_bnds[0,0],
lat_bnds[0,1]
],
[
lon_bnds[0,1],
lat_bnds[0,0]
],
[
lon_bnds[0,1],
lat_bnds[0,1]
]
]
},
"properties": {
"xco2" : xco2_set[12]
}
}
]
}
#with open('geo.json', 'w') as outfile:
#json.dump(geo, outfile)
# print a JSON with the quantity of xco2 for the given geometry
print(json.dumps(geo, indent=4))
authors: ["[email protected]"] | author_id:
blob_id: ff20f97e522dad036e7df019b8c4e0a5caae626a | directory_id: 9743d5fd24822f79c156ad112229e25adb9ed6f6 | content_id: 87d4634aa61496578132ed4c4606ab4ff28ddf79
path: /xai/brain/wordbase/nouns/_unguents.py
repo_name: cash2one/xai | snapshot_id: de7adad1758f50dd6786bf0111e71a903f039b64 | revision_id: e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2021-01-19T12:33:54.964379 | revision_date: 2017-01-28T02:00:50 | committer_date: 2017-01-28T02:00:50
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 245 | extension: py
from xai.brain.wordbase.nouns._unguent import _UNGUENT
# class header
class _UNGUENTS(_UNGUENT, ):
def __init__(self,):
_UNGUENT.__init__(self)
self.name = "UNGUENTS"
self.specie = 'nouns'
self.basic = "unguent"
self.jsondata = {}
authors: ["[email protected]"] | author_id:
blob_id: 58b2baef07663c5e82c8e96e9e9e199a40108943 | directory_id: af685f9625dc3fc1892171df396ed46155caa092 | content_id: 84635983ccc0a62c9a1aa63c19be4a548ed16b53
path: /WORC/resources/fastr_tools/worc/bin/FeatureConverter_tool.py
repo_name: MStarmans91/WORC | snapshot_id: b66d7de70e2f3acab5100a3431855216b31bd7b1 | revision_id: f267b3d05c8193939aa4f43e47c6e24f9307864e | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-17T14:02:29.566811 | revision_date: 2023-08-15T08:58:42 | committer_date: 2023-08-15T08:58:42
github_id: 92,295,542 | star_events_count: 65 | fork_events_count: 20 | gha_event_created_at: 2023-08-15T08:58:44 | gha_created_at: 2017-05-24T13:31:31 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,404 | extension: py
#!/usr/bin/env python
# Copyright 2017-2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from WORC.featureprocessing.FeatureConverter import FeatureConverter
def main():
parser = argparse.ArgumentParser(description='Radiomics classification')
parser.add_argument('-feat_in', '--feat_in', metavar='feat_in',
nargs='+', dest='feat_in', type=str, required=True,
help='Patient features input of first modality (HDF)')
parser.add_argument('-toolbox', '--toolbox', metavar='toolbox', nargs='+',
dest='toolbox', type=str, required=True,
help='Toolbox used for feature calculation')
parser.add_argument('-cf', '--conf', metavar='config', nargs='+',
dest='cf', type=str, required=True,
help='Configuration')
parser.add_argument('-feat_out', '--feat_out', metavar='feat_out',
nargs='+', dest='feat_out', type=str, required=True,
default=None,
help='Patient features input of second modality (HDF)')
args = parser.parse_args()
# Convert several input arguments from lists to strings
if type(args.feat_in) is list:
args.feat_in = ''.join(args.feat_in)
if type(args.toolbox) is list:
args.toolbox = ''.join(args.toolbox)
if type(args.cf) is list:
args.cf = ''.join(args.cf)
if type(args.feat_out) is list:
args.feat_out = ''.join(args.feat_out)
# Run converter
FeatureConverter(feat_in=args.feat_in,
toolbox=args.toolbox,
config=args.cf,
feat_out=args.feat_out)
if __name__ == '__main__':
main()
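# Added note (not in the original file): a typical invocation would look like
# the following; the argument values are illustrative only:
#   python FeatureConverter_tool.py -feat_in feat.hdf5 -toolbox PREDICT \
#       -cf config.ini -feat_out converted.hdf5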
authors: ["[email protected]"] | author_id:
blob_id: 4b68733a5da1facd4daa9d36b3eafb06d1b7bea2 | directory_id: 79a484e91a8df432a0ded93806a1e8237df7c253 | content_id: 03d19703ba05730c59fd74bd2588eed73576e207
path: /umibukela/migrations/0020_auto_20170124_1443.py
repo_name: OpenUpSA/umibukela | snapshot_id: 7ba14397ad543154d3a32ebfd84e89aa07f7011e | revision_id: 34c1a29a429b88c2f574e9120cfe93ba524633da | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-07-26T19:45:12.531887 | revision_date: 2023-07-10T15:53:07 | committer_date: 2023-07-10T15:53:07
github_id: 47,106,932 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2023-02-02T01:36:59 | gha_created_at: 2015-11-30T09:03:27 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 618 | extension: py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0019_auto_20170124_1252'),
]
operations = [
migrations.AlterField(
model_name='cycleresultset',
name='monitors',
field=models.ManyToManyField(help_text=b"Only monitors for the current partner are shown. If you update the Partner you'll have to save and edit this Cycle Result Set again to see the available monitors.", to='umibukela.Monitor', blank=True),
),
]
authors: ["[email protected]"] | author_id:
blob_id: d21050a17e15ff92bccfbce4604ba90af3d3d95f | directory_id: 56818903f60b5e7b88645f88badc92bfa5d2c65f | content_id: 05d100770da7b6b2f4c87b22a2dd400e38345549
path: /automlcli/settings.py
repo_name: altescy/automlcli | snapshot_id: 23e82ad957ac8cbeb43d734741dd8dfb9b24b0ff | revision_id: ec57ac57df5d9d9f8a7ef79bb7a96a86801f32f4 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2023-04-29T03:57:06.181052 | revision_date: 2021-05-23T12:19:34 | committer_date: 2021-05-23T12:19:34
github_id: 341,651,976 | star_events_count: 2 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 286 | extension: py
from pathlib import Path
# colt settings
DEFAULT_COLT_SETTING = {
"typekey": "type",
}
# automlcli directory settings
AUTOMLCLI_ROOT = Path.home() / ".automlcli"
# plugin settings
LOCAL_PLUGINS_FILENAME = ".automlcli_plugins"
GLOBAL_PLUGINS_FILENAME = AUTOMLCLI_ROOT / "plugins"
authors: ["[email protected]"] | author_id:
blob_id: 0f904e64473e0a25754c0b977e1599a61fcaaa7b | directory_id: 660e35c822423685aea19d038daa8356722dc744 | content_id: eef3074bc7837bf7d59e074cce70d4916358feba
path: /account_statement_ofx/tests/__init__.py
repo_name: saifkazi/tryton_modules | snapshot_id: a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | revision_id: 94bd3a4e3fd86556725cdff33b314274dcb20afd | branch_name: refs/heads/main
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-05-05T12:20:02.059236 | revision_date: 2021-05-19T10:46:37 | committer_date: 2021-05-19T10:46:37
github_id: 368,768,310 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 348 | extension: py
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
try:
from trytond.modules.account_statement_ofx.tests.test_account_statement_ofx import suite # noqa: E501
except ImportError:
from .test_account_statement_ofx import suite
__all__ = ['suite']
authors: ["[email protected]"] | author_id:
blob_id: 062d01992b4ff6403439725111428e675235023b | directory_id: ca12492b8fe66e34d7152a5118a573175b0a176f | content_id: 06c04d1d09b8fdac41184f9f6cca8bc684953e59
path: /backend/wallet/migrations/0001_initial.py
repo_name: crowdbotics-apps/asile-mobile-22968 | snapshot_id: 3d02c0de123ba1b13d79a098ea7eb543658d5f8f | revision_id: c5005ad17c262f87bdd8eefb89145ee75fdca168 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-01-24T17:16:53.239439 | revision_date: 2020-11-25T08:42:33 | committer_date: 2020-11-25T08:42:33
github_id: 315,842,223 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,944 | extension: py
# Generated by Django 2.2.17 on 2020-11-25 05:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
('task', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomerWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField()),
('expiration_date', models.DateTimeField()),
('last_transaction', models.DateTimeField()),
('customer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerwallet_customer', to='task_profile.CustomerProfile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_wallet', to='wallet.CustomerWallet')),
],
),
migrations.CreateModel(
name='TaskerWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField(max_length=254)),
('expiration_date', models.DateTimeField()),
('last_transaction', models.DateTimeField()),
('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerwallet_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='TaskerPaymentAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taskerpaymentaccount_wallet', to='wallet.TaskerWallet')),
],
),
migrations.CreateModel(
name='PaymentTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.FloatField()),
('tip', models.FloatField()),
('tracking_id', models.CharField(max_length=50)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_customer', to='task_profile.CustomerProfile')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_payment_method', to='wallet.PaymentMethod')),
('tasker', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_tasker', to='task_profile.TaskerProfile')),
('transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='paymenttransaction_transaction', to='task.TaskTransaction')),
],
),
]
authors: ["[email protected]"] | author_id:
blob_id: 86aad0348b322a2f956b6383ab4d9264b7a71afd | directory_id: 0ebec1e899789ae2597c01bae7ca2c3382c4266d | content_id: 0a6ea8df7506befcbf9f9e859b1a2d01d340e160
path: /session5/a_customising_plots.py
repo_name: TugdualSarazin/MACT20.21_Digital_tools_Big_Data_part_1 | snapshot_id: 02fda6b401bcdad2a240de00960ff0dbc61fc94d | revision_id: b43b9f50ec42bb413c2c3a090cf11f9886676c58 | branch_name: refs/heads/main
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-01-13T20:51:44.000981 | revision_date: 2020-11-09T12:25:11 | committer_date: 2020-11-09T12:25:11
github_id: 313,076,622 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: 2020-11-15T16:44:29 | gha_created_at: 2020-11-15T16:44:28 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,890 | extension: py
# encoding: utf-8
##################################################
# This script shows uses the pandas and matplotlib libraries to produce different kind of plots
# It also combines data from two sources and create multiple plots
# Find extra documentation about data frame here:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.scatter.html
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: [email protected]
# Status: development
##################################################
# We need to import pandas library as well as the plot library matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# We read the file for population data and gross domestic product
amb_mplts = pd.read_csv('../data/catalunya/AMB_municipalities_min.csv')
lu_mplts = pd.read_csv('../data/luxembourg/population.csv', skiprows=[2,3])
# First, we filter data for a single country, mind the way to select only columns having numeric data
pop_cat = amb_mplts['population']
area_cat = amb_mplts['area']
pop_lu = lu_mplts[['Year', '2020']]
pop_lu.columns = ['canton', 'population']
pop_lu_1821 = lu_mplts[['Year', '1821']]
pop_lu_1821.columns = ['canton', 'population']
# Plots allow basic configuration of visual features. Here some of the most common
colors = np.random.rand(len(pop_cat))
plt.scatter(x=pop_cat, y=area_cat, c=colors)
plt.show()
# Charts can also use lines to represent patterns from different subsets
for value in lu_mplts['Year']:
a_pop = lu_mplts[lu_mplts['Year'] == value]
a_pop = a_pop.iloc[0, 1:15]
plt.plot(a_pop)
plt.show()
# try to customise axis
#plt.xticks(np.arange(0, 2020, 100))
plt.yticks(np.arange(0,175000, 50000))
# There are different ways to represent data density,
# this 2d histogram shows population and area distribution
plt.hist2d(pop_cat, area_cat)
plt.show()
# We can create the arrangement for multiple plots and compare the differences in patterns
fig, axs = plt.subplots(2, 2, sharex=False, sharey=False)
axs[0, 0].scatter(x=pop_cat, y=area_cat, c=colors)
axs[1, 0].hist2d(pop_cat, area_cat, bins=20)
axs[0, 1].scatter(x=pop_lu['population'], y=pop_lu_1821['population'])
axs[1, 1].hist2d(x=pop_lu['population'], y=pop_lu_1821['population'], bins=20)
plt.show()
# We can create the arrangement for multiple plots and compare the differences in patterns
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].scatter(x=pop_lu['population'], y=pop_lu_1821['population'])
axs[1].hist2d(x=pop_lu['population'], y=pop_lu_1821['population'], bins=20)
plt.show()
authors: ["[email protected]"] | author_id:
blob_id: 8c6977a6a88267049f29f0ab21620a01356f8d36 | directory_id: 39cb67781018e23428312610ded87c5d384bb690 | content_id: 23a441d49c82499b30ed56afe259a80e11ef8692
path: /swinger.py
repo_name: yi75798/Swinger | snapshot_id: afd8e528cc1bcce3a4db83ce54def54372619717 | revision_id: b158c4f358fbebe655627969231cf1f0276cf708 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-02-25T14:10:25.104740 | revision_date: 2019-10-25T07:24:12 | committer_date: 2019-10-25T07:24:12
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,314 | extension: py
# -*- coding: utf-8 -*-
import nltk, json, pickle
import itertools
from random import shuffle
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
import sklearn
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
def bag_of_words(words):
return dict([(word, True) for word in words])
def bigram(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
    bigram_finder = BigramCollocationFinder.from_words(words)  # turn the text into bigram collocations
    bigrams = bigram_finder.nbest(score_fn, n)  # pick the top 1000 bigrams by chi-squared score
    return bag_of_words(bigrams)
def bigram_words(words, score_fn=BigramAssocMeasures.chi_sq, n=1000):
    bigram_finder = BigramCollocationFinder.from_words(words)
    bigrams = bigram_finder.nbest(score_fn, n)
    return bag_of_words(words + bigrams)  # use all words plus the (informative) bigrams as features
def create_word_scores():
    posWords = json.load(open('p.json', 'r'))
    negWords = json.load(open('n.json', 'r'))
    posWords = list(itertools.chain(*posWords))  # flatten the nested lists into one list
    negWords = list(itertools.chain(*negWords))  # likewise
    word_fd = FreqDist()  # word frequencies over all texts
    cond_word_fd = ConditionalFreqDist()  # word frequencies within positive and negative texts
    for word in posWords:
        word_fd[word] += 1
        cond_word_fd['pos'][word] += 1
    for word in negWords:
        word_fd[word] += 1
        cond_word_fd['neg'][word] += 1
    pos_word_count = cond_word_fd['pos'].N()  # number of positive words
    neg_word_count = cond_word_fd['neg'].N()  # number of negative words
    total_word_count = pos_word_count + neg_word_count
    word_scores = {}
    for word, freq in word_fd.items():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)  # chi-squared score on the positive side; other statistics such as mutual information would also work
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)  # likewise for the negative side
        word_scores[word] = pos_score + neg_score  # a word's informativeness is the sum of both chi-squared scores
    return word_scores  # maps each word to its informativeness
def create_word_bigram_scores():
    posdata = json.load(open('p.json', 'r'))
    negdata = json.load(open('n.json', 'r'))
    posWords = list(itertools.chain(*posdata))
    negWords = list(itertools.chain(*negdata))
    # build a separate finder per class so each class's bigrams come from its own corpus
    bigram_finder = BigramCollocationFinder.from_words(posWords)
    posBigrams = bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
    bigram_finder = BigramCollocationFinder.from_words(negWords)
    negBigrams = bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
    pos = posWords + posBigrams  # words plus bigrams
    neg = negWords + negBigrams
word_fd = FreqDist()
cond_word_fd = ConditionalFreqDist()
for word in pos:
word_fd[word] += 1
cond_word_fd['pos'][word] += 1
for word in neg:
word_fd[word] += 1
cond_word_fd['neg'][word] += 1
pos_word_count = cond_word_fd['pos'].N()
neg_word_count = cond_word_fd['neg'].N()
total_word_count = pos_word_count + neg_word_count
word_scores = {}
for word, freq in word_fd.items():
pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)
neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
word_scores[word] = pos_score + neg_score
return word_scores
def find_best_words(word_scores, number):
    best_vals = sorted(word_scores.items(), key=lambda x: -x[1])[:number]  # sort words by informativeness, descending; number is the feature dimensionality and can be tuned
    best_words = set([w for w, s in best_vals])
    return best_words
def score(classifier, name):
    classifier = SklearnClassifier(classifier)  # use scikit-learn classifiers through the NLTK wrapper
    classifier.train(train)  # train the classifier
    pickle.dump(classifier, open(name + '.pickle', 'wb'))
    pred = classifier.classify_many(test)  # classify the test data and return the predicted labels
    return accuracy_score(tag_test, pred)  # compare predictions against the gold labels to get accuracy
def best_word_features(words):
    return dict([(word, True) for word in words if word in best_words])
def pos_features(feature_extraction_method):
    posFeatures = []
    for i in pos:
        posWords = [feature_extraction_method(i), 'pos']  # label positive texts "pos"
        posFeatures.append(posWords)
    return posFeatures
def neg_features(feature_extraction_method):
    negFeatures = []
    for j in neg:
        negWords = [feature_extraction_method(j), 'neg']  # label negative texts "neg"
        negFeatures.append(negWords)
    return negFeatures
pos_review = json.load(open('p.json','r'))
neg_review = json.load(open('n.json','r'))
word_scores_1 = create_word_scores()
word_scores_2 = create_word_bigram_scores()
shuffle(pos_review)  # shuffle the positive texts
pos = pos_review
neg = neg_review
posFeatures = pos_features(bag_of_words)  # use all words as features
negFeatures = neg_features(bag_of_words)
train = posFeatures + negFeatures
# train = posFeatures[174:] + negFeatures[174:]
# devtest = posFeatures[124:174] + negFeatures[124:174]
test = posFeatures + negFeatures
test, tag_test = zip(*test)
# dev, tag_dev = zip(*devtest)  # split the dev-test set (already featurized and labeled) into data and labels
print('BernoulliNB`s accuracy is %f' %score(BernoulliNB(), 'BernoulliNB'))
print('MultinomiaNB`s accuracy is %f' %score(MultinomialNB(), 'MultinomialNB'))
print('LogisticRegression`s accuracy is %f' %score(LogisticRegression(), 'LogisticRegression'))
print('SVC`s accuracy is %f' %score(SVC(), 'SVC'))
print('LinearSVC`s accuracy is %f' %score(LinearSVC(), 'LinearSVC'))
print('NuSVC`s accuracy is %f' %score(NuSVC(), 'NuSVC'))
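# Added note (not in the original file): train and test above contain the same
# feature sets, so the printed accuracies measure fit on the training data
# rather than generalisation; the commented-out dev/test split shows the
# intended evaluation setup.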
authors: ["[email protected]"] | author_id:
blob_id: 90f01e806124c7ca87d8fa588c9283d06b53bfcb | directory_id: 163bbb4e0920dedd5941e3edfb2d8706ba75627d | content_id: d1fc22fa3226c63bdda6a1c2a234b5d3b02955ce
path: /Code/CodeRecords/2485/60623/234199.py
repo_name: AdamZhouSE/pythonHomework | snapshot_id: a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | revision_id: ffc5606817a666aa6241cfab27364326f5c066ff | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-11-24T08:05:22.122011 | revision_date: 2020-07-28T16:21:24 | committer_date: 2020-07-28T16:21:24
github_id: 259,576,640 | star_events_count: 2 | fork_events_count: 1 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 935 | extension: py
# Given an array of words, print the sizes of all groups of words made of the
# same characters (anagrams), in sorted (increasing) order of count.
# For example, for {"cat", "dog", "tac", "god", "act"} the anagram groups are
# (dog, god) and (cat, tac, act), so the output is "2 3".
size = int(input())
a = 0
while a < size:
    b = input()  # unused
    strList = input().split()
    i = 0
    while i < len(strList):
        l = list(strList[i])
        # list.sort() sorts in place; sorted() returns a sorted copy and
        # leaves the original untouched
        l.sort()
        s = "".join(l)
        strList[i] = s
        i = i + 1
    strList.sort()
    j = 0
    k = 1
    myList = []
    while j < len(strList):
        if j == len(strList) - 1:
            break
        if strList[j] == strList[j + 1]:
            k = k + 1
        else:
            myList.append(k)
            k = 1
        j = j + 1
    myList.append(k)
    myList.sort()
    m = 0
    while m < len(myList):
        if m != len(myList) - 1:
            print(str(myList[m]) + " ", end='')  # str() needed: myList holds ints
        else:
            print(myList[m])
        m = m + 1
    a = a + 1
authors: ["[email protected]"] | author_id:
blob_id: 69464c3b9cc44fc360e52b78b6397ca102998b16 | directory_id: ac5e52a3fc52dde58d208746cddabef2e378119e | content_id: 96e86f88d4ec9f87aad6a16a3dbd922eb335bcd5
path: /exps-sblp/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=43/sched.py
repo_name: ricardobtxr/experiment-scripts | snapshot_id: 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | revision_id: 7bcebff7ac2f2822423f211f1162cd017a18babb | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-04-09T02:37:41.466794 | revision_date: 2021-04-25T03:27:16 | committer_date: 2021-04-25T03:27:16
github_id: 358,926,457 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 529 | extension: py
-S 1 -X RUN -Q 0 -L 2 132 400
-S 0 -X RUN -Q 0 -L 2 104 300
-S 0 -X RUN -Q 0 -L 2 93 300
-S 0 -X RUN -Q 0 -L 2 56 300
-S 2 -X RUN -Q 1 -L 1 50 400
-S 2 -X RUN -Q 1 -L 1 46 300
-S 2 -X RUN -Q 1 -L 1 45 150
-S 2 -X RUN -Q 1 -L 1 42 125
-S 3 -X RUN -Q 2 -L 1 35 175
-S 3 -X RUN -Q 2 -L 1 34 125
-S 3 -X RUN -Q 2 -L 1 32 200
-S 3 -X RUN -Q 2 -L 1 28 125
-S 4 -X RUN -Q 3 -L 1 24 125
-S 4 -X RUN -Q 3 -L 1 21 125
-S 4 -X RUN -Q 3 -L 1 20 175
-S 4 -X RUN -Q 3 -L 1 8 100
authors: ["[email protected]"] | author_id:
blob_id: ea531889bf01ff9b71405fc6ad2e84ec1a764813 | directory_id: ba8f5d23d9878a25b30a32cf16e8833f93b25853 | content_id: 46c4c7701214a78895301bc8c7a7931a9b878581
path: /source_py2/python_toolbox/nifty_collections/emitting_weak_key_default_dict.py
repo_name: nastako/python_toolbox | snapshot_id: af520cbec1468c8e0aae0b3b1c467ca5623af45b | revision_id: 9713fd728608818630ee409ac6a6fdaf863af31b | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null
visit_date: 2020-12-11T09:07:19.681161 | revision_date: 2015-01-16T21:26:37 | committer_date: 2015-01-16T21:26:37
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,427 | extension: py
# Copyright 2009-2015 Ram Rachum.
# This program is distributed under the MIT license.
'''
Defines the `EmittingWeakKeyDefaultDict` class.
See its documentation for more details.
'''
from .weak_key_default_dict import WeakKeyDefaultDict
class EmittingWeakKeyDefaultDict(WeakKeyDefaultDict):
'''
    A dict that references its keys weakly, has a default factory, and emits.
This is a combination of `weakref.WeakKeyDictionary` and
`collections.defaultdict`, which emits every time it's modified.
The keys are referenced weakly, so if there are no more references to the
key, it gets removed from this dict.
If a "default factory" is supplied, when a key is attempted that doesn't
exist the default factory will be called to create its new value.
Every time that a change is made, like a key is added or removed or gets
its value changed, we do `.emitter.emit()`.
'''
def __init__(self, emitter, *args, **kwargs):
super(EmittingWeakKeyDefaultDict, self).__init__(*args, **kwargs)
self.emitter = emitter
def set_emitter(self, emitter):
'''Set the emitter that will be emitted every time a change is made.'''
self.emitter = emitter
def __setitem__(self, key, value):
result = \
super(EmittingWeakKeyDefaultDict, self).__setitem__(key, value)
if self.emitter:
self.emitter.emit()
return result
def __delitem__(self, key):
result = super(EmittingWeakKeyDefaultDict, self).__delitem__(key)
if self.emitter:
self.emitter.emit()
return result
def pop(self, key, *args):
""" D.pop(k[,d]) -> v, remove specified key and return the
corresponding value. If key is not found, d is returned if given,
otherwise KeyError is raised """
result = super(EmittingWeakKeyDefaultDict, self).pop(key, *args)
if self.emitter:
self.emitter.emit()
return result
def popitem(self):
""" D.popitem() -> (k, v), remove and return some (key, value)
pair as a 2-tuple; but raise KeyError if D is empty """
result = super(EmittingWeakKeyDefaultDict, self).popitem()
if self.emitter:
self.emitter.emit()
return result
def clear(self):
""" D.clear() -> None. Remove all items from D. """
result = super(EmittingWeakKeyDefaultDict, self).clear()
if self.emitter:
self.emitter.emit()
return result
def __repr__(self):
return '%s(%s, %s, %s)' % (
type(self).__name__,
self.emitter,
self.default_factory,
dict(self)
)
def __reduce__(self):
"""
__reduce__ must return a 5-tuple as follows:
- factory function
- tuple of args for the factory function
- additional state (here None)
- sequence iterator (here None)
        - dictionary iterator (yielding successive (key, value) pairs)
This API is used by pickle.py and copy.py.
"""
        if self.default_factory:
            parameters = (self.emitter, self.default_factory)
        else:  # not self.default_factory
            parameters = (self.emitter,)  # one-element tuple; the trailing comma matters
return (type(self), parameters, None, None, self.iteritems())
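    # Illustrative usage sketch (added commentary, not in the original file).
    # Any object exposing an emit() method can serve as the emitter; the
    # PrintEmitter below is a hypothetical stand-in:
    #
    #     class PrintEmitter(object):
    #         def emit(self):
    #             print 'dict changed'
    #
    #     d = EmittingWeakKeyDefaultDict(PrintEmitter(), list)
    #     d[key]  # a missing weakly-referenceable key triggers the factory,
    #             # and every mutation triggers emitter.emit()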
authors: ["[email protected]"] | author_id:
blob_id: 150c5fd8c3bd60bd78428844d15da7c125771b39 | directory_id: 3937f340aafed20f7b3bb9e36a62d0dfe6ca985d | content_id: 02e71ff1a3beb904dbd38af4006ac6e1f687b0b3
path: /CrispyProject/WebApp/forms.py
repo_name: sunnywralph/Django7AM | snapshot_id: 8f6f7e52847882d35ee7f7c4c263c5e90c79b6da | revision_id: ffef6c6e5ab5231416ca743ebae299622eab9791 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2022-04-25T15:04:45.733504 | revision_date: 2020-05-05T14:53:57 | committer_date: 2020-05-05T14:53:57
github_id: 261,496,619 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 192 | extension: py
from django import forms
from WebApp.models import Person
# Fields with Validation
class EmpForm(forms.ModelForm):
class Meta:
model = Person
fields = '__all__'
authors: ["[email protected]"] | author_id:
blob_id: 4335e43e879c0ef68bff953743aa51e096e7bc6b | directory_id: abfa70e1da5b4ba8e465cdc046fa36e81386744a | content_id: cb324f72bde8cb4ca167d5b6c13a703a16f9b8bc
path: /base_ml/10.5.Iris_RandomForest_Enum.py
repo_name: superman666ai/crazy_project | snapshot_id: f850819ff2287e345b67500111733bafa5629d1f | revision_id: 99dcba0fe246ecaf3f556f747d44731a04231921 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2020-05-15T09:32:56.523875 | revision_date: 2019-05-16T00:57:23 | committer_date: 2019-05-16T00:57:23
github_id: 182,179,544 | star_events_count: 0 | fork_events_count: 0 | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,796 | extension: py
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.ensemble import RandomForestClassifier
def iris_type(s):
it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
return it[s]
# 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'
if __name__ == "__main__":
    mpl.rcParams['font.sans-serif'] = [u'SimHei']  # SimHei font; FangSong/KaiTi also work
    mpl.rcParams['axes.unicode_minus'] = False
    path = '../data/8.iris.data'  # data file path
data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type}, encoding="utf-8")
x_prime, y = np.split(data, (4,), axis=1)
feature_pairs = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
plt.figure(figsize=(10, 9), facecolor='#FFFFFF')
    for i, pair in enumerate(feature_pairs):
        # prepare the data
        x = x_prime[:, pair]
        # random forest
        clf = RandomForestClassifier(n_estimators=200, criterion='entropy', max_depth=4)
        rf_clf = clf.fit(x, y.ravel())
        # plotting
        N, M = 500, 500  # number of sample points along each axis
        x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
        x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
        t1 = np.linspace(x1_min, x1_max, N)
        t2 = np.linspace(x2_min, x2_max, M)
        x1, x2 = np.meshgrid(t1, t2)  # build the grid of sample points
        x_test = np.stack((x1.flat, x2.flat), axis=1)  # test points
        # predictions on the training set
        y_hat = rf_clf.predict(x)
        y = y.reshape(-1)
        c = np.count_nonzero(y_hat == y)  # count the correct predictions
        # print 'Features: ', iris_feature[pair[0]], ' + ', iris_feature[pair[1]],
        # print '\tCorrect predictions:', c,
        # print '\tAccuracy: %.2f%%' % (100 * float(c) / float(len(y)))
        # display
        cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
        cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
        y_hat = rf_clf.predict(x_test)  # predicted values
        y_hat = y_hat.reshape(x1.shape)  # reshape to match the input grid
        plt.subplot(2, 3, i+1)
        plt.pcolormesh(x1, x2, y_hat, cmap=cm_light)  # predictions
        plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=cm_dark)  # samples
plt.xlabel(iris_feature[pair[0]], fontsize=14)
plt.ylabel(iris_feature[pair[1]], fontsize=14)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid()
    plt.tight_layout(pad=2.5)
    plt.subplots_adjust(top=0.92)
    plt.suptitle(u'随机森林对鸢尾花数据的两特征组合的分类结果', fontsize=18)  # "Random-forest classification of the iris data for each two-feature combination"
plt.show()
authors: ["[email protected]"] | author_id:
blob_id: 0e5cd2b71cfca2920b63884ab1b03dedd57aecaa | directory_id: 11763b1150a3a05db89c13dcd6152f8fcca87eaa | content_id: 19c3c2cc331daaa31305c3217bbc670ba8c7c944
path: /designs/linear/homomorphic/latticebased/qtpiepublickey3.py
repo_name: acad2/crypto | snapshot_id: 343c32fa25aaec73e169290579fc3d02c4b226f6 | revision_id: cb283df4101fcd618a0478a0018273f00d0734ae | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2021-08-19T06:36:26.068033 | revision_date: 2017-11-25T00:41:03 | committer_date: 2017-11-25T00:41:03
github_id: 113,048,326 | star_events_count: 2 | fork_events_count: 0 | gha_event_created_at: 2017-12-04T13:49:02 | gha_created_at: 2017-12-04T13:49:01 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,559 | extension: py
raise NotImplementedError("q needs to be randomized")
from math import log
from crypto.utilities import random_integer, modular_inverse, big_prime, modular_subtraction
N = 90539821999601667010016498433538092350601848065509335050382778168697877622963864208930434463149476126948597274673237394102007067278620641565896411613073030816577188842779580374266789048335983054644275218968175557708746520394332802669663905398219996016670100164984335380923506018480655093350503827781686978776229638642089304344631494761269485972746732373941020070672786206415658964116130730308165771888427795803742667890483359830546442752189681755577087465203943328026696639053982199960166701001649843353809235060184806550933505038277816869787762296386420893043446314947612694859727467323739410200706727862064156589641161307303081657718884277958037426678904833598305464427521896817555770874652039433280266966390539821999601667010016498433538092350601848065509335050382778168697877622963864208930434463149476126948597274673237394102007067278620641565896411613073030816577188842779580374266789048335983054644275218968175557708746520394332802669663
#1 + 33 + 32 = 66
# prq + e
# q + pirie
#65 + 97 + 32 =
#pq1 + e1 * (pq2 + e2)
#pq1(pq2 + e2) + e1(pq2 + e2)
#ppq1q2 + pq1e2 + pq2e1 + e1e2
# prrq1q2 + rq1e2 + rq2e1 + e1e2
#pq1 + e1 + pq2 + e2
#p(q1 + q2) + e1 + e2
def generate_pi(pi_size=65, n=N):
pi = random_integer(pi_size)
assert log(n, 2) - log(pi, 2) > 256, log(n, 2) - log(pi, 2)
return pi
def generate_pq(private_key, q_size=32, n=N):
p = modular_inverse(private_key, n)
q = random_integer(q_size)
pq = (p * q) % n
assert log(n, 2) - log(pq, 2) < 256
assert log(n, 2) - log(modular_inverse(pq, n), 2) < 256, (log(n, 2), log(n - modular_inverse(pq, n), 2))
return pq, q
def generate_keypair():
pi = generate_pi()
pq, q = generate_pq(pi)
public_key = pq
private_key = (pi, q)
return public_key, private_key
def encrypt(q, public_key, e_size=32, n=N):
assert n == N
e = random_integer(e_size)
return ((public_key * q) + e) % n
def decrypt(ciphertext, private_key, n=N, operation_count=1):
pi, r = private_key
pie_q = (pi * ciphertext) % n
q = pie_q % pi
return q / (r ** operation_count)
def test_encrypt_decrypt():
from unittesting import test_asymmetric_encrypt_decrypt
test_asymmetric_encrypt_decrypt("qtpiepublickey3", generate_keypair, encrypt, decrypt, iterations=10000)
if __name__ == "__main__":
test_encrypt_decrypt()
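
# Hedged sketch (added; not in the original file): the scratch notes above say
# (pq1 + e1) + (pq2 + e2) = p(q1 + q2) + e1 + e2, i.e. the scheme is additively
# homomorphic. Once the guard at the top is removed, one would expect:
#
#     public_key, private_key = generate_keypair()
#     c1, c2 = encrypt(3, public_key), encrypt(4, public_key)
#     assert decrypt((c1 + c2) % N, private_key) == 3 + 4  # noise must stay below pi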
|
[
"[email protected]"
] | |
c731e200e23ca2544520bae18655637937d939d8
|
325fde42058b2b82f8a4020048ff910cfdf737d7
|
/src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_mgmt_storage/v2021_01_01/aio/operations/_usages_operations.py
|
4fb31d3652c09b18a8730846a6ade439f9280d2f
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ebencarek/azure-cli-extensions
|
46b0d18fe536fe5884b00d7ffa30f54c7d6887d1
|
42491b284e38f8853712a5af01836f83b04a1aa8
|
refs/heads/master
| 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 |
MIT
| 2020-10-09T18:21:52 | 2020-05-06T01:25:58 |
Python
|
UTF-8
|
Python
| false | false | 5,262 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_location(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.UsageListResult"]:
"""Gets the current usage count and the limit for the resources of the location under the
subscription.
:param location: The location of the Azure Storage resource.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_01_01.models.UsageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_location.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'} # type: ignore
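
# Hedged usage sketch (client wiring assumed, not defined in this module):
#
#     client = StorageManagementClient(credential, subscription_id)
#     async for usage_list in client.usages.list_by_location("eastus").by_page():
#         for usage in usage_list:
#             print(usage.name.value, usage.current_value, usage.limit)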
|
[
"[email protected]"
] | |
eeaf201358b733d340ba20b8541a19ccc863938e
|
8e7279bc3de368e85129b8e59f12cbdbd8621da1
|
/myenv/bin/gifmaker.py
|
b0dd02f4131ba154bff4296b4730f87a960d0ce0
|
[] |
no_license
|
banziha104/dstagram2
|
34f5dca6deb9c19c03fa523d6e4b1c97f60e14d4
|
12dbecb4a727fe67faffc1b2208bd4b5152a8672
|
refs/heads/master
| 2021-07-09T23:51:17.262219 | 2017-10-10T11:18:45 | 2017-10-10T11:18:45 | 105,170,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 684 |
py
|
#!/Users/iyeongjun/Downloads/dstagram_2nd/myenv/bin/python3.6
#
# The Python Imaging Library
# $Id$
#
# convert sequence format to GIF animation
#
# history:
# 97-01-03 fl created
#
# Copyright (c) Secret Labs AB 1997. All rights reserved.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("GIFMAKER -- create GIF animations")
print("Usage: gifmaker infile outfile")
sys.exit(1)
im = Image.open(sys.argv[1])
im.save(sys.argv[2], save_all=True)
|
[
"[email protected]"
] | |
d9c94007b05b243ba95ace0dae93928d09561f45
|
bf0800eee5a43f600ab3ebd99d3486846d9f4834
|
/blog/views.py
|
aa024c57c30b7e613d9d778655bff923cef2a3e5
|
[] |
no_license
|
wonsik1012/my-first-blog
|
6de17de4bd13a9d36650ad2070b07190461dbd3e
|
e0124e41b8dabf20e23af5d969e55a2238841dba
|
refs/heads/master
| 2020-07-03T10:07:38.321239 | 2019-08-13T11:48:21 | 2019-08-13T11:48:21 | 201,874,737 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,924 |
py
|
from django.shortcuts import render
from django.utils import timezone
from .models import Post
from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse
from django.template.loader import get_template
from django.template.context import RequestContext
import folium
from IPython.display import HTML, display
import numpy as np
import osmnx as ox
import networkx as nx
from sklearn.neighbors import KDTree
import folium
import folium.plugins
import pandas as pd
from folium import plugins
import requests
import googlemaps
import numbers
import math
from geopy.geocoders import Nominatim
def show_map(request):
    G = ox.graph_from_place('종로구')  # Jongno-gu, Seoul
a = ox.elevation.add_node_elevations(G, 'AIzaSyBQYn4uBzdjr1ULXYqfn_z7lUWoIXYQB1Q', max_locations_per_batch=350, pause_duration=0.02)
b =ox.elevation.add_edge_grades(G, add_absolute=True)
nodes,edge = ox.graph_to_gdfs(b)
edge.head()
gmaps_key = "AIzaSyBQYn4uBzdjr1ULXYqfn_z7lUWoIXYQB1Q"
gmaps = googlemaps.Client(key=gmaps_key)
geolocator = Nominatim()
class GeoUtil:
"""
Geographical Utils
"""
@staticmethod
def degree2radius(degree):
return degree * (math.pi/180)
@staticmethod
def get_harversion_distance(x1, y1, x2, y2, round_decimal_digits=5):
if x1 is None or y1 is None or x2 is None or y2 is None:
return None
assert isinstance(x1, numbers.Number) and -180 <= x1 and x1 <= 180
assert isinstance(y1, numbers.Number) and -90 <= y1 and y1 <= 90
assert isinstance(x2, numbers.Number) and -180 <= x2 and x2 <= 180
assert isinstance(y2, numbers.Number) and -90 <= y2 and y2 <= 90
            R = 6371  # Earth's radius (km)
dLon = GeoUtil.degree2radius(x2-x1)
dLat = GeoUtil.degree2radius(y2-y1)
a = math.sin(dLat/2) * math.sin(dLat/2) \
+ (math.cos(GeoUtil.degree2radius(y1)) \
*math.cos(GeoUtil.degree2radius(y2)) \
*math.sin(dLon/2) * math.sin(dLon/2))
b = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
return round(R * b, round_decimal_digits)
def seeshortestway(x1,x2):
#loc1 = ox.geocode(x1)
#loc2 = ox.geocode(x2)
place1=gmaps.geocode(x1)
lat1=place1[0]['geometry']['location']['lat']
lng1=place1[0]['geometry']['location']['lng']
place2=gmaps.geocode(x2)
lat2=place2[0]['geometry']['location']['lat']
lng2=place2[0]['geometry']['location']['lng']
loc1=(lat1,lng1)
loc2=(lat2,lng2)
        # A KD-tree makes snapping coordinates to the nearest graph node easy and efficient.
tree = KDTree(nodes[['y', 'x']], metric='euclidean')
loc1_idx = tree.query([loc1], k=1, return_distance=False)[0]
loc2_idx = tree.query([loc2], k=1, return_distance=False)[0]
closest_node_to_loc1 = nodes.iloc[loc1_idx].index.values[0]
closest_node_to_loc2 = nodes.iloc[loc2_idx].index.values[0]
route = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='length')
onlygodoroute = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='grade_abs')
impedanceroute = nx.shortest_path(G, closest_node_to_loc1,closest_node_to_loc2, weight='impedance')
#distance=nx.shortest_path_length(G, closest_node_to_loc1,closest_node_to_loc2)
graderoute = []
impedance = []
for i in range(len(onlygodoroute)):
lng = G.node[onlygodoroute[i]]['x']
lat = G.node[onlygodoroute[i]]['y']
b = [lat,lng]
graderoute.append(b)
for i in range(len(impedanceroute)):
lng = G.node[impedanceroute[i]]['x']
lat = G.node[impedanceroute[i]]['y']
b = [lat,lng]
impedance.append(b)
m = ox.plot_route_folium(G, route, route_color='navy',tiles='stamen toner')
antpath = plugins.AntPath(locations=graderoute,color='purple')
antpath.add_to(m)
antpath = plugins.AntPath(locations=impedance,color='red')
antpath.add_to(m)
#folium.PolyLine(graderoute, color="purple", weight=4).add_to(m)
#folium.PolyLine(impedance, color="red", weight=4).add_to(m)
kw = {
'prefix': 'fa',
'color': 'green',
'icon': 'arrow-up'
}
ka = {
'prefix': 'fa',
'color': 'blue',
'icon': 'arrow-up'
}
icon1 = folium.Icon(angle=45, **kw)
        folium.Marker(location=loc1, icon=icon1, popup=x1, tooltip='Departure').add_to(m)
icon2 = folium.Icon(angle=180, **ka)
        folium.Marker(location=loc2, icon=icon2, popup=x2, tooltip='Arrival').add_to(m)
        #folium.Marker(location=loc1,
# icon=folium.Icon(color='red'), popup=x1, tooltip='출발').add_to(m)
#folium.Marker(location=loc2,
#icon=folium.Icon(color='blue'),popup=x2, tooltip='도착').add_to(m)
        dobo = 4  # walking speed in km/h ("dobo" = on foot)
add = []
for i in range(len(route)-1):
lng1 = G.node[route[i]]['x']
lat1 = G.node[route[i]]['y']
lng2 = G.node[route[i+1]]['x']
lat2 = G.node[route[i+1]]['y']
result =GeoUtil.get_harversion_distance(lng1,lat1,lng2,lat2)
add.append(result)
noroundkm = sum(add)
km = round(noroundkm,1)
noroundminute = (km/dobo)*60
minute = round(noroundminute,1)
        print('The distance is', km, 'km and it takes', minute, 'minutes.')
return m
m=seeshortestway('안국역 3호선', '북촌생활사박물관')
a = m.save("blog/templates/blog/map.html")
context = {'my_map': m}
return render(request, 'blog/map.html', context)
|
[
"[email protected]"
] | |
64f4aecdc4ba0856009744c04d0a8cef73e58ae7
|
77db6591c5884204d6016bfa89b33691bac38813
|
/load.py
|
b2a9c8a3983643329620f4d7f7cd949b5ccd27f0
|
[] |
no_license
|
jbukoski/iltf-signal-webmap-suite
|
4fc0aafa977e911a1071872f7adbaf2e7d0da37c
|
b8374e9cfcc80501a8f632721a7cb9b76e668f6b
|
refs/heads/master
| 2021-03-27T11:20:37.174667 | 2020-12-31T18:03:20 | 2020-12-31T18:03:20 | 79,853,039 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 370 |
py
|
import os
from django.contrib.gis.utils import LayerMapping
from . import models
# Template for a GeoDjango LayerMapping loader: the original file used "*" as a
# literal placeholder. Substitute a real layer name for `layer` below, and point
# `models.Layer` / `layer_mapping` at the actual model and field-mapping dict
# (normally generated with ogrinspect).
layer_shp = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', '', 'layer.shp'))

layer_mapping = {}  # placeholder: shapefile-field -> model-field mapping

def run(verbose=True):
    layer_lm = LayerMapping(
        models.Layer, layer_shp, layer_mapping,
        transform=False, encoding='iso-8859-1'
    )
    layer_lm.save(strict=True, verbose=verbose)
|
[
"[email protected]"
] | |
29e16f0faaa4866bc0815c2235ece255f754032e
|
d5eb2fe5d49b581562ae2bc660d08ca80a03d331
|
/PythonSandbox/src/misc/num_digits_in_integer.py
|
6757434647672cd9e95d213417d05eed2cbab5ac
|
[] |
no_license
|
mcxu/code-sandbox
|
fd5aa2e593057901d281a0e74db8957777b06cf3
|
a785231582bda8578f79982e2dcddd2f2ab559b4
|
refs/heads/master
| 2023-07-10T02:07:24.180947 | 2023-07-08T03:31:48 | 2023-07-08T03:31:48 | 130,493,607 | 4 | 2 | null | 2023-01-15T22:53:29 | 2018-04-21T16:49:40 |
Python
|
UTF-8
|
Python
| false | false | 130 |
py
|
'''
Given an integer n, return a map showing the counts of each single digit in n.
Condition: Do not convert n into string.
'''
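
# A minimal sketch (added here; not part of the original file): count digits
# arithmetically with divmod, honoring the "no string conversion" constraint.
def digit_counts(n):
    counts = {}
    n = abs(n)  # the sign carries no digits
    if n == 0:
        return {0: 1}
    while n > 0:
        n, d = divmod(n, 10)
        counts[d] = counts.get(d, 0) + 1
    return counts


if __name__ == "__main__":
    print(digit_counts(112233445))  # {5: 1, 4: 2, 3: 2, 2: 2, 1: 2}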
|
[
"[email protected]"
] | |
5ea5fe910e1ef86b506005a39e879e50f77d83f4
|
d532b85841b459c61d88d380e88dd08d29836d43
|
/solutions/473_matchsticks_to_square.py
|
99245aad8635cdcb9f58acde68ea9d0399c61f3b
|
[
"MIT"
] |
permissive
|
YiqunPeng/leetcode_pro
|
ad942468df5506de9dc48a4019933f658e2a3121
|
4a508a982b125a3a90ea893ae70863df7c99cc70
|
refs/heads/master
| 2022-05-15T09:32:02.699180 | 2022-05-14T16:32:17 | 2022-05-14T16:32:17 | 182,453,966 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 614 |
py
|
from typing import List


class Solution:
def makesquare(self, matchsticks: List[int]) -> bool:
l = sum(matchsticks)
if l % 4 != 0:
return False
size = l // 4
matchsticks.sort()
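        # Sorting ascending and consuming from the largest stick (pos = len-1)
        # lets the DFS fail fast: the big sticks have the fewest valid placements.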
return self._dfs(matchsticks, len(matchsticks)-1, [size] * 4)
def _dfs(self, ms, pos, sizes):
if pos == -1:
return sum(sizes) == 0
for i in range(4):
if sizes[i] < ms[pos]:
continue
sizes[i] -= ms[pos]
if self._dfs(ms, pos - 1, sizes):
return True
sizes[i] += ms[pos]
return False
|
[
"[email protected]"
] | |
85f970aac1289aa71773cf2f9f5fee61ae7a289f
|
a939e018333a9ecd26ddc618f99835b7eb381686
|
/mapred_parser/user_merger/.svn/text-base/reducer.py.svn-base
|
c71b89a519663ca1e57f7a5f17e75be85bb0ab96
|
[] |
no_license
|
cash2one/crawl_youtube
|
bff5ba254001c2f31f770e55a4aca39bc54e45ee
|
0dc40186a1d89da2b00f29d4f4edfdc5470eb4fc
|
refs/heads/master
| 2021-01-16T22:30:17.800282 | 2016-02-18T11:50:09 | 2016-02-18T11:50:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,709 |
#!/usr/bin/python
# coding=utf-8
import os
import sys
import time
import base64
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../')
from le_crawler.proto.video.ttypes import OriginalUser
from le_crawler.common.utils import str2mediavideo, thrift2str
user_merge_field = set(['user_name', 'url', 'portrait_url', 'play_num', 'fans_num'])
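# Streaming-reducer contract (inferred from main() below): stdin rows arrive
# grouped by user_url as "user_url\turl\tout_type\tbase64(video)"; rows sharing
# a user_url are buffered in MergeItem, their OriginalUser fields merged with
# the most recently updated record winning, then re-emitted as 'unique'/'video'.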
class MergeItem:
def __init__(self):
self.reset('')
def reset(self, user_url=None):
self._data = []
self._user_url = user_url
self._url = None # used only in which length of self._data is 1
self._user = None
def get_user_url(self):
return self._user_url
def add_item(self, user_url, out_type, url, data_base64):
is_out_video = out_type == 'video'
self._data.append((data_base64, is_out_video))
self._url = url
def _merge_user(self, datas):
new_user = OriginalUser()
for k, v in new_user.__dict__.iteritems():
if k not in user_merge_field or v:
continue
for data in datas:
old_v = getattr(data[0].user, k)
if old_v:
setattr(new_user, k, old_v)
if k != 'url':
for item in user_merge_field:
old_v = getattr(data[0].user, item)
if not getattr(new_user, item) and old_v:
setattr(new_user, item, old_v)
break
new_user.update_time = int(time.time())
self._user = new_user
def _print_video(self, datas):
for data in datas:
data[0].user = self._user
video_str = thrift2str(data[0])
if not video_str:
sys.stderr.write('ERROR: failed in thrift2str. %s\n' % data[0].url)
continue
video_base64 = base64.b64encode(video_str)
if not video_base64:
sys.stderr.write('ERROR: failed in base64 encode. %s\n' % data[0].url)
continue
print 'unique' + '\t' + data[0].url + '\t' + str(self._user_url) + '\t' + video_base64
if data[1]:
print 'video' + '\t' + data[0].url + '\t' + str(self._user_url) + '\t' + video_base64
def print_item(self):
if not self._data:
return
if len(self._data) == 1:
print 'unique' + '\t' + self._url + '\t' + str(self._user_url) + '\t' + self._data[0][0]
if self._data[0][1]:
print 'video' + '\t' + self._url + '\t' + str(self._user_url) + '\t' + self._data[0][0]
return
for idx, data_group in enumerate(self._data):
try:
data = str2mediavideo(base64.b64decode(data_group[0]))
except:
sys.stderr.write('ERROR: failed in base64 decode. %s\n' % self._user_url)
self._data[idx] = (data, data_group[1])
self._data = [item for item in self._data if item[0]]
self._data.sort(cmp=lambda x, y: (y[0].user.update_time or 0) - (x[0].user.update_time or 0))
self._merge_user(self._data)
self._print_video(self._data)
def main():
merge_item = MergeItem()
while 1:
line = sys.stdin.readline()
if not line:
break
line_data = line.strip().split('\t', 3)
if len(line_data) != 4:
sys.stderr.write(str(len(line_data)) + ' ' + str(line_data) + '\n')
continue
user_url, url, out_type, data_base64 = line_data
if user_url == 'None':
print 'unique' + '\t' + url + '\t' + user_url + '\t' + data_base64
if out_type == 'video':
print 'video' + '\t' + url + '\t' + user_url + '\t' + data_base64
continue
if user_url == merge_item.get_user_url():
merge_item.add_item(user_url, out_type, url, data_base64)
else:
merge_item.print_item()
merge_item.reset(user_url)
merge_item.add_item(user_url, out_type, url, data_base64)
merge_item.print_item()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | ||
3f2cee0071989d5dddcf5e06d71d0c53ccf74a79
|
19ddab74600f71700a6b693281d0180d5271f295
|
/leetcode/119_杨辉三角2.py
|
5ca9f3c4a447aa5bf40bb8293558abdaa26cfa73
|
[] |
no_license
|
zhulf0804/Coding.Python
|
4d55a430da1a8077c81feba65c13ac654aaf094a
|
46ab03e23d15ebd5434ef4dd5ae99130000b00a5
|
refs/heads/master
| 2022-09-14T18:40:59.880941 | 2022-08-20T08:25:51 | 2022-08-20T08:25:51 | 213,113,482 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 452 |
py
|
from typing import List
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
if rowIndex == 0:
return [1]
if rowIndex == 1:
return [1, 1]
pre = [1, 1]
for i in range(2, rowIndex+1):
cur = [1] * (i + 1)
for j in range(1, i):
cur[j] = pre[j] + pre[j - 1]
pre = cur
return cur
rowIndex = 3
s = Solution()
print(s.getRow(rowIndex))
|
[
"[email protected]"
] | |
48c1e248c0c54b9df4c45b1abc82c3c75f4870a9
|
ae65873c3584cef7139066b224daad04410af6d2
|
/MySQL.py
|
a042fc4c7860d159e362459d73edbfefca29ad33
|
[] |
no_license
|
rajatkashyap/Python
|
2240c7472d07803c460c7a55d570e20694b694f9
|
f74c85c65b0e209a5f7ab25b653d42835222faaf
|
refs/heads/master
| 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 |
Python
|
UTF-8
|
Python
| false | false | 324 |
py
|
from mysql.connector import (connection)
cnx = connection.MySQLConnection(user='root', password='rajat',host='127.0.0.1',database='db')
cursor = cnx.cursor()
query=("select * from jobs")
cursor.execute(query)
for (city_id,city_name,country_id,x) in cursor:
print city_id,city_name,country_id
cnx.close()
|
[
"[email protected]"
] | |
9427dd2eb8619763631b53850f3d848d5866e9e7
|
eacff46eda2c6b509449979a16002b96d4645d8e
|
/Collections-a-installer/community-general-2.4.0/plugins/modules/monitoring/airbrake_deployment.py
|
3e7938bfba10ac8e1d2080f7ed8ae71ed9589628
|
[
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
permissive
|
d-amien-b/simple-getwordpress
|
5e6d4d15d5f87124ab591e46b63fec552998fdc3
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
refs/heads/master
| 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 |
MIT
| 2021-03-31T16:16:45 | 2021-03-26T07:30:00 |
HTML
|
UTF-8
|
Python
| false | false | 6,696 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: airbrake_deployment
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Patrick Humpal (@phumpal)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
- Parameter I(token) has been deprecated for community.general 0.2.0. Please remove entry.
options:
project_id:
description:
- Airbrake PROJECT_ID
required: false
type: str
version_added: '0.2.0'
project_key:
description:
- Airbrake PROJECT_KEY.
required: false
type: str
version_added: '0.2.0'
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
type: str
user:
description:
- The username of the person doing the deployment
required: false
type: str
repo:
description:
- URL of the project repository
required: false
type: str
revision:
description:
- A hash, number, tag, or other identifier showing what revision from version control was deployed
required: false
type: str
version:
description:
- A string identifying what version was deployed
required: false
type: str
version_added: '1.0.0'
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://api.airbrake.io/api/v4/projects/"
type: str
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
type: bool
token:
description:
- This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
required: false
type: str
requirements: []
'''
EXAMPLES = '''
- name: Notify airbrake about an app deployment
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: '4.2'
- name: Notify airbrake about an app deployment, using git hash as revision
community.general.airbrake_deployment:
project_id: '12345'
project_key: 'AAAAAA'
environment: staging
user: ansible
revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
version: '0.2.0'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=False, no_log=True, type='str'),
project_id=dict(required=False, no_log=True, type='str'),
project_key=dict(required=False, no_log=True, type='str'),
environment=dict(required=True, type='str'),
user=dict(required=False, type='str'),
repo=dict(required=False, type='str'),
revision=dict(required=False, type='str'),
version=dict(required=False, type='str'),
url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True,
required_together=[('project_id', 'project_key')],
mutually_exclusive=[('project_id', 'token')],
)
# Build list of params
params = {}
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
if module.params["token"]:
# v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
if module.params["environment"]:
params["deploy[rails_env]"] = module.params["environment"]
if module.params["user"]:
params["deploy[local_username]"] = module.params["user"]
if module.params["repo"]:
params["deploy[scm_repository]"] = module.params["repo"]
if module.params["revision"]:
params["deploy[scm_revision]"] = module.params["revision"]
# version not supported in v2 API; omit
module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
"it and use 'project_id' and 'project_key' instead",
version='3.0.0', collection_name='community.general') # was Ansible 2.14
params["api_key"] = module.params["token"]
# Allow sending to Airbrake compliant v2 APIs
if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
url = 'https://api.airbrake.io/deploys.txt'
else:
url = module.params["url"]
# Send the data to airbrake
data = urlencode(params)
response, info = fetch_url(module, url, data=data)
if module.params["project_id"] and module.params["project_key"]:
# v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
if module.params["environment"]:
params["environment"] = module.params["environment"]
if module.params["user"]:
params["username"] = module.params["user"]
if module.params["repo"]:
params["repository"] = module.params["repo"]
if module.params["revision"]:
params["revision"] = module.params["revision"]
if module.params["version"]:
params["version"] = module.params["version"]
# Build deploy url
url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
json_body = module.jsonify(params)
# Build header
headers = {'Content-Type': 'application/json'}
# Notify Airbrake of deploy
response, info = fetch_url(module, url, data=json_body,
headers=headers, method='POST')
if info['status'] == 200 or info['status'] == 201:
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
6b4427adecbd6d4a38872c33dcbca2e3d68aeb29
|
e8ae11e5017507da59e2e92d423b6a1994490de4
|
/env/lib/python2.7/site-packages/azure/batch/models/pool_delete_options.py
|
d959c796b779edb07a5117788f554dc19bb6cab6
|
[] |
no_license
|
teopeurt/ansible-ubuntu-server
|
613d00cea28bc6531acf4a39aeeb9cd0baa2a391
|
b5b6127d2ee9723c5088443efe2ffb8ae30cfea7
|
refs/heads/master
| 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 |
Makefile
|
UTF-8
|
Python
| false | false | 3,192 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolDeleteOptions(Model):
"""Additional parameters for the Pool_Delete operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id identifier in the response.
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. If not specified, this
header will be automatically populated with the current system clock
time.
:type ocp_date: datetime
:param if_match: An ETag is specified. Specify this header to perform the
operation only if the resource's ETag is an exact match as specified.
:type if_match: str
:param if_none_match: An ETag is specified. Specify this header to
perform the operation only if the resource's ETag does not match the
specified ETag.
:type if_none_match: str
:param if_modified_since: Specify this header to perform the operation
only if the resource has been modified since the specified date/time.
:type if_modified_since: datetime
:param if_unmodified_since: Specify this header to perform the operation
only if the resource has not been modified since the specified date/time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=None, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
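
# Hedged usage sketch (the Batch client call is assumed, not defined here):
#
#     options = PoolDeleteOptions(timeout=60, if_match='"0x8D4BCC2E4835770"')
#     client.pool.delete('mypool', pool_delete_options=options)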
|
[
"[email protected]"
] | |
202131d751e30e0b6464079f63e290c45a89c07a
|
6cdff1cccb229bd98c7b7fce0ad3df32e4f04557
|
/tests/conftest.py
|
4eb8621c176f7ad405450bd91027044cc1498eb9
|
[] |
no_license
|
MITLibraries/workflow
|
fb8cbdf809702318c8d7c64307da90c0acda28cc
|
63a17c3021e2bc0e0b13d22246ce3f13295349ca
|
refs/heads/main
| 2023-03-04T10:38:12.270942 | 2021-07-08T18:06:16 | 2021-07-08T18:06:16 | 211,862,997 | 2 | 1 | null | 2023-02-08T01:14:43 | 2019-09-30T13:12:20 |
Python
|
UTF-8
|
Python
| false | false | 3,108 |
py
|
from collections import namedtuple
import json
from unittest import mock
import boto3
from moto import mock_ecs, mock_ec2
from moto.ec2.utils import generate_instance_identity_document
import pytest
from manager.cluster import Cluster
@pytest.fixture(autouse=True)
def aws_credentials(monkeypatch):
monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'foo')
monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'correct horse battery staple')
monkeypatch.setenv('AWS_SESSION_TOKEN', 'baz')
monkeypatch.setenv('AWS_DEFAULT_REGION', 'us-east-1')
@pytest.fixture
def cluster():
"""Create the mock Airflow cluster.
moto doesn't support the Fargate launch type, so we have to pretend
like we're going to launch our containers in EC2. There's a little
hand waving to make this work. moto comes with some predefined images
that seem to work fine.
Also see the ``patch_cluster_config`` fixture below.
"""
C = namedtuple('C', ['name', 'scheduler', 'worker', 'web'])
cluster = C('airflow-test', 'airflow-test-scheduler',
'airflow-test-worker', 'airflow-test-web')
with mock_ecs(), mock_ec2():
ec2_client = boto3.client('ec2')
ec2 = boto3.resource('ec2')
ecs = boto3.client('ecs')
image = ec2_client.describe_images()['Images'][0]
instance = ec2.create_instances(ImageId=image['ImageId'], MinCount=1,
MaxCount=1)[0]
doc = json.dumps(generate_instance_identity_document(instance))
ecs.create_cluster(clusterName=cluster.name)
ecs.register_container_instance(cluster=cluster.name,
instanceIdentityDocument=doc)
for service in cluster[1:]:
ecs.register_task_definition(family=service,
containerDefinitions=[])
ecs.create_service(cluster=cluster.name,
serviceName=service,
desiredCount=1,
taskDefinition=f'{service}:1')
ecs.update_service(cluster=cluster.name,
service=cluster.worker,
desiredCount=3)
yield cluster
@pytest.fixture(autouse=True)
def patch_cluster_config():
"""Patch the private config method on Cluster.
moto does not add the networkConfiguration to the service description.
Rather than just patching the whole thing, this effectively provides a
runtime decorator on the ``Cluster.__get_config`` method to augment the
reponse.
"""
def wraps(f):
def wrapped(*args, **kwargs):
network_config = {
'awsvpcConfiguration': {
'subnets': ['awesome-subnet', 'dumb-subnet']
}
}
res = f(*args, **kwargs)
[r.update(networkConfiguration=network_config) for r in res]
return res
return wrapped
func = wraps(Cluster._Cluster__get_config)
with mock.patch.object(Cluster, '_Cluster__get_config', func):
yield
|
[
"[email protected]"
] | |
e16189f36956843b3dfa3909dccea36da75ad30e
|
5de4aed3d9a9230404150d4c3c553ea05ac4e088
|
/afm/logger.py
|
c872f1d55b593e4a85f55bd2fb43d16e0e878e5a
|
[] |
no_license
|
UfSoft/afm
|
db4df3189095aa916b3a3f770d5366bb3e0a9b74
|
2e85c65389a10f7bed032956b0c603bbb2af2dac
|
refs/heads/master
| 2021-01-19T13:25:08.121356 | 2009-10-29T15:24:49 | 2009-10-29T15:24:49 | 26,618,925 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,483 |
py
|
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2009 UfSoft.org - Pedro Algarvio <[email protected]>
#
# License: BSD - Please view the LICENSE file for additional information.
# ==============================================================================
import logging
from twisted.internet import defer
LoggingLoggerClass = logging.getLoggerClass()
class Logging(LoggingLoggerClass):
def __init__(self, logger_name='afm', level=logging.DEBUG):
LoggingLoggerClass.__init__(self, logger_name, level)
@defer.inlineCallbacks
def debug(self, msg, *args, **kwargs):
yield LoggingLoggerClass.debug(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def info(self, msg, *args, **kwargs):
yield LoggingLoggerClass.info(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def warning(self, msg, *args, **kwargs):
yield LoggingLoggerClass.warning(self, msg, *args, **kwargs)
warn = warning
@defer.inlineCallbacks
def error(self, msg, *args, **kwargs):
yield LoggingLoggerClass.error(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def critical(self, msg, *args, **kwargs):
yield LoggingLoggerClass.critical(self, msg, *args, **kwargs)
@defer.inlineCallbacks
def exception(self, msg, *args, **kwargs):
yield LoggingLoggerClass.exception(self, msg, *args, **kwargs)
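
# Hedged usage sketch (not part of the original module):
#
#     logging.setLoggerClass(Logging)
#     log = logging.getLogger('afm')
#     log.info('logged via an inlineCallbacks deferred')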
|
[
"[email protected]"
] | |
7bf9961f9abe963c51fc315c0be7e3c57d39a529
|
1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b
|
/2023/longest_nice_substring.py
|
a832858e6eafd4d321f1afb296fd2304b2ca0cb5
|
[] |
no_license
|
eronekogin/leetcode
|
ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c
|
edb870f83f0c4568cce0cacec04ee70cf6b545bf
|
refs/heads/master
| 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 |
Python
|
UTF-8
|
Python
| false | false | 449 |
py
|
"""
https://leetcode.com/problems/longest-nice-substring/
"""
class Solution:
def longestNiceSubstring(self, s: str) -> str:
if not s:
return ''
ss = set(s)
for i, c in enumerate(s):
if c.swapcase() not in ss:
left = self.longestNiceSubstring(s[:i])
right = self.longestNiceSubstring(s[i + 1:])
return max(left, right, key=len)
return s
|
[
"[email protected]"
] | |
1607a3e3331e20d9281ee04b374c3d4ea110cb01
|
c2849586a8f376cf96fcbdc1c7e5bce6522398ca
|
/ch21/ex21-15.pybench2.py
|
a110d546ad7c35d5e88ae11bbd6ee12cc27e3857
|
[] |
no_license
|
freebz/Learning-Python
|
0559d7691517b4acb0228d1cc76de3e93915fb27
|
7f577edb6249f4bbcac4f590908b385192dbf308
|
refs/heads/master
| 2020-09-23T01:48:24.009383 | 2019-12-02T12:26:40 | 2019-12-02T12:26:40 | 225,371,155 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,214 |
py
|
# pybench2.py
...
def runner(stmts, pythons=None, tracemd=False):
for (number, repeat, setup, stmt) in stmts:
if not pythons:
...
best = min(timeit.repeat(
setup=setup, stmt=stmt, number=number, repeat=repeat))
else:
setup = setup.replace('\t', ' ' * 4)
setup = ' '.join('-s "%s"' % line for line in setup.split('\n'))
...
for (ispy3, python) in pythons:
...
                cmd = ('%s -m timeit -n %s -r %s %s %s' %
                       (python, number, repeat, setup, args))
# pybench2_cases.py
import pybench2, sys
...
stmts = [ # (num,rep,setup,stmt)
(0, 0, "", "[x ** 2 for x in range(1000)]"),
(0, 0, "", "res=[]\nfor x in range(1000): res.append(x ** 2)")
(0, 0, "def f(x):\n\treturn x",
"[f(x) for x in 'spam' * 2500]"),
(0, 0, "def f(x):\n\treturn x",
"res=[]\nfor x in 'spam' * 2500:\n\tres.append(f(x))"),
(0, 0, "L = [1, 2, 3, 4, 5]", "for i in range(len(L)): L[i] += 1"),
(0, 0, "L = [1, 2, 3, 4, 5]", "i=0\nwhile i < len(L):\n\tL[i] += 1\n\ti += 1")]
...
pybench2.runner(stmts, pythons, tracemd)
|
[
"[email protected]"
] | |
e3a20c33463c6737ce0a9c7ef85e374de481845f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/wsCshmu5zkN5BfeAC_11.py
|
16ee4c11004690dd94154fad7dd29ce965bcbb66
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 739 |
py
|
"""
Create a function that takes a number `n` and checks if each digit is
divisible by the digit on its left. Return a boolean array depending on the
condition checks.
### Examples
divisible_by_left(73312) ➞ [False, False, True, False, True]
# no element left to 7 = False
# 3/7 = False
# 3/3 = True
# 1/3 = False
# 2/1 = True
divisible_by_left(1) ➞ [False]
divisible_by_left(635) ➞ [False, False, False]
### Notes
The array should always start with `False` as there is no digit to the left of
the first digit.
"""
def divisible_by_left(n):
nums = list(map(int, str(n)))
return [False] + [
False if not i else (j / i).is_integer()
for i, j in zip(nums, nums[1:])
]
|
[
"[email protected]"
] | |
7586f2806ece479ea1e2d474b53558d8c88144b2
|
fdc3d2daf484e8b500368987930d85b833d43fd6
|
/sandbox/python/spectrogram3.py
|
07bb5de54e8d25c13ce1d5af9224dc0a0bb27ecc
|
[] |
no_license
|
luiarthur/signal_processing
|
9d61b368603b965ab526b9226a1114022e08463b
|
f6f00ce57b94bfa020ac494fcb4e83549d05c902
|
refs/heads/master
| 2021-01-01T19:47:45.076460 | 2017-08-12T15:50:18 | 2017-08-12T15:50:18 | 98,684,524 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,513 |
py
|
import os
import numpy as np
from scipy.io import wavfile
from scipy import signal
import matplotlib.pyplot as plt
from notes import pitch, piano_freq, freq_dict, bin_spec
HOME = os.path.expanduser('~')
### Read a wavfile
(fs, x) = wavfile.read(HOME+"/wav/embraceableYou.wav")
if x.ndim > 1: x = x[:,1]
w_size = 4096
f, t, Zxx = signal.spectrogram(x, fs, nperseg=w_size, window=signal.get_window('blackman', Nx=w_size))
### Plot Spectrogram
### Spectrogram (traditional)
#Z = np.log(Zxx) - np.log(Zxx.max())
#plt.pcolormesh(t, f, Z, vmin=Z.min(), vmax=0, cmap=plt.cm.gist_heat)
### Spectrogram (peak frequency)
Z = np.exp( np.log(Zxx) - np.log(Zxx.max()) )
plt.pcolormesh(t, f, Z, vmin=.00001, vmax=.0005, cmap=plt.cm.gist_heat)
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.ylim([0, 4200])
plt.xlabel('Time [sec]')
#plt.yticks(f, pitch(f+1E-6))
plt.show()
### Plot Spectrogram built-in
#Pxx, freqs, bins, im = plt.specgram(x, NFFT=w_size, Fs=fs, noverlap=100, cmap=plt.cm.gist_heat)
#plt.ylim([0, 4200])
#plt.show()
### Plot Spectrogram built-in (2)
#np.mean( np.exp(np.log(Pxx) - np.log(Pxx.max())) < .001 )
#plt.pcolormesh(bins, freqs, np.exp(np.log(Pxx) - np.log(Pxx.max())), cmap=plt.cm.gist_heat, vmin=.00001, vmax=.0001)
#plt.title('STFT Magnitude')
#plt.ylabel('Frequency [Hz]')
#plt.ylim([0, 4200])
#plt.xlabel('Time [sec]')
#plt.yticks(f, pitch(f))
#plt.show()
### Movie
from matplotlib.animation import FuncAnimation
#thresh = .0005
thresh = .5
fig, ax = plt.subplots()
ln, = plt.plot([], [], animated=True)
title = ax.text(.8, .95, '', transform = ax.transAxes, va='center')
#plt.xticks(np.log(piano_freq), pitch(piano_freq), rotation=90)
plt.xticks(np.log(f), pitch(f), rotation=90)
plt.axhline(y=thresh, color='grey')
def init():
#ax.set_ylim(0, 1.1)
#ax.set_ylim(0, .01)
#ax.set_ylim(0, 1.1)
ax.set_ylim(0, thresh*2)
ax.set_xlim(np.log(27.5), np.log(4186))
return [ln, title]
def update(i):
ydata = np.exp( np.log(Zxx[:,i]) - np.log(Zxx[:,i].max()) )
#ydata = np.exp( np.log(Zxx[:,i]) - np.log(Zxx.max()) )
#ydata = np.exp( np.log(Zxx[:,i]) - np.log(10000) )
#ydata = Zxx[:,i]
ln.set_data(np.log(f), ydata)
title.set_text("time: " + str(np.round(t[i],2)) + "s")
#print t[i], pitch(f[Zxx[:,i].argmax()])
return [title, ln]
delay = (t[1:] - t[:-1]).mean() * 1000
ani = FuncAnimation(fig, update, frames=range(t.size),
init_func=init, blit=True, repeat=False, interval=delay)
plt.show()
|
[
"[email protected]"
] | |
4b9c499c4cf735c4bbb7e381d11f44e4a1d22ac8
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-maxExclusive-5-3.py
|
f378841d6d0ec8179638494024c1501e673c1b5e
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 |
MIT
| 2023-07-25T14:19:04 | 2020-02-10T21:59:47 |
Python
|
UTF-8
|
Python
| false | false | 259 |
py
|
from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_max_exclusive_5_xsd.nistschema_sv_iv_atomic_byte_max_exclusive_5 import NistschemaSvIvAtomicByteMaxExclusive5
obj = NistschemaSvIvAtomicByteMaxExclusive5(
value=-73
)
|
[
"[email protected]"
] | |
90a26d93ea05d64db95e9ed53c7fe2fcd4b30d8a
|
56591823019e0ac1d857f97a1b8c85e9d85a8385
|
/Scopuli/Interfaces/WEB/Jinja/Filters.py
|
d2702eaf522afa636d5c239edcaee4604161951d
|
[
"Apache-2.0"
] |
permissive
|
MaxOnNet/scopuli-core-web
|
3c19e312ec5688034295ac86a7a56fe2b2cf7915
|
66a2c31b36d7fc05be36ba5d5b141644459b4aba
|
refs/heads/master
| 2020-03-23T19:49:56.383093 | 2018-08-30T13:44:31 | 2018-08-30T13:44:31 | 142,004,596 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,911 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright [2018] Tatarnikov Viktor [[email protected]]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" """
import phonenumbers
def _decode_text(value):
"""
Decode a text-like value for display.
Unicode values are returned unchanged. Byte strings will be decoded
with a text-safe replacement for unrecognized characters.
"""
if isinstance(value, bytes):
return value.decode('ascii', 'replace')
else:
return value
def filter_markdown(value):
from flask import Markup
from markdown import markdown
return Markup(markdown(value))
def filter_printable(value):
try:
return _decode_text(repr(value))
except Exception as e:
return '<repr(%s) raised %s: %s>' % (
object.__repr__(value), type(e).__name__, e)
def filter_shuffle(seq):
import random
try:
result = list(seq)
random.shuffle(result)
return result
except:
return seq
def filter_phonenumber(value, country='RU', format=phonenumbers.PhoneNumberFormat.INTERNATIONAL):
try:
parsed = phonenumbers.parse(value, country)
return phonenumbers.format_number(parsed, format)
except phonenumbers.NumberParseException as e:
return value
def filter_money(value):
return "{money:0,.2f} р.".format(money=value)
|
[
"[email protected]"
] | |
a3d679949562466f4ce55d64546316cf11b470e1
|
1b5404b8099de74d4e39e0a41b1d04c61defa8d4
|
/Лабиринт/dump/labyrinth_find_solution.py
|
6284287ae0344286006f098090bcd1a1b2c5c773
|
[] |
no_license
|
ipeterov/random-stuff
|
5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd
|
dbb38d42331f636919fd149b23783e02ee2c9afb
|
refs/heads/master
| 2023-05-14T00:41:51.122251 | 2023-05-04T12:10:26 | 2023-05-04T12:10:26 | 206,028,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,776 |
py
|
import pickle
def refactored_labyrinth(labyrinth):
    # wall flags per cell: index 0 - up, 1 - right, 2 - down, 3 - left
refactored_labyrinth = []
for y in range(len(labyrinth)):
refactored_labyrinth.append([])
for x in range(len(labyrinth[0])):
refactored_labyrinth[y].append([0,0,0,0])
for y in range(len(labyrinth)):
for x in range(len(labyrinth[0])):
if labyrinth[y-1][x]['d'] == 1 or y == 0:
refactored_labyrinth[y][x][0] = 1
if labyrinth[y][x]['r'] == 1 or x == len(labyrinth[0]) - 1:
refactored_labyrinth[y][x][1] = 1
if labyrinth[y][x]['d'] == 1 or y == len(labyrinth) - 1:
refactored_labyrinth[y][x][2] = 1
if labyrinth[y][x-1]['r'] == 1 or x == 0:
refactored_labyrinth[y][x][3] = 1
return refactored_labyrinth
def find_path(labyrinth, start_coords = [0,0]):
def move(current_coords, forbidden_move):
if current_coords == goal_coords:
#~ print('aaaaaaaa')
for element in path:
gpath.append(element)
            return  # stop recursing once the goal has been reached
path.append(current_coords)
dead_end = False
print(current_coords)
y = current_coords[0]
x = current_coords[1]
while not dead_end:
for i in range(4):
if labyrinth[y][x][i] != 1 and i != forbidden_move:
if i == 0:
move([y-1,x], 2)
elif i == 1:
move([y,x+1], 3)
elif i == 2:
move([y+1,x], 0)
elif i == 3:
move([y,x-1], 1)
i = 5
if i != 5:
dead_end = True
try:
labyrinth[y + 1][x][0] = 1
except:
pass
try:
labyrinth[y][x - 1][1] = 1
except:
pass
try:
labyrinth[y - 1][x][2] = 1
except:
pass
try:
labyrinth[y][x + 1][3] = 1
except:
pass
path.pop()
#~ print(labyrinth)
labyrinth = refactored_labyrinth(labyrinth)
#~ print(labyrinth)
goal_coords = [99, 99]
gpath = []
path = []
goal_reached = False
move(start_coords, -1)
if len(gpath) == 0:
        print('no path found')
return None
gpath.append(goal_coords)
return gpath
name = 'labyrinth_backtrack'
labyrinth = pickle.load(open(name, 'rb'))
path = find_path(labyrinth)
pickle.dump(path, open('labyrinth_solution','wb'))
|
[
"[email protected]"
] | |
73db1141a062dab86543dba3571ab8faea784fdc
|
4a5562bf8a7967c9c5d76265d89c366165bff9f8
|
/template_python/setup.py
|
a4bf90beaf8b2625aaf3603393c5c536b60eec9a
|
[] |
no_license
|
lokendert/me132_students
|
640f935bd6e5c5d65329a161731afad4068a72e0
|
8e1075c4b61bef5c8f4d322cb168e2f942e1fad6
|
refs/heads/master
| 2020-03-31T03:40:46.680398 | 2011-02-04T20:13:29 | 2011-02-04T20:13:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 520 |
py
|
from setuptools import setup, find_packages
setup(
name='me132_template',
author="The ME132 TAs",
author_email="[email protected]",
url='www.its.caltech.edu/~me132',
description="A minimal Player client",
version="0.1",
package_dir={'':'src'},
packages=find_packages(),
entry_points={
'console_scripts': [
# List executables in the format '<name> = <module>:<function>'
'my_python_client = me132_template.basic_client:main'
]
}
)
|
[
"[email protected]"
] | |
bf3f86be25ab7a8ffe01b3fea5ec5bc1ae6b5c2b
|
a4a63eedacd544872fbfa33fc58d7cf1558829b7
|
/backend/event/api/v1/urls.py
|
046246934dbd8d54f00c2d7d0a6bb4781e87498b
|
[] |
no_license
|
crowdbotics-apps/revil-18107
|
3d9bd52855e33debaa60f4f5c801629fb1aa60da
|
2671f3410b43cd8ed2ccc51780a80366fb594684
|
refs/heads/master
| 2022-10-17T09:34:39.097853 | 2020-06-15T00:05:02 | 2020-06-15T00:05:02 | 272,301,823 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 854 |
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
VendorViewSet,
LocationViewSet,
FavoritesViewSet,
VendorDetailViewSet,
CategoryViewSet,
FaqViewSet,
PresenterViewSet,
ScheduleViewSet,
MyScheduleViewSet,
SponsorViewSet,
)
router = DefaultRouter()
router.register("faq", FaqViewSet)
router.register("vendordetail", VendorDetailViewSet)
router.register("location", LocationViewSet)
router.register("presenter", PresenterViewSet)
router.register("myschedule", MyScheduleViewSet)
router.register("schedule", ScheduleViewSet)
router.register("category", CategoryViewSet)
router.register("favorites", FavoritesViewSet)
router.register("vendor", VendorViewSet)
router.register("sponsor", SponsorViewSet)
urlpatterns = [
path("", include(router.urls)),
]
|
[
"[email protected]"
] | |
5896418942efd005a46d1c7d2e74df68364411c9
|
9ede3bec6dc9cd58f91ba3ee2b3f4b7eb3b6c889
|
/lintreview/repo.py
|
aa745a9b199595da98ab54ef33439fa29c5edb40
|
[
"MIT"
] |
permissive
|
LyleH/lint_review_1
|
d0816e68ee74c507357471b1183348b2c8d59af2
|
a36945446745a9e8d8c1f1955e084add6563647b
|
refs/heads/master
| 2021-01-19T11:43:42.780988 | 2016-09-22T05:28:23 | 2016-09-22T05:28:23 | 68,887,536 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,101 |
py
|
import lintreview.github as github
import lintreview.git as git
import logging
log = logging.getLogger(__name__)
class GithubRepository(object):
"""Abstracting wrapper for the
various interactions we have with github.
This will make swapping in other hosting systems
a tiny bit easier in the future.
"""
def __init__(self, config, user, repo_name):
self.config = config
self.user = user
self.repo_name = repo_name
def repository(self):
"""Get the underlying repository model
"""
self.repo = github.get_repository(
self.config,
self.user,
self.repo_name)
return self.repo
def pull_request(self, number):
"""Get a pull request by number.
"""
pull = self.repository().pull_request(number)
return GithubPullRequest(pull)
def ensure_label(self, label):
"""Create label if it doesn't exist yet
"""
repo = self.repository()
if not repo.label(label):
repo.create_label(
name=label,
color="bfe5bf", # a nice light green
)
def create_status(self, sha, state, description):
"""Create a commit status
"""
context = self.config.get('APP_NAME', 'lintreview')
repo = self.repository()
repo.create_status(
sha,
state,
None,
description,
context)
class GithubPullRequest(object):
"""Abstract the underlying github models.
This makes other code simpler, and enables
the ability to add other hosting services later.
"""
def __init__(self, pull_request):
self.pull = pull_request
@property
def number(self):
return self.pull.number
@property
def is_private(self):
data = self.pull.as_dict()
return data['head']['repo']['private']
@property
def head(self):
data = self.pull.as_dict()
return data['head']['sha']
@property
def clone_url(self):
data = self.pull.as_dict()
return data['head']['repo']['clone_url']
@property
def target_branch(self):
data = self.pull.as_dict()
return data['base']['ref']
def commits(self):
return self.pull.commits()
def review_comments(self):
return self.pull.review_comments()
def files(self):
return list(self.pull.files())
def remove_label(self, label_name):
issue = self.pull.issue()
labels = issue.labels()
if not any(label_name == label.name for label in labels):
return
log.debug("Removing issue label '%s'", label_name)
issue.remove_label(label_name)
def add_label(self, label_name):
issue = self.pull.issue()
issue.add_labels(label_name)
def create_comment(self, body):
self.pull.create_comment(body)
def create_review_comment(self, body, commit_id, path, position):
self.pull.create_review_comment(body, commit_id, path, position)
|
[
"[email protected]"
] | |
2ad8fb86568b9db89c98af5b07780a905127da55
|
0675dad295526480242c9da48310a1c958423e72
|
/dmrg_j2j2/build_lattice.py
|
0d410148abf8f05a76145d0c57c8cbc956ac5397
|
[] |
no_license
|
GiggleLiu/numeric_master
|
627e054ab7404b1bbf8b8eec65f05346b35640a3
|
47b9eaf1eeaceacf5ff43f2226620e5c37064095
|
refs/heads/master
| 2021-08-24T11:31:37.107583 | 2017-11-21T06:26:38 | 2017-11-21T06:26:38 | 111,409,702 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,766 |
py
|
#!/usr/bin/env python
test_str = '''
<LATTICES>
<GRAPH name = "heisenberg" dimension="1" vertices="6" edges="5">
<VERTEX id="1" type="0"><COORDINATE>0</COORDINATE></VERTEX>
<VERTEX id="2" type="1"><COORDINATE>2</COORDINATE></VERTEX>
<VERTEX id="3" type="1"><COORDINATE>3</COORDINATE></VERTEX>
<VERTEX id="4" type="1"><COORDINATE>4</COORDINATE></VERTEX>
<VERTEX id="5" type="1"><COORDINATE>5</COORDINATE></VERTEX>
<VERTEX id="6" type="0"><COORDINATE>6</COORDINATE></VERTEX>
<EDGE source="1" target="2" id="1" type="0" vector="1"/>
<EDGE source="2" target="3" id="2" type="0" vector="1"/>
<EDGE source="3" target="4" id="3" type="0" vector="1"/>
<EDGE source="4" target="5" id="4" type="0" vector="1"/>
<EDGE source="5" target="6" id="5" type="0" vector="1"/>
</GRAPH>
</LATTICES>
'''
import lxml.etree as ET
def build_j1j2(size, filename):
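    # Emits an ALPS-style lattice graph: type-0 edges link nearest neighbours
    # (J1) and type-1 edges link next-nearest neighbours (J2), both wrapped
    # periodically via the modulo arithmetic below. Note both EDGE elements
    # reuse id=i, so edge ids are not unique across the two bond types, and the
    # GRAPH tag's edges attribute (size-1) no longer matches the 2*size bonds.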
lattice = ET.Element('LATTICES')
graph = ET.SubElement(lattice, 'GRAPH', attrib={'name':'J1J2',
'dimension':'1', 'vertices':'%d'%size, 'edges':'%d'%(size-1)})
for i in range(size):
vi = ET.SubElement(graph, 'VERTEX', attrib={'id':'%d'%(i+1),
'type':'0'})
co = ET.SubElement(vi, 'COORDINATE')
co.text = '%d'%i
for i in range(1,size+1):
ET.SubElement(graph, 'EDGE', attrib={'source':'%d'%(i),'target':'%d'%((i)%size+1),
'id':'%d'%i, 'type':'0', 'vector':'1'})
ET.SubElement(graph, 'EDGE', attrib={'source':'%d'%(i),'target':'%d'%((i+1)%size+1),
'id':'%d'%i, 'type':'1', 'vector':'1'})
with open(filename, 'w') as f:
f.write(ET.tostring(lattice, pretty_print=True))
if __name__ == '__main__':
import sys
nsite = int(sys.argv[1])
build_j1j2(nsite, 'lattices/j1j2_%d.xml'%nsite)
|
[
"[email protected]"
] | |
0a594efa5004b79150ace703b60d768090d1e120
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/lib/python2.7/site-packages/odps/tunnel/checksum.py
|
8e8fc3c8d359101a5792eac47318794d0db3a82b
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 |
Apache-2.0
| 2022-10-05T12:10:32 | 2016-12-15T05:26:12 |
Python
|
UTF-8
|
Python
| false | false | 1,918 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from ..crc import Crc32c, Crc32
from .. import utils
class Checksum(object):
TRUE = bytearray([1])
FALSE = bytearray([0])
def __init__(self, method='crc32c'):
self.crc = Crc32c() if method.lower() == 'crc32c' else Crc32()
def _mode(self):
# use for UT to check if use c extension
try:
from ..src.crc32c_c import Crc32c
return 'c' if isinstance(self.crc, Crc32c) else 'py'
except ImportError:
return 'py'
def update_bool(self, val):
assert isinstance(val, bool)
val = self.TRUE if val else self.FALSE
self._update(val)
def update_int(self, val):
val = struct.pack('<i', val)
self._update(val)
def update_long(self, val):
val = struct.pack('<q', val)
self._update(val)
def update_float(self, val):
val = struct.pack('<d', val)
self._update(val)
def _update(self, b):
# update crc without type checking
self.crc.update(bytearray(b))
def update(self, b):
b = utils.to_binary(b)
self._update(b)
def getvalue(self):
return self.crc.getvalue()
def reset(self):
return self.crc.reset()
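
# Hedged usage sketch (not part of the original module):
#
#     ck = Checksum('crc32c')
#     ck.update_long(42)      # fixed-width values are packed little-endian
#     ck.update(b'payload')
#     digest = ck.getvalue()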
|
[
"[email protected]"
] | |
faf3c8dfa5ff66ccb5061a5361f46ea8660794fb
|
6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f
|
/alien_invasion/settings.py
|
88e78bcdae2ac7d01be37c4e59510f618f2782a2
|
[
"Apache-2.0"
] |
permissive
|
chaofan-zheng/python_leanring_code
|
fe22b0370cadebf7456477269aff4a35cef0eb41
|
0af44ff39b9ded2c1d2cc96c6d356d21170ac04d
|
refs/heads/main
| 2023-02-28T07:56:46.457552 | 2021-02-10T15:08:33 | 2021-02-10T15:08:33 | 323,584,115 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 553 |
py
|
"""存储雷霆战机的所有类"""
class Settings():
def __init__(self):
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (0, 0, 0)
self.speed_factor = 1.5
self.bullet_speed_factor = 5
self.bullet_width = 5
self.bullet_height = 15
self.bullet_color = 255, 255, 102
self.bullets_allowed = 5
self.alien_speed_factor = 1
self.fleet_drop_speed = 10
        # fleet_direction = 1 means move right; -1 means move left
self.fleet_direction = 1
|
[
"[email protected]"
] | |
fef6b5cbd6467df66736475fcd841be9bc0cc929
|
84c4514c0d9588026f1f203c2d351df226170f75
|
/python/itertools/permutations.py
|
bfacc64c73bf1bbc3b0ce55bba4154f974d6fe6c
|
[] |
no_license
|
hiromichinomata/hackerrank
|
eafc1a902353f6bdac508f67cfa7eebdbfb2811f
|
bffca0f56c92b752706b5a9fb4c814f44ea5d14e
|
refs/heads/master
| 2022-12-01T15:39:25.811250 | 2020-08-08T01:44:10 | 2020-08-08T01:44:10 | 264,445,214 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 151 |
py
|
from itertools import permutations
s, num = input().strip().split()
s = sorted(s)
num = int(num)
for i in permutations(s, num):
print("".join(i))
|
[
"[email protected]"
] | |
16db4fc999d70029f8e94677713d54ff4f1cca36
|
f4335e8e7d3010506f570167bbba18156d3a4674
|
/stubs/django/core/management/commands/diffsettings.pyi
|
1bf6f90fade7e0b8e54afff184eba3267ee5ee24
|
[] |
no_license
|
rtpg/typehangar
|
133686ea45ad6187b768290aeebda9cbcae25586
|
790d057497c4791a38f9e3e009b07935b4a12f45
|
refs/heads/master
| 2021-01-19T04:49:17.940793 | 2017-01-16T13:54:14 | 2017-01-16T13:54:14 | 69,260,488 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
pyi
|
# Stubs for django.core.management.commands.diffsettings (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
from django.core.management.base import BaseCommand
def module_to_dict(module, omittable: Any = ...): ...
class Command(BaseCommand):
help = ... # type: str
requires_system_checks = ... # type: bool
def add_arguments(self, parser): ...
def handle(self, **options): ...
|
[
"[email protected]"
] | |
e468552fe67dcb111020cfc2ebd9623c74e0c240
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03720/s960059730.py
|
c3987b6c50c512aecd596e019b24702590445f5d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,311 |
py
|
import sys, re
from math import ceil, floor, sqrt, pi, factorial, gcd
from copy import deepcopy
from collections import Counter, deque
from heapq import heapify, heappop, heappush
from itertools import accumulate, product, combinations, combinations_with_replacement
from bisect import bisect, bisect_left, bisect_right
from functools import reduce
from decimal import Decimal, getcontext
# input = sys.stdin.readline
def i_input(): return int(input())
def i_map(): return map(int, input().split())
def i_list(): return list(i_map())
def i_row(N): return [i_input() for _ in range(N)]
def i_row_list(N): return [i_list() for _ in range(N)]
def s_input(): return input()
def s_map(): return input().split()
def s_list(): return list(s_map())
def s_row(N): return [s_input() for _ in range(N)]
def s_row_str(N): return [s_list() for _ in range(N)]
def s_row_list(N): return [list(s_input()) for _ in range(N)]
def lcm(a, b): return a * b // gcd(a, b)
sys.setrecursionlimit(10 ** 6)
INF = float('inf')
MOD = 10 ** 9 + 7
num_list = []
str_list = []
def main():
n, m = i_map()
for _ in range(m):
a, b = i_map()
num_list.append(a)
num_list.append(b)
num_counter = Counter(num_list)
for i in range(1,n+1):
print(num_counter[i])
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
cc23354f1ac1be52b795119e99c44df6f9b9a574
|
0b793bce2da8c3d09b7956c0672ddbffd46feaed
|
/hackerrank/algorithm/lonly_integer.py
|
49cc044edcb98b61afa115495f50c34b58c36815
|
[
"MIT"
] |
permissive
|
knuu/competitive-programming
|
c6c4e08fb231937d988bdc5a60a8ad6b31b97616
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
refs/heads/master
| 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
py
|
from collections import Counter
_ = int(input())
for key, val in Counter(input().split()).items():
if val == 1:
print(key)
|
[
"[email protected]"
] | |
e8f56efacae6ebed48b265ae2ae07847dcfaeb1d
|
9b87fc7054bedaef1bbfe2842bfca12d5585119b
|
/nicegui/elements/custom_example.py
|
ab8af2bcd42916f997d1d55803d71709488c011e
|
[
"MIT"
] |
permissive
|
TrendingTechnology/nicegui
|
cb08287c9b0cab7ae1a831ee623a056d8ecdee43
|
68fa24456497683417d2e613ec573673deacd7f7
|
refs/heads/main
| 2023-06-20T06:11:52.914008 | 2021-07-22T05:09:40 | 2021-07-22T05:09:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 785 |
py
|
from .custom_view import CustomView
from .element import Element
class CustomExampleView(CustomView):
def __init__(self, on_change):
super().__init__('custom_example', __file__, value=0)
self.on_change = on_change
self.allowed_events = ['onAdd']
self.initialize(temp=False, onAdd=self.handle_add)
def handle_add(self, msg):
self.options.value += msg.number
if self.on_change is not None:
return self.on_change(self.options.value)
return False
class CustomExample(Element):
def __init__(self, *, on_change=None):
super().__init__(CustomExampleView(on_change))
    def add(self, number: float):
self.view.options.value += number
self.view.on_change(self.view.options.value)
|
[
"[email protected]"
] | |
25e4a10195a5b94ecb830ef0b1d184c9feda747f
|
58ffe83eb9828668c13242c6f98238f08655f561
|
/app/notebooks/problang/transcript_utils.py
|
cdb2f5a88761411c4cf30c48af9b83fd05e1dcf8
|
[
"Apache-2.0"
] |
permissive
|
DanFu09/esper
|
f9dcc47cd5677dee8dffb1e066d69332471a0d6c
|
ccc5547de3637728b8aaab059b6781baebc269ec
|
refs/heads/master
| 2020-04-04T21:31:43.549572 | 2020-01-16T01:14:13 | 2020-01-16T01:14:13 | 156,289,533 | 4 | 0 |
Apache-2.0
| 2018-12-14T03:01:02 | 2018-11-05T22:05:07 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 7,054 |
py
|
import numpy as np
import torch
from torch.utils.data import Dataset
import requests
from query.models import Video
from timeit import default_timer as now
from esper.prelude import pcache
import random
from collections import defaultdict
SEGMENT_SIZE = 200
SEGMENT_STRIDE = 100
def video_list():
r = requests.get('http://localhost:8111/videos')
return r.json()
def get_doc(item):
r = requests.post('http://localhost:8111/getdoc', json={'phrases': [item]})
return r.json()
def doc_len():
r = requests.get('http://localhost:8111/doclen')
return r.json()
def compute_vectors(docs, vocabulary, window_size, stride):
requests.post('http://localhost:8111/computevectors', json={
'vocabulary': vocabulary,
'docs': docs,
'window_size': window_size,
'stride': stride
})
def find_segments(docs, lexicon, threshold, window_size, stride):
r = requests.post('http://localhost:8111/findsegments', json={
'lexicon': lexicon,
'threshold': threshold,
'window_size': window_size,
'merge_overlaps': False,
'stride': stride,
'docs': docs
})
return r.json()
def small_video_sample():
videos = []
id = 1
while len(videos) < 10:
try:
v = Video.objects.get(id=id)
get_doc(v)
videos.append(v)
except Exception:
pass
id += 1
return videos
def word_counts():
r = requests.get('http://localhost:8111/wordcounts')
return r.json()
VOCAB_THRESHOLD = 100
def load_vocab():
counts = word_counts()
print('Full vocabulary size: {}'.format(len(counts)))
vocabulary = sorted([word for (word, count) in counts.items() if count > VOCAB_THRESHOLD])
print('Filtered vocabulary size: {}'.format(len(vocabulary)))
return vocabulary
vocabulary = pcache.get('vocabulary', load_vocab)
vocab_size = len(vocabulary)
class SegmentTextDataset(Dataset):
def __init__(self, docs, vocabulary=None, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False):
self._segment_size = segment_size
self._use_cuda = use_cuda
self._vocabulary = vocabulary
self._doc_names = docs
self._doc_lens = doc_len()
self._num_segs = np.array([
len(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
for doc in self._doc_names
])
self._back_index = [
(i, j, k)
for i, doc in enumerate(self._doc_names)
for k, j in enumerate(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
]
self._forward_index = {
(self._doc_names[i], j): k
for k, (i, j, _) in enumerate(self._back_index)
}
self._docs = {}
self._segs = {}
def segment_index(self, doc, word):
return self._forward_index[(doc, word)]
def _text_to_vector(self, words):
counts = defaultdict(int)
for w in words:
counts[w] += 1
t = torch.tensor([counts[word] for word in self._vocabulary], dtype=torch.float32)
t /= torch.sum(t)
return t
def __len__(self):
return self._num_segs.sum()
def __getitem__(self, idx):
(i, j, _) = self._back_index[idx]
if not (i, j) in self._segs:
if not i in self._docs:
self._docs[i] = get_doc(self._doc_names[i])
seg = self._docs[i][j:j+self._segment_size]
data = {
'document_idx': i,
'segment_idx': j,
}
if self._vocabulary is not None:
data['vector'] = self._text_to_vector(seg)
if self._use_cuda:
data['vector'] = data['vector'].cuda()
data['segment'] = ' '.join(seg)
self._segs[(i, j)] = data
return self._segs[(i, j)]
import mmap
class SegmentVectorDataset(Dataset):
def __init__(self, docs, vocab_size, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False, inmemory=False):
self._ds = SegmentTextDataset(docs, segment_size=segment_size, segment_stride=segment_stride)
self._doc_names = docs
self._vocab_size = vocab_size
self._use_cuda = use_cuda
self._inmemory = inmemory
self._file_handle = open('/app/data/segvectors.bin', 'r+b')
self._file = mmap.mmap(self._file_handle.fileno(), 0)
self._byte_offsets = []
if self._inmemory:
self._buffer = self._file.read()
# Compute prefix sum of document offsets
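        # Hedged worked example (editor's note): with per-doc segment counts
        # [3, 5] and vocab_size 100, the offsets come out as [0, 300].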
for i, doc in enumerate(self._doc_names):
dlen = self._ds._num_segs[i-1] * self._vocab_size
if i == 0:
self._byte_offsets.append(0)
else:
self._byte_offsets.append(self._byte_offsets[i - 1] + dlen)
def _byte_offset(self, idx):
(i, _, j) = self._ds._back_index[idx]
return self._byte_offsets[i] + j * self._vocab_size
def __len__(self):
return len(self._ds)
def __getitem__(self, idx):
offset = self._byte_offset(idx)
if self._inmemory:
byts = self._buffer[offset:offset+self._vocab_size]
else:
self._file.seek(offset)
byts = self._file.read(self._vocab_size)
assert len(byts) == self._vocab_size, \
'Invalid read at index {}, offset {}. Expected {} bytes, got {}'.format(idx, offset, self._vocab_size, len(byts))
npbuf = np.frombuffer(byts, dtype=np.uint8)
tbuf = torch.from_numpy(npbuf).float()
tbuf /= torch.sum(tbuf)
if self._use_cuda:
tbuf = tbuf.cuda()
return tbuf, idx
class LabeledSegmentDataset(Dataset):
def __init__(self, unlabeled_dataset, labels, categories):
self._ds = unlabeled_dataset
self._labels = labels
self._categories = categories
def __len__(self):
return len(self._labels)
def __getitem__(self, idx):
(seg_idx, label) = self._labels[idx]
label = torch.tensor([1 if label == i else 0 for i in range(self._categories)], dtype=torch.float32)
if self._ds._use_cuda:
label = label.cuda()
tbuf, _ = self._ds[seg_idx]
return tbuf, label, seg_idx
def label_widget(dataset, indices, done_callback):
from IPython.display import display, clear_output
from ipywidgets import Text, HTML, Button
labels = []
i = 0
transcript = HTML(dataset[indices[0]]['segment'])
box = Text(placeholder='y/n')
def on_submit(text):
nonlocal i
label = 1 if text.value == 'y' else 0
labels.append((indices[i], label))
i += 1
transcript.value = dataset[indices[i]]['segment']
box.value = ''
box.on_submit(on_submit)
finished = False
btn_finished = Button(description='Finished')
def on_click(b):
done_callback(labels)
btn_finished.on_click(on_click)
display(transcript)
display(box)
display(btn_finished)
|
[
"[email protected]"
] | |
6310996c29f82720e743d2c1c5d7c036e79d4a73
|
d93c91e904470b46e04a4eadb8c459f9c245bb5a
|
/banglore_scrape/proptiger/proptiger/spiders/proptigerresale.py
|
47b05e9f213ad8c5615011068e0591a29f338475
|
[] |
no_license
|
nbourses/scrappers
|
3de3cd8a5408349b0ac683846b9b7276156fb08a
|
cde168a914f83cd491dffe85ea24aa48f5840a08
|
refs/heads/master
| 2021-03-30T15:38:29.096213 | 2020-03-25T03:23:56 | 2020-03-25T03:23:56 | 63,677,541 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,099 |
py
|
import scrapy
from proptiger.items import ProptigerItem
from scrapy.spiders import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
import json
from scrapy.selector import XmlXPathSelector
import lxml.etree as etree
from urlparse import urljoin
import urllib
import time
from datetime import datetime as dt
class PropRentSpider(Spider):
name = "proptigerresaleBangalore"
start_urls = ['https://www.proptiger.com/data/v2/entity/resale-listing?selector={%22filters%22:{%22and%22:[{%22equal%22:{%22bookingStatusId%22:1}},{%22equal%22:{%22cityId%22:2}}]},%22paging%22:{%22start%22:0,%22rows%22:15}}']
allowed_domains = ["www.proptiger.com"]
rules = (Rule(LinkExtractor(deny=(), allow=('http://www.proptiger.com/'), ), callback='parse', follow=True, ),)
custom_settings = {
'DEPTH_LIMIT': 10000,
'DOWNLOAD_DELAY': 2
}
def parse(self, response):
jr = response.body
jd = json.loads(jr)
handle_http_list = [500]
path = jd["data"]
base_url = "https://www.proptiger.com/"
max_page = int(jd["totalCount"])
cur_page = int(response.url.split(':')[-2].split(',')[0])
cur_page1 = cur_page + 15
page_num =str(cur_page1)
url = 'https://www.proptiger.com/data/v2/entity/resale-listing?selector={{%22filters%22:{{%22and%22:[{{%22equal%22:{{%22bookingStatusId%22:1}}}},{{%22equal%22:{{%22cityId%22:2}}}}]}},%22paging%22:{{%22start%22:{x},%22rows%22:15}}}}'.format(x=str(cur_page1))
for i in range(0,len(path)):
if (i+cur_page) == (max_page):
break
item = ProptigerItem()
item['data_id'] = path[i]['propertyId']
try:
item['listing_by'] = path[i]['companySeller']['company']['type']
except:
item['listing_by'] = 'None'
try:
item['name_lister'] = path[i]['companySeller']['user']['fullName']
except:
item['name_lister'] = 'None'
try:
item['mobile_lister'] = path[i]['companySeller']['user']['contactNumbers'][0]['contactNumber']
except:
item['mobile_lister'] = 'None'
try:
item['price_per_sqft'] = path[i]['currentListingPrice']['pricePerUnitArea']
except:
item['price_per_sqft'] = '0'
try:
item['Selling_price'] = str(path[i]['currentListingPrice']['price'])
except:
item['Selling_price'] = '0'
item['Monthly_Rent'] = '0'
try:
dt1 = int(path[i]['currentListingPrice']['createdAt'] * 0.001)
item['listing_date'] = time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(dt1))
except:
item['listing_date'] = '0'
try:
dt2 = int(path[i]['currentListingPrice']['updatedAt'] * 0.001)
item['updated_date'] = time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt2))
except:
item['updated_date'] = '0'
try:
item['lat'] = path[i]['latitude']
except:
item['lat'] = '0'
try:
item['longt'] = path[i]['longitude']
except:
item['longt'] = '0'
try:
item['txn_type'] = path[i]['listingCategory']
except:
item['txn_type'] = 'None'
try:
item['config_type'] = str(path[i]['property']['bedrooms']) + 'BHK'
except:
item['config_type'] = 'None'
try:
item['property_type'] = path[i]['property']['unitType']
except:
item['property_type'] = 'None'
try:
item['Bua_sqft'] = str(path[i]['property']['size'])
except:
item['Bua_sqft'] = '0'
try:
item['carpet_area'] = str(path[i]['property']['carpetArea'])
except:
item['carpet_area'] = '0'
try:
item['areacode'] = path[i]['property']['project']['localityId']
except:
item['areacode'] = 'None'
try:
item['city'] = path[i]['property']['project']['locality']['suburb']['city']['label']
except:
item['city'] = 'None'
try:
item['locality'] = path[i]['property']['project']['locality']['suburb']['label']
except:
item['locality'] = 'None'
try:
item['sublocality'] = path[i]['property']['project']['locality']['label']
except:
item['sublocality'] = 'None'
try:
item['Building_name'] = path[i]['property']['project']['locality']['newsTag']
except:
item['Building_name'] = 'None'
try:
dt3 = int(path[i]['property']['project']['launchDate'] * 0.001)
item['Launch_date'] = str(time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt3)))
except:
item['Launch_date'] = '0'
try:
item['address'] = path[i]['property']['project']['address']
except:
item['address'] = 'None'
try:
dt4 = int(path[i]['property']['project']['possessionDate'] * 0.001)
item['Possession'] = str(time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime(dt4)))
except:
item['Possession'] = '0'
try:
item['Status'] = path[i]['property']['project']['projectStatus']
except:
item['Status'] = 'None'
try:
item['platform'] = path[i]['listingSourceDomain']
except:
item['platform'] = 'None'
item['management_by_landlord'] = 'None'
item['google_place_id'] = 'None'
item['age'] = 'None'
if item['Selling_price'] == '0' and item['Monthly_Rent'] == '0':
item['price_on_req'] = 'true'
else:
item['price_on_req'] = 'false'
item['Details'] = path[i]['property']['project']['description']
item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')
if (((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['price_per_sqft'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0'))):
item['quality4'] = 1
elif (((not item['price_per_sqft'] == '0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None')) or ((not item['Monthly_Rent'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None'))):
item['quality4'] = 0.5
else:
item['quality4'] = 0
if ((not item['Building_name'] == 'None') and (not item['listing_date'] == '0') and (not item['txn_type'] == 'None') and (not item['property_type'] == 'None') and ((not item['Selling_price'] == '0') or (not item['Monthly_Rent'] == '0'))):
item['quality1'] = 1
else:
item['quality1'] = 0
if ((not item['Launch_date'] == '0') and (not item['Possession'] == '0')):
item['quality2'] = 1
else:
item['quality2'] = 0
if ((not item['mobile_lister'] == 'None') or (not item['listing_by'] == 'None') or (not item['name_lister'] == 'None')):
item['quality3'] = 1
else:
item['quality3'] = 0
yield item
if (cur_page+15) < ( max_page):
yield Request(url, callback=self.parse)
|
[
"[email protected]"
] | |
8d12ea6102055c34798e687b5a6532f7642b276f
|
1311696a180047135c825ffa283f9ac9750d4236
|
/tests/data/stubs-ok/micropython-linux-1_12/websocket.py
|
84603dedea90d09964895308d20f7dfc0ad0c2bf
|
[
"MIT"
] |
permissive
|
Josverl/micropython-stubber
|
71103afa842da02d5ad074b541d9bff7243ce23f
|
68fe9113f4b4e611bb4c3d19f79c8ba0e7111f5e
|
refs/heads/main
| 2023-08-31T00:51:22.200348 | 2023-05-31T07:48:54 | 2023-05-31T07:48:54 | 177,823,007 | 135 | 8 |
NOASSERTION
| 2023-09-11T21:25:19 | 2019-03-26T16:00:53 |
Python
|
UTF-8
|
Python
| false | false | 546 |
py
|
"""
Module: 'websocket' on micropython-linux-1.12
"""
# MCU: {'ver': '1.12', 'port': 'linux', 'arch': 'x64', 'sysname': 'unknown', 'release': '1.12.0', 'name': 'micropython', 'mpy': 2821, 'version': '1.12.0', 'machine': 'unknown', 'build': '', 'nodename': 'unknown', 'platform': 'linux', 'family': 'micropython'}
# Stubber: 1.3.6
class websocket:
''
def close():
pass
def ioctl():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def write():
pass
|
[
"[email protected]"
] | |
91fe8bdac939808480646276789f56bc2fd0c450
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_227/ch149_2020_04_13_20_21_26_194548.py
|
50b04d13c34b8c1459a9db8abfd23816a3214e2e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 926 |
py
|
salario_bruto=float(input("Qual o seu salário bruto? "))
numero_dependentes=int(input("Qual o seu número de dependentes? "))
if salario_bruto<=1045.00:
INSS=salario_bruto*0.075
elif salario_bruto>=1045.01 and salario_bruto<=2089.60:
INSS=salario_bruto*0.09
elif salario_bruto>=2089.61 and salario_bruto<=3134.40:
INSS=salario_bruto*0.12
elif salario_bruto>=3134.41 and salario_bruto<=6101.06:
INSS=salario_bruto*0.14
else:
INSS=671.12
base_de_calculo=salario_bruto-INSS-(numero_dependentes*189.59)
if base_de_calculo<=1903.98:
IRRF=0
elif base_de_calculo>=1903.99 and base_de_calculo<=2826.65:
IRRF=(base_de_calculo*0.075)-142.80
elif base_de_calculo>=2826.65 and base_de_calculo<=3751.05:
IRRF=(base_de_calculo*0.15)-354.80
elif base_de_calculo>=3751.06 and base_de_calculo<=4664.68:
IRRF=(base_de_calculo*0.225)-636.13
else:
IRRF=(base_de_calculo*0.275)-869.36
print(IRRF)
|
[
"[email protected]"
] | |
1921637bf67204f6d4521f412444523581176738
|
afb16c3188bf06af65ae0d998e114c72342bd8be
|
/note/demo/python_trace/demo2.py
|
69e2891cccff56b373a8630dfd6f7efb23775614
|
[] |
no_license
|
onsunsl/onsunsl.github.io
|
aa75f399f1c647bc2e62314633bfe35187e59ad4
|
4ed2b1b9a2407afcbffdf304020d42b81c4c8cdc
|
refs/heads/master
| 2023-05-26T12:33:11.167270 | 2023-04-01T10:18:05 | 2023-04-01T10:18:05 | 237,595,319 | 1 | 0 | null | 2023-05-23T20:13:11 | 2020-02-01T10:02:58 |
Python
|
UTF-8
|
Python
| false | false | 490 |
py
|
import os
from time import sleep
import signal
import sys
from traceback import extract_stack
def sigterm_handler(_signo, _stack_frame):
# Raises SystemExit(0):
f = open("./1.txt", "w")
f.write("sigterm_handler")
f.close()
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
try:
print(os.getpid(), os.getppid())
print("Hello")
i = 0
while True:
i += 1
print("Iteration #%i" % i)
sleep(1)
finally:
print("Goodbye")
|
[
"[email protected]"
] | |
e3bcf5984f2cde90d396e03b2e11d82015d67e8c
|
3cedc7c1519d3b013aad9ec4e6a6ee7834da7589
|
/python_code/多线程开发/E_多线程使用共享数据.py
|
65fc69e75f8ee5a199ae857933d77ea27bd7330c
|
[] |
no_license
|
hzrg/songqin_course
|
53437100669ee93d2ac5ecae5de938b1a4007d7f
|
05e422ce34a42fd6d3819722a19252f8005e79ed
|
refs/heads/master
| 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,631 |
py
|
# coding=utf8
import threading
from time import sleep
# Alipay account balances
zhifubao = {
'jcy' : 2000,
'liming' : 5000,
'wangan' : 15000,
'zhaolei' : 6005000,
}
# Thread 1: DiDi ride-hailing payment; arguments are the user account and the amount to deduct
def thread1_didi_pay(account,amount):
print('* t1: get balance from bank')
balance = zhifubao[account]
    # the sleep(2) below stands in for some processing that takes 2 seconds
print('* t1: do something(like discount lookup) for 2 seconds')
sleep(2)
print('* t1: deduct')
zhifubao[account] = balance - amount
# Thread 2: Yu'e Bao interest handling; arguments are the user account and the current interest
def thread2_yuebao_interest(account,amount):
print('$ t2: get balance from bank')
balance = zhifubao[account]
    # the sleep(1) below stands in for some processing that takes 1 second
print('$ t2: do something2.... for 1 seconds')
sleep(1)
print('$ t2: add')
zhifubao[account] = balance + amount
t1 = threading.Thread(target=thread1_didi_pay, args=('jcy',10))
t2 = threading.Thread(target=thread2_yuebao_interest, args=('jcy',10))
t1.start()
t2.start()
t1.join()
t2.join()
print('finally, jcy balance is %s' % zhifubao['jcy'])
"""
正常来说,金额应该不变的,但是由于使用共享数据,导致的问题,
2个线程同时start,同时使用的是共享的数据2000,第二个线程
先结束,变成2010,存回列表,但是第一个线程此时使用的还是开始的2000,
第一个线程结束后,就是1990,覆盖掉2010;
解决方法,加锁。
"""
|
[
"[email protected]"
] | |
4d2a3ab4f356b1581b21a231111a088874cc611e
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/onnx/_internal/diagnostics/infra/sarif/_suppression.py
|
c1dcb014809d994a4777917e5e1764388b48dff5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937 | 2023-08-03T00:40:33 | 2023-08-03T04:14:52 | 65,600,975 | 77,092 | 24,610 |
NOASSERTION
| 2023-09-14T21:58:39 | 2016-08-13T05:26:41 |
Python
|
UTF-8
|
Python
| false | false | 1,249 |
py
|
# DO NOT EDIT! This file was generated by jschema_to_python version 0.0.1.dev29,
# with extension for dataclasses and type annotation.
from __future__ import annotations
import dataclasses
from typing import Literal, Optional
from torch.onnx._internal.diagnostics.infra.sarif import _location, _property_bag
@dataclasses.dataclass
class Suppression(object):
"""A suppression that is relevant to a result."""
kind: Literal["inSource", "external"] = dataclasses.field(
metadata={"schema_property_name": "kind"}
)
guid: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "guid"}
)
justification: Optional[str] = dataclasses.field(
default=None, metadata={"schema_property_name": "justification"}
)
location: Optional[_location.Location] = dataclasses.field(
default=None, metadata={"schema_property_name": "location"}
)
properties: Optional[_property_bag.PropertyBag] = dataclasses.field(
default=None, metadata={"schema_property_name": "properties"}
)
state: Optional[Literal["accepted", "underReview", "rejected"]] = dataclasses.field(
default=None, metadata={"schema_property_name": "state"}
)
# flake8: noqa
|
[
"[email protected]"
] | |
7691802558073b399b3e21487c2b7faf90c162dc
|
b250b3f74b30ad29f65acab3040433473a259cc1
|
/src/_23a.py
|
cdd79900dd2c709eacf9c37588896d815d22132b
|
[] |
no_license
|
Abarn279/advent-of-code-2015
|
0cc6ce58ba443335fd9dcd451e327cec01fd3e96
|
8fbf0b2bc576556d5351d64b93c972a6f6ec8020
|
refs/heads/master
| 2021-06-28T09:11:28.905618 | 2020-11-30T22:02:10 | 2020-11-30T22:02:10 | 75,760,645 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,140 |
py
|
reg = {'a':1, 'b':0}
prog = '''jio a, +19
inc a
tpl a
inc a
tpl a
inc a
tpl a
tpl a
inc a
inc a
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
jmp +23
tpl a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
inc a
tpl a
inc a
tpl a
inc a
tpl a
inc a
inc a
tpl a
inc a
inc a
tpl a
tpl a
inc a
jio a, +8
inc b
jie a, +4
tpl a
inc a
jmp +2
hlf a
jmp -7
'''.split('\n')
i = 0
while i < len(prog):
line = prog[i]
inst = line[:3]
if inst == 'hlf':
r = prog[i].split(' ')[1]
        reg[r] = reg[r] // 2  # integer halving; registers hold ints
elif inst == 'tpl':
r = prog[i].split(' ')[1]
reg[r] = reg[r] * 3
elif inst == 'inc':
r = prog[i].split(' ')[1]
reg[r] = reg[r] + 1
elif inst == 'jmp':
o = prog[i].split(' ')[1]
i = i + int(o)
continue
elif inst == 'jie':
[inst, r, o] = prog[i].split(' ')
r = r[:-1]
if reg[r] % 2 == 0:
i = i + int(o)
continue
elif inst == 'jio':
[inst, r, o] = prog[i].split(' ')
r = r[:-1]
if reg[r] == 1:
i = i + int(o)
continue
i += 1
print(reg)
|
[
"[email protected]"
] | |
9491cccb3a1203f18678ca88d25a374d6c280612
|
a06fd6b7b4e5fc2b1b5a46b4edd20a11f717a5ea
|
/netbox/extras/filters.py
|
d0a801b481f55cfc6f08e7f6c154b2c803fd170f
|
[
"Apache-2.0"
] |
permissive
|
feiynagly/netbox
|
d9be722eaa5021cf39e82c19c3e4562dedd94254
|
d364bbbaa6ee4f2a19015d07dd0de855628befb4
|
refs/heads/master
| 2022-12-04T04:41:29.052349 | 2021-05-11T07:13:56 | 2021-05-11T07:13:56 | 173,664,986 | 1 | 1 |
Apache-2.0
| 2022-11-22T03:12:55 | 2019-03-04T03:10:07 |
Python
|
UTF-8
|
Python
| false | false | 7,182 |
py
|
import django_filters
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from taggit.models import Tag
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from .constants import CF_FILTER_DISABLED, CF_FILTER_EXACT, CF_TYPE_BOOLEAN, CF_TYPE_SELECT
from .models import ConfigContext, CustomField, Graph, ExportTemplate, ObjectChange, TopologyMap
class CustomFieldFilter(django_filters.Filter):
"""
Filter objects by the presence of a CustomFieldValue. The filter's name is used as the CustomField name.
"""
def __init__(self, custom_field, *args, **kwargs):
self.cf_type = custom_field.type
self.filter_logic = custom_field.filter_logic
super().__init__(*args, **kwargs)
def filter(self, queryset, value):
# Skip filter on empty value
if value is None or not value.strip():
return queryset
# Selection fields get special treatment (values must be integers)
if self.cf_type == CF_TYPE_SELECT:
try:
# Treat 0 as None
if int(value) == 0:
return queryset.exclude(
custom_field_values__field__name=self.field_name,
)
# Match on exact CustomFieldChoice PK
else:
return queryset.filter(
custom_field_values__field__name=self.field_name,
custom_field_values__serialized_value=value,
)
except ValueError:
return queryset.none()
# Apply the assigned filter logic (exact or loose)
if self.cf_type == CF_TYPE_BOOLEAN or self.filter_logic == CF_FILTER_EXACT:
queryset = queryset.filter(
custom_field_values__field__name=self.field_name,
custom_field_values__serialized_value=value
)
else:
queryset = queryset.filter(
custom_field_values__field__name=self.field_name,
custom_field_values__serialized_value__icontains=value
)
return queryset
class CustomFieldFilterSet(django_filters.FilterSet):
"""
Dynamically add a Filter for each CustomField applicable to the parent model.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
obj_type = ContentType.objects.get_for_model(self._meta.model)
custom_fields = CustomField.objects.filter(obj_type=obj_type).exclude(filter_logic=CF_FILTER_DISABLED)
for cf in custom_fields:
self.filters['cf_{}'.format(cf.name)] = CustomFieldFilter(field_name=cf.name, custom_field=cf)
class GraphFilter(django_filters.FilterSet):
class Meta:
model = Graph
fields = ['type', 'name']
class ExportTemplateFilter(django_filters.FilterSet):
class Meta:
model = ExportTemplate
fields = ['content_type', 'name']
class TagFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
class Meta:
model = Tag
fields = ['name', 'slug']
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value) |
Q(slug__icontains=value)
)
class TopologyMapFilter(django_filters.FilterSet):
site_id = django_filters.ModelMultipleChoiceFilter(
field_name='site',
queryset=Site.objects.all(),
label='Site',
)
site = django_filters.ModelMultipleChoiceFilter(
field_name='site__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
class Meta:
model = TopologyMap
fields = ['name', 'slug']
class ConfigContextFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
region_id = django_filters.ModelMultipleChoiceFilter(
field_name='regions',
queryset=Region.objects.all(),
label='Region',
)
region = django_filters.ModelMultipleChoiceFilter(
field_name='regions__slug',
queryset=Region.objects.all(),
to_field_name='slug',
label='Region (slug)',
)
site_id = django_filters.ModelMultipleChoiceFilter(
field_name='sites',
queryset=Site.objects.all(),
label='Site',
)
site = django_filters.ModelMultipleChoiceFilter(
field_name='sites__slug',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
role_id = django_filters.ModelMultipleChoiceFilter(
field_name='roles',
queryset=DeviceRole.objects.all(),
label='Role',
)
role = django_filters.ModelMultipleChoiceFilter(
field_name='roles__slug',
queryset=DeviceRole.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
platform_id = django_filters.ModelMultipleChoiceFilter(
field_name='platforms',
queryset=Platform.objects.all(),
label='Platform',
)
platform = django_filters.ModelMultipleChoiceFilter(
field_name='platforms__slug',
queryset=Platform.objects.all(),
to_field_name='slug',
label='Platform (slug)',
)
tenant_group_id = django_filters.ModelMultipleChoiceFilter(
field_name='tenant_groups',
queryset=TenantGroup.objects.all(),
label='Tenant group',
)
tenant_group = django_filters.ModelMultipleChoiceFilter(
field_name='tenant_groups__slug',
queryset=TenantGroup.objects.all(),
to_field_name='slug',
label='Tenant group (slug)',
)
tenant_id = django_filters.ModelMultipleChoiceFilter(
field_name='tenants',
queryset=Tenant.objects.all(),
label='Tenant',
)
tenant = django_filters.ModelMultipleChoiceFilter(
field_name='tenants__slug',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
class Meta:
model = ConfigContext
fields = ['name', 'is_active']
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value) |
Q(description__icontains=value) |
Q(data__icontains=value)
)
class ObjectChangeFilter(django_filters.FilterSet):
q = django_filters.CharFilter(
method='search',
label='Search',
)
time = django_filters.DateTimeFromToRangeFilter()
class Meta:
model = ObjectChange
fields = ['user', 'user_name', 'request_id', 'action', 'changed_object_type', 'object_repr']
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(user_name__icontains=value) |
Q(object_repr__icontains=value)
)
|
[
"[email protected]"
] | |
b89827e7bd2186efac21f3de64db0f0df6ff1c32
|
c2296f56df3b934f824be07338e14bccf7c0e34f
|
/url_classification/data/movie_reviews/__init__.py
|
b3a85173320bf97854087bfab6ecbd94c0f6812c
|
[] |
no_license
|
jayceyxc/MachineLearning
|
b190c141be714f4ef7d8b79fab1d0cddc6b7cfcb
|
793179dab920725866c4fac4d2bae8e1a570d122
|
refs/heads/master
| 2022-04-16T21:39:05.652266 | 2020-04-14T07:51:04 | 2020-04-14T07:51:04 | 140,239,558 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: ‘yuxuecheng‘
@contact: [email protected]
@software: PyCharm Community Edition
@file: __init__.py.py
@time: 2017/8/7 12:18
"""
|
[
"[email protected]"
] | |
4d352594e3d2b3e79f5ea48063fc2959abef8c5b
|
3c31584c1b661195a567ffd2603d30cb2e270493
|
/codeforces/864/D.py
|
86f83b4c6f59f1a9df0e1846a628d8b628115a0c
|
[] |
no_license
|
ku-nal/Codeforces
|
c7f621e35b5d4eea1ed11276ee8e91031252ca91
|
df43c2fcbcfd1c9f96b6fe79c7abc9ddee054cb7
|
refs/heads/main
| 2023-04-10T19:00:40.559074 | 2021-04-27T15:15:51 | 2021-04-27T15:15:51 | 362,154,763 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,877 |
py
|
#===========Template===============
from io import BytesIO, IOBase
import sys,os
inpl=lambda:list(map(int,input().split()))
inpm=lambda:map(int,input().split())
inpi=lambda:int(input())
inp=lambda:input()
rev,ra,l=reversed,range,len
P=print
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
def factors(n):
return list(set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0))))
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
def input(): return sys.stdin.readline().rstrip("\r\n")
#=========I/p O/p ========================================#
from bisect import bisect_left as bl
from bisect import bisect_right as br
import sys,operator,math
from collections import Counter
import random
from functools import reduce
#============== So, let's get started ====================#
n=inpi()
li=inpl()
omap=Counter(li)
arr=[]
for i in ra(1,n+1):
if i not in omap:
arr.append(i)
c,ans=0,0
omap1={}
for i in ra(n):
if c<len(arr) and omap[li[i]]>1 and li[i] not in omap1:
if arr[c]>li[i]:
omap1[li[i]]=1
omap[li[i]]-=1
else:
omap[li[i]]-=1
li[i]=arr[c]
ans+=1
c+=1
elif omap[li[i]]>=1 and li[i] in omap1:
omap[li[i]]-=1
li[i]=arr[c]
ans+=1
c+=1
P(ans)
P(*li)
|
[
"[email protected]"
] | |
24e479bc14d88a4d856866a9475952562dcc6177
|
da7a165522daea7c346693c5f32850017c482967
|
/abc51-100/abc051/c.py
|
ad218ed2b388081ae9705ec0c52f82e5979ea0be
|
[] |
no_license
|
SShayashi/ABC
|
19f8750919208c5ff8935638dbaab941c255f914
|
3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c
|
refs/heads/master
| 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
def m():
sx, sy, tx, ty = map(int, input().split())
X = tx-sx
Y = ty-sy
ans = ""
    # one full lap around the grid
ans += "U" * Y
ans += "R" * X
ans += "D" * Y
ans += "L" * X
    # shift one step left, then up to the destination again
ans += "L"
ans += "U" * (Y+1)
ans += "R" * (X+1)
ans += "D"
    # shift one step right, then back to the start
ans += "R"
ans += "D" * (Y+1)
ans += "L" * (X+1)
ans += "U"
return ans
print(m())
|
[
"[email protected]"
] | |
cb46e9e19fae34da7ec6451e0dfeb1b3222bff77
|
4c34dca6c12dd36e9e8eb360a2cbbb3f39a50e20
|
/scratchpad/scratch.py
|
4698f26325561e0b97de44eeba25d723830a5498
|
[
"BSD-3-Clause"
] |
permissive
|
PlumpMath/m2py
|
a35e0265d9e3c46214c9560b46a9e59df63c9a9b
|
4a8f754f04adb151b1967fe13b8f80b4ec169560
|
refs/heads/master
| 2021-01-18T20:16:37.973122 | 2015-01-30T11:29:15 | 2015-01-30T11:29:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
txt = """
>>> t = [1, 2, 3, 4, 5]
>>> map(lambda x: x**2, t)
[1, 4, 9, 16, 25]
>>> t
[1, 2, 3, 4, 5]
>>> zip(t, map(lambda x: x**2, t))
[(1, 1), (2, 4), (3, 9), (4, 16), (5, 25)]
>>>
"""
def paste_run():
global txt
import re
from .utils import xclip
#txt = xclip()
#txt = txt.strip('\n').strip('\r')
#print txt
# Replace bad character
txt = txt.replace('’', "'")
    # Remove lines not starting with >>>
lines = [x for x in txt.splitlines() if x.startswith(">>>")]
# Remove >>> from beginning of lines
lines = [x.split(">>>")[1].strip() for x in lines]
#nextxt = "\n".join(lines)
#exec(nextxt)
for line in lines:
print(">>> ", line)
if not line:
continue
if re.match(".*=.*", line):
exec(line)
else:
print(eval(line))
paste_run()
|
[
"[email protected]"
] | |
501d97a1367b23e6209650cac4c62ceab7531ec4
|
ee1eed00f04fe4050a9b7d9761a76af37842b8b1
|
/dynamicportofolio/migrations/0001_initial.py
|
9c57f02e620c776d880992704da0cded5c914ad2
|
[] |
no_license
|
dimansion/dango
|
707b738d1df735a1019e44a53b095d9af4e2a44a
|
4239531849cef3f6c00ff3ba1e38c768a8648e0f
|
refs/heads/master
| 2020-12-24T08:30:31.614872 | 2016-09-07T13:45:33 | 2016-09-07T13:45:33 | 36,284,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 733 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('description', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(null=True, blank=True)),
],
),
]
|
[
"[email protected]"
] | |
2e6d529fae8f08a9ea454cbd51d7dad90e159848
|
2611f7e793c52d7bc60be2772cf66b3704c05876
|
/14B-088/HI/analysis/rotation_curves/rotsub_channels_movie.py
|
dd2ef9206c76b7ce8fe348324c48c81dbc5dd5fa
|
[
"MIT"
] |
permissive
|
e-koch/VLA_Lband
|
15e89878f554a70f0bc2a19cb7c5cb1b825f3ced
|
8fca7b2de0b88ce5c5011b34bf3936c69338d0b0
|
refs/heads/master
| 2022-11-29T01:43:22.069806 | 2020-12-21T19:48:22 | 2020-12-21T19:48:22 | 42,543,618 | 2 | 2 |
MIT
| 2022-11-25T15:38:46 | 2015-09-15T20:06:58 |
Python
|
UTF-8
|
Python
| false | false | 2,386 |
py
|
import numpy as np
import matplotlib.pyplot as p
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import average_beams
from astropy.utils.console import ProgressBar
from astropy import units as u
from astropy.visualization import AsinhStretch
from astropy.visualization.mpl_normalize import ImageNormalize
import warnings
import matplotlib.animation as anim
from paths import fourteenB_HI_data_wGBT_path, allfigs_path, fourteenB_wGBT_HI_file_dict
from constants import hi_freq
'''
Channel plots of the rotation subtracted HI cube combined into a movie!
Borrowing code from @keflavich:
https://github.com/keflavich/paper_w51_evla/blob/master/plot_codes/h77a_layers.py
'''
cube = SpectralCube.read(fourteenB_wGBT_HI_file_dict['RotSube_Cube'])
# Begin channel map code here
# integrate over velocities to make channel maps of a set width
vstart = 0 # channels
vend = cube.shape[0]
vstep = 10
all_slabs = np.arange(vstart, vend + vstep, vstep, dtype=int)
# Define the average beam
try:
beam = cube.beam
except AttributeError:
beam = average_beams(cube.beams)
layers = \
[cube[start:end].moment0().value *
beam.jtok(hi_freq) / 1000. * u.km / u.s
for start, end in
ProgressBar(zip(all_slabs[:-1], all_slabs[1:]))]
# Scale all to the maximum
mx = np.max([np.nanmax(x).value for x in layers])
spec_axis = cube.spectral_axis.to(u.km / u.s).value
center_vels = [(spec_axis[start] + spec_axis[min(end, cube.shape[0] - 1)]) / 2. for start, end in
zip(all_slabs[:-1], all_slabs[1:])]
pb = ProgressBar(len(center_vels))
fig = p.figure()
ax = fig.add_subplot(111)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
p.tight_layout()
def updater(i):
pb.update()
layer = layers[i]
im = ax.imshow(layer.value, origin='lower',
norm=ImageNormalize(vmin=-0.001,
vmax=mx,
stretch=AsinhStretch()),
cmap=p.cm.gray_r)
# ax.annotate("${0:.0f} km/s$".format(center_vels[i]),
# (0.53, 0.9),
# xycoords='axes fraction', color='k',
# fontsize=15.5)
ani = anim.FuncAnimation(fig, updater, range(len(center_vels)))
# p.show()
writer = anim.writers['ffmpeg'](fps=4)
ani.save(allfigs_path("m33_rotsub_movie.mp4"), writer=writer, dpi=300)
|
[
"[email protected]"
] | |
5623d1d86e28812e453b1b0d2b6bad08204a8e8a
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/web_editor/models/test_models.py
|
282b703c03d208e9b44cd2a107f060d5c20fe103
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 |
Apache-2.0
| 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null |
UTF-8
|
Python
| false | false | 1,266 |
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields
class ConverterTest(models.Model):
_name = 'web_editor.converter.test'
_description = 'Web Editor Converter Test'
# disable translation export for those brilliant field labels and values
_translate = False
char = fields.Char()
integer = fields.Integer()
float = fields.Float()
numeric = fields.Float(digits=(16, 2))
many2one = fields.Many2one('web_editor.converter.test.sub')
binary = fields.Binary(attachment=False)
date = fields.Date()
datetime = fields.Datetime()
selection_str = fields.Selection([
('A', "Qu'il n'est pas arrivé à Toronto"),
('B', "Qu'il était supposé arriver à Toronto"),
('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
('D', "La réponse D"),
], string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
u"qu'il fait une escale technique à St Claude, on dit:")
html = fields.Html()
text = fields.Text()
class ConverterTestSub(models.Model):
_name = 'web_editor.converter.test.sub'
_description = 'Web Editor Converter Subtest'
name = fields.Char()
|
[
"[email protected]"
] | |
9bfefdedb6210274b7005f49c69bd92d3e256979
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/96/usersdata/215/54936/submittedfiles/estatistica.py
|
2396e9941096556e8218859d3628ab019b398ed1
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 760 |
py
|
# -*- coding: utf-8 -*-
n=int(input('enter n:'))
a=[]
b=[]
somaA=0
difquadA=0
sdqA=0
somaB=0
difquadB=0
sdqB=0
for z in range(1 ,n+1 ,1):
    valorA=float(input('value for list A:'))
a.append(valorA)
for i in range (0,len(a),1):
    somaA=somaA+a[i]
mediaA=somaA/len(a)
for j in range (0 , len(a), 1):
    difquadA=(a[j]-mediaA)**2
    sdqA=sdqA+difquadA
varA=sdqA/(len(a)-1)
devioA=varA**0.5
for z in range (1,n+1,1):
    valorB=float(input('value for list B:'))
b.append(valorB)
for i in range (0,len(b),1):
    somaB=somaB+b[i]
mediaB=somaB/len(b)
for j in range (0,len(b),1):
    difquadB=(b[j]-mediaB)**2
    sdqB=sdqB+difquadB
varB=sdqB/(len(b)-1)
devioB=varB**0.5
print('%.2f' %mediaA)
print('%.2f' %devioA)
print('%.2f' %mediaB)
print('%.2f' %devioB)
|
[
"[email protected]"
] | |
48d258b6d821fc4ab55853b8287503e12dcf9ba2
|
585bac463cb1919ac697391ff130bbced73d6307
|
/105_ConstructBinaryTreeFromPreorderAndInorderTraversal/solution_1.py
|
729fb58003e0825a66c681dcd89d745020540bf4
|
[] |
no_license
|
llgeek/leetcode
|
ce236cf3d3e3084933a7a4a5e8c7766f7f407285
|
4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c
|
refs/heads/master
| 2021-01-22T23:44:13.318127 | 2020-03-11T00:59:05 | 2020-03-11T00:59:05 | 85,667,214 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 740 |
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
self.preidx = 0
def helper(sidx, eidx):
if sidx > eidx:
return None
root = TreeNode(preorder[self.preidx])
self.preidx += 1
i = sidx
while i <= eidx:
if inorder[i] == root.val:
break
i += 1
root.left = helper(sidx, i-1)
root.right = helper(i+1, eidx)
return root
return helper(0, len(inorder)-1)
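# Hedged worked example (editor's addition): with preorder [3,9,20,15,7] and
# inorder [9,3,15,20,7], preorder[0] = 3 is the root; everything left of 3 in
# inorder ([9]) rebuilds the left subtree and everything right ([15,20,7]) the
# right subtree, recursively yielding:
#       3
#      / \
#     9  20
#        / \
#       15  7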
|
[
"[email protected]"
] | |
5e4f0f125a0d414df5abb90f65a10363540cd67a
|
d204538b66b477fea7289c6ca9801919f6fbd09e
|
/demo/start_demo.py
|
9fbf7bd3389d6acb37fb7a02802831d838bc6f38
|
[
"Apache-2.0"
] |
permissive
|
TrendingTechnology/openchat
|
efb8194f38bc809ffca165d65ae13c1f10771b84
|
cee89e3acff33ef598bf3dfe6d2e13a418a9a0aa
|
refs/heads/main
| 2023-03-26T03:33:52.876583 | 2021-03-04T05:17:28 | 2021-03-04T05:17:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 132 |
py
|
from openchat import OpenChat
from demo.web_demo_env import WebDemoEnv
OpenChat(model="blenderbot", size="large", env=WebDemoEnv())
|
[
"[email protected]"
] | |
e70b9829664b9c6f71b685b5dd938706773b2eac
|
91add811783a4f19c7474e92ee87b91d9035a9ae
|
/segmentation_pytorch/models/unet/layers.py
|
eca45cbd03bdfc02b7e567b8042babd3c2a61240
|
[] |
no_license
|
NIRVANALAN/PyTorch_UNOdeMSegNet
|
fb0f0f992444dd7b41102b3896e9f2866873fee4
|
49b577cef650a4bcb3d5c4879bef2d97982e5f4c
|
refs/heads/master
| 2022-12-22T15:14:22.929861 | 2020-03-21T05:22:40 | 2020-03-21T05:22:40 | 201,301,445 | 3 | 3 | null | 2022-12-08T03:14:09 | 2019-08-08T16:58:08 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,206 |
py
|
import torch
import torch.nn as nn
from .utils import init_weights
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
super(unetConv2, self).__init__()
self.n = n
self.ks = ks
self.stride = stride
self.padding = padding
s = stride
p = padding
if is_batchnorm:
for i in range(1, n+1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.BatchNorm2d(out_size),
nn.ReLU(inplace=True),)
setattr(self, 'conv%d' % i, conv)
in_size = out_size
else:
for i in range(1, n+1):
conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
nn.ReLU(inplace=True),)
setattr(self, 'conv%d' % i, conv)
in_size = out_size
# initialise the blocks
for m in self.children():
init_weights(m, init_type='kaiming')
def forward(self, inputs):
x = inputs
for i in range(1, self.n+1):
conv = getattr(self, 'conv%d' % i)
x = conv(x)
return x
class unetUp(nn.Module):
def __init__(self, in_size, out_size, is_deconv, n_concat=2):
super(unetUp, self).__init__()
self.conv = unetConv2(in_size+(n_concat-2)*out_size, out_size, False)
if is_deconv:
self.up = nn.ConvTranspose2d(
in_size, out_size, kernel_size=2, stride=2, padding=0)
else:
self.up = nn.Sequential(
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(in_size, out_size, 1))
# initialise the blocks
for m in self.children():
if m.__class__.__name__.find('unetConv2') != -1:
continue
init_weights(m, init_type='kaiming')
def forward(self, high_feature, *low_feature):
outputs0 = self.up(high_feature)
for feature in low_feature:
outputs0 = torch.cat([outputs0, feature], 1)
return self.conv(outputs0)
|
[
"[email protected]"
] | |
e6cd00e49f7d1ca2bed65faf4373545c7d8492ce
|
8698757521458c2061494258886e5d3cdfa6ff11
|
/argo/core/network/Bernoulli.py
|
c79dc46905b9ebaf1716bb2d64646f650601ff94
|
[
"MIT"
] |
permissive
|
ricvo/argo
|
546c91e84d618c4bc1bb79a6bc7cba01dca56d57
|
a10c33346803239db8a64c104db7f22ec4e05bef
|
refs/heads/master
| 2023-02-25T01:45:26.412280 | 2020-07-05T22:55:35 | 2020-07-05T22:55:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,097 |
py
|
import tensorflow as tf
import sonnet as snt
import numpy as np
from operator import xor
import types
import pdb
from abc import ABC, abstractmethod
from tensorflow_probability import distributions as tfd
from .AbstractModule import AbstractModule
class Bernoulli(AbstractModule):
def __init__(self, output_size=-1, output_shape=-1, initializers={}, regularizers={}, clip_value=0, dtype=None,
name='Bernoulli'):
super().__init__(name = name)
        assert xor(output_size==-1, output_shape==-1), "Either output_size or output_shape must be specified, not both"
if output_size!=-1:
self._output_shape = [output_size]
else:
self._output_shape = output_shape
self._initializers = initializers
self._regularizers = regularizers
self._clip_value = clip_value
self._dtype=dtype
def _build(self, inputs):
# create the layers for mean and covariance
output_shape = [-1] + self._output_shape
logits = tf.reshape(snt.Linear(np.prod(self._output_shape), initializers=self._initializers, regularizers=self._regularizers)(inputs),output_shape)
dtype = inputs.dtype
if self._dtype is not None:
dtype = self._dtype
if self._clip_value > 0:
probs = tf.nn.sigmoid(logits)
probs = tf.clip_by_value(probs, self._clip_value, 1 - self._clip_value)
bernoulli = tfd.Bernoulli(probs=probs, dtype=dtype)
else:
bernoulli = tfd.Bernoulli(logits=logits, dtype=dtype)
def reconstruction_node(self):
return self.mean()
bernoulli.reconstruction_node = types.MethodType(reconstruction_node, bernoulli)
def distribution_parameters(self):
return [self.mean()]
bernoulli.distribution_parameters = types.MethodType(distribution_parameters, bernoulli)
def get_probs(self):
return self.probs
bernoulli.get_probs = types.MethodType(get_probs, bernoulli)
return bernoulli
|
[
"[email protected]"
] | |
02c3a4438f148ad6f4507b2fe5038d1f2d498bd3
|
144b18db9f190daf499df56f555cfc064bfa42f3
|
/pysoa/test/plan/grammar/directives/time.py
|
60158f54599153c7b636798e6b2e839efb164050
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
cache51/pysoa
|
3eff93d9db7cc125ae016f29d294c5263fdfa692
|
fd37d64dfefff01ff0f7f48e225e0d672b36b5db
|
refs/heads/master
| 2020-03-15T14:25:56.935337 | 2018-05-04T14:56:04 | 2018-05-04T14:56:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,955 |
py
|
"""
Directives for freezing time during test execution
"""
from __future__ import absolute_import, unicode_literals
import datetime
from pyparsing import Literal
from pysoa.test.plan.errors import FixtureSyntaxError
from pysoa.test.plan.grammar.directive import (
Directive,
ActionDirective,
register_directive,
VarValueGrammar
)
try:
from freezegun import freeze_time
except ImportError:
freeze_time = None
class FreezeTimeMixin(object):
@staticmethod
def parse_and_store_freeze_to(target, value, file_name, line_number):
if not freeze_time:
raise FixtureSyntaxError(
'Could not import freezegun to support freeze time syntax. Perhaps you need to install it?',
file_name,
line_number,
)
if value == 'now':
freeze_to = None
else:
try:
freeze_to = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise FixtureSyntaxError('Could not parse datetime value for time freeze', file_name, line_number)
target['_freezegun_freeze_time'] = freeze_to
@staticmethod
def start_freeze(target):
if '_freezegun_freeze_time' in target:
target['_freezegun_context'] = freeze_time(target['_freezegun_freeze_time'])
target['_freezegun_context'].start()
@staticmethod
def stop_freeze(target):
if '_freezegun_context' in target:
target['_freezegun_context'].stop()
del target['_freezegun_context']
class FreezeTimeTestPlanDirective(Directive, FreezeTimeMixin):
"""
Freeze Time using freezegun for the duration of an entire test plan.
This will span all actions within the plan, no matter where the statement is located.
"""
@classmethod
def name(cls):
return 'freeze_time_test'
@classmethod
def get_full_grammar(cls):
return (
Literal('freeze time') +
':' +
VarValueGrammar
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
self.parse_and_store_freeze_to(test_case, parse_results.value, file_name, line_number)
self.start_freeze(test_case)
def post_parse_test_case(self, test_case):
self.stop_freeze(test_case)
def set_up_test_case(self, test_case, test_fixture, **kwargs):
self.start_freeze(test_case)
def tear_down_test_case(self, test_case, test_fixture, **kwargs):
self.stop_freeze(test_case)
def assert_test_case_action_results(self, *args, **kwargs):
pass
class FreezeTimeActionDirective(ActionDirective, FreezeTimeMixin):
"""
Freeze Time using freezegun for the duration of a single action.
"""
@classmethod
def name(cls):
return 'freeze_time_action'
@classmethod
def get_full_grammar(cls):
return (
super(FreezeTimeActionDirective, cls).get_full_grammar() +
Literal('freeze time') +
':' +
VarValueGrammar
)
def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
self.parse_and_store_freeze_to(action_case, parse_results.value, file_name, line_number)
self.start_freeze(action_case)
def post_parse_test_case_action(self, action_case, test_case):
self.stop_freeze(action_case)
def set_up_test_case_action(self, action_name, action_case, test_case, test_fixture, **kwargs):
self.start_freeze(action_case)
def tear_down_test_case_action(self, action_name, action_case, test_case, test_fixture, **kwargs):
self.stop_freeze(action_case)
def assert_test_case_action_results(self, *args, **kwargs):
pass
register_directive(FreezeTimeTestPlanDirective)
register_directive(FreezeTimeActionDirective)
|
[
"[email protected]"
] | |
8e468456067fa4b93a3f6a54a9cf2fc969db6b19
|
1e1f7d3687b71e69efa958d5bbda2573178f2acd
|
/payroll/doctype/attendance/attendance.py
|
664a3cb0a3cc17293706a9f4e4f0e4d3d86d2577
|
[] |
no_license
|
ravidey/erpnext
|
680a31e2a6b957fd3f3ddc5fd6b383d8ea50f515
|
bb4b9bfa1551226a1d58fcef0cfe8150c423f49d
|
refs/heads/master
| 2021-01-17T22:07:36.049581 | 2011-06-10T07:32:01 | 2011-06-10T07:32:01 | 1,869,316 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,537 |
py
|
# Please edit this list and import only required elements
import webnotes
from webnotes.utils import add_days, add_months, add_years, cint, cstr, date_diff, default_fields, flt, fmt_money, formatdate, generate_hash, getTraceback, get_defaults, get_first_day, get_last_day, getdate, has_common, month_name, now, nowdate, replace_newlines, sendmail, set_default, str_esc_quote, user_format, validate_email_add
from webnotes.model import db_exists
from webnotes.model.doc import Document, addchild, removechild, getchildren, make_autoname, SuperDocType
from webnotes.model.doclist import getlist, copy_doclist
from webnotes.model.code import get_obj, get_server_obj, run_server_obj, updatedb, check_syntax
from webnotes import session, form, is_testing, msgprint, errprint
set = webnotes.conn.set
sql = webnotes.conn.sql
get_value = webnotes.conn.get_value
in_transaction = webnotes.conn.in_transaction
convert_to_lists = webnotes.conn.convert_to_lists
# -----------------------------------------------------------------------------------------
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
# Notification objects
self.badge_obj = get_obj('Badge Settings','Badge Settings','','',1)
#autoname function
def autoname(self):
self.doc.name = make_autoname(self.doc.naming_series+'.#####')
#get employee name based on employee id selected
def get_emp_name(self):
emp_nm = sql("select employee_name from `tabEmployee` where name=%s", self.doc.employee)
#this is done because sometimes user entered wrong employee name while uploading employee attendance
set(self.doc, 'employee_name', emp_nm and emp_nm[0][0] or '')
ret = { 'employee_name' : emp_nm and emp_nm[0][0] or ''}
return str(ret)
#validation for duplicate record
def validate_duplicate_record(self):
res = sql("select name from `tabAttendance` where employee = '%s' and att_date = '%s' and not name = '%s' and docstatus = 1"%(self.doc.employee,self.doc.att_date, self.doc.name))
if res:
msgprint("Employee's attendance already marked.")
raise Exception
#validation - leave_type is mandatory for status absent/ half day else not required to entered.
def validate_status(self):
if self.doc.status == 'Present' and self.doc.leave_type:
msgprint("You can not enter leave type for attendance status 'Present'")
raise Exception
elif (self.doc.status == 'Absent' or self.doc.status == 'Half Day') and not self.doc.leave_type:
msgprint("Please enter leave type for attendance status 'Absent'")
raise Exception
#check for already record present in leave transaction for same date
def check_leave_record(self):
if self.doc.status == 'Present':
chk = sql("select name from `tabLeave Transaction` where employee=%s and (from_date <= %s and to_date >= %s) and status = 'Submitted' and leave_transaction_type = 'Deduction' and docstatus!=2", (self.doc.employee, self.doc.att_date, self.doc.att_date))
if chk:
msgprint("Leave Application created for employee "+self.doc.employee+" whom you are trying to mark as 'Present' ")
raise Exception
#For absent/ half day record - check for leave balances of the employees
def validate_leave_type(self):
if not self.doc.status =='Present' and self.doc.leave_type not in ('Leave Without Pay','Compensatory Off'):
#check for leave allocated to employee from leave transaction
ret = sql("select name from `tabLeave Transaction` where employee = '%s' and leave_type = '%s' and leave_transaction_type = 'Allocation' and fiscal_year = '%s'"%(self.doc.employee,self.doc.leave_type,self.doc.fiscal_year))
#if leave allocation is present then calculate leave balance i.e. sum(allocation) - sum(deduction)
if ret:
q1 = 'SUM(CASE WHEN leave_transaction_type = "Allocation" THEN total_leave ELSE 0 END)-SUM(CASE WHEN leave_transaction_type = "Deduction" THEN total_leave ELSE 0 END)'
q2 = "select %s from `tabLeave Transaction` where employee = '%s' and leave_type = '%s' and fiscal_year = '%s' and docstatus = 1"
res = sql(q2%(q1,self.doc.employee,self.doc.leave_type,self.doc.fiscal_year))
if res:
if self.doc.status == 'Absent' and flt(res[0][0]) < 1:
msgprint("%s balances are insufficient to cover a day absence, please select other leave type."%self.doc.leave_type)
raise Exception
if self.doc.status == 'Half Day' and flt(res[0][0]) < 0.5:
msgprint("%s balances are insufficient to cover a half day absence, please select other leave type."%self.doc.leave_type)
raise Exception
else:
msgprint("Leave Allocation for employee %s not done.\n You can allocate leaves from HR -> Leave Transaction OR HR -> Leave Control Panel."%self.doc.employee)
raise Exception
def validate_fiscal_year(self):
fy=sql("select year_start_date from `tabFiscal Year` where name='%s'"% self.doc.fiscal_year)
ysd=fy and fy[0][0] or ""
yed=add_days(str(ysd),365)
if str(self.doc.att_date) < str(ysd) or str(self.doc.att_date) > str(yed):
msgprint("'%s' Not Within The Fiscal Year selected"%(self.doc.att_date))
raise Exception
def validate_att_date(self):
import datetime
if getdate(self.doc.att_date)>getdate(datetime.datetime.now().date().strftime('%Y-%m-%d')):
msgprint("Attendance can not be marked for future dates")
raise Exception
# Validate employee
#-------------------
def validate_employee(self):
emp = sql("select name, status from `tabEmployee` where name = '%s'" % self.doc.employee)
if not emp:
msgprint("Employee: %s does not exists in the system" % self.doc.employee, raise_exception=1)
elif emp[0][1] != 'Active':
msgprint("Employee: %s is not Active" % self.doc.employee, raise_exception=1)
# validate...
def validate(self):
self.validate_fiscal_year()
self.validate_att_date()
#self.validate_leave_type()
self.validate_duplicate_record()
#self.validate_status()
self.check_leave_record()
def on_update(self):
#self.validate()
#this is done because sometimes user entered wrong employee name while uploading employee attendance
x=self.get_emp_name()
def on_submit(self):
		#this is done because while uploading attendance we change docstatus to 1 i.e. submit
set(self.doc,'docstatus',1)
pass
|
[
"[email protected]"
] | |
fd24b3900bc159123582a764faa95efbf5f54eef
|
99aa9b2be5199bf1b2f670bc9bb1a5bc7cec1c89
|
/OA/MS/Numbers With Equal Digit Sum.py
|
510e263aff6f09a6c2e3936c708e1801d3888015
|
[] |
no_license
|
SimonFans/LeetCode
|
5196e85dec886b18cb2350419a4a2ae3c751966c
|
0a34a19bb0979d58b511822782098f62cd86b25e
|
refs/heads/master
| 2023-02-08T00:49:30.916655 | 2023-01-31T06:32:32 | 2023-01-31T06:32:32 | 145,938,196 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
def find_digit_sum(num):
val = 0
while num:
val += num % 10
num //= 10
return val
def num_digit_equal_sum(arr):
digit_sum_map = {}
max_val = -1
for num in arr:
digit_sum = find_digit_sum(num)
if digit_sum in digit_sum_map:
other_val = digit_sum_map[digit_sum]
max_val = max(max_val, other_val + num)
digit_sum_map[digit_sum] = max(other_val, num)
else:
digit_sum_map[digit_sum] = num
return max_val
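# Quick check (illustrative, not part of the original file): digit sums of
# [51, 71, 17, 42] are [6, 8, 8, 6]; the best same-sum pair is 51 + 42.
# print(num_digit_equal_sum([51, 71, 17, 42]))  # -> 93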
|
[
"[email protected]"
] | |
0fb3c1a5ddf254ca4d04fb76e8f9943dfbef7bf9
|
738e2f18c6ca259fe3a6b0d4d70efd32d83a8758
|
/generate_bind_conf
|
b4eb80756ab0933abf30e137d32e4a0ab38762c8
|
[] |
no_license
|
nicferrier/secondarys
|
9f1e5a1abb616b1a8346be785de33f5667f44762
|
1d5998750686ec27ac2cfbe7542c60e3a6c33ad6
|
refs/heads/master
| 2016-09-05T19:13:51.949494 | 2012-06-17T21:42:15 | 2012-06-17T21:42:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 270 |
#!/usr/bin/env python
import csv
import sys
for entry in csv.reader(sys.stdin.readlines()):
print "zone \"%s\" {" % entry[0]
print "\ttype slave;"
print "\tfile \"/var/cache/bind/db.%s\";" % entry[0]
print "\tmasters { %s; };" % entry[1]
print "};"
|
[
"[email protected]"
] | ||
0cbd5474b71672cb168a892ee1b300395a042c70
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/turtledemo/lindenmayer.py
|
5f29811cc858c0d44403b5343333afd966e76012
|
[] |
no_license
|
aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 |
C#
|
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:af4e1ba8102f30f049caf1c4657df7ee1a0b79dd016ca78698a1cfe4067a7df7
size 2553
|
[
"[email protected]"
] | |
f391e38a5611fd6cdd88cebaaff3a9c04b3d4a5a
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/menus/models_20201030110920.py
|
d789727b7aff7632494fbbeb3ed03cb146885f45
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
from django.db import models
from modelcluster.models import ClusterableModel
# AutoSlugField was referenced without an import in this history snapshot;
# the django-extensions field of that name is assumed here.
from django_extensions.db.fields import AutoSlugField
# Create your models here.
class Menu(ClusterableModel):
    title = models.CharField(max_length=100)
    # The snapshot broke off mid-keyword at "populate_fro"; completed as
    # populate_from='title' to match the field above.
    slug = AutoSlugField(
        populate_from='title',
    )
|
[
"[email protected]"
] | |
c23d8ce5ad9ad476b4bb2bf58e618efab78a3471
|
ed454f31cf5a3d2605f275cc83ec82f34f06bb33
|
/zerver/views/pointer.py
|
7f015f01e2b6d0e5aac11ed4a96adc385d4a39ff
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
18-2-SKKU-OSS/2018-2-OSS-L5
|
b62a3ce53eff63ed09395dc1f8296fef089d90e2
|
190bc3afbf973d5917e82ad9785d01b2ea1773f2
|
refs/heads/master
| 2020-04-08T11:44:14.468373 | 2018-12-11T04:35:30 | 2018-12-11T04:35:30 | 159,317,980 | 3 | 4 |
Apache-2.0
| 2018-12-09T14:14:21 | 2018-11-27T10:30:18 |
Python
|
UTF-8
|
Python
| false | false | 1,186 |
py
|
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.models import UserProfile, UserMessage, get_usermessage_by_message_id
def get_pointer_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request: HttpRequest, user_profile: UserProfile,
pointer: int=REQ(converter=to_non_negative_int)) -> HttpResponse:
if pointer <= user_profile.pointer:
return json_success()
if get_usermessage_by_message_id(user_profile, pointer) is None:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, request.client, pointer, update_flags=update_flags)
return json_success()
|
[
"[email protected]"
] | |
8b5f46f03fd3acf298116d84ec5c3e44a9f3af84
|
a8750439f200e4efc11715df797489f30e9828c6
|
/CodeForces/login.py
|
785f5e468166714bb35241f17932e9b1ce0d062a
|
[] |
no_license
|
rajlath/rkl_codes
|
f657174305dc85c3fa07a6fff1c7c31cfe6e2f89
|
d4bcee3df2f501349feed7a26ef9828573aff873
|
refs/heads/master
| 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 234 |
py
|
a, b = [x for x in input().split()]
ans = a[0]
i = 1
j = 0
while i < len(a) or j < len(b):
    # Stop extending the first prefix once b's first character is no larger
    # than the next character of a; the original strict '<' kept equal
    # characters from a and produced a longer (larger) login, e.g. a='aa',
    # b='ab' gave 'aaa' instead of 'aa'.
    if i >= len(a) or b[j] <= a[i]:
ans += b[j]
j += 1
break
else:
ans += a[i]
i += 1
print(ans)
|
[
"[email protected]"
] | |
d4f0c626e2bd451c7704118209afe8adf6d93c47
|
93b88de2ae87c4d7bed4d545fe38c502e84e1ba6
|
/table/models.py
|
dee20de09b8933b6cbaa0e3a4cfd8823273031b1
|
[] |
no_license
|
jod35/empdata-table
|
b77fb8394f74cb71d50aeb1c2d5183d39f9fd5dd
|
4bda87eb8f54b4e53c3adc534002f50a7e46c5f8
|
refs/heads/master
| 2020-12-20T05:23:17.126355 | 2020-01-25T05:49:20 | 2020-01-25T05:49:20 | 235,975,783 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 415 |
py
|
from . import db
class Employee(db.Model):
id=db.Column(db.Integer(),primary_key=True)
name=db.Column(db.String(40),nullable=False)
age=db.Column(db.Integer(),nullable=False)
gender=db.Column(db.String(10),nullable=False)
salary=db.Column(db.Integer(),nullable=False)
residence=db.Column(db.String(25),nullable=False)
def __repr__(self):
return "Employee {}".format(self.name)
|
[
"[email protected]"
] | |
0d6d50fe03634a9956397e0cd037cd9f4ae7634e
|
607e1b1ec5a41fd5f6cf83e7e20a1372717d2486
|
/leetcode/62.py
|
a6d0a7914195cf7602733f2e272dab0afe4cdedd
|
[] |
no_license
|
histuckyi/algorithm
|
067e627e1672e858b3143440200262e0e5db495c
|
fb04bbd8cdb3ead707bb07abbc1688b99f7505a7
|
refs/heads/master
| 2023-07-08T05:22:49.049599 | 2023-06-24T07:00:25 | 2023-06-24T07:00:25 | 147,614,786 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,608 |
py
|
"""
LeetCode 62. Unique Paths
blog : https://daimhada.tistory.com/131
problem : https://leetcode.com/problems/unique-paths/submissions/
"""
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
r = n
c = m
field = [[0]*c for i in range(r)]
rd = [0, 1]
cd = [1, 0]
pos_list = [(0,0)]
while pos_list:
pos = pos_list.pop()
pr, pc = pos
field[pr][pc] += 1
for i in range(2):
temp_r = pr + rd[i]
temp_c = pc + cd[i]
if temp_r < 0 or temp_c < 0 or r <= temp_r or c <= temp_c:
continue
pos_list.append((temp_r, temp_c))
return field[r-1][c-1]
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
"""
Runtime : faster than 40.64% of Python3
Memory Usage : less than 5.25% of Python3
"""
r = n
c = m
field = [[0]*(c) for i in range(r)]
direction = [(0,-1), (-1, 0)]
for i in range(r):
for j in range(c):
if i == 0 or j == 0:
field[i][j] = 1
continue
for next_pos in direction:
add_r, add_c = next_pos
temp_r = i + add_r
temp_c = j + add_c
if temp_r < 0 or temp_c < 0 or r <= temp_r or c <= temp_c:
continue
field[i][j] += field[temp_r][temp_c]
return field[r-1][c-1]
s = Solution()
print(s.uniquePaths(7, 3))  # 7x3 grid -> 28 unique paths, i.e. C(8, 2)
|
[
"[email protected]"
] | |
88ac7eaa07a6e60ea86b3a2c3c89d5bdf3800eed
|
7a0f0c2107019c82b693e809c1a9b912bee9d9b1
|
/app/chap3_2_2/models/mkqueries.py
|
a6ed847f49085fe78b1ee60cf6cf84fe8ca6cc7b
|
[] |
no_license
|
petershan1119/Django-Official-Practice
|
352f17a4c0b03abe81af7471c4823f096868a4b5
|
a24f626c28bda6024e1b5380f1f8a3c436ba5a0d
|
refs/heads/master
| 2021-01-24T01:28:46.044910 | 2018-02-26T00:32:55 | 2018-02-26T00:32:55 | 122,808,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,955 |
py
|
from django.db import models
__all__ = (
'Blog',
'Author',
'Entry',
)
class Blog(models.Model):
name = models.CharField(max_length=100)
tagline = models.TextField(blank=True)
def __str__(self):
return self.name
class Author(models.Model):
"""
## ManyToMany의 경우 add 이용해서 업데이트 (p.105)
joe = Author.objects.create(name='Joe')
entry.authors.all()
entry.authors.add(joe)
"""
name = models.CharField(max_length=200)
email = models.EmailField(blank=True)
def __str__(self):
return self.name
class Entry(models.Model):
"""
## ForeignKey 업데이트 경우 그냥 할당 (p.105)
b = Blog(name='Beatles Blog', tagline='All the latest Beatles news')
b.save()
entry = Entry.objects.create(blog=b, headline='Test entry')
entry.blog
entry.blog.pk
b2 = Blog.objects.create(name='Cheddar Talk')
entry.blog = b2
## filters 이용해서 특정 objects retrieve하는 경우 (p.106)
Entry.objects.create(blog=b, headline='2006 test entry', pub_date=date(2006, 1, 1))
Entry.objects.filter(pub_date__year=2006)
## chaining filters 예시 (p.107)
b = Blog.objects.create(name='lhy Blog')
Entry.objects.create(blog=b, headline='What\'s up', pub_date=date(2020, 1, 1))
Entry.objects.create(blog=b, headline='What 123', pub_date=date(2000, 1, 1))
Entry.objects.create(blog=b, headline='Whattttttt', pub_date=date(2005, 2, 1))
## Everything inside a single filter() call vs. Successive filter() (p.111)
b1 = Blog.objects.create(name='Lennon and 2008')
b2 = Blog.objects.create(name='Lennon 2008 separate')
Entry.objects.create(blog=b1, headline='Lennon', pub_date=date(2008, 1, 1))
Entry.objects.create(blog=b2, headline='Fastcampus', pub_date=date(2008, 1, 1))
Entry.objects.create(blog=b2, headline='Lennon', pub_date=date(2018, 2, 19))
Blog.objects.filter(entry__headline__contains='Lennon', entry__pub_date__year=2008)
Blog.objects.filter(entry__headline__contains='Lennon').filter(entry__pub_date__year=2008)
## 다른 fields간 values 비교 (p.112)
b = Blog.objects.create(name='F blog')
e1 = Entry.objects.create(blog=b, headline='F entry', n_comments=10, n_pingbacks=5)
e1.n_comments = 10
e1.n_pingbacks = 5
e1.save()
e2 = Entry.objects.create(blog=b, headline='F entry2', n_comments=5, n_pingbacks=10)
Entry.objects.filter(n_comments__gt=F('n_pingbacks'))
"""
blog = models.ForeignKey(Blog, on_delete=models.CASCADE)
headline = models.CharField(max_length=255)
pub_date = models.DateField(blank=True, null=True)
mod_date = models.DateField(auto_now=True)
authors = models.ManyToManyField(Author, blank=True)
n_comments = models.IntegerField(default=0)
n_pingbacks = models.IntegerField(default=0)
rating = models.IntegerField(default=0)
def __str__(self):
return self.headline
|
[
"[email protected]"
] | |
c03eaa16a3e0a5b7f3a46d2d94e6d83848e0d6e8
|
4f972877da14226125440b3da9bdb058764d8a54
|
/pandasStudy/temp_opt.py
|
f108619a26d725634c493b10c9b32adf500d1dee
|
[] |
no_license
|
ZhiYinZhang/study
|
16c29990cb371e7e278c437aa0abc7c348614063
|
8c085310b4f65e36f2d84d0acda4ca257b7389af
|
refs/heads/master
| 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 879 |
py
|
#-*- coding: utf-8 -*-
# @Time : 2019/3/9 14:37
# @Author : Z
# @Email : S
# @File : temp_opt.py
import pandas as pd
import json
# df.to_json(,orient="records",force_ascii=False)
# path="e:/test/json/shaoshanshi.json"
#
# df=pd.read_json(path,orient="records",lines=True)
#
# print(df)
# df.to_json("e:/test/json/shaoshanshi.csv",orient="records",force_ascii=False)
# df=pd.read_csv("E:/test/dianshang/data/cust_tel_20200110.csv",dtype=str)
#
# df.to_json("e://test/dianshang/data/cust_tel_20200110.json",orient="records")
# path="e://test//json//"
# df=pd.read_json(path+"part.json",orient="records",lines=True,encoding="utf-8",dtype=False)
#
#
# # pd.read_csv()
#
# print(df.dtypes)
#
# print(df)
# df.to_json(path+"part1.json",orient="records",force_ascii=False)
# pd.read_excel()  # bare call had no path argument and would raise; kept commented out like the other scratch lines
df=pd.read_csv("e://test//csv//test.csv",dtype=str)
print(df)
print(df.dtypes)
|
[
"[email protected]"
] | |
c8b547b5c2825f3a201e760acb128b8fc94edaca
|
14cc70fa60dfaa441aab34b083cff1bf59574264
|
/opencivicdata/legislative/models/session.py
|
397d1f240810a4a6ecba6cda44895ce9e76871cc
|
[] |
permissive
|
tubaman/python-opencivicdata
|
85434672bea6b40a417104d9381097df58b8a7b2
|
010cd72bdd806e76f342195a1f1e20acbed5a431
|
refs/heads/master
| 2020-07-26T13:32:22.452022 | 2019-08-20T05:56:12 | 2019-08-20T05:56:12 | 208,660,220 | 0 | 0 |
BSD-3-Clause
| 2019-09-15T21:33:06 | 2019-09-15T21:33:06 | null |
UTF-8
|
Python
| false | false | 1,192 |
py
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from opencivicdata.core.models.base import RelatedBase
from opencivicdata.core.models import Jurisdiction
from ...common import SESSION_CLASSIFICATION_CHOICES
@python_2_unicode_compatible
class LegislativeSession(RelatedBase):
jurisdiction = models.ForeignKey(Jurisdiction,
related_name='legislative_sessions',
# should be hard to delete Jurisdiction
on_delete=models.PROTECT
)
identifier = models.CharField(max_length=100)
name = models.CharField(max_length=300)
classification = models.CharField(max_length=100, choices=SESSION_CLASSIFICATION_CHOICES,
blank=True)
start_date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
end_date = models.CharField(max_length=10) # YYYY[-MM[-DD]]
def __str__(self):
return '{} {}'.format(self.jurisdiction, self.name)
class Meta:
db_table = 'opencivicdata_legislativesession'
|
[
"[email protected]"
] | |
4f086d0abd4fee89dc9252a3a4212d6653a80f19
|
2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5
|
/archive/1467. Probability of a Two Boxes Having The Same Number of Distinct Balls.py
|
5becc6fac00c3d0f19e7da6a06a9d4ace6447378
|
[] |
no_license
|
doraemon1293/Leetcode
|
924b19f840085a80a9e8c0092d340b69aba7a764
|
48ba21799f63225c104f649c3871444a29ab978a
|
refs/heads/master
| 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,261 |
py
|
from typing import List
from functools import lru_cache
class Solution:
def getProbability(self, balls: List[int]) -> float:
self.num = 0
N = len(balls)
space_in_each_box = sum(balls) // 2
@lru_cache(None)
def comb(x, y): # x<=y
res = 1
for i in range(x):
res *= y - i
for i in range(1, x + 1):
res //= i
return res
@lru_cache(None)
def get_permunation_number(balls_array):
# print(balls_array)
summ = sum(balls_array)
res = 1
for ball in balls_array:
res *= comb(ball, summ)
summ -= ball
# print(res)
return res
def dfs(cur_no, space_box1, colour_box1, colour_box2, balls_array):
if space_box1 == 0:
colour_box2 += N - cur_no
if colour_box1 == colour_box2:
balls_array1=balls_array
balls_array2=[balls[i]-(balls_array[i] if i<len(balls_array) else 0) for i in range(N)]
balls_array1 = tuple(sorted([x for x in balls_array1 if x!=0]))
balls_array2 = tuple(sorted([x for x in balls_array2 if x != 0]))
temp1 = get_permunation_number(balls_array1)
temp2 = get_permunation_number(balls_array2)
self.num += temp1*temp2
else:
if cur_no < N:
for i in range(min(space_box1+1, balls[cur_no]+1)):
if i == 0:
dfs(cur_no + 1, space_box1, colour_box1, colour_box2 + 1, balls_array+[0])
elif i == balls[cur_no]:
dfs(cur_no + 1, space_box1 - i, colour_box1 + 1, colour_box2, balls_array + [i])
else:
dfs(cur_no + 1, space_box1 - i, colour_box1 + 1, colour_box2 + 1, balls_array + [i])
self.den=get_permunation_number(tuple(sorted(balls)))
dfs(0, space_in_each_box, 0, 0, [])
return self.num / self.den
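# LeetCode's first sample: with balls = [1, 1] every split puts one colour
# in each box, so Solution().getProbability([1, 1]) -> 1.0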
balls=[1,1]
balls= [2,1,1]
balls = [6, 6, 6, 6, 6, 6,6,6]
print(Solution().getProbability(balls))
|
[
"19241008o"
] |
19241008o
|
b90c7a68490243757448c83d51d4eae5a3c86fad
|
8f6cc0e8bd15067f1d9161a4b178383e62377bc7
|
/ppo_baseline_DMB/WORKINGON/easy_ppo_v2/storage.py
|
0bd79023734c597fa209870d6297b8372a5c8253
|
[] |
no_license
|
humorbeing/python_github
|
9c4dfc61a3cefbb266fefff335f6b28d05797e5e
|
e4b4b49bee7e7e3843c6874717779ce8d619bd02
|
refs/heads/master
| 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 |
Python
|
UTF-8
|
Python
| false | false | 7,531 |
py
|
import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
import numpy as np
def ss(s=''):
print()
print(' ---' * 15)
print(' ---' * 15)
print()
# print(' >>>>>>>>>>>>>>>>>>>> <<<<<<<<<<<<<<<<<<<< ')
print(s)
print()
print(' ---' * 15)
print(' ---' * 15)
print()
import sys
sys.exit()
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
def __init__(self, num_steps, num_processes, obs_shape):
self.obs = np.zeros(shape=(num_steps + 1, num_processes, *obs_shape))
self.rewards = np.zeros(shape=(num_steps, num_processes, 1))
self.value_preds = np.zeros(shape=(num_steps + 1, num_processes, 1))
self.returns = np.zeros(shape=(num_steps + 1, num_processes, 1))
self.action_log_probs = np.zeros(shape=(num_steps, num_processes, 1))
action_shape = 1
self.actions = np.zeros(shape=(num_steps, num_processes, action_shape))
self.masks = np.ones(shape=(num_steps + 1, num_processes, 1))
self.bad_masks = np.ones(shape=(num_steps + 1, num_processes, 1))
self.num_steps = num_steps
self.step = 0
    def to(self, device):
        # NOTE: retained from the torch-tensor original; the buffers built in
        # __init__ above are numpy arrays (no .to(device)), and
        # recurrent_hidden_states is never allocated in this variant, so this
        # method would fail if called.
        self.obs = self.obs.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
self.masks = self.masks.to(device)
self.bad_masks = self.bad_masks.to(device)
def insert(self, obs, actions, action_log_probs,
value_preds, rewards, masks, bad_masks):
np.copyto(self.obs[self.step + 1], obs)
np.copyto(self.actions[self.step], actions)
np.copyto(self.action_log_probs[self.step], action_log_probs)
np.copyto(self.value_preds[self.step], value_preds)
np.copyto(self.rewards[self.step], rewards)
np.copyto(self.masks[self.step + 1], masks)
np.copyto(self.bad_masks[self.step + 1], bad_masks)
self.step = (self.step + 1) % self.num_steps
    def after_update(self):
        # numpy arrays have no torch .copy_(); use np.copyto as in insert()
        np.copyto(self.obs[0], self.obs[-1])
        np.copyto(self.masks[0], self.masks[-1])
        np.copyto(self.bad_masks[0], self.bad_masks[-1])
def compute_returns(self,
next_value,
gamma):
self.returns[-1] = next_value
        for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[step + 1] * \
gamma * self.masks[step + 1] + self.rewards[step]
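        # Worked example (illustrative): gamma=0.9, rewards [1, 1], masks all
        # 1 and next_value 0 give returns[1] = 1 and returns[0] = 1 + 0.9 * 1
        # = 1.9 -- plain discounted returns, with masks[step + 1] zeroing the
        # bootstrap across episode boundaries.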
def feed_forward_generator(self,
advantages,
num_mini_batch=None,
mini_batch_size=None):
        num_steps, num_processes = self.rewards.shape[0:2]
batch_size = num_processes * num_steps
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(num_processes, num_steps, num_processes * num_steps,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
sampler = BatchSampler(
SubsetRandomSampler(range(batch_size)),
mini_batch_size,
drop_last=True)
for indices in sampler:
            # buffers are numpy arrays here, so reshape/shape replace the
            # torch .view()/.size() calls of the original
            obs_batch = self.obs[:-1].reshape(-1, *self.obs.shape[2:])[indices]
            actions_batch = self.actions.reshape(-1,
                                                 self.actions.shape[-1])[indices]
            value_preds_batch = self.value_preds[:-1].reshape(-1, 1)[indices]
            return_batch = self.returns[:-1].reshape(-1, 1)[indices]
            masks_batch = self.masks[:-1].reshape(-1, 1)[indices]
            old_action_log_probs_batch = self.action_log_probs.reshape(-1,
                                                                       1)[indices]
            if advantages is None:
                adv_targ = None
            else:
                adv_targ = advantages.reshape(-1, 1)[indices]
yield obs_batch, actions_batch,\
value_preds_batch, return_batch,\
masks_batch, old_action_log_probs_batch,\
adv_targ
    def recurrent_generator(self, advantages, num_mini_batch):
        # NOTE: still written against the torch-tensor original (torch.stack,
        # .view, .size, and a recurrent_hidden_states buffer that this numpy
        # variant never allocates); it would need the same numpy conversion
        # as the methods above before it could run.
        num_processes = self.rewards.size(1)
assert num_processes >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(num_processes, num_mini_batch))
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
obs_batch = []
recurrent_hidden_states_batch = []
actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
obs_batch.append(self.obs[:-1, ind])
recurrent_hidden_states_batch.append(
self.recurrent_hidden_states[0:1, ind])
actions_batch.append(self.actions[:, ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
old_action_log_probs_batch.append(
self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
T, N = self.num_steps, num_envs_per_batch
# These are all tensors of size (T, N, -1)
obs_batch = torch.stack(obs_batch, 1)
actions_batch = torch.stack(actions_batch, 1)
value_preds_batch = torch.stack(value_preds_batch, 1)
return_batch = torch.stack(return_batch, 1)
masks_batch = torch.stack(masks_batch, 1)
old_action_log_probs_batch = torch.stack(
old_action_log_probs_batch, 1)
adv_targ = torch.stack(adv_targ, 1)
# States is just a (N, -1) tensor
recurrent_hidden_states_batch = torch.stack(
recurrent_hidden_states_batch, 1).view(N, -1)
# Flatten the (T, N, ...) tensors to (T * N, ...)
obs_batch = _flatten_helper(T, N, obs_batch)
actions_batch = _flatten_helper(T, N, actions_batch)
value_preds_batch = _flatten_helper(T, N, value_preds_batch)
return_batch = _flatten_helper(T, N, return_batch)
masks_batch = _flatten_helper(T, N, masks_batch)
old_action_log_probs_batch = _flatten_helper(T, N, \
old_action_log_probs_batch)
adv_targ = _flatten_helper(T, N, adv_targ)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ
|
[
"[email protected]"
] | |
66b1f7ab8b33518cd88195b541716565248d3e8e
|
2734b77a68f6d7e22e8b823418ad1c59fe1a34af
|
/opengever/document/behaviors/__init__.py
|
203ebd83e1f3d6ecb246888b2fffc589e66ad832
|
[] |
no_license
|
4teamwork/opengever.core
|
5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1
|
a01bec6c00d203c21a1b0449f8d489d0033c02b7
|
refs/heads/master
| 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 |
Python
|
UTF-8
|
Python
| false | false | 174 |
py
|
from zope.interface import Interface
class IBaseDocument(Interface):
"""Marker interface for objects with a document like type
(og.document, ftw.mail.mail) etc."""
|
[
"[email protected]"
] | |
3a6ecf79f1d71f56398219969add0d7eaa07bd92
|
908bba8bdc246d665d6b22e3a8b91720c34054e7
|
/whatsapp-sentiment.py
|
e7af36895172fa9f736ffba1bc4ba56d53798139
|
[
"Apache-2.0"
] |
permissive
|
yogithesymbian/whatsapp-sentiments
|
24874ab055522b8733c500a104d218b205c054a8
|
d15d4a44282ecfc9b28fc0d16f2714f0f6ed7d2b
|
refs/heads/master
| 2020-05-25T00:33:48.165911 | 2017-03-19T17:27:15 | 2017-03-19T17:27:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 981 |
py
|
from textblob import TextBlob
from plotly.offline import plot
import plotly.graph_objs as go
import random
user1 = "Bob"
user2 = 'Alice'
with open('chat_sample.txt', 'r+') as f:
samples = f.readlines()
d = {user1:[], user2:[]}
for line in samples:
time, *text = line.split('-')
text = ''.join(text)
name, *chat = text.split(':')
t = TextBlob(''.join(chat))
name = name.strip()
if name == user1 or name == user2:
d[name].append(t.sentiment.polarity)
trace1 = go.Scatter(
y = d[user1][:9000],
name = user1,
mode = 'markers',
marker=dict(
size='8',
colorscale='Picnic',
color = random.sample(range(9000),9000),
)
)
trace2 = go.Scatter(
y = d[user2],
name = user2,
mode = 'markers',
marker=dict(
size='7',
color = random.sample(range(8000), 8000),
colorscale='Electric',
)
)
data = [trace1, trace2]
plot(data)
|
[
"[email protected]"
] | |
b278f7784694cab7b0f6e4c0ae2aa4bf7f6d02af
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/configs/mmseg/segmentation_sdk_dynamic.py
|
bfb033efed815d9f803ec76bca1feeee792fd4fd
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 |
Apache-2.0
| 2023-09-14T10:39:04 | 2021-12-24T13:04:44 |
Python
|
UTF-8
|
Python
| false | false | 307 |
py
|
_base_ = ['./segmentation_dynamic.py', '../_base_/backends/sdk.py']
codebase_config = dict(model_type='sdk')
backend_config = dict(pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='PackSegInputs', meta_keys=['img_path', 'ori_shape', 'img_shape'])
])
|
[
"[email protected]"
] | |
b6002bc250faf4ddfd8640d2a7ed44bf9176c3ec
|
36785c0893ab1e2c81c6a03305f42459776a84e0
|
/ambra_sdk/request_args.py
|
e29318245d880cd3dec5ab930e8d16a232ac1280
|
[
"Apache-2.0"
] |
permissive
|
dicomgrid/sdk-python
|
06589f87f33850bd15e6e99fb683bada6492775f
|
2618e682d38339439340d86080e8bc6ee6cf21b5
|
refs/heads/master
| 2022-08-28T14:50:35.864012 | 2022-08-22T12:36:50 | 2022-08-22T12:36:50 | 253,867,502 | 11 | 6 |
Apache-2.0
| 2022-04-13T10:06:38 | 2020-04-07T17:36:56 |
HTML
|
UTF-8
|
Python
| false | false | 5,637 |
py
|
"""Request args."""
from datetime import date
from json import JSONEncoder
from json import dumps as json_dumps
from typing import Any, Dict, Iterable, Mapping, Optional
import aiohttp
from aiohttp.helpers import sentinel
class Encoder(JSONEncoder):
"""Ambra arguments Encoder."""
def default(self, el: Any):
"""Encode default.
:param el: el
:return: encoded el
"""
if isinstance(el, date):
return el.strftime('%Y-%m-%d %H:%M:%S')
return JSONEncoder.default(self, el)
def cast_argument(arg: Any) -> Any:
"""Cast argument.
:param arg: arg
:return: casted arg
"""
if isinstance(arg, date):
return arg.strftime('%Y-%m-%d %H:%M:%S')
if isinstance(arg, (list, dict)):
return json_dumps(arg, cls=Encoder)
return arg
def cast_arguments(args: Dict[str, Any]) -> Dict[str, str]:
"""Cast arguments.
:param args: args
:return: casted args
"""
casted_args = {}
for arg_name, arg_value in args.items():
casted_args[arg_name] = cast_argument(arg_value)
return casted_args
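# Illustrative behaviour (not from the original file): dates become strings
# and containers become JSON, e.g.
#   cast_arguments({'from': date(2020, 1, 2), 'ids': [1, 2]})
#   -> {'from': '2020-01-02 00:00:00', 'ids': '[1, 2]'}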
class RequestArgs: # NOQA:WPS230
"""Request args.
Like in requests.request args
"""
def __init__( # NOQA:D107,WPS211
self,
method: str,
url: str,
full_url: str,
params: Optional[Any] = None, # NOQA:WPS110
data: Optional[Any] = None, # NOQA:WPS110
json: Optional[Any] = None,
headers: Optional[Any] = None,
cookies: Optional[Any] = None,
files: Optional[Any] = None,
auth: Optional[Any] = None,
timeout: Optional[Any] = None,
allow_redirects: Optional[Any] = None,
proxies: Optional[Any] = None,
verify: Optional[Any] = None,
stream: Optional[Any] = None,
cert: Optional[Any] = None,
): # NOQA: DAR101
"""Init."""
self.method = method
self.url = url
self.full_url = full_url
self.params = params # NOQA:WPS110
self.data = data # NOQA:WPS110
self.json = json
self.headers = headers
self.cookies = cookies
self.files = files
self.auth = auth
self.timeout = timeout
self.allow_redirects = allow_redirects
self.proxies = proxies
self.verify = verify
self.stream = stream
self.cert = cert
def to_dict(self):
"""To dict.
:return: dict repr
"""
return self.__dict__.copy()
def dict_optional_args(
self,
autocast_arguments_to_string: bool,
):
"""Get dict optional args.
:param autocast_arguments_to_string: autocast arguments to string
:return: dict of request optional parameters
"""
dict_args = self.to_dict()
dict_args.pop('method')
dict_args.pop('url')
dict_args.pop('full_url')
if dict_args.get('data') is not None and autocast_arguments_to_string:
dict_args['data'] = cast_arguments( # NOQA:WPS110
dict_args['data'],
)
return dict_args
class AioHTTPRequestArgs: # NOQA:WPS230
"""AioHTTP Request args."""
def __init__( # NOQA:D107,WPS211
self,
method: str,
url: str,
full_url: str,
params: Optional[Mapping[str, str]] = None, # NOQA:WPS110
data: Any = None, # NOQA:WPS110
json: Any = None,
cookies=None,
headers=None,
skip_auto_headers: Optional[Iterable[str]] = None,
auth: Optional[aiohttp.BasicAuth] = None,
allow_redirects: bool = True,
max_redirects: int = 10,
compress: Optional[str] = None,
chunked: Optional[bool] = None,
expect100: bool = False,
raise_for_status=None,
read_until_eof: bool = True,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
timeout=sentinel,
ssl=None,
proxy_headers=None,
trace_request_ctx=None,
):
self.method = method
self.url = url
self.full_url = full_url
self.params = params # NOQA:WPS110
self.data = data # NOQA:WPS110
self.json = json
self.cookies = cookies
self.headers = headers
self.skip_auto_headers = skip_auto_headers
self.auth = auth
self.allow_redirects = allow_redirects
self.max_redirects = max_redirects
self.compress = compress
self.chunked = chunked
self.expect100 = expect100
self.raise_for_status = raise_for_status
self.read_until_eof = read_until_eof
self.proxy = proxy
self.proxy_auth = proxy_auth
self.timeout = timeout
self.ssl = ssl
self.proxy_headers = proxy_headers
self.trace_request_ctx = trace_request_ctx
def to_dict(self):
"""To dict.
:return: dict repr
"""
return self.__dict__.copy()
def dict_optional_args(
self,
autocast_arguments_to_string: bool,
):
"""Get dict optional args.
:param autocast_arguments_to_string: autocast arguments to string
:return: dict of request optional parameters
"""
dict_args = self.to_dict()
dict_args.pop('method')
dict_args.pop('url')
dict_args.pop('full_url')
if dict_args.get('data') is not None and autocast_arguments_to_string:
dict_args['data'] = cast_arguments( # NOQA:WPS110
dict_args['data'],
)
return dict_args
|
[
"[email protected]"
] | |
2b887ca5322df9eb742eec5d14620c6a8c37621d
|
b5921afe6ea5cd8b3dcfc83147ab5893134a93d0
|
/tl/contrib/tweepy/auth.py
|
51ed3d90ae2fd53d749c402f1806617c2846a51b
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
techdragon/tl
|
aaeb46e18849c04ad436e0e786401621a4be82ee
|
6aba8aeafbc92cabdfd7bec11964f7c3f9cb835d
|
refs/heads/master
| 2021-01-17T16:13:18.636457 | 2012-11-02T10:08:10 | 2012-11-02T10:08:10 | 9,296,808 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,226 |
py
|
# Tweepy
# Copyright 2009 Joshua Roesslein
# See LICENSE
from urllib.request import Request, urlopen
from urllib.parse import quote
import base64
from tweepy import oauth
from tweepy.error import TweepError
from tweepy.api import API
class AuthHandler(object):
def apply_auth(self, url, method, headers, parameters):
"""Apply authentication headers to request"""
raise NotImplementedError
def get_username(self):
"""Return the username of the authenticated user"""
raise NotImplementedError
class BasicAuthHandler(AuthHandler):
def __init__(self, username, password):
self.username = username
self._b64up = base64.b64encode(bytes('%s:%s' % (username, password), 'ascii'))
def apply_auth(self, url, method, headers, parameters):
headers['Authorization'] = 'Basic %s' % self._b64up.decode()
def get_username(self):
return self.username
class OAuthHandler(AuthHandler):
REQUEST_TOKEN_URL = 'http://api.twitter.com/oauth/request_token'
AUTHORIZATION_URL = 'http://api.twitter.com/oauth/authorize'
AUTHENTICATE_URL = 'http://api.twitter.com/oauth/authenticate'
ACCESS_TOKEN_URL = 'http://api.twitter.com/oauth/access_token'
def __init__(self, consumer_key, consumer_secret, callback=None):
self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
self._sigmethod = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.request_token = None
self.access_token = None
self.callback = callback
self.username = None
def apply_auth(self, url, method, headers, parameters):
request = oauth.OAuthRequest.from_consumer_and_token(self._consumer,
http_url=url, http_method=method, token=self.access_token, parameters=parameters)
request.sign_request(self._sigmethod, self._consumer, self.access_token)
headers.update(request.to_header())
def _get_request_token(self):
try:
request = oauth.OAuthRequest.from_consumer_and_token(self._consumer,
http_url = self.REQUEST_TOKEN_URL, callback=self.callback)
request.sign_request(self._sigmethod, self._consumer, None)
resp = urlopen(Request(self.REQUEST_TOKEN_URL,
headers=request.to_header()), timeout=5.0)
return oauth.OAuthToken.from_string(resp.read().decode())
except Exception as e:
raise TweepError(e)
def set_access_token(self, key, secret):
self.access_token = oauth.OAuthToken(key, secret)
def get_authorization_url(self):
"""Get the authorization URL to redirect the user"""
try:
# get the request token
self.request_token = self._get_request_token()
# build auth request and return as url
request = oauth.OAuthRequest.from_token_and_callback(
token=self.request_token, http_url=self.AUTHORIZATION_URL)
return request.to_url()
except Exception as e:
raise TweepError(e)
def get_access_token(self, verifier=None):
"""
After user has authorized the request token, get access token
with user supplied verifier.
"""
try:
# build request
request = oauth.OAuthRequest.from_consumer_and_token(
self._consumer,
token=self.request_token, http_url=self.ACCESS_TOKEN_URL,
verifier=str(verifier)
)
request.sign_request(self._sigmethod, self._consumer, self.request_token)
# send request
resp = urlopen(Request(self.ACCESS_TOKEN_URL, headers=request.to_header()))
self.access_token = oauth.OAuthToken.from_string(resp.read().decode())
return self.access_token
except Exception as e:
raise TweepError(e)
def get_username(self):
if self.username is None:
api = API(self)
user = api.verify_credentials()
if user:
self.username = user.screen_name
else:
raise TweepError("Unable to get username, invalid oauth token!")
return self.username
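# Sketch of the intended three-legged flow (assumed usage; CONSUMER_KEY,
# CONSUMER_SECRET and verifier are placeholders):
#   handler = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
#   redirect_url = handler.get_authorization_url()
#   ... user authorizes and returns with a verifier ...
#   handler.get_access_token(verifier)
#   api = API(handler)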
|
[
"[email protected]"
] | |
87f68bcf618d998027044494849ca6cc6cbdb568
|
b488060127559a3910ad5bf6642061019cc5f7df
|
/app/auth/views.py
|
f16dd5a46f53c65e4f7cb58c19eb52ce58c65ca7
|
[] |
no_license
|
hypnopompicindex/flasky
|
1cf4e104bf68a192348049d651ddf7e35c6c6e0d
|
2131bb49decd8a17d25078ab37205f12e22aefa1
|
refs/heads/master
| 2016-09-05T16:04:45.933010 | 2014-08-29T22:25:55 | 2014-08-29T22:25:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,999 |
py
|
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, \
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated() \
and not current_user.confirmed \
and request.endpoint[:5] != 'auth.':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous() or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('An email with instructions to confirm your new email '
'address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
|
[
"[email protected]"
] | |
a0647338bf9bf7f1b4ad381078643e483422723e
|
825930f372fdf8c9c42cd2f9b1f424ab9de90b38
|
/accounts/migrations/0003_order_note.py
|
92701e816ce3c74d2368fbed83add82c8b9acf2c
|
[] |
no_license
|
Xasanjon/crm2
|
56cbfa05d910144c75a3cdfe7423ba68fd576534
|
52279925e64e4268830fbeae6af897aef14b64d0
|
refs/heads/master
| 2023-07-02T04:13:33.928305 | 2021-08-16T14:53:43 | 2021-08-16T14:53:43 | 395,755,429 | 0 | 0 | null | 2021-08-16T14:53:44 | 2021-08-13T18:30:32 |
Python
|
UTF-8
|
Python
| false | false | 392 |
py
|
# Generated by Django 3.2 on 2021-08-02 20:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20210725_0252'),
]
operations = [
migrations.AddField(
model_name='order',
name='note',
field=models.CharField(max_length=200, null=True),
),
]
|
[
"[email protected]"
] | |
1b803449349f7c2d236f15348e6db398d826631f
|
504344fc66e8d54081a17306d3012a16bbb81ee7
|
/1_start_main.py
|
f5b040ad17b8d6c087939daec2d577d8e233f917
|
[] |
no_license
|
Ryanshuai/auto_pubg
|
814753644a8e8e7aa3d7ca3c346a9e05b825c00d
|
696f33f888efc441a74e142db878e836bbf3efee
|
refs/heads/master
| 2022-09-21T12:13:24.155393 | 2020-11-12T20:03:43 | 2020-11-12T20:03:43 | 153,748,441 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,482 |
py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from screen_parameter import show_position_y, show_position_x, show_size_y, show_size_x
from press_gun.robot import Robot
from state.all_states import All_States
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(show_size_x, show_size_y)
Dialog.move(show_position_x, show_position_y)
Dialog.setWindowFlag(QtCore.Qt.WindowStaysOnTopHint)
Dialog.setWindowFlag(QtCore.Qt.FramelessWindowHint)
# Dialog.setAttribute(QtCore.Qt.WA_TranslucentBackground, True)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
Dialog.setFont(font)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(0, 0, show_size_x, show_size_y))
self.label.setObjectName("label")
QtCore.QMetaObject.connectSlotsByName(Dialog)
# self.robot = Robot(All_States(), is_calibrating=True)
self.robot = Robot(All_States())
self.robot.temp_qobject.state_str_signal[str].connect(self.retranslateUi)
def retranslateUi(self, text):
_translate = QtCore.QCoreApplication.translate
self.label.setText(_translate("Dialog", text))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
a8854b058391a3e400e059150fc9e2444400ab81
|
d4b049d91795b5f8899f5ee60151a04be8890af9
|
/litapplications/candidates/migrations/0037_auto_20170604_1531.py
|
673c7eb72fcbdf9751afa92d8101506c0ee2c1c1
|
[] |
no_license
|
thatandromeda/litapplications
|
3ab879c6edee1fd8424c3546eead47659699655a
|
d8b67d0b82ea14fb71b871f7563b7096640e4c25
|
refs/heads/master
| 2020-05-21T23:59:07.004211 | 2017-12-08T03:25:24 | 2017-12-08T03:25:24 | 64,570,749 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 640 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-06-04 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('candidates', '0036_auto_20170410_0025'),
]
operations = [
migrations.AddField(
model_name='appointment',
name='year_end',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='appointment',
name='year_start',
field=models.IntegerField(blank=True, null=True),
),
]
|
[
"[email protected]"
] |