blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23d273d98f2e161c75df76b438884487fc3cedd3
|
c459f4dd7b198ec8d8db8379726a5b2650be6636
|
/backoffice/migrations/0027_auto_20210224_1621.py
|
7500f1e83101678b90c23d489885f249201fc720
|
[] |
no_license
|
jittat/admapp
|
4c712182cd06e82efab6c2513fb865e5d00feae8
|
38bf299015ae423b4551f6b1206742ee176b8b77
|
refs/heads/master
| 2023-06-10T03:23:41.174264 | 2023-06-09T19:41:03 | 2023-06-09T19:41:03 | 101,953,724 | 10 | 4 | null | 2023-04-21T22:48:55 | 2017-08-31T03:12:04 |
Python
|
UTF-8
|
Python
| false | false | 412 |
py
|
# Generated by Django 2.2.17 on 2021-02-24 16:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.2.17): re-declares the
    # ``major_full_code`` field on the AdjustmentMajorSlot model as a
    # required CharField capped at 20 characters.
    dependencies = [
        # Must be applied after the previous backoffice migration:
        ('backoffice', '0026_auto_20210224_1550'),
    ]
    operations = [
        migrations.AlterField(
            model_name='adjustmentmajorslot',
            name='major_full_code',
            field=models.CharField(max_length=20),
        ),
    ]
|
[
"[email protected]"
] | |
80afd9060c0a336de7f3a9e75573797ae0777d67
|
2d4e020e6ab48c46e0a19cb69048d9e8d26e46a6
|
/Bohubrihi/Django/My_Second_Project/My_Second_Project/settings.py
|
3bcaebfe1e75a7315343e35a2ba5a991bf8e3466
|
[] |
no_license
|
IsmailTitas1815/Learning
|
a92476fcf7bcd28a7dc1ab2f4eb3a5c27034728f
|
207eaf4101a6d161c1044310f4b3cc54e9c514eb
|
refs/heads/master
| 2023-07-04T20:13:07.263331 | 2021-08-07T20:07:39 | 2021-08-07T20:07:39 | 293,100,950 | 0 | 0 | null | 2021-05-07T16:55:29 | 2020-09-05T15:18:46 |
Python
|
UTF-8
|
Python
| false | false | 3,345 |
py
|
"""
Django settings for My_Second_Project project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = Path(BASE_DIR,'templates')
STATIC_DIR = Path(BASE_DIR,'static')
MEDIA_DIR = Path(BASE_DIR,'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8d8_*(tac3=jz$u*#jmd+2&io=##j$#!9w&2&5f9sizytgav-e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Login_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'My_Second_Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'My_Second_Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR,]
MEDIA_URL = '/media/'
MEDIA_ROOT = MEDIA_DIR
LOGIN_URL = '/login/'
|
[
"[email protected]"
] | |
99a8eae16c7531f553ff02740662130e3d8620f6
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/concat82/702-tideGauge.py
|
f1a8c66c3f23c5786e62c1878fbcbc30041d10aa
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,482 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 13 10:02:00 2020
---------------------------------------------------------
This script concatenates yearly predictor files
Browses the predictor folders for the chosen TG
Concatenates the yearly csvs for the chosen predictor
Saves the concatenated csv in a separate directory
---------------------------------------------------------
@author: Michael Tadesse
"""
#%% import packages
import os
import pandas as pd

#%% define directories
home = '/lustre/fs0/home/mtadesse/erafive_localized'
out_path = '/lustre/fs0/home/mtadesse/eraFiveConcat'

# cd to the home dir to get TG (tide gauge) information
os.chdir(home)
# NOTE(review): os.listdir() order is arbitrary, so tg_list[702] can name a
# different gauge on another filesystem — confirm whether the companion
# per-index scripts assume a shared, stable ordering before sorting this.
tg_list = os.listdir()

# process a single tide gauge: index 702 (matches the 702-* script name)
x = 702
y = 703

# looping through TGs
for t in range(x, y):
    tg = tg_list[t]
    print(tg)

    # cd into this gauge's folder
    os.chdir(os.path.join(home, tg))

    # defining the folders for predictors
    # choose only u, v, and slp
    where = os.getcwd()
    csv_path = {'slp': os.path.join(where, 'slp'),
                'wnd_u': os.path.join(where, 'wnd_u'),
                'wnd_v': os.path.join(where, 'wnd_v')}

    #%% looping through predictors
    for pred in csv_path:
        # cd to the chosen predictor's folder
        os.chdir(os.path.join(home, tg))
        os.chdir(pred)

        #%% looping through the yearly csv files
        # FIX: sort the filenames so rows are appended in chronological
        # (lexicographic) order — a bare os.listdir() returns them in
        # arbitrary order, which made the output row order nondeterministic.
        dat = None
        for yr in sorted(os.listdir()):
            print(pred, ' ', yr)
            dat_yr = pd.read_csv(yr)
            if dat is None:
                dat = dat_yr
            else:
                dat = pd.concat([dat, dat_yr], axis=0)

        if dat is None:
            # No yearly files for this predictor — nothing to save.
            continue
        print(dat.shape)

        # saving concatenated predictor
        # cd to the saving location
        os.chdir(out_path)
        # create/cd to the tg folder
        try:
            os.makedirs(tg)
            os.chdir(tg)  # cd to it after creating it
        except FileExistsError:
            # directory already exists
            os.chdir(tg)
        # save as csv
        pred_name = '.'.join([pred, 'csv'])
        dat.to_csv(pred_name)
|
[
"[email protected]"
] | |
b5aaaf013e3bf2e723a3e6318ca85c11b84631ca
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/428/usersdata/284/106789/submittedfiles/jogoDaVelha.py
|
3a3449d0234c2f307a0dbc52ba6f670be5669645
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,279 |
py
|
# -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
# define the chosen symbols as a list [player, computer]:
import random
def solicitaSimboloDoHumano():
    """Ask the human which symbol to play with; keep asking until the
    answer is X or O. Returns [player_symbol, computer_symbol]."""
    escolha = ''
    while escolha not in ('O', 'X'):
        print('Qual símbolo você deseja utilizar no jogo? (X ou O) ')
        escolha = input().upper()
    if escolha == 'X':
        return ['X', 'O']
    return ['O', 'X']
def sorteioPrimeiraJogada():
    """Coin-flip who moves first: returns 'Computador' or 'Jogador'."""
    return 'Computador' if random.randint(1, 2) == 1 else 'Jogador'
# moves are expressed as board positions 1-9 (vector/matrix style):
def jogadaHumana(tabuleiro):
    """Prompt the human until a valid, free square (1-9) is chosen; return it
    as an int.

    Relies on the module-level ``nome`` (player name) and the ``vazio`` helper.
    """
    movimento = 0
    while movimento not in '1 2 3 4 5 6 7 8 9'.split() or not vazio(tabuleiro, int(movimento)):
        print('Qual a sua jogada, {}?'.format(nome))
        movimento = input()
    # FIX: the original contained the invalid statement ``if movimento == 0 0:``
    # (a syntax error). The intended guard was unreachable anyway — the loop
    # above only exits once ``movimento`` is one of the strings '1'..'9' —
    # so the branch is simply removed.
    return int(movimento)
# computer move selection, working on copies of the board:
def jogadaComputador(tabuleiro, letraComputador):
    """Choose the computer's move (a square 1-9) with a simple rule-based AI.

    Priority order: win immediately > block the opponent's immediate win >
    random free corner > center > random free side square.
    """
    # Derive the human's letter from the computer's:
    if letraComputador == 'X':
        letraJogador = 'O'
    else:
        letraJogador = 'X'
    # 1) If any square wins the game for the computer, take it.
    #    Each candidate is tried on a shallow copy so the real board
    #    is never mutated here.
    for i in range(1,10):
        copy = mostraTabuleiro(tabuleiro)
        if vazio(copy, i):
            movimentacao(copy, letraComputador, i)
            if verificaVencedor(copy, letraComputador):
                return i
    # 2) If the human could win on their next move, block that square.
    for i in range(1, 10):
        copy = mostraTabuleiro(tabuleiro)
        if vazio(copy, i):
            movimentacao(copy, letraJogador, i)
            if verificaVencedor(copy, letraJogador):
                return i
    # 3) Otherwise prefer a random free corner...
    movimento = movAleatoria(tabuleiro, [1, 3, 7, 9])
    if movimento != None:
        return movimento
    # 4) ...then the center...
    if vazio(tabuleiro, 5):
        return 5
    # 5) ...and finally a random free side square (None if board is full).
    return movAleatoria(tabuleiro, [2, 4, 6, 8])
#def validaJogada()
def mostraTabuleiro(tabuleiro):
    """Return a shallow copy of the board list (used for trial moves)."""
    return [casa for casa in tabuleiro]
def verificaVencedor(tabuleiro, letra):
    """Return True if `letra` fills any row, column, or diagonal.

    The board uses positions 1-9 laid out numpad-style (7-8-9 on top).
    """
    trincas = (
        (7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows
        (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns
        (7, 5, 3), (9, 5, 1),              # diagonals
    )
    return any(all(tabuleiro[pos] == letra for pos in trinca)
               for trinca in trincas)
#################################################################################
def vazio(tabuleiro, movimento):
    """Return True when the given square still holds a blank space."""
    casa = tabuleiro[movimento]
    return casa == ' '
def desenhaTabuleiro(tabuleiro):
    """Print the 3x3 board. Positions are numpad-style: 7-8-9 on the top
    row and 1-2-3 on the bottom row; index 0 of the list is unused."""
    print('   ' + tabuleiro[7] + ' | ' + tabuleiro[8] + ' | ' + tabuleiro[9])
    print('   ' + tabuleiro[4] + ' | ' + tabuleiro[5] + ' | ' + tabuleiro[6])
    print('   ' + tabuleiro[1] + ' | ' + tabuleiro[2] + ' | ' + tabuleiro[3])
def jogarNovamente():
    """Ask whether to play again; True when the answer starts with 'sim'."""
    print('Você deseja jogar novamente?(sim ou não)')
    resposta = input().lower()
    return resposta.startswith('sim')
def movimentacao(tabuleiro, letra, movimento):
    """Place `letra` on the board at square `movimento` (mutates in place)."""
    tabuleiro[movimento] = letra
def movAleatoria(tabuleiro, movimentosList):
    """Pick a random free square among `movimentosList`; None if all taken."""
    livres = [pos for pos in movimentosList if vazio(tabuleiro, pos)]
    if not livres:
        return None
    return random.choice(livres)
def completo(tabuleiro):
    """Return True when no square of the board remains free."""
    return not any(vazio(tabuleiro, pos) for pos in range(1, 10))
# Main interactive game loop: greet the player, then play rounds until
# the player declines a rematch.
print('Bem vindo ao JogoDaVelha do grupo X')
nome = input('Qual o seu nome (ou apelido)? ')
while True:
    # Fresh 10-slot board per round (index 0 unused; squares are 1-9):
    tabul = [' '] * 10
    letraJogador, letraComputador = solicitaSimboloDoHumano()
    turn = sorteioPrimeiraJogada()
    print('Vencedor do sorteio para início do jogo: {}'.format(turn))
    rodando = True
    while rodando:
        if turn == 'Jogador':
            # Human's turn: show the board, get a validated move, apply it.
            desenhaTabuleiro(tabul)
            movimento = jogadaHumana(tabul)
            movimentacao(tabul, letraJogador, movimento)
            if verificaVencedor(tabul, letraJogador):
                desenhaTabuleiro(tabul)
                print('Vencedor: {}'.format(nome))
                rodando = False
            else:
                if completo(tabul):
                    # Board full with no winner — a draw ("Deu Velha").
                    desenhaTabuleiro(tabul)
                    print('Deu Velha!')
                    break
                else:
                    turn = 'Computador'
        else:
            # Computer's turn: rule-based AI chooses, then the move is applied.
            movimento = jogadaComputador(tabul, letraComputador)
            movimentacao(tabul, letraComputador, movimento)
            if verificaVencedor(tabul, letraComputador):
                desenhaTabuleiro(tabul)
                print('Vencedor: Computador')
                rodando = False
            else:
                if completo(tabul):
                    desenhaTabuleiro(tabul)
                    print('Deu Velha!')
                    break
                else:
                    turn = 'Jogador'
    # Offer a rematch; exit the program when declined.
    if not jogarNovamente():
        break
|
[
"[email protected]"
] | |
f3f8bb5d1c5a47a51aafed60b97ba43063db2582
|
6380a784ff2bbae2ffa67a50757bf0bb3bd7e87c
|
/config/api_router.py
|
a12bee390b55446679072d6b59145e56981ec515
|
[] |
no_license
|
mahidul-islam/rijal
|
fe07a1024ba2b430569b4f91abcd275958297013
|
d31e8548ff69438b7e1b0f49468f367be15533a9
|
refs/heads/master
| 2022-11-21T14:23:25.446961 | 2020-07-21T07:10:17 | 2020-07-21T07:10:17 | 281,323,224 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter

from arrijal.users.api.views import UserViewSet

# The browsable DefaultRouter is handy during development; production
# gets the leaner SimpleRouter.
router = DefaultRouter() if settings.DEBUG else SimpleRouter()

router.register("users", UserViewSet)

app_name = "api"
urlpatterns = router.urls
|
[
"[email protected]"
] | |
5e7064c8728be77fe5a27654c1734f63852bd13a
|
25c544b0189ee53c40fb4213f1f406c6e37227c8
|
/clu/scripts/treeline.py
|
65fb98cb1267b10c9cc6834ac8b0d59051576c9e
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
fish2000/CLU
|
b7c67e0699387df2cbb29c27839faf20affc1888
|
1b31ed5a27c805b113ef69da21f964388b119d0c
|
refs/heads/master
| 2023-02-24T19:54:36.547553 | 2023-02-19T12:58:21 | 2023-02-19T12:58:21 | 192,204,161 | 5 | 0 |
NOASSERTION
| 2021-11-17T03:46:12 | 2019-06-16T15:01:07 |
Python
|
UTF-8
|
Python
| false | false | 31,517 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import clu.abstract
import collections.abc
import contextlib
import copy
import shlex
import sys
from itertools import filterfalse
from clu.config.abc import NamespaceWalker
from clu.config.keymap import articulate, FrozenNested
from clu.config.ns import get_ns_and_key, split_ns, unpack_ns, pack_ns
from clu.naming import qualified_name, nameof
from clu.predicates import typeof, isnormative
from clu.exporting import Exporter
exporter = Exporter(path=__file__)
export = exporter.decorator()
"""
TREELINE – tree ± command-line
• Turn a command like this:
$ clu-command WRITE --key0=yo --key1=dogg \
--key2=i_heard ns0 \
--key3=you_like ns1 ns2 --key4=tree --key5=structures
• … into a tree of nodes! In order to eventually make it
into a “clu.config.keymap.Flat” instance like this:
{
"key0" : "yo",
"key1" : "dogg",
"key2" : "i_heard",
"ns0:key3" : "you_like",
"ns0:ns1:ns2:key4" : "tree",
"ns0:ns1:ns2:key5" : "structures"
}
"""
command = "clu-command WRITE " \
"--key0=yo --key1=dogg --key2=i_heard ns0 " \
"--key3=you_like ns1 ns2 " \
"--key4=tree --key5=structures"
executable, action, *nsflags = shlex.split(command)
SPACETABS = " " * 2
@export
class Level(contextlib.AbstractContextManager,
            metaclass=clu.abstract.Slotted):
    """ A context manager that tracks the current indentation depth. """
    __slots__ = ('value', 'tab')
    
    def __init__(self, initial_value=0, tab=SPACETABS):
        """ Start at a given (non-negative) depth with a given tab string """
        self.value = abs(initial_value)
        self.tab = tab
    
    def indent(self, string):
        """ Prefix “string” with one tab per current level """
        return (self.tab * self.value) + string
    
    def __int__(self):
        return self.value
    
    def __index__(self):
        return self.value
    
    def __enter__(self):
        # Entering the context pushes one level deeper:
        self.value += 1
        return self
    
    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        # Popping a level on the way out; returning False would re-raise,
        # so any in-flight exception propagates while a clean exit supresses
        # nothing:
        self.value -= 1
        return exc_type is None
# Boolean predicate matching leafnodes — used with filter(…)/filterfalse(…)
# below to split a node's children into leaves and namespaces:
leaf_predicate = lambda node: node.is_leafnode()
@export
class NodeBase(collections.abc.Hashable,
collections.abc.Collection,
clu.abstract.Cloneable,
clu.abstract.ReprWrapper,
metaclass=clu.abstract.Slotted):
""" The base class for all tree nodes. """
__slots__ = ('node_parent', 'node_name',
'node_value',
'child_nodes')
def __new__(cls, parent, name, *children, value=None):
try:
instance = super().__new__(cls, parent,
name,
*children,
value=value) # type: ignore
except TypeError:
instance = super().__new__(cls)
instance.node_name = str(name)
instance.node_value = value
instance.node_parent = parent
instance.child_nodes = {}
if children:
self._append_nodes(*children)
return instance
@property
def name(self):
""" The name of this node. """
return self.node_name
@property
def nsname(self):
""" The fully namespaced name of the current node. Namespaces are
enumerated from the current node on upward to the root.
"""
if self.is_rootnode():
return self.node_name
parent = self
namespaces = []
while True:
parent = parent.node_parent
if parent.is_rootnode():
break
namespaces.append(parent.node_name)
if not namespaces:
return self.node_name
return pack_ns(self.node_name, *reversed(namespaces))
@property
def value(self):
""" The value of this node. """
return self.is_leafnode() and self.node_value or None
def is_leafnode(self):
""" Return True if this node is a leafnode, otherwise False. """
return not bool(len(self.child_nodes))
def is_rootnode(self):
""" Return True if this node is a root node, otherwise False. """
return False
def _append_nodes(self, *children):
""" Append some nodes as children to this node. """
for child in children:
if type(child) is RootNode:
raise ValueError(f"Children must be standard nodes – not root nodes")
if type(child) not in acceptable_types:
badtype = nameof(typeof(child))
raise ValueError(f"Children must be Node types, not {badtype}")
if child in set(self.child_nodes.values()):
thistype = nameof(typeof(self))
raise ValueError(f"WTF: {thistype} “{child!s}” is already a child")
if child.node_parent != self:
child.node_parent = self
self.child_nodes[child.name] = child
def add_child(self, name, value=None):
""" Add a child node to this node.
Specify a name, and optionally a value for the node.
"""
node = Node(parent=self, name=name, value=value)
self.child_nodes[node.name] = node
return node
def has_child(self, nskey):
""" Return True if a child exists for a (possibly namespaced) name,
otherwise False.
"""
key, namespaces = unpack_ns(nskey)
node = self
for namespace in namespaces:
node = node.namespace(namespace)
return key in node.child_nodes
def get_child(self, nskey):
""" Retrieve a child node of a (possibly namespaced) given name. """
key, namespaces = unpack_ns(nskey)
node = self
for namespace in namespaces:
node = node.namespace(namespace)
return node.child_nodes[key]
def assemble_subcommand(self, recursive=False):
""" Reassemble the command-line string for a given node.
Optionally, recurse through the child nodes, adding
their values to the command-line string.
"""
if self.is_leafnode():
name, value = self.name, self.value
if value is None:
return f"--{name}"
return f"--{name}={value!s}"
iterable = recursive and self or self.leaves()
assembler = lambda node: node.assemble_subcommand(recursive=recursive)
nsflags = " ".join(map(assembler, iterable))
return f"{self.name} {nsflags}"
def leaf(self, leafname):
""" Retrieve a child leafnode of a given name from this node. """
node = self.child_nodes[leafname]
if not node.is_leafnode():
raise KeyError(leafname)
return node
def namespace(self, nsname):
""" Retrieve a child namespace of a given name from this node. """
node = self.child_nodes[nsname]
if node.is_leafnode():
raise KeyError(nsname)
return node
def leaves(self):
""" Iterator over this nodes’ child leafnodes. """
yield from filter(leaf_predicate, self.child_nodes.values())
def namespaces(self):
""" Iterator over this nodes’ child namespaces. """
yield from filterfalse(leaf_predicate, self.child_nodes.values())
def to_dict(self):
""" Recursively dict-ify this node and any children it may have """
out = { 'name' : self.name }
if self.value:
out['value'] = self.value
if (not self.is_leafnode()) and len(self):
out['children'] = tuple(child.to_dict() for child in self)
return out
def clone(self, deep=False, memo=None):
""" Clone the current node, its leafnodes, and possibly
all of its children, recursively.
"""
replica = type(self)(name=str(self.node_name),
value=copy.copy(self.node_value),
parent=True)
iterable = deep and self or self.leaves()
cloner = lambda node: node.clone(deep=deep)
replica._append_nodes(*map(cloner, iterable))
return replica
def __len__(self):
return len(self.child_nodes)
def __iter__(self):
yield from self.child_nodes.values()
def __getitem__(self, idx):
match idx:
case int() | slice():
return tuple(self.child_nodes.values())[idx]
case _ if isnormative(idx):
return self.get_child(str(idx))
case _:
thistype = nameof(typeof(self))
badtype = nameof(typeof(idx))
message = f"{thistype} indices must be integers, slices, or strings – not {badtype}"
raise TypeError(message)
def __contains__(self, nskey):
return self.has_child(nskey)
def __hash__(self):
# So nice it’s thrice!
return hash(self.name) \
& hash(self.name) \
& hash(id(self))
def __bool__(self):
return True # ?!
def __str__(self):
return self.name
def inner_repr(self):
child_count = len(self.child_nodes)
out = f"Node({self.node_name})"
if self.node_value:
valuetype = qualified_name(typeof(self.node_value))
out += f" = “{self.node_value}” ({valuetype})"
if child_count:
out += f" → [{child_count}]"
return out
@export
class RootNode(NodeBase):
    """ A root node, anchoring a tree.
        There may only be one of these in a tree, and it must be
        the trees’ root node (like duh). When building up a tree
        from scratch, you instantiate a RootNode and use its methods
        to craft the tree in place.
    """
    def __new__(cls, *children, name='root'):
        """ Create a root node — parentless and valueless by definition """
        instance = super().__new__(cls, None, # no parent
                                        name, # if you insist
                                       *children,
                                        value=None) # value isn’t necessary
        return instance
    @classmethod
    def populate(cls, *arguments):
        """ Alternate constructor: build a tree directly from a sequence
            of command-line arguments
        """
        instance = cls()
        instance.populate_with_arguments(*arguments)
        return instance
    @property
    def value(self):
        """ A root node has no value, by definition. """
        return None
    def is_rootnode(self):
        # The one node type for which this is True:
        return True
    def clone(self, deep=False, memo=None):
        """ Clone the root node, its leafnodes, and possibly
            all of its children, recursively.
        """
        replica = type(self)(name=str(self.node_name))
        iterable = deep and self or self.leaves()
        cloner = lambda node: node.clone(deep=deep)
        replica._append_nodes(*map(cloner, iterable))
        return replica
    def inner_repr(self):
        # A root node reprs as its fully reassembled command line:
        return self.assemble_subcommand(recursive=True)
    @staticmethod
    def parse_argument_to_child(argument, parent):
        """ Static method for parsing an argument into its values,
            and adding a node with those corresponding values
            to a provided parent node – then returning this
            parent node (if we created a leaf) or our freshly
            created node (if we created a namespace).
            … this allows us to keep attaching stuff to whatever
            gets returned here, wherever we are in the process
            of parsing the command line.
        """
        # Examine the argument:
        if argument.startswith('--'):
            if '=' in argument:
                # It’s a leaf with a value specified:
                # NOTE(review): split('=') raises ValueError if the value
                # itself contains '=' — consider split('=', 1); confirm
                # against expected flag grammar.
                name, value = argument.removeprefix('--').split('=')
            else:
                # It’s a leaf with no value provided:
                name, value = argument.removeprefix('--'), None
        else:
            # It’s a namespace:
            name, value = argument, None
        # Add and recover a new node, containing the values
        # we parsed out:
        node = parent.add_child(name=name, value=value)
        # Return the node if it’s a namespace, otherwise
        # hand back the original parent:
        return argument.startswith('--') and parent or node
    def populate_with_arguments(self, *arguments):
        """ Populate the root node from a sequence of command-line arguments. """
        # The “current attachment point” advances into each namespace
        # as it is created, per parse_argument_to_child(…) above:
        node = self
        for argument in arguments:
            node = self.parse_argument_to_child(argument, parent=node)
        return self
@export
class Node(NodeBase):
    """ A standard (non-root) tree node. """
    def __new__(cls, parent, name, *children, value=None):
        # A standard node is meaningless without a parent to anchor it:
        if not parent:
            raise ValueError("Nodes require a valid parent node")
        return super().__new__(cls, parent, name, *children, value=value) # type: ignore
# The node types NodeBase._append_nodes(…) accepts as children
# (checked by exact type, not isinstance):
acceptable_types = { NodeBase, RootNode, Node }
@export
def node_repr(node):
    """ Print a pithy string representation of a node. """
    # Namespaces show their child count; leaves show their value, if any:
    if not node.is_leafnode():
        return f"• {node!s} → [{len(node)}]"
    value = node.value
    return f"• {node!s} = {value}" if value else f"• {node!s}"
@export
def tree_repr(node, level):
    """ Recursively walk, stringify, and print a node tree. """
    # The node itself, then its leaves, then each child namespace —
    # the Level context manager bumps the indent for descendants:
    yield level.indent(node_repr(node))
    for leafnode in node.leaves():
        with level:
            yield level.indent(node_repr(leafnode))
    for childspace in node.namespaces():
        with level:
            yield from tree_repr(childspace, level)
# treewalk – the traversal primitive backing NodeTreeMap, below
@export
def treewalk(node, pre=None):
    """ Iteratively walk a node tree.
        Based on https://stackoverflow.com/a/12507546/298171
    """
    # Copy the accumulated namespace prefix so recursion never
    # mutates a caller's list:
    prefix = list(pre) if pre else []
    if node.is_leafnode():
        yield prefix + [node.name, node.value]
    else:
        # The root node contributes no namespace segment of its own:
        trail = prefix if node.is_rootnode() else prefix + [node.name]
        for child in node:
            yield from treewalk(child, trail)
@export
class NodeTreeMap(NamespaceWalker, clu.abstract.ReprWrapper,
                                   clu.abstract.Cloneable):
    """ NodeTreeMap – a NamespaceWalker-derived KeyMap hosting a node tree """
    # Single slot holding the RootNode/Node anchoring the tree:
    __slots__ = 'tree'
    @classmethod
    def from_dict(cls, instance_dict):
        """ Used by `clu.config.codecs` to deserialize NodeTreeMaps """
        # Create a new NodeTreeMap instance with an empty
        # node tree – and an interim FrozenNested instance
        # using the instance dict data:
        instance = cls(tree=RootNode())
        interim = FrozenNested.from_dict(instance_dict)
        # instance.update(interim)
        # Go through the namespaces, creating them within
        # the new instance as needed:
        for namespace in interim.namespaces():
            node = instance.tree
            for nsfragment in split_ns(namespace):
                try:
                    node = node.namespace(nsfragment)
                except KeyError:
                    # Namespace fragment doesn’t exist yet — create it:
                    node = node.add_child(nsfragment)
        # With namespaces in place, go through the items,
        # using the newly created namespaces to anchor
        # namespaced items as needed:
        for nskey, value in interim.items():
            ns, key = get_ns_and_key(nskey)
            if not ns:
                instance.tree.add_child(key, value)
            else:
                instance.tree.get_child(ns).add_child(key, value)
        # Return the new instance:
        return instance
    def __init__(self, tree=None, **updates):
        """ Initialize a NodeTreeMap, hosting a given node tree """
        try:
            super().__init__(**updates)
        except TypeError:
            super().__init__()
        # Accept either a bare node or another NodeTreeMap-alike:
        tree = getattr(tree, 'tree', tree)
        # “mnq gvfc” – Nellie
        if tree is not None:
            if type(tree) not in acceptable_types:
                badtype = nameof(typeof(tree))
                raise TypeError(f"NodeTreeMap requires a Node instance, not type {badtype}")
            self.tree = tree
        # N.B. – assume updates is a basic KeyMap:
        if updates:
            for nskey, value in updates.items():
                key, namespace = unpack_ns(nskey)
                node = self.tree
                # Walk/create the namespace chain, then attach the leaf:
                for nsfragment in namespace:
                    try:
                        node = node.namespace(nsfragment)
                    except KeyError:
                        node = node.add_child(nsfragment)
                node.add_child(key, value)
    def walk(self):
        # NamespaceWalker’s KeyMap API is driven entirely by this walk:
        yield from treewalk(self.tree)
    def submap(self, *namespaces, unprefixed=False):
        # unprefixed=True: only the root’s direct leaves, keyed bare;
        # no namespaces: the whole map flattened to a plain dict;
        # otherwise: the leaves of the addressed namespace, keyed bare.
        if unprefixed:
            return { node.name : self[node.name] for node in self.tree.leaves() }
        if not namespaces:
            return self.flatten(cls=dict)
        node = self.tree
        for namespace in namespaces:
            node = node.namespace(namespace)
        return { child.name : self[child.nsname] for child in node.leaves() }
    def inner_repr(self):
        return repr(self.tree)
    def clone(self, deep=False, memo=None):
        """ Replicate the node tree map, possibly recursively """
        replica = type(self)()
        replica.tree = self.tree.clone(deep=deep)
        return replica
    def to_dict(self):
        """ Used by `clu.config.codecs` to serialize the NodeTreeMap """
        # Using clu.config.keymap.articulate(…) to build the dict –
        # This sets up an instance dict matching a Nested KeyMap’s
        # internal layout:
        return articulate(self.tree, walker=treewalk)
    def __contains__(self, nskey):
        # Delegate to the root node of the internal node tree:
        return self.tree.has_child(nskey)
    def __getitem__(self, nskey):
        # Delegate to the root node of the internal node tree:
        return self.tree.get_child(nskey).value
# Assign the module’s `__all__` and `__dir__` using the exporter —
# everything decorated with @export above becomes part of the public API:
__all__, __dir__ = exporter.all_and_dir()
def test():
from clu.testing.utils import inline
from pprint import pprint
@inline.precheck
def show_command_details():
print(f"EXECUTABLE: {executable}")
print(f"ACTION: {action}")
print("NAMESPACED FLAGS:")
pprint(nsflags, indent=4)
@inline
def test_nodebase_basics():
""" Check some of the basic NodeBase functions """
emptynode = NodeBase(parent=None, name='yo')
print(f"EMPTY NODE: {emptynode!s}")
print(repr(emptynode))
print()
datanode = NodeBase(parent=None, name='dogg', value="ALL_TYPES_OF_SHIT")
print(f"DATA NODE: {datanode!s}")
print(repr(datanode))
print()
emptynode.add_child('i_heard', value="you like")
emptynode._append_nodes(datanode)
print(f"EMPTY NODE REDUX: {emptynode!s}")
print(repr(emptynode))
print()
assert emptynode[0].name == "i_heard"
assert emptynode[0].value == "you like"
assert emptynode['i_heard'].name == "i_heard"
assert emptynode['i_heard'].value == "you like"
assert emptynode[1].name == "dogg"
assert emptynode[1].value == "ALL_TYPES_OF_SHIT"
@inline
def test_nodebase_repr_simple():
""" Test a tree of raw NodeBase instances """
root = NodeBase(parent=None, name='root')
root.add_child('yo')
root.add_child('dogg')
root.add_child('i_heard')
root.add_child('you_like')
nsX = NodeBase(parent=root, name="ns0")
nsY = NodeBase(parent=root, name="ns1")
root._append_nodes(nsX, nsY)
nsX.add_child('namespaced')
nsY.add_child('commands')
def node_repr(node):
if not node.value:
return f"• {node!s}"
return f"• {node!s} = {node.value}"
def tree_repr(node, level):
with level:
yield level.indent(node_repr(node))
for child in node:
yield from tree_repr(child, level)
for line in tree_repr(root, Level()):
print(line)
print()
@inline
def test_rootnode_repr_sorted():
""" Test an anchored tree of Node instances """
root = RootNode()
root.add_child('yo')
root.add_child('dogg')
root.add_child('i_heard')
root.add_child('you_like')
nsX = Node(parent=root, name="ns0")
nsY = Node(parent=root, name="ns1")
root._append_nodes(nsX, nsY)
nsX.add_child('namespaced')
nsY.add_child('commands')
for line in tree_repr(root, Level()):
print(line)
print()
@inline
def test_parse_command_line():
""" Transform a command into a node tree """
def parse_argument_to_child_node(arg, parent):
""" Function to parse an argument into its values,
and then add a node with those corresponding values
to a provided parent node – then returning this
parent node (if we created a leaf) or our freshly
created node (if we created a namespace).
… this allows us to keep attaching stuff to whatever
gets returned here, wherever we are in the process
of parsing the command line.
"""
# Examine the argument:
if arg.startswith('--'):
if '=' in arg:
# It’s a leaf with a value specified:
name, value = arg.removeprefix('--').split('=')
else:
# It’s a leaf with no value provided:
name, value = arg.removeprefix('--'), None
else:
# It’s a namespace:
name, value = arg, None
# Add and recover a new node, containing the values
# we parsed out:
node = parent.add_child(name=name, value=value)
# Return the node if it’s a namespace, otherwise
# hand back the original parent:
return arg.startswith('--') and parent or node
# Create an empty tree:
root = RootNode()
# Starting with the root node, go through the list of
# namespaced argument flags and whatnot, parsing each
# in turn, advancing the “node” in question to namespaces
# as we encounter and create them:
node = root
for argument in nsflags:
node = parse_argument_to_child_node(argument, parent=node)
for line in tree_repr(root, Level()):
print(line)
print()
@inline
def test_assemble_subcommand():
""" Reassemble the model command-line invocation """
# The root node is named for the model command:
root = RootNode(name="WRITE")
# Add child leaves and namespaces to match
# the model command:
root.add_child('key0', 'yo')
root.add_child('key1', 'dogg')
root.add_child('key2', 'i_heard')
ns0 = root.add_child('ns0')
ns0.add_child('key3', 'you_like')
ns1 = ns0.add_child('ns1')
ns2 = ns1.add_child('ns2')
ns2.add_child('key4', 'tree')
ns2.add_child('key5', 'structures')
# Assemble subcommands for namespaces:
root_command = root.assemble_subcommand()
ns0_command = ns0.assemble_subcommand()
ns2_command = ns2.assemble_subcommand()
assert root_command in command
assert ns0_command in command
assert ns2_command in command
print("ROOT COMMAND:")
print(root_command)
print()
print("NS0 SUBCOMMAND:")
print(ns0_command)
print()
print("NS2 SUBCOMMAND:")
print(ns2_command)
print()
# Assemble full command recursively:
full_command = root.assemble_subcommand(recursive=True)
print("FULL ASSEMBLED COMMAND:")
print(full_command)
print()
assert full_command in command
for line in tree_repr(root, Level()):
print(line)
print()
@inline
def test_roundtrip_command_line():
""" Parse and subsequently reassemble the command """
# Create an empty tree:
root = RootNode(name="WRITE")
# Populate it from the model command line:
root.populate_with_arguments(*nsflags)
# Re-assemble the full model command recursively:
full_command = root.assemble_subcommand(recursive=True)
assert full_command in command
for line in tree_repr(root, Level()):
print(line)
print()
@inline
def test_nodetreemap_basics():
    """ Check the basic functions of NodeTreeMap """
    # Fill a tree, per the command line:
    root = RootNode.populate(*nsflags)
    # Stick it in a NodeTreeMap:
    itemlist = []
    ntm = NodeTreeMap(tree=root)
    # items() yields (namespaced key, value) pairs:
    for nskey, value in ntm.items():
        itemlist.append((nskey, value))
    pprint(itemlist)
    print()
    # flatten() collapses the namespaces; submap() exposes the flat mapping:
    pprint(ntm.flatten().submap())
    print()
@inline
def test_nodetree_namespaces():
""" Check node namespaced names """
# Fill a tree, per the command line:
root = RootNode.populate(*nsflags)
assert root.get_child('key0').nsname == 'key0'
assert root.get_child('key0').value == 'yo'
assert root.get_child('key1').nsname == 'key1'
assert root.get_child('key1').value == 'dogg'
assert root.get_child('key2').nsname == 'key2'
assert root.get_child('key2').value == 'i_heard'
assert root.get_child('ns0:key3').name == 'key3'
assert root.get_child('ns0:key3').nsname == 'ns0:key3'
assert root.get_child('ns0:key3').value == 'you_like'
assert root.get_child('ns0:ns1:ns2:key4').name == 'key4'
assert root.get_child('ns0:ns1:ns2:key4').nsname == 'ns0:ns1:ns2:key4'
assert root.get_child('ns0:ns1:ns2:key4').value == 'tree'
assert root.get_child('ns0:ns1:ns2:key5').name == 'key5'
assert root.get_child('ns0:ns1:ns2:key5').nsname == 'ns0:ns1:ns2:key5'
assert root.get_child('ns0:ns1:ns2:key5').value == 'structures'
assert root.has_child('ns0:key3')
assert root.has_child('ns0:ns1:ns2:key4')
assert root.has_child('ns0:ns1:ns2:key5')
ntm = NodeTreeMap(tree=root)
assert ntm['key0'] == 'yo'
assert ntm['key1'] == 'dogg'
assert ntm['key2'] == 'i_heard'
assert ntm['ns0:key3'] == 'you_like'
assert ntm['ns0:ns1:ns2:key4'] == 'tree'
assert ntm['ns0:ns1:ns2:key5'] == 'structures'
assert 'ns0:key3' in ntm
assert 'ns0:ns1:ns2:key4' in ntm
assert 'ns0:ns1:ns2:key5' in ntm
@inline
def test_roundtrip_nodetree_dict():
""" Check NodeTreeMap to/from dict functions """
# Fill a tree, per the command line:
root = RootNode.populate(*nsflags)
ntm = NodeTreeMap(tree=root)
instance_dict = ntm.to_dict()
instance = NodeTreeMap.from_dict(instance_dict)
# pprint(ntm.flatten().submap())
# pprint(instance.flatten().submap())
# pprint(tuple(ntm.namespaces()))
# pprint(tuple(instance.namespaces()))
assert ntm == instance
assert instance_dict == instance.to_dict()
@inline
def test_roundtrip_nodetree_json():
""" Check NodeTreeMap to/from json functions """
from clu.config.codecs import json_encode, json_decode
# Fill a tree, per the command line:
root = RootNode.populate(*nsflags)
ntm = NodeTreeMap(tree=root)
ntm_json = json_encode(ntm)
ntm_reconstituted = json_decode(ntm_json)
assert ntm == ntm_reconstituted
print(ntm_json)
print()
@inline
def test_nodetree_halfviz():
""" Generate Halfviz from a node tree """
# Fill a tree, per the command line:
root = RootNode.populate(*nsflags)
def edge_repr(parent, node):
return f"{parent.name} -> {node.name}"
def tree_repr(parent, node, level):
with level:
if parent:
yield level.indent(edge_repr(parent, node))
for child in node:
yield from tree_repr(node, child, level)
for line in tree_repr(None, root, Level()):
print(line)
print()
@inline
def test_nodetree_clone():
""" Clone a node tree """
# Fill a tree, per the command line:
root = RootNode.populate(*nsflags)
toor = root.clone(deep=True)
roor = root.clone()
# custom node_repr(…) to show node IDs:
def node_repr(node):
if not node.is_leafnode():
child_count = len(node)
return f"• {node!s} → [{child_count}] ({id(node)})"
if not node.value:
return f"• {node!s} ({hash(node)})"
return f"• {node!s} = {node.value} ({id(node)})"
def tree_repr(node, level):
yield level.indent(node_repr(node))
for leaf in node.leaves():
with level:
yield level.indent(node_repr(leaf))
for namespace in node.namespaces():
with level:
yield from tree_repr(namespace, level)
for line in tree_repr(root, Level()):
print(line)
print(repr(root))
print()
for line in tree_repr(toor, Level()):
print(line)
print(repr(toor))
print()
for line in tree_repr(roor, Level()):
print(line)
print(repr(roor))
print()
@inline
def test_nodetree_to_dict():
""" Recursively dict-ify a node tree """
# Fill a tree, per the command line:
root = RootNode.populate(*nsflags)
dicts = root.to_dict()
pprint(dicts)
return inline.test(100)
if __name__ == '__main__':
sys.exit(test())
|
[
"[email protected]"
] | |
745a7bddd4f8ff7dd570dc5adce795e85124f73a
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/tf3d/ops/tensorflow_sparse_conv_ops/sparse_conv_ops_test.py
|
5f0edf1bbfee5b82233daf983f0fb915f45ef112
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 |
Apache-2.0
| 2020-06-23T01:55:11 | 2020-02-23T07:59:42 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,803 |
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test sparse_conv ops."""
import numpy as np
import tensorflow as tf
try:
import tensorflow_sparse_conv_ops as sparse_conv_ops # pylint: disable=g-import-not-at-top
except ImportError:
import sparse_conv_ops # pylint: disable=g-import-not-at-top
class SparseConvOpTest(tf.test.TestCase):
  """Test sparse_conv ops."""
  def test_spar_conv_op(self):
    # One batch of 4 voxels with 3 input features each: shape (1, 4, 3).
    voxel_features = tf.constant(
        [[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 2.0, 8.0]]],
        dtype=tf.float32)
    # Integer voxel coordinates per voxel: shape (1, 4, 3).
    # (presumably (x, y, z) order, per the variable name — confirm against op docs)
    voxel_xyz_indices = tf.constant(
        [[[0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]]], dtype=tf.int32)
    # All 4 voxels of the single batch entry are valid.
    num_valid_voxels = tf.constant([4], dtype=tf.int32)
    # Filter bank of shape [3, 3, 3, 3, 5]; from the asserted output shape,
    # the last two dims are (in_channels=3, out_channels=5).
    init_value = np.ones([3, 3, 3, 3, 5], np.float32)
    filters = tf.Variable(initial_value=init_value, trainable=True)
    # Record the forward pass so the filter gradient can be checked below.
    with tf.GradientTape() as g:
      voxel_outputs = sparse_conv_ops.submanifold_sparse_conv3d(
          voxel_xyz_indices, num_valid_voxels, voxel_features, filters)
    print('voxel_outputs:', voxel_outputs)
    # Output keeps the (batch, voxel) layout with 5 output channels.
    self.assertAllEqual(voxel_outputs.shape, [1, 4, 5])
    # Gradient w.r.t. the filters must match the filter shape.
    self.assertAllEqual(
        g.gradient(voxel_outputs, filters).shape, [3, 3, 3, 3, 5])
if __name__ == '__main__':
tf.test.main()
|
[
"[email protected]"
] | |
d5831481809bc39418d1870a746d55b3c2d993ab
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/netex/models/third_party_product.py
|
50ae347ef65cbaf67a7ff980b403457e18ba01ef
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 |
Python
|
UTF-8
|
Python
| false | false | 309 |
py
|
from dataclasses import dataclass
from .third_party_product_version_structure import ThirdPartyProductVersionStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class ThirdPartyProduct(ThirdPartyProductVersionStructure):
    """NeTEx ``ThirdPartyProduct`` element.

    Inherits every field from :class:`ThirdPartyProductVersionStructure`;
    this subclass only pins the XML namespace used for (de)serialization.
    """
    class Meta:
        # xsdata binding metadata: element namespace for this type.
        namespace = "http://www.netex.org.uk/netex"
|
[
"[email protected]"
] | |
41550f34d2705294a0046ac8b1de8397166aec24
|
73332abdcadb62f4f262d0c30856c3c257a9ee7d
|
/oyProjectManager/models/repository.py
|
e652584bc54fa7228d4ef379aa506eb227c11021
|
[
"BSD-2-Clause"
] |
permissive
|
code-google-com/oyprojectmanager
|
454435604cc150c1b54ec2c54294e0fa05490f82
|
3085ecbe1cc04a73ec69b4848b789009546feae7
|
refs/heads/master
| 2021-01-19T02:40:56.342086 | 2015-01-26T16:40:00 | 2015-01-26T16:40:00 | 32,266,400 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,653 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
from exceptions import AttributeError, RuntimeError, ValueError, IOError
import os
from oyProjectManager import utils
from oyProjectManager.utils import cache
# create a logger
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
# TODO: Remove Repository Class, it is useless
class Repository(object):
    """Repository class gives information about the repository and projects in
    that repository.

    The Repository class helps:

     * Get a list of project names in the current repository
     * Find server paths
     * and some auxiliary things like:

       * convert the given path to repository relative path which contains
         the environment variable key in the repository path.

    In the current design of the system there can only be one repository where
    all the projects are saved on. It is a little bit hard or adventurous to
    build a system which supports multiple repositories.

    .. note::
      In future may there be support for multiple repositories by using
      repository specific environment variables, like $REPO1 for repository in
      the first index of the config.repository settings and $REPO2 for the
      second and etc. But in the current design it was a little bit an
      overkill to add this support.

    .. warning::
      The repository setting (``repository``) in the users own config.py file
      is useless for getting the repository path. It is the $REPO environment
      variable that oyProjectManager uses. The ``repository`` setting in the
      ``config.py`` is there to be able replace the path values for one
      operating system in another, for example, think that a path for a
      texture file is set to "/mnt/Projects/TestProject/Texture1". This is
      obviously a path for OSX or linux, but what happens when you are under
      Windows and open the file, in this case oyProjectManager will try to
      replace the path with the environment variable by checking if the path
      matches any of the oses repository path settings and it will reproduce
      the path as "$REPO/TestProject" in case the repository settings is
      "/mnt/Projects" for OSX.

    There are no parameters that needs to be set to initialize a Repository
    instance.
    """
    def __init__(self):
        logger.debug("initializing repository instance")
        # get the config (imported lazily so module import stays cheap)
        from oyProjectManager import conf
        self.conf = conf
        self._server_path = ""
        self._windows_path = ""
        self._osx_path = ""
        self._linux_path = ""
        self._project_names = []
        self._validate_repository_env_key()
        # -----------------------------------------------------
        # read the repository settings and assign the defaults
        # NOTE(review): only AttributeError is caught below; if
        # ``conf.repository`` is a plain dict, a missing key would raise
        # KeyError and escape these guards -- confirm the intended conf
        # structure before tightening.
        try:
            self._windows_path = \
                self.conf.repository["windows_path"].replace("\\", "/")
        except AttributeError:
            pass
        try:
            self._linux_path = \
                self.conf.repository["linux_path"].replace("\\", "/")
        except AttributeError:
            pass
        try:
            self._osx_path = \
                self.conf.repository["osx_path"].replace("\\", "/")
        except AttributeError:
            pass
        logger.debug("finished initializing repository instance")
    def _validate_repository_env_key(self):
        """validates the repository env key environment variable

        :raises RuntimeError: when the variable named by
            ``conf.repository_env_key`` is not set at all
        :raises ValueError: when it is set but empty
        """
        # raise a RuntimeError if no REPO environment var is set.
        # ``dict.has_key()`` was removed in Python 3; the ``in`` operator
        # is equivalent and works on both Python 2 and Python 3.
        if self.conf.repository_env_key not in os.environ:
            raise RuntimeError("Please set an environment variable with the "
                               "name %s and set it to your repository path" %
                               self.conf.repository_env_key)
        if os.environ[self.conf.repository_env_key] == "":
            raise ValueError("The %s environment variable can not be an "
                             "empty string" % self.conf.repository_env_key)
    # NOTE(review): decorators apply bottom-up, so CachedMethod here wraps a
    # ``property`` object rather than a plain function -- presumably
    # cache.CachedMethod supports descriptors; confirm before reordering.
    @cache.CachedMethod
    @property
    def project_names(self):
        """returns a list of project names
        """
        self.update_project_list()
        return self._project_names
    def update_project_list(self):
        """updates the project list variable

        A folder directly under :attr:`server_path` counts as a project when
        it contains the configured database file.
        """
        logger.debug("updating projects list")
        try:
            self._project_names = []
            child_folders = utils.getChildFolders(self.server_path)
            for folder in child_folders:
                # check if the .metadata.db file exists under the folder
                if os.path.exists(
                    os.path.join(
                        self.server_path,
                        folder,
                        self.conf.database_file_name
                    )
                ):
                    # it should be a valid project
                    self._project_names.append(folder)
            self._project_names.sort()
        except IOError:
            logger.warning("server path doesn't exists, %s" % self.server_path)
    @property
    def server_path(self):
        """The server path, read from the repository environment variable.

        The value is user-expanded and then variable-expanded twice, so the
        environment variable may itself reference another variable.
        """
        return os.path.expandvars(
            os.path.expandvars(
                os.path.expanduser(
                    os.environ[self.conf.repository_env_key]
                )
            )
        )
    @property
    def linux_path(self):
        """The linux path of the jobs server
        """
        return self._linux_path.replace("\\", "/")
    @property
    def windows_path(self):
        """The windows path of the jobs server
        """
        return self._windows_path.replace("\\", "/")
    @property
    def osx_path(self):
        """The osx path of the jobs server
        """
        return self._osx_path.replace("\\", "/")
    def get_project_name(self, file_path):
        """Returns the project name from the given path or full path.

        Calculates the project name from the given file or folder full path.
        It returns None if it can not get a suitable name.

        :param str file_path: The file or folder path.

        :returns: Returns a string containing the name of the project

        :rtype: str
        """
        if file_path is None:
            return None
        # normalize, expand and unify separators before comparing against
        # the repository root
        file_path = os.path.expandvars(
            os.path.expanduser(
                os.path.normpath(file_path)
            )
        ).replace("\\", "/")
        if not file_path.startswith(self.server_path.replace("\\", "/")):
            return None
        # the first path component after the repository root is the project
        residual = file_path[len(self.server_path.replace("\\", "/")) + 1:]
        parts = residual.split("/")
        if len(parts) > 1:
            return parts[0]
        return None
    def relative_path(self, path):
        """Converts the given path to repository relative path.

        If "M:/JOBs/EXPER/_PROJECT_SETUP_" is given it will return
        "$REPO/EXPER/_PROJECT_SETUP_"
        """
        return path.replace(self.server_path,
                            "$" + self.conf.repository_env_key)
|
[
"[email protected]"
] | |
bef343ea16d689688535c9e4696e408161b5b29d
|
8c16c8fa0a32e1b190df3206c739cc844f81df7a
|
/home/migrations/0002_load_initial_data.py
|
808c1c33641ba4e51e3c8fae89ca49b39bf69c32
|
[] |
no_license
|
crowdbotics-apps/r123-dev-1618
|
c689f7f0e5fb294632fa6abe40b13a2e4b5ed3bc
|
192d89a3e82f212700fce8b9160947a73abdc716
|
refs/heads/master
| 2022-04-02T11:13:12.271354 | 2020-02-06T12:48:14 | 2020-02-06T12:48:14 | 238,687,483 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,273 |
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the application title."""
    # Resolve the historical model through the migration state registry
    # instead of importing it directly (standard data-migration practice).
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="R123")
def create_homepage(apps, schema_editor):
    """Seed home.HomePage with the default landing-page markup."""
    homepage_model = apps.get_model("home", "HomePage")
    # NOTE: user-facing markup — reproduced verbatim, do not reformat.
    default_body = """
    <h1 class="display-4 text-center">R123</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    homepage_model.objects.create(body=default_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at this app."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "r123-dev-1618.botics.co"
    site_defaults = {"name": "R123"}
    # Guard kept from the generated template, where the domain may be blank;
    # here it is always set.
    if custom_domain:
        site_defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=site_defaults, id=1)
class Migration(migrations.Migration):
    """Data migration seeding the default CustomText, HomePage and Site rows."""
    dependencies = [
        # home tables must exist before seeding them
        ("home", "0001_initial"),
        # sites framework schema required by create_site
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"[email protected]"
] | |
096f62f342b714d1b73d1cde4d377414250fa6f4
|
02d8a026d63127f045042e03e23acbe6c9675db8
|
/vb2py/PythonCard/tools/resourceEditor/modules/backgroundInfoDialog.rsrc.py
|
613f3425866455d083794a5997c9411d81d9d930
|
[
"BSD-3-Clause"
] |
permissive
|
VB6Hobbyst7/xl_vb2py
|
40e77976b452732575e2726fb1f0675b1ab9f86f
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
refs/heads/main
| 2023-07-28T20:12:11.933183 | 2021-09-23T18:12:02 | 2021-09-23T18:12:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,268 |
py
|
{'type':'CustomDialog',
'name':'backgroundInfo',
'title':'Background Info',
'position':(53, 94),
'size':(370, 563),
'components': [
{'type':'Button',
'name':'btnCustomize',
'position':(247, 333),
'label':'Customize',
},
{'type':'RadioGroup',
'name':'windowStyle',
'position':(76, 279),
'size':(281, -1),
'items':['Static', 'Resizeable', 'Custom'],
'label':'Window Style',
'layout':'horizontal',
'max':1,
'stringSelection':'Static',
},
{'type':'StaticText',
'name':'stcName',
'position':(10, 10),
'text':'Name:',
},
{'type':'StaticText',
'name':'stcTitle',
'position':(10, 35),
'text':'Title:',
},
{'type':'StaticText',
'name':'stcPosition',
'position':(10, 60),
'text':'Position:',
},
{'type':'StaticText',
'name':'stcSize',
'position':(10, 85),
'text':'Size:',
},
{'type':'StaticText',
'name':'stcForegroundColor',
'position':(10, 110),
'text':'Foreground color:',
},
{'type':'StaticText',
'name':'stcBackgroundColor',
'position':(10, 135),
'text':'Background color:',
},
{'type':'StaticText',
'name':'stcImage',
'position':(10, 160),
'text':'Image:',
},
{'type':'StaticText',
'name':'stcIcon',
'position':(10, 210),
'text':'Icon:',
},
{'type':'TextField',
'name':'fldName',
'position':(130, 5),
},
{'type':'TextField',
'name':'fldTitle',
'position':(130, 30),
'size':(188, -1),
},
{'type':'TextField',
'name':'fldPosition',
'position':(130, 55),
'size':(80, -1),
},
{'type':'TextField',
'name':'fldSize',
'position':(130, 80),
'size':(80, -1),
},
{'type':'TextField',
'name':'fldForegroundColor',
'position':(130, 110),
},
{'type':'Button',
'name':'btnForegroundColor',
'position':(250, 110),
'label':'Color...',
},
{'type':'TextField',
'name':'fldBackgroundColor',
'position':(130, 135),
},
{'type':'Button',
'name':'btnBackgroundColor',
'position':(250, 135),
'label':'Color...',
},
{'type':'TextField',
'name':'fldImage',
'position':(130, 160),
},
{'type':'Button',
'name':'btnFile',
'position':(250, 160),
'label':'File...',
},
{'type':'CheckBox',
'name':'chkTiled',
'position':(130, 185),
'size':(135, -1),
'label':'Tile image',
},
{'type':'TextField',
'name':'fldIcon',
'position':(130, 210),
},
{'type':'Button',
'name':'btnIconFile',
'position':(250, 210),
'label':'File...',
},
{'type':'CheckBox',
'name':'chkStatusBar',
'position':(130, 235),
'label':'Status bar on window',
},
{'type':'CheckBox',
'name':'chkVisible',
'position':(130, 260),
'size':(135, -1),
'checked':True,
'label':'Visible at startup',
},
{'type':'Button',
'id':5100,
'name':'btnOK',
'position':(9, 405),
'default':1,
'label':'OK',
},
{'type':'Button',
'id':5101,
'name':'btnCancel',
'position':(114, 405),
'label':'Cancel',
},
] # end components
} # end CustomDialog
|
[
"[email protected]"
] | |
0e4e8d3779836386612dc162af745efcb539ecb8
|
6ac2631c256f156d4ddf169e6c67f1fe66ebcaaf
|
/062/pyteacher/app_accounts/migrations/0011_auto_20190321_0515.py
|
e36b9ecbc568798fd0c86cd592db78267480287a
|
[] |
no_license
|
kasaiee/how-to-pyteacher
|
101f106aeeed1b34756cecf502337ff8ee584ff5
|
074a57533f53fd1b8c7f37cd11dbc3b32ab8a08f
|
refs/heads/master
| 2022-12-10T23:50:46.851784 | 2019-07-15T19:31:03 | 2019-07-15T19:31:03 | 187,372,111 | 6 | 4 | null | 2022-12-08T01:55:05 | 2019-05-18T15:08:03 | null |
UTF-8
|
Python
| false | false | 502 |
py
|
# Generated by Django 2.1.5 on 2019-03-21 05:15
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the RegisteredCourse model to RegisteredItem."""
    dependencies = [
        ('app_base', '0011_auto_20190302_2010'),
        # resolves the configured user model (settings.AUTH_USER_MODEL)
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app_accounts', '0010_auto_20190321_0437'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='RegisteredCourse',
            new_name='RegisteredItem',
        ),
    ]
|
[
"[email protected]"
] | |
048517a1253073256e7a998e84e5de7e1dcffbcd
|
7560e624ac39fcdf44b7b8d747c072c923bb6d1b
|
/docs/conf.py
|
2bc866940e3023d41d63604c316372b4429720d7
|
[] |
no_license
|
tardis-sn-archive/tardisatomic
|
889473a8c9b466dd433bc89778b16e43857d1652
|
bea354a6427d3d9539abbac6a1ce476bdb3c9aaf
|
refs/heads/master
| 2021-05-28T21:54:55.953270 | 2015-05-20T14:22:43 | 2015-05-20T14:22:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,948 |
py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# Load all of the global Astropy configuration
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None),
'h5py': ('http://docs.h5py.org/en/latest/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/dev/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None)
}
import sphinx_bootstrap_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'numpydoc',
'astropy_helpers.sphinx.ext.automodapi'
]
source_suffix = '.rst'
## get's rid of many toctree contains errors: see https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
extensions += ['matplotlib.sphinxext.plot_directive',
'sphinxcontrib.bibtex']
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates']
#exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = u'TARDIS Atomic'
author = u'TARDIS team'
copyright = u'2013, ' + author
master_doc = 'index'
#default_role = 'obj'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import tardisatomic
# The short X.Y version.
version = tardisatomic.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = tardisatomic.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'tardis_logo.ico'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
|
[
"[email protected]"
] | |
c7f5730d2632381a3a679b41df95ef215a1c2038
|
0129b016055daa1aaa1e9e0911f271fa7b38e27e
|
/programacao_estruturada/20192_186/Bimestral2_186_20192/Parte 1/Mois__s/question_two.py
|
393b6ee477dd83b455a0fb3d91cd8ed5723a104c
|
[] |
no_license
|
rogeriosilva-ifpi/teaching-tds-course
|
7c43ff17d6677aef7b42071929b3de8361748870
|
771ccdc4dc932d0ef5ce6ba61a02b5ee11920d4c
|
refs/heads/master
| 2022-04-04T01:08:45.157185 | 2020-01-30T19:36:57 | 2020-01-30T19:36:57 | 206,439,119 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 124 |
py
|
# NOTE(review): the original was broken at runtime -- ``.split`` without
# parentheses bound the method instead of calling it, ``frase.len`` is not
# valid Python, and ``upper(frase)`` is not a builtin.  The prompt appears
# to ask for the number of non-space characters and the uppercased phrase.
frase = str(input('Digite uma frase: '))
# characters in the phrase, not counting spaces
print('A nova frase é: ', len(frase) - frase.count(" "))
print(frase.upper())
|
[
"[email protected]"
] | |
51fa504aef3e03d5e2b6330243586dabc34d5f60
|
94d15ed8d7578c733b0559f270a24e444648b5c0
|
/high_challenge/심화문제 8.4.py
|
c6ca6b6d98f318186974833794c8fa5a79844fd9
|
[] |
no_license
|
zodang/python_practice
|
3f9a5d4d26c70cef2f30a04362ac0ec541a330ef
|
e5da0ef7a5d15d7e942ca0353815b125c7ed72e0
|
refs/heads/main
| 2023-08-19T21:53:55.520068 | 2021-10-30T07:32:01 | 2021-10-30T07:32:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 63 |
py
|
lst = [10,30,40,50,30,30,20,20,20,10,30]
# list.index returns the position of the *first* occurrence (here: 6)
print(lst.index(20))
|
[
"[email protected]"
] | |
d73fc0ae3c2da2162d8453d2b5e787b9df6ae86b
|
7e395a7ac6abec3fe24f4ca02d5370f1c8fb3c17
|
/DemoPrj_tent/DemoPrj_tent/urls_public.py
|
06ff4bfac8b1385a6b6a1c05aa6da3df0bdb4195
|
[] |
no_license
|
udaykumaraodh/DjangoTentPrj
|
fbfe6929954846d3c9bc4815a06108eecf3ea54c
|
53d8c518247666f7325bb55672819dce66bf89a9
|
refs/heads/main
| 2023-07-27T21:06:11.704280 | 2021-08-31T16:28:49 | 2021-08-31T16:28:49 | 401,743,799 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 894 |
py
|
"""DemoPrj_tent URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from employee_shared import views
urlpatterns = [
    # Django admin site
    path('admin/', admin.site.urls),
    # employee views from the employee_shared app
    # (presumably details / update / delete endpoints, per the names)
    path('empdet/',views.empDetails),
    path('empupd/',views.empUpd),
    path('empdel/',views.empDel),
]
|
[
"[email protected]"
] | |
168901e7e84d4b1ade6c2cd222d40e16ee48d113
|
7ec38beb6f041319916390ee92876678412b30f7
|
/src/leecode/medium_0885.py
|
54fe1f13b536aa90188d941dedc3bf3b9271e365
|
[] |
no_license
|
hopensic/LearnPython
|
3570e212a1931d4dad65b64ecdd24414daf51c73
|
f735b5d865789843f06a623a4006f8883d6d1ae0
|
refs/heads/master
| 2022-02-18T23:11:30.663902 | 2022-02-12T17:51:56 | 2022-02-12T17:51:56 | 218,924,551 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,202 |
py
|
from datetime import datetime
'''
tag: ^0885 ^medium ^math
name: ^(Spiral Matrix III)
'''
class Solution:
    def spiralMatrixIII(self, R: int, C: int, r0: int, c0: int):
        """Walk an outward clockwise spiral from (r0, c0) over an R x C grid
        and return every cell, in visit order, as (row, col) tuples.

        Each ring's four edges are clamped to the grid; cells already seen
        (because of clamping) are skipped, so each of the R*C cells appears
        exactly once.

        NOTE: LeetCode 885 expects ``[r, c]`` lists; this keeps the
        original's tuple output for the local driver below.

        Fix over the original: the original's ``break`` only exited the
        inner ``for``, so after all cells were collected it kept scanning
        every remaining ring uselessly.  We now return as soon as the grid
        is fully collected — the visit order is unchanged.
        """
        res = []
        # enough rings to reach the farthest grid edge from (r0, c0)
        ring_count = max(r0, R - r0, c0, C - c0) + 2
        visited = [[False] * C for _ in range(R)]
        total = R * C
        # current cursor position, clamped to the grid (col, row)
        cur_x, cur_y = c0, r0

        def visit(y, x):
            """Collect (y, x) if new; return True once the grid is full."""
            if not visited[y][x]:
                visited[y][x] = True
                res.append((y, x))
            return len(res) == total

        for ring in range(1, ring_count):
            # east: along row cur_y to the ring's right edge
            edge = min(c0 + ring, C - 1)
            for x in range(cur_x, edge + 1):
                if visit(cur_y, x):
                    return res
            cur_x = edge
            # south: down column cur_x to the ring's bottom edge
            edge = min(r0 + ring, R - 1)
            for y in range(cur_y, edge + 1):
                if visit(y, cur_x):
                    return res
            cur_y = edge
            # west: back along row cur_y to the ring's left edge
            edge = max(c0 - ring, 0)
            for x in range(cur_x, edge - 1, -1):
                if visit(cur_y, x):
                    return res
            cur_x = edge
            # north: up column cur_x to the ring's top edge
            edge = max(r0 - ring, 0)
            for y in range(cur_y, edge - 1, -1):
                if visit(y, cur_x):
                    return res
            cur_y = edge
        return res
# Ad-hoc driver: time one spiralMatrixIII call and print the result.
R = 3
C = 3
r0 = 2
c0 = 2
t1 = datetime.now()
s = Solution()
print(s.spiralMatrixIII(R, C, r0, c0))
t2 = datetime.now()
# wall-clock duration of the call (includes the print above)
print(t2 - t1)
|
[
"[email protected]"
] | |
479e2b2cb2e47d4dcd5e6f5be98b017a32ce79c5
|
142e8dbcd065e689dd7599f1f2b7ee23f2ae9616
|
/54.nearest_even_no.py
|
d3833e6cd7a45422e5f29f7e501cc974dbf4ddcd
|
[] |
no_license
|
shanthivimalanataraajan01/Beginner
|
f62ef7ba9b4c99591ca61f5f68a75d542c4adeb1
|
0b45d623ae24b0896a1d3f91e01fc497c31edc1d
|
refs/heads/master
| 2020-04-26T22:19:12.549521 | 2019-01-25T10:30:02 | 2019-01-25T10:30:02 | 173,869,297 | 0 | 0 | null | 2019-03-05T03:51:53 | 2019-03-05T03:51:53 | null |
UTF-8
|
Python
| false | false | 81 |
py
|
# your code goes here
# Round n down to the nearest even number (odd values lose 1).
n = int(input())
if n % 2 != 0:
    n = n - 1
# single print covers both branches (the original duplicated it in if/else)
print(n)
|
[
"[email protected]"
] | |
155930bcbbb9b559f4026ae42f775034e140cbe7
|
4d99350a527a88110b7bdc7d6766fc32cf66f211
|
/OpenGLCffi/GL/EXT/KHR/robustness.py
|
39eb5aee53fafaf629f399a33917ceedf689edd4
|
[
"MIT"
] |
permissive
|
cydenix/OpenGLCffi
|
e790ef67c2f6c9877badd5c38b7d58961c8739cd
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
refs/heads/master
| 2021-01-11T07:31:10.591188 | 2017-04-17T11:04:55 | 2017-04-17T11:04:55 | 80,312,084 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,324 |
py
|
from OpenGLCffi.GL import params
@params(api='gl', prms=[])
def glGetGraphicsResetStatus():
pass
@params(api='gl', prms=['x', 'y', 'width', 'height', 'format', 'type', 'bufSize', 'data'])
def glReadnPixels(x, y, width, height, format, type, bufSize, data):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformfv(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformiv(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformuiv(program, location, bufSize, params):
pass
@params(api='gl', prms=[])
def glGetGraphicsResetStatusKHR():
pass
@params(api='gl', prms=['x', 'y', 'width', 'height', 'format', 'type', 'bufSize', 'data'])
def glReadnPixelsKHR(x, y, width, height, format, type, bufSize, data):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformfvKHR(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformivKHR(program, location, bufSize, params):
pass
@params(api='gl', prms=['program', 'location', 'bufSize', 'params'])
def glGetnUniformuivKHR(program, location, bufSize, params):
pass
|
[
"[email protected]"
] | |
b79e99dc04ca2be2a69ca6079afc7b69c0afa6cd
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/Quote18/HQ_18_156.py
|
2d711354476219ece70b5d19df540986a8e7b237
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,485 |
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_156(xtp_test_case):
def subTickByTick(self, Api, stk_info, case_name, rs_expect):
print Api.GetApiVersion()
def on_all_tick_by_tick(data, error, is_last):
pass
def on_unsub_tick_by_tick(data, error, is_last):
self.print_msg(case_name, rs_expect, error)
Api.setSubTickByTickHandle(on_all_tick_by_tick)
Api.setUnSubscribeTickByTickHandle(on_unsub_tick_by_tick)
Api.SubscribeTickByTick(stk_info)
Api.UnSubscribeTickByTick(stk_info)
time.sleep(1)
def print_msg(self, case_name, rs_expect, error):
if rs_expect == error:
logger.warning('{0}测试正确!'.format(case_name))
else:
logger.error('{0}测试错误!'.format(case_name))
self.assertEqual(error, rs_expect)
def test_HQ_18_156(self):
pyname = 'HQ_18_156'
client_id = 6
Api = XTPQuoteApi(client_id)
Api.Login()
stk_info = {'ticker': '!@#¥%……&×()<>?', 'exchange_id': 2}
self.subTickByTick(Api, stk_info, pyname,
{'error_id': 11200003, 'error_msg': 'unknown security'}) # 5
Api.Logout()
if __name__=='__main__':
unittest.main()
|
[
"[email protected]"
] | |
4584b6d202923b7876acb783ea7f94a55dccd0e4
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/storage/file_download_task.py
|
55c926f4ed9ddba8c6feb5c5f335d5da0e5ad498
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 |
Python
|
UTF-8
|
Python
| false | false | 2,211 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task for file downloads.
Typically executed in a task iterator:
googlecloudsdk.command_lib.storage.task_executor.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.storage import api_factory
from googlecloudsdk.command_lib.storage import storage_url
from googlecloudsdk.command_lib.storage import task
from googlecloudsdk.core.util import files
class FileDownloadTask(task.Task):
"""Represents a command operation triggering a file download.
Attributes:
destination_local_path (str): The local filesystem path to write the file
to.
source_object_reference (resource_reference.ObjectReference): Must
contain the full path of object to download, including bucket.
Directories will not be accepted.
"""
def __init__(self, destination_local_path, source_object_reference):
super(FileDownloadTask, self).__init__()
self.download_stream = files.FileWriter(destination_local_path)
cloud_url = storage_url.CloudUrl.from_url_string(
source_object_reference.storage_url.url_string)
self.provider = cloud_url.scheme
self.bucket_name = cloud_url.bucket_name
self.object_name = cloud_url.object_name
def execute(self, callback=None):
# TODO(b/162264437): Support all of DownloadObject's parameters.
api_factory.get_api(self.provider).DownloadObject(self.bucket_name,
self.object_name,
self.download_stream)
|
[
"[email protected]"
] | |
07be9fff7ab78d6a62e09d5b2b53cd61b81cb9aa
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2671/60767/236760.py
|
65aabd02d6cd17bfb4abc0f16f21487ef1587e78
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 512 |
py
|
def has11(num):
while(num>=1):
if(num%2==1):
num = num>>1
if(num%2==1):
return True
num = num>>1
return False
numOfTests = int(input())
Tests = []
for i in range(0,numOfTests):
Tests.append(int(input()))
for test in Tests:
temp = []
cnt = 0
for i in range(0,test):
temp.append("1")
s = "".join(temp)
maxNum = int(s,base=2)
for x in range(1,maxNum+1):
if(has11(x)):
cnt = cnt+1
print(cnt)
|
[
"[email protected]"
] | |
34d0c59dbfbfa437e013e76dcd0fc29661a04b84
|
1259ee2a27cbb2d7de3e034159957d6043161add
|
/tests/roots/test-ext-autodoc/conf.py
|
9f026eb8deab5ef78ede37824b69d322c6f75fc5
|
[
"MIT",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
balabit-deps/balabit-os-7-sphinx
|
f7b0ad4967418f074e8876cd8c7f4a7f5cfbe5d3
|
4e18ca37f4ddddf346c0b30835a544db20887259
|
refs/heads/master
| 2020-04-07T09:14:11.757278 | 2018-04-11T21:10:19 | 2018-07-20T22:59:13 | 158,244,890 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 209 |
py
|
import sys, os
sys.path.insert(0, os.path.abspath('.'))
extensions = ['sphinx.ext.autodoc']
# The suffix of source filenames.
source_suffix = '.rst'
autodoc_mock_imports = [
'dummy'
]
nitpicky = True
|
[
"[email protected]"
] | |
80d765845dcafee702bed7550ecb6e0196682ee9
|
30ac2f9831ebd33885a6f48d153356c2e3731c26
|
/Python_Stack/flask/playground_project/server.py
|
12ab16b5079218d55d52fb8fb967d84a7c922824
|
[] |
no_license
|
pharaoht/Coding-Dojo-Projects
|
192cfd8c36b6dadb049e81d31bd780c7ab340d1e
|
504f71acbac3c006cf866a08aea0566058f81ce2
|
refs/heads/master
| 2023-05-11T21:09:17.316257 | 2021-06-08T00:54:09 | 2021-06-08T00:54:09 | 334,003,413 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 434 |
py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/play")
def showboxes():
return render_template("index1.html")
@app.route("/play/<num>")
def showboxes1(num):
return render_template("index.html", num=int(num))
@app.route("/play/<num>/<color>")
def showboxes2(num, color):
return render_template("index3.html", num=int(num), color=color)
if __name__ == "__main__":
app.run(debug=True)
|
[
"[email protected]"
] | |
8a903a879f726b3a88d3a2e6e5a2b06370843dcb
|
9aa85999021da96ce0a7d76789c1298d174d1835
|
/blogs/migrations/0076_auto_20200128_1850.py
|
649f921de7e73d9b82215f179e7f222baced5e4f
|
[] |
no_license
|
m0bi5/ISTE-NITK_Website
|
20b83a3a629836c33c7478c0af834f6f57e0e907
|
2e186bb1ba457c930f9b691cc5a5584b8e3c270c
|
refs/heads/master
| 2022-11-24T03:02:49.354491 | 2020-07-24T15:43:44 | 2020-07-24T15:43:44 | 184,452,941 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 452 |
py
|
# Generated by Django 2.2.4 on 2020-01-28 18:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0075_auto_20200128_1844'),
]
operations = [
migrations.AlterField(
model_name='bloghits',
name='created',
field=models.DateTimeField(default=datetime.datetime(2020, 1, 28, 18, 50, 19, 836941)),
),
]
|
[
"[email protected]"
] | |
55e8252223a59f65395fe1c7789fd74a8d4a78ba
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02792/s894845946.py
|
a0b1659006571d0d9285032ffceafc05e79611c9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,081 |
py
|
# import sys
# sys.setrecursionlimit(10 ** 6)
def cmb(n, r):
import math
if n < r:
return 0
return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N):
ABs = {}
ABs_visited = {}
for h in range(1, 10):
for t in range(1, 10):
ABs.setdefault((h, t), 0)
ABs_visited.setdefault((h, t), False)
for n in range(1, N + 1):
s = str(n)
h, t = int(s[0]), int(s[-1])
if not (h == 0 or t == 0):
ABs[(h, t)] += 1
ans = 0
for k in ABs:
if ABs_visited[k]:
continue
h, t = k
ABs_visited[(h, t)] = True
# ABs_visited[(t, h)] = True
# if h == t:
# ans += ABs[(h, t)] + cmb(ABs[(h, t)], 2)
# else:
# ans += ABs[(h, t)] * ABs[(t, h)]
ans += ABs[(h, t)] * ABs[(t, h)]
# print(k, ABs[k], ans)
# input()
# print(ABs)
print(ans)
if __name__ == '__main__':
N = int(input())
solve(N)
|
[
"[email protected]"
] | |
d998ef3b950460ec243b2d468ca72164ed6addba
|
7fdefad804586192915fc298a63db136c0863995
|
/wxPython in Action/wxPythonInAction-src/Chapter-15/tree_simple.py
|
b9863e92018508c953ddaf7a65d656e00e8287f4
|
[] |
no_license
|
typ0520/python_ebook
|
2ca948937e2f390a4e4c2ac57f6cd3124ab507a0
|
9abda102b9f245178b61bf9ffca0e633ad96fec1
|
refs/heads/master
| 2021-06-20T14:14:22.813999 | 2017-08-14T07:31:41 | 2017-08-14T07:31:41 | 98,658,811 | 0 | 1 | null | 2017-07-28T14:43:55 | 2017-07-28T14:43:55 | null |
UTF-8
|
Python
| false | false | 1,913 |
py
|
import wx
import data
class TestFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="simple tree", size=(400,500))
# Create the tree
self.tree = wx.TreeCtrl(self)
# Add a root node
root = self.tree.AddRoot("wx.Object")
# Add nodes from our data set
self.AddTreeNodes(root, data.tree)
# Bind some interesting events
self.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded, self.tree)
self.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed, self.tree)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, self.tree)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivated, self.tree)
# Expand the first level
self.tree.Expand(root)
def AddTreeNodes(self, parentItem, items):
"""
Recursively traverses the data structure, adding tree nodes to
match it.
"""
for item in items:
if type(item) == str:
self.tree.AppendItem(parentItem, item)
else:
newItem = self.tree.AppendItem(parentItem, item[0])
self.AddTreeNodes(newItem, item[1])
def GetItemText(self, item):
if item:
return self.tree.GetItemText(item)
else:
return ""
def OnItemExpanded(self, evt):
print "OnItemExpanded: ", self.GetItemText(evt.GetItem())
def OnItemCollapsed(self, evt):
print "OnItemCollapsed:", self.GetItemText(evt.GetItem())
def OnSelChanged(self, evt):
print "OnSelChanged: ", self.GetItemText(evt.GetItem())
def OnActivated(self, evt):
print "OnActivated: ", self.GetItemText(evt.GetItem())
app = wx.PySimpleApp(redirect=True)
frame = TestFrame()
frame.Show()
app.MainLoop()
|
[
"[email protected]"
] | |
475fa382a546505487a35004c7fe5da4a68bd9ac
|
208baab269ddffab1a93e7dc70b052d07bf50560
|
/hood/migrations/0004_editor.py
|
363a783e7d71c25eefd44a4b578ec7916b9cdc00
|
[] |
no_license
|
marysinaida/Neighborhood
|
a1035f09515ae9a24bed74ddf1263e06db134c94
|
a285df5528bb99d6cb69f9ab41e320682422fe9d
|
refs/heads/master
| 2020-12-13T23:29:18.148498 | 2020-01-21T15:04:53 | 2020-01-21T15:04:53 | 234,562,242 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 725 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-01-21 10:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('hood', '0003_auto_20200121_1312'),
]
operations = [
migrations.CreateModel(
name='Editor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
],
),
]
|
[
"[email protected]"
] | |
8db6c287bc1a3eac410d8592ce97a1a24f13b860
|
3ee1bb0d0acfa5c412b37365a4564f0df1c093fb
|
/ml/m14_pipeline2_4_boston.py
|
cb55ea1a8f86bd861a41b7075007639d2c8f3b1b
|
[] |
no_license
|
moileehyeji/Study
|
3a20bf0d74e1faec7a2a5981c1c7e7861c08c073
|
188843c6415a4c546fdf6648400d072359d1a22b
|
refs/heads/main
| 2023-04-18T02:30:15.810749 | 2021-05-04T08:43:53 | 2021-05-04T08:43:53 | 324,901,835 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,655 |
py
|
# Pipeline, make_pipeline
# 모델 비교
# 2번부터 RandomForest 모델 사용
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.pipeline import Pipeline, make_pipeline # concatenate와 Concatenate의 차이와 같음
# 모델 import
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
# 1. 데이터
dataset = load_boston()
x = dataset.data
y = dataset.target
# 전처리
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size = 0.8, random_state = 120, shuffle = True)
# Pipeline 사용시 필요 없음
# scaler = MinMaxScaler()
# scaler.fit(x_train)
# x_train = scaler.transform(x_train)
# x_test = scaler.transform(x_test)
# 2. 모델구성
# ====================================================================Pipeline
# Pipeline, make_pipeline : 전처리와 모델을 연결(통로)
# 별도 MinMaxScaler 필요없음
scalers = np.array([MinMaxScaler(), StandardScaler()])
for scaler in scalers:
print('==========================',scaler)
model_Pipeline = Pipeline([('scaler', scaler), ('malddong', RandomForestRegressor())])
model_make_pipeline = make_pipeline(scaler, RandomForestRegressor())
# 3. 훈련
model_Pipeline.fit(x_train, y_train)
model_make_pipeline.fit(x_train, y_train)
# 4. 평가
results1 = model_Pipeline.score(x_test, y_test)
results2 = model_make_pipeline.score(x_test, y_test)
print('model_Pipeline의 score : ', results1)
print('model_make_pipeline의 score : ', results2)
'''
1. Tensorflow :
CNN모델 r2 : 0.9462232137123261
2. RandomForest모델 :
============================================GridSearchCV
최종 정답률 : 0.8571954130553036
34.47초 걸렸습니다
============================================RandomizedSearchCV
최종 정답률 : 0.8542102932416746
13.23초 걸렸습니다
3. RandomForest모델, Pipeline() :
========================== MinMaxScaler()
model_Pipeline의 score : 0.8513337126169909
model_make_pipeline의 score : 0.8513337126169909
========================== StandardScaler()
model_Pipeline의 score : 0.8471314230423943
model_make_pipeline의 score : 0.8471314230423943
'''
|
[
"[email protected]"
] | |
fea6c22074c77c9e5682f20c035d3b881dfa6d4f
|
ed218f5ea54eac34743f22596eae60242bb73004
|
/backend/chat/admin.py
|
5e83fe01bb146e7b8ebb6050982a7647e1960b63
|
[] |
no_license
|
crowdbotics-apps/kids-teach-kids-18288
|
65566d8ae92964c1e482c79883d1bce2bf7ff6f0
|
3b54f77ed6366541a4cd041d85934c8c802b409b
|
refs/heads/master
| 2022-11-05T14:28:05.406100 | 2020-06-20T19:03:20 | 2020-06-20T19:03:20 | 273,767,844 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
from django.contrib import admin
from .models import (
Message,
ThreadMember,
MessageAction,
ThreadAction,
ForwardedMessage,
Thread,
)
admin.site.register(ThreadAction)
admin.site.register(ForwardedMessage)
admin.site.register(MessageAction)
admin.site.register(Thread)
admin.site.register(ThreadMember)
admin.site.register(Message)
# Register your models here.
|
[
"[email protected]"
] | |
667f652834d1ed267e8db34154d55671fed9c562
|
5b28005b6ee600e6eeca2fc7c57c346e23da285f
|
/nomadic_recording_lib/ui/iOSCControl/sessionselect.py
|
6669a04c959c24f1afc8377ae5c8dea8ae353723
|
[] |
no_license
|
nocarryr/wowza_logparse
|
c31d2db7ad854c6b0d13495a0ede5f406c2fce3f
|
d6daa5bf58bae1db48ac30031a845bf975c7d5cc
|
refs/heads/master
| 2021-01-17T07:19:00.347206 | 2017-06-24T16:57:32 | 2017-06-24T16:57:32 | 25,835,704 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,048 |
py
|
from Bases import OSCBaseObject
import widgets
class SessionSelect(OSCBaseObject):
_Properties = {'selection':dict(type=str, quiet=True)}
def __init__(self, **kwargs):
self.iOsc = kwargs.get('iOsc')
self.client = kwargs.get('client')
kwargs.setdefault('osc_parent_node', self.client.osc_node)
kwargs.setdefault('osc_address', 'SessionSelect')
kwargs.setdefault('ParentEmissionThread', self.iOsc.ParentEmissionThread)
super(SessionSelect, self).__init__(**kwargs)
x = .25
y = .1
w = .25
h = .1
bounds = [x, y, w, h]
self.topwidget = self.iOsc.add_widget('Label',
name='topwidget',
bounds=bounds,
osc_parent_node=self.osc_node,
client=self.client,
value='Select Session')
self.session_btns = {}
sessions = sorted(self.iOsc.comm.osc_io.discovered_sessions.keys())
for i, key in enumerate(sessions):
if key is None:
continue
y += h
bounds = [x, y, w, h]
btn = self.topwidget.add_widget(SessionButton, name=key, index=i, bounds=bounds)
self.session_btns[key] = btn
btn.bind(touch_state=self.on_session_btn_touch)
def unlink(self):
self.topwidget.remove()
super(SessionSelect, self).unlink()
def on_session_btn_touch(self, **kwargs):
state = kwargs.get('value')
btn = kwargs.get('obj')
if state and self.selection is None:
self.selection = btn.name
self.LOG.info(self.selection)
class SessionButton(widgets.Toggle):
def __init__(self, **kwargs):
self.index = kwargs.get('index')
kwargs['label'] = kwargs['name']
super(SessionButton, self).__init__(**kwargs)
|
[
"[email protected]"
] | |
59982cb9f893c43e0cf6038a51750f94da4c4fb5
|
1dd72195bc08460df7e5bb82d3b7bac7a6673f49
|
/api/alembic/versions/69cbd7ca2477_add_gfs_prediction_model.py
|
856f0767fd5fe67adc724458b639c5b7b9e2571f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
bcgov/wps
|
c4347c39cadfad6711502d47776abc8d03895593
|
0ba707b0eddc280240964efa481988df92046e6a
|
refs/heads/main
| 2023-08-19T00:56:39.286460 | 2023-08-16T18:03:06 | 2023-08-16T18:03:06 | 235,861,506 | 35 | 9 |
Apache-2.0
| 2023-09-11T21:35:07 | 2020-01-23T18:42:10 |
Python
|
UTF-8
|
Python
| false | false | 573 |
py
|
"""Add GFS prediction model
Revision ID: 69cbd7ca2477
Revises: de8355996f8e
Create Date: 2023-02-09 14:30:49.597571
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '69cbd7ca2477'
down_revision = 'de8355996f8e'
branch_labels = None
depends_on = None
def upgrade():
op.execute('INSERT INTO prediction_models(name, abbreviation, projection)\
VALUES(\'Global Forecast System\', \'GFS\', \'lonlat.0.5deg\')')
def downgrade():
op.execute('DELETE FROM prediction_models WHERE abbreviation = \'GFS\'')
|
[
"[email protected]"
] | |
2344ae408834eb1cc85fd232d50f961cdc1f96b4
|
09c97a53c39c83bef52d15db6644a27a3bbf229f
|
/part01-web/day07~_Python/todoMgrSystem/view/menu_view.py
|
e8eb413180911442b7d826719825336e9c65ee92
|
[] |
no_license
|
wansang93/Cloud_multicampus
|
aa0372914e28ebdc76f5d683e9886841be5f5891
|
646325cee93d6bcabd4163c7bb099e4e92621e9f
|
refs/heads/master
| 2023-03-27T07:02:41.552663 | 2021-03-24T01:53:07 | 2021-03-24T01:53:07 | 326,610,203 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,454 |
py
|
from entity.todo import Todo
"""
Define View
"""
### Menu ###
# show menu
def display_menu():
print('==================================================')
print('등록(1) 보기(2) 수정(3) 삭제(4) 모두 삭제(5) 나가기(x)')
# select menu number
def select_menu():
menu = input()
return menu
# 메뉴를 다시 입력하라는 문구
def display_reinput():
print('메뉴를 잘못 입력하였습니다. 다시 입력해 주세요.')
# 메뉴1. 등록하기(Create)
def display_register():
while True:
todo_id = input('id를 입력해 주세요(유니크 값으로): ')
# 사용자 입력 폼에서 에러 체크
if not todo_id.isdecimal():
print('id는 숫자를 입력해 주세요.')
else:
break
while True:
todo_what = input('할 일을 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_what:
print('공백이 아닌 것으로 입력해 주세요.')
else:
break
return Todo(todo_id, todo_what)
# 메뉴3. 수정하기(Update)
# 해당하는 id가 있는지 확인하기
def check_id_for_update():
while True:
todo_id = input('업데이트 할 id를 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_id.isdecimal():
print('id는 숫자를 입력해 주세요.')
else:
break
return todo_id
def get_what_for_update():
while True:
todo_what = input('수정 사항을 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_what:
print('공백이 아닌 것으로 입력해 주세요.')
else:
break
return todo_what
# 메뉴4. 삭제하기(Delete)
def check_id_for_delete():
while True:
todo_id = input('삭제할 할 id를 입력해 주세요: ')
# 사용자 입력 폼에서 에러 체크
if not todo_id.isdecimal():
print('id는 숫자를 입력해 주세요.')
else:
break
return todo_id
# 메뉴5. 전부 삭제하기(Delete All)
def delete_all():
print('메모를 전부 삭제합니다.')
text = input('정말로 실행하시겠습니까? [y/n]: ')
while True:
if text in ['y', 'Y', 'n', 'N']:
break
print('y 또는 n 으로 입력해 주세요.')
return text
|
[
"[email protected]"
] | |
325e2088f1a70378cc88cdf3a96e15fa365b4554
|
35aca1291dae461d5562a3b7484e5f659ee80817
|
/oneflow/__main__.py
|
b0b1d9d18c301e9044ada93b56b5d3610add3d7f
|
[
"Apache-2.0"
] |
permissive
|
Flowingsun007/oneflow
|
e6a52cfbf5e82ca4f8b787aa026f40a2f568a10f
|
c1880c011dd453719a28d880abe15e2dab8d0da1
|
refs/heads/master
| 2023-05-11T19:18:59.220269 | 2021-05-28T20:10:35 | 2021-05-28T20:10:35 | 372,195,705 | 0 | 0 |
Apache-2.0
| 2021-06-02T09:46:51 | 2021-05-30T11:24:37 | null |
UTF-8
|
Python
| false | false | 1,574 |
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--start_worker", default=False, action="store_true", required=False
)
parser.add_argument("--env_proto", type=str, required=False)
parser.add_argument("--doctor", default=False, action="store_true", required=False)
args = parser.parse_args()
def StartWorker(env_proto):
import oneflow._oneflow_internal
oneflow._oneflow_internal.InitEnv(env_proto)
def main():
start_worker = args.start_worker
if start_worker:
env_proto = args.env_proto
assert os.path.isfile(
env_proto
), "env_proto not found, please check your env_proto path: {}".format(env_proto)
with open(env_proto, "rb") as f:
StartWorker(f.read())
if args.doctor:
import oneflow
print("path:", oneflow.__path__)
print("version:", oneflow.__version__)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
c74cf90baafe4882b333e01b28d7a2e85ebfb96b
|
130e9ef21397b5263ecaf2923f3a196eba58ef5a
|
/pyxel/ui/number_picker.py
|
e7a1d3b6ec2d162aa814b72adab9af68df6dec6c
|
[
"MIT"
] |
permissive
|
sacredhotdog/pyxel
|
29571dd3daef6d813f9fdd833bf55e5ba0af689a
|
08da48dbd1ac53c06cf8a383f28d66fd89f78f4a
|
refs/heads/master
| 2020-04-04T16:35:26.370822 | 2018-11-04T09:06:05 | 2018-11-04T09:06:05 | 156,084,070 | 0 | 0 |
MIT
| 2018-11-04T13:18:19 | 2018-11-04T13:18:19 | null |
UTF-8
|
Python
| false | false | 2,600 |
py
|
import pyxel
from .constants import INPUT_FIELD_COLOR, INPUT_TEXT_COLOR
from .text_button import TextButton
from .widget import Widget
class NumberPicker(Widget):
"""
Events:
__on_change(value)
"""
def __init__(self, parent, x, y, min_value, max_value, value, **kwargs):
self._number_len = max(len(str(min_value)), len(str(max_value)))
width = self._number_len * 4 + 21
height = 7
super().__init__(parent, x, y, width, height, **kwargs)
self._min_value = min_value
self._max_value = max_value
self._value = None
self.dec_button = TextButton(self, x, y, "-")
self.inc_button = TextButton(self, x + width - 7, y, "+")
self.add_event_handler("enabled", self.__on_enabled)
self.add_event_handler("disabled", self.__on_disabled)
self.add_event_handler("draw", self.__on_draw)
self.dec_button.add_event_handler("press", self.__on_dec_button_press)
self.dec_button.add_event_handler("repeat", self.__on_dec_button_press)
self.inc_button.add_event_handler("press", self.__on_inc_button_press)
self.inc_button.add_event_handler("repeat", self.__on_inc_button_press)
self.value = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._value != value:
self._value = value
self.call_event_handler("change", value)
self.dec_button.is_enabled = self._value != self._min_value
self.inc_button.is_enabled = self._value != self._max_value
def __on_enabled(self):
self.dec_button.is_enabled = self._value != self._min_value
self.inc_button.is_enabled = self._value != self._max_value
def __on_disabled(self):
self.dec_button.is_enabled = False
self.inc_button.is_enabled = False
def __on_draw(self):
x1 = self.x
y1 = self.y
x2 = self.x + self.width - 1
y2 = self.y + self.height - 1
pyxel.rect(x1 + 9, y1, x2 - 9, y2, INPUT_FIELD_COLOR)
pyxel.text(
self.x + 11,
self.y + 1,
("{:>" + str(self._number_len) + "}").format(self._value),
INPUT_TEXT_COLOR,
)
def __on_dec_button_press(self):
offset = 10 if pyxel.btn(pyxel.KEY_SHIFT) else 1
self.value = max(self._value - offset, self._min_value)
def __on_inc_button_press(self):
offset = 10 if pyxel.btn(pyxel.KEY_SHIFT) else 1
self.value = min(self._value + offset, self._max_value)
|
[
"[email protected]"
] | |
268eb15f65ebda9c888438d11aa9f83830e6d243
|
25ba5fb4e2d02d6949f85ca49e11a70d3960432d
|
/lib/python/treadmill/alert/__init__.py
|
9fef9e3d07e7fbee2c8270059253dff919d686f5
|
[
"Apache-2.0"
] |
permissive
|
crazyrex/treadmill
|
9c007967db9470685b2d11e2232ad9926b47733e
|
75be287a808a4cbdacab67b3f62a3cb3eb1eab67
|
refs/heads/master
| 2020-03-31T03:55:39.713552 | 2018-10-04T15:28:36 | 2018-10-04T15:28:36 | 151,884,550 | 1 | 0 |
Apache-2.0
| 2018-10-06T21:50:04 | 2018-10-06T21:50:03 | null |
UTF-8
|
Python
| false | false | 1,360 |
py
|
"""Treadmill alert module.
"""
import io
import json
import os.path
import time
from treadmill import fs
def create(alerts_dir,
epoch_ts=None,
instanceid=None,
summary=None,
type_=None,
**alert_data):
"""Create a file in alerts_dir representing the alert.
"""
if not epoch_ts:
epoch_ts = time.time()
alert_data.update(
{
'epoch_ts': epoch_ts,
'instanceid': instanceid,
'summary': summary,
'type_': type_,
}
)
fs.write_safe(
os.path.join(alerts_dir, _to_filename(instanceid, type_)),
lambda f: f.write(
json.dumps(alert_data, indent=4).encode()
),
prefix='.tmp',
permission=0o644
)
def _to_filename(instanceid, type_):
"""Returns a host wide unique filename for the alert.
Alerts sorted alphabetically result in chronological order.
"""
return '{:f}-{}-{}'.format(
time.monotonic(), instanceid, type_
).replace(os.path.sep, '_')
def read(filename, alerts_dir=None):
"""Return the alert stored in the file.
"""
if alerts_dir is not None:
filename = os.path.join(alerts_dir, filename)
with io.open(filename, 'rb') as file_:
alert = json.loads(file_.read().decode())
return alert
|
[
"[email protected]"
] | |
6d3769cc470c8fe3b6958de2bbaec474c6edbc6b
|
ec68eee1abe0f900210c2bad51b64fb8a1053d5d
|
/fullerene/config.py
|
f1f81f62399499bb184b1eb86504f3882803b09d
|
[] |
no_license
|
bitprophet/fullerene
|
36a011eebf1ef1a14f963ed8101334c608757b92
|
edb9afe6c07c9d610dfa8630142abb96382ff0c1
|
refs/heads/master
| 2020-05-18T20:27:56.612256 | 2012-02-11T00:14:52 | 2012-02-11T00:14:52 | 2,579,486 | 15 | 1 | null | 2017-12-11T17:30:57 | 2011-10-14T23:37:06 |
Python
|
UTF-8
|
Python
| false | false | 2,804 |
py
|
import yaml
from graphite import Graphite
from metric import Metric
class Config(object):
def __init__(self, text):
# Load up
config = yaml.load(text)
# Required items
try:
try:
exclude_hosts = config['hosts']['exclude']
except KeyError:
exclude_hosts = []
self.graphite = Graphite(
uri=config['graphite_uris']['internal'],
exclude_hosts=exclude_hosts
)
except KeyError:
raise ValueError, "Configuration must specify graphite_uris: internal"
# Optional external URL (for links)
self.external_graphite = config['graphite_uris'].get('external', None)
# 'metrics' section
self.metrics = {}
for name, options in config.get('metrics', {}).iteritems():
self.metrics[name] = Metric(
options=options,
config=self,
name=name
)
# Metric groups
self.groups = {}
for name, metrics in config.get('metric_groups', {}).iteritems():
if name not in self.groups:
self.groups[name] = {}
for item in metrics:
self.groups[name][item] = self.parse_metric(item)
# 'collections'
self.collections = config.get('collections', {})
for collection in self.collections.values():
# Instantiate metrics where needed
for group in collection['groups'].values():
group['metrics'] = map(self.parse_metric, group['metrics'])
if 'overview' in group:
group['overview'] = map(
self.parse_metric,
group['overview'][:]
)
# Default graph args
self.defaults = config.get('defaults', {})
# Timeperiod aliases
self.periods = config.get('periods', {})
def parse_metric(self, item):
exists = False
try:
exists = item in self.metrics
except TypeError:
pass
# Name + name already exists as a metric alias == use that
if exists:
metric = self.metrics[item]
else:
# String == metric path == make new metric from it
if isinstance(item, basestring):
metric = Metric({'path': item}, config=self, name=item)
# Non-string == assume hash/dict == make metric from that (assumes
# one-item dict, name => metric)
else:
name, value = item.items()[0]
metric = Metric(name=name, config=self, options=value)
return metric
@property
def metric_groups(self):
return sorted(self.groups)
|
[
"[email protected]"
] | |
5b8a25a8b5efba5420da7435bb20a6ce21dac8c9
|
a0f0efaaaf69d6ccdc2a91596db29f04025f122c
|
/install/nav_2d_msgs/lib/python2.7/dist-packages/nav_2d_msgs/msg/_Twist2D.py
|
d9e163e152623a1775bb41432708d59da2817b86
|
[] |
no_license
|
chiuhandsome/ros_ws_test-git
|
75da2723154c0dadbcec8d7b3b1f3f8b49aa5cd6
|
619909130c23927ccc902faa3ff6d04ae0f0fba9
|
refs/heads/master
| 2022-12-24T05:45:43.845717 | 2020-09-22T10:12:54 | 2020-09-22T10:12:54 | 297,582,735 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,788 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from nav_2d_msgs/Twist2D.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Twist2D(genpy.Message):
    # NOTE: genpy-generated ROS message class (file header says "Do not edit");
    # manual changes are lost on regeneration, so only comments are added here.
    _md5sum = "938fa65709584ad8e77d238529be13b8"
    _type = "nav_2d_msgs/Twist2D"
    _has_header = False # flag to mark the presence of a Header object
    _full_text = """float64 x
float64 y
float64 theta
"""
    __slots__ = ['x','y','theta']
    _slot_types = ['float64','float64','float64']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.
        The available fields are:
           x,y,theta
        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(Twist2D, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.x is None:
                self.x = 0.
            if self.y is None:
                self.y = 0.
            if self.theta is None:
                self.theta = 0.
        else:
            self.x = 0.
            self.y = 0.
            self.theta = 0.

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            # x, y, theta are packed as three little-endian float64s (24 bytes).
            buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.theta))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            end = 0
            _x = self
            start = end
            # 24 bytes = three little-endian float64s (x, y, theta).
            end += 24
            (_x.x, _x.y, _x.theta,) = _get_struct_3d().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self
            buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.theta))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            end = 0
            _x = self
            start = end
            end += 24
            (_x.x, _x.y, _x.theta,) = _get_struct_3d().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared genpy Struct for a single uint32 ('I'); accessor keeps the
# generated (de)serialization code uniform.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# Lazily-created, cached Struct for three little-endian float64s ("<3d").
_struct_3d = None
def _get_struct_3d():
    global _struct_3d
    if _struct_3d is None:
        _struct_3d = struct.Struct("<3d")
    return _struct_3d
|
[
"[email protected]"
] | |
0b82641f368069e443e83c50231fbc5e08c0a609
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02584/s013302359.py
|
f67303454d5f33e7cc48ea5bafc8491a565b7b35
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
def solve(x, k, d):
    """Return the minimum |position| reachable from ``x`` using exactly ``k``
    moves of size ``d`` (each move is +d or -d).

    Strategy: move toward zero as long as a full step helps (at most
    ``|x| // d`` such steps), then if an odd number of moves remains the
    final position oscillates across zero by one step.
    """
    cur = abs(x)
    # Steps that strictly reduce |position|, capped at k total moves.
    cnt = min(cur // d, k)
    cur -= d * cnt
    rem = k - cnt
    if rem > 0 and rem % 2 == 1:
        # Leftover moves cancel in pairs; a single odd move overshoots zero.
        cur -= d
    return abs(cur)


if __name__ == "__main__":
    x, k, d = map(int, input().split())
    print(solve(x, k, d))
|
[
"[email protected]"
] | |
c1767d932fff8f34fbd63518d4b75eae263689f5
|
3a8cb8a3639cee11ec5e9c8e4a0a5f940b711dec
|
/tests/repos/converters.py
|
8510d4d3b9760df9ba3b12a1f22052591b31504e
|
[
"Apache-2.0"
] |
permissive
|
pofatu/pygeoroc
|
f35686e19dc327159ce4285e18e2590e91332a23
|
c722da35ab36f2fdcc3d793a025bb81d217238e1
|
refs/heads/master
| 2022-07-09T15:37:31.134567 | 2022-06-29T12:52:34 | 2022-06-29T12:52:34 | 253,836,063 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 225 |
py
|
from pygeoroc.errata import CONVERTERS
# Converters applied to the named fields of every record.
FIELDS = {
    'LAND_OR_SEA': CONVERTERS.upper,
}

# Per-file coordinate sign overrides.
# NOTE(review): presumably forces southern-hemisphere latitudes / eastern
# longitudes for New Caledonia -- confirm against pygeoroc.errata.CONVERTERS.
COORDINATES = {
    'NEW_CALEDONIA.csv': {
        'latitude': CONVERTERS.negative,
        'longitude': CONVERTERS.positive,
    }
}
|
[
"[email protected]"
] | |
089c2fb171d420553bd2708286517e513092ca50
|
55b57d64ec547869835334318f3059fbb507558c
|
/Fred2/Data/pssms/smmpmbec/mat/A_02_11_9.py
|
c26fd3d35fdd4abe9d48da106982f3101dd25ad0
|
[
"BSD-3-Clause"
] |
permissive
|
FRED-2/Fred2
|
9845f6678d4011cb746c7a5a6f283eea68077a02
|
b3e54c8c4ed12b780b61f74672e9667245a7bb78
|
refs/heads/master
| 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 |
Python
|
UTF-8
|
Python
| false | false | 2,305 |
py
|
A_02_11_9 = {0: {'A': -0.036, 'C': -0.12, 'E': 0.678, 'D': 0.737, 'G': -0.02, 'F': -0.698, 'I': -0.427, 'H': 0.239, 'K': -0.293, 'M': -0.416, 'L': -0.103, 'N': 0.147, 'Q': 0.596, 'P': 0.699, 'S': 0.006, 'R': -0.198, 'T': 0.363, 'W': -0.325, 'V': -0.251, 'Y': -0.578}, 1: {'A': 0.123, 'C': 0.508, 'E': 0.747, 'D': 1.206, 'G': 0.05, 'F': -0.352, 'I': -1.317, 'H': 1.169, 'K': 0.831, 'M': -2.091, 'L': -1.886, 'N': 0.521, 'Q': -0.817, 'P': 0.938, 'S': -0.052, 'R': 1.01, 'T': -0.654, 'W': 0.523, 'V': -0.909, 'Y': 0.452}, 2: {'A': 0.12, 'C': -0.308, 'E': 0.388, 'D': -0.098, 'G': -0.084, 'F': -0.452, 'I': 0.021, 'H': 0.014, 'K': 0.74, 'M': -0.269, 'L': -0.316, 'N': -0.122, 'Q': -0.1, 'P': 0.004, 'S': 0.283, 'R': 0.885, 'T': 0.48, 'W': -0.65, 'V': 0.007, 'Y': -0.541}, 3: {'A': -0.123, 'C': -0.335, 'E': -0.564, 'D': -0.409, 'G': -0.154, 'F': -0.086, 'I': 0.006, 'H': 0.192, 'K': 0.236, 'M': 0.032, 'L': 0.27, 'N': 0.055, 'Q': 0.24, 'P': 0.046, 'S': -0.011, 'R': 0.31, 'T': 0.277, 'W': -0.264, 'V': 0.329, 'Y': -0.046}, 4: {'A': -0.074, 'C': 0.041, 'E': 0.088, 'D': 0.07, 'G': -0.007, 'F': 0.024, 'I': -0.145, 'H': 0.114, 'K': 0.159, 'M': 0.109, 'L': 0.057, 'N': -0.087, 'Q': 0.137, 'P': -0.196, 'S': 0.056, 'R': 0.322, 'T': -0.018, 'W': -0.366, 'V': -0.171, 'Y': -0.115}, 5: {'A': -0.096, 'C': 0.13, 'E': 0.183, 'D': 0.349, 'G': 0.066, 'F': 0.029, 'I': -0.431, 'H': -0.225, 'K': 0.167, 'M': -0.031, 'L': -0.341, 'N': 0.169, 'Q': 0.183, 'P': 0.001, 'S': -0.005, 'R': 0.547, 'T': 0.017, 'W': -0.099, 'V': -0.449, 'Y': -0.166}, 6: {'A': 0.011, 'C': -0.251, 'E': 0.022, 'D': 0.08, 'G': 0.239, 'F': -0.312, 'I': -0.166, 'H': -0.169, 'K': 0.422, 'M': 0.013, 'L': 0.229, 'N': 0.231, 'Q': 0.132, 'P': 0.059, 'S': 0.149, 'R': 0.191, 'T': 0.14, 'W': -0.481, 'V': -0.072, 'Y': -0.468}, 7: {'A': -0.137, 'C': -0.044, 'E': -0.003, 'D': 0.431, 'G': -0.185, 'F': -0.008, 'I': 0.729, 'H': -0.171, 'K': 0.203, 'M': -0.144, 'L': 0.241, 'N': 0.017, 'Q': -0.165, 'P': -0.123, 'S': 0.05, 'R': -0.002, 'T': -0.141, 'W': 
-0.185, 'V': 0.041, 'Y': -0.404}, 8: {'A': -0.847, 'C': -0.097, 'E': 0.555, 'D': 0.419, 'G': -0.18, 'F': 0.195, 'I': -1.284, 'H': 1.363, 'K': 0.921, 'M': -0.927, 'L': -1.161, 'N': -0.352, 'Q': 0.467, 'P': 0.36, 'S': -0.292, 'R': 1.862, 'T': -0.485, 'W': 0.469, 'V': -1.76, 'Y': 0.773}, -1: {'con': 4.72423}}
|
[
"[email protected]"
] | |
2148763f39be962a4ae2cb0e8be40e490b756cb2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/3117.py
|
4b41befbf837cdaf9d873331ac63d00b40d7c5e5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
def min_invites(s_max, audience):
    """Return the minimum number of friends to invite so the whole audience
    ends up standing (Google Code Jam 2015 Qual A "Standing Ovation").

    ``audience[j]`` is the count of people with shyness level j; a person
    stands once at least j people are already standing. Invited friends have
    shyness 0.
    """
    standing = 0
    invited = 0
    for j in range(s_max + 1):
        if standing < j:
            # Not enough people standing to trigger level j: fill the gap.
            invited += j - standing
            standing = j
        standing += int(audience[j])
    return invited


def main():
    """Read A-large.in and write one 'Case #i: ans' line per case to out.txt."""
    # Files are now closed deterministically via context managers (the
    # original leaked them until interpreter exit on any exception).
    with open('A-large.in', 'r') as infile, open('out.txt', 'w') as outfile:
        cases = int(infile.readline())
        for i in range(1, cases + 1):
            s_max_str, audience = infile.readline().split()
            outfile.write("Case #%d: %d\n" % (i, min_invites(int(s_max_str), audience)))


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
968ffe2bd211dc6988e1de691ba4a0c09270e96c
|
5df4d172df0bc6b6c8e021e44a0cfa50a6b01251
|
/src/sanic/run_websocket_server.py
|
bd4e50f51c4efe2153f03012dfb1290d89996a43
|
[
"MIT"
] |
permissive
|
kingking888/MocaBliveAPI
|
225ba33663f41c08ac358e5b138c57e26381d8f0
|
205bf4eec2becd0bf5a5a64f5d98718a73f51543
|
refs/heads/master
| 2022-10-18T08:59:53.301603 | 2020-06-08T10:59:08 | 2020-06-08T10:59:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,401 |
py
|
# Ω*
# ■ ■■■■■
# ■ ■■ ■■
# ■ ■■ ■
# ■ ■■
# ■■■■■ ■ ■■■
# ■■ ■■ ■ ■■■
# ■■ ■■ ■ ■■■■
# ■■ ■■ ■ ■■■■
# ■■■■■■■■■ ■ ■■■
# ■■ ■ ■■
# ■■ ■ ■■
# ■■ ■ ■ ■■ ■■
# ■■ ■■ ■ ■■■ ■■■ ■■
# ■■■■■ ■ ■■■ ■■■■■
"""
Copyright (c) 2020.1.17 [el.ideal-ideas]
This software is released under the MIT License.
see LICENSE.txt or following URL.
https://www.el-ideal-ideas.com/MocaLog/LICENSE/
"""
# -- Imports --------------------------------------------------------------------------
from .. import core
from typing import Optional
from ssl import SSLContext
from sanic import Sanic
from socket import AF_INET6, SOCK_STREAM, socket
from sanic.websocket import WebSocketProtocol
# -------------------------------------------------------------------------- Imports --
# -- Run --------------------------------------------------------------------------
def run_websocket_server(app: Sanic,
                         ssl: Optional[SSLContext],
                         host: str,
                         port: int,
                         access_log: bool = False,
                         debug: bool = False,
                         use_ipv6: bool = False,
                         workers: int = 1) -> None:
    """Run the Sanic app as a websocket server.

    :param app: the Sanic application to serve.
    :param ssl: SSL context, or None for plain ws://.
    :param host: bind address (an IPv6 address when ``use_ipv6`` is True).
    :param port: bind port.
    :param access_log: enable Sanic's access log.
    :param debug: enable Sanic debug mode.
    :param use_ipv6: bind an AF_INET6 socket manually and hand it to Sanic.
    :param workers: number of Sanic worker processes.

    Errors are reported via ``core.print_warning`` instead of propagating.
    """
    # BUG FIX: previously `sock` was only bound inside the ipv6 branch, so if
    # socket() itself raised, the `finally` block hit an UnboundLocalError
    # that masked the real exception. Initialize it and guard the close.
    sock = None
    try:
        if use_ipv6:
            sock = socket(AF_INET6, SOCK_STREAM)
            sock.bind((host, port))
            app.run(sock=sock,
                    access_log=access_log,
                    ssl=ssl,
                    debug=debug,
                    workers=workers,
                    protocol=WebSocketProtocol)
        else:
            app.run(host=host,
                    port=port,
                    access_log=access_log,
                    ssl=ssl,
                    debug=debug,
                    workers=workers,
                    protocol=WebSocketProtocol)
    except OSError as os_error:
        core.print_warning(f'Sanic Websocket Server stopped. Please check your port is usable. <OSError: {os_error}>')
    except Exception as other_error:
        core.print_warning(f'Sanic Websocket Server stopped, unknown error occurred. <Exception: {other_error}>')
    finally:
        if sock is not None:
            sock.close()
# -------------------------------------------------------------------------- Run --
|
[
"[email protected]"
] | |
4cc4a9f6af19cb45b334d02cac35add8080a7c70
|
39bae52d75b501f6db49b05480d5bb185c2370be
|
/my_work/btv/btv舆情早前版本/btv舆情_非精简版/btv_舆情/read_WEIBO_USER.py
|
dc1105b64aa42fb718e430a61e8f295129d1d165
|
[] |
no_license
|
callmeivy/Pycharm_Project
|
65c53de17a3902c565e13c8b69d134daf97ba6f8
|
ec1637683ee1f9d6d3f3533d9de9e913eb779898
|
refs/heads/master
| 2021-08-23T07:11:11.542918 | 2017-12-04T02:27:44 | 2017-12-04T02:27:44 | 112,572,594 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,220 |
py
|
#coding=utf-8
import sys,os
import MySQLdb
import time
from collections import Counter
reload(sys)
sys.setdefaultencoding('utf8')
import datetime
import requests
import json
import base64
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
import requests
def issuccessful(request):
    """Return True when *request* carries a 2xx (success) HTTP status code."""
    return 200 <= request.status_code <= 299
def mentioned_trend(baseurl,mysqlhostIP, mysqlUserName = 'root', mysqlPassword = '', dbname = 'btv_v2'):
    """Scan the HBase DATA:WEIBO_USER table (via its REST gateway at *baseurl*)
    and print Weibo screen names; the MySQL insert logic is commented out.

    NOTE(review): Python 2 code; credentials/hosts are passed in or hard-coded
    by the caller -- consider moving them to configuration.
    """
    list_key_words = list()
    # Accumulates comment data (currently unused -- insert code is commented out).
    # Connect to the MySQL database.
    # NOTE(review): this prints a base64-obfuscated copyright banner -- verify
    # it is intentional.
    print(base64.b64decode(b'Q29weXJpZ2h0IChjKSAyMDEyIERvdWN1YmUgSW5jLiBBbGwgcmlnaHRzIHJlc2VydmVkLg==').decode())
    sqlConn=MySQLdb.connect(host=mysqlhostIP, user=mysqlUserName, passwd=mysqlPassword, db = dbname, charset='utf8')
    sqlcursor = sqlConn.cursor()
    sqlcursor.execute('''CREATE TABLE IF NOT EXISTS key_kk_buzz(pk bigint NOT NULL PRIMARY KEY AUTO_INCREMENT, keywords varchar(50)) DEFAULT CHARSET=utf8;''')
    print '新建库成功'
    # Obtain a Kerberos ticket before hitting the HBase REST endpoint.
    os.popen('kinit -k -t /home/ctvit/ctvit.keytab ctvit')
    kerberos_auth = HTTPKerberosAuth(mutual_authentication=OPTIONAL)
    tablename = "DATA:WEIBO_USER"
    r = requests.get(baseurl + "/" + tablename + "/*", auth=kerberos_auth, headers = {"Accept" : "application/json"})
    if issuccessful(r) == False:
        print "Could not get messages from HBase. Text was:\n" + r.text
        # quit()
    # HBase REST JSON: {'Row': [{'Cell': [{'column': b64, '$': b64}, ...]}]}
    bleats = json.loads(r.text)
    box = list()
    for row in bleats['Row']:
        # print 000
        # count+=1
        message = ''
        lineNumber = ''
        username = ''
        for cell in row['Cell']:
            # Column names and values are base64-encoded by the REST gateway.
            columnname = base64.b64decode(cell['column'])
            value = cell['$']
            if value == None:
                print 'none'
                continue
            if columnname == "base_info:screen_name":
                key_word = base64.b64decode(value)
                print 'll', key_word
                if key_word == '罗旭':
                    print 'ok'
            # if ("北京卫视春晚" not in key_word) and ("北京台的春晚" not in key_word) and ("BTV春晚" not in key_word) and ("BTV春晚" not in key_word) and ("bTV春晚" not in key_word):
            #     break
            # if columnname == "base_info:cdate":
            #     cdate = base64.b64decode(value)
            #     cdate = cdate.split('T')[0]
            #     print 'date',cdate
            #     if cdate not in box:
            #         box.append(cdate)
    # for i in box:
    #     print i
    # print 'ppp',type(key_word)
    # print '11',key_word
    # if key_word not in list_key_words:
    #     list_key_words.append(key_word)
    #
    # tempData = []
    # for i in list_key_words:
    #     print 'key',i
    #     tempData.append(str(i))
    #     sqlcursor.execute('''insert into key_kk_buzz(keywords) values (%s)''',tempData)
    #     sqlConn.commit()
    #     tempData = []
    sqlConn.close()
    # print "key_words", i
if __name__=='__main__':
    # NOTE(review): service endpoints are hard-coded; move to configuration.
    commentTest = mentioned_trend(baseurl = "http://172.28.12.34:8080", mysqlhostIP = '172.28.34.16', dbname = 'btv_v2')
|
[
"[email protected]"
] | |
179ef80ca6844d3e1067e15cdbb9ab8d66bc4b24
|
fdc6b0e9d2bbb6ceec718e58ef2de531a621c16a
|
/djangoangular/settings.py
|
f244f12d81123ce692c36ddcbc97ee440d80538e
|
[] |
no_license
|
manulangat1/djangoangular
|
2d776c31cc7fec5f7b6fd3a79cbc0f3d65c50dfb
|
02600e5d71aeea302f44cf25a4efd32c50d16d4b
|
refs/heads/master
| 2020-04-23T01:31:07.751799 | 2019-02-15T04:45:53 | 2019-02-15T04:45:53 | 170,815,095 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,205 |
py
|
"""
Django settings for djangoangular project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control -- rotate it and
# load it from the environment before deploying.
SECRET_KEY = 'd$9f=p*6lt869=79sfd9i7=ymq7p3ljt396pwcn@!1xm0&#fip'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'manu11.apps.Manu11Config',
    'rest_framework'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangoangular.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangoangular.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): the database password is hard-coded in source -- load it from
# the environment or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'feb11',
        'USER': 'manulangat',
        'PASSWORD':'3050manu'
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
52cd9f391400b90fae34f5b8dd6fd2c5e3a667c6
|
5c7fb0e2f3bc498351ba4c57247ec1637da57e21
|
/python/mpi/enum.py
|
f4b091573d29406aba69119a5c793a4e9555f7b0
|
[] |
no_license
|
shixing/myLib
|
35180e479f3701c07894f829b8f495594d23a225
|
d4557fe0f07543ba588a7464b6efafebac1284a5
|
refs/heads/master
| 2020-05-19T13:34:34.022281 | 2017-04-10T01:08:06 | 2017-04-10T01:08:06 | 19,991,244 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 349 |
py
|
# copied from mpi4py-examples/09-task-pull.py
def enum(*sequential, **named):
    """
    Handy way to fake an enumerated type in Python: positional names get
    auto-numbered values (0, 1, ...), keyword arguments set explicit values.
    """
    members = {label: index for index, label in enumerate(sequential)}
    members.update(named)
    return type('Enum', (), members)


tags = enum('READY', 'DONE', 'EXIT', 'START')

tags.READY  # == 0
tags.READY == tags.DONE  # == False
|
[
"[email protected]"
] | |
4188530b0c1b69052fa56920b558dc66dab2cbe9
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/b7db18784e744fa39ebc6a5d607b0289.py
|
9405afec6249a56d672a92e52e9a8340f2f34a94
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 327 |
py
|
def hey(content):
    """
    Answer *content* the way lackadaisical teenager Bob would
    (exercises run from bob_test.py).
    """
    text = content.strip()
    if text.isupper():
        return "Whoa, chill out!"
    if text.endswith("?"):
        return "Sure."
    if not text:
        return "Fine. Be that way!"
    return "Whatever."
|
[
"[email protected]"
] | |
1d46b3c0287a5603be16485830b47c7a717b1e70
|
cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde
|
/P/PathSum.py
|
f4df2af0815725df3b70e8818ba0d2c397a4b0b4
|
[] |
no_license
|
bssrdf/pyleet
|
8861bbac06dfe0f0f06f6ad1010d99f8def19b27
|
810575368ecffa97677bdb51744d1f716140bbb1
|
refs/heads/master
| 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,712 |
py
|
'''
-Easy-
Given the root of a binary tree and an integer targetSum, return true if the
tree has a root-to-leaf path such that adding up all the values along the path
equals targetSum.
A leaf is a node with no children.
Example 1:
Input: root = [5,4,8,11,null,13,4,7,2,null,null,null,1], targetSum = 22
Output: true
Example 2:
Input: root = [1,2,3], targetSum = 5
Output: false
Example 3:
Input: root = [1,2], targetSum = 0
Output: false
Constraints:
The number of nodes in the tree is in the range [0, 5000].
-1000 <= Node.val <= 1000
-1000 <= targetSum <= 1000
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from BinaryTree import (TreeNode, null, constructBinaryTree)
class Solution(object):
    def hasPathSum(self, root, targetSum):
        """
        Return True if some root-to-leaf path sums to targetSum.
        :type root: TreeNode
        :type targetSum: int
        :rtype: bool
        """
        if root is None:
            return False
        remaining = targetSum - root.val
        if root.left is None and root.right is None:
            # Leaf: the path is complete exactly when nothing remains.
            return remaining == 0
        return (self.hasPathSum(root.left, remaining)
                or self.hasPathSum(root.right, remaining))
if __name__ == "__main__":
    # Smoke tests using the project's BinaryTree helpers (`null` is its
    # missing-node marker). Expected output: True, False, False.
    root = constructBinaryTree([5,4,8,11,null,13,4,7,2,null,null,null,1])
    print(Solution().hasPathSum(root, 22))
    root = constructBinaryTree([1, 2, 3])
    print(Solution().hasPathSum(root, 5))
    root = constructBinaryTree([1, 2, null])
    print(Solution().hasPathSum(root, 1))
|
[
"[email protected]"
] | |
2d2c15b57889aca91355673475220e7fce3cd17b
|
e20d947696ffb2422c5856ca1067c9a068705a82
|
/day011/hw_004_元组.py
|
190b4ecb34df475476aae193895ba6f42c218bd8
|
[] |
no_license
|
lxgzhw520/ZhangBaofu
|
27aed80446c687890e17c35a2bc5c93a5b75462e
|
401bae4b2d8e1a284d281cc29b5ed31a4e2039a4
|
refs/heads/master
| 2020-05-07T16:14:06.627708 | 2019-04-21T06:20:26 | 2019-04-21T06:20:26 | 180,673,589 | 4 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,116 |
py
|
# _*_ coding:UTF-8 _*_
# Developer: Zhang Dapeng
# Team: Lixiangguo
# Created: 2019-04-12 08:44
# File: hw_004_元组.py
# IDE: PyCharm

# Tuples are similar to lists, but a tuple is defined with () and its
# values cannot be modified.

t = ()  # define an empty tuple
print(type(t))
t1 = (1,)  # define a single-element tuple
print(type(t1))  # type() prints the type of a value
t2 = (1)  # a single element needs the trailing comma, otherwise it is not a tuple
print(type(t2))

# A "tuple comprehension" written this way does NOT work --
# it produces a generator, not a tuple.
t = (i for i in range(10) if i % 2 == 0)
print(t)
print(type(t))

# To build a tuple quickly, build a list first and convert it.
# Note the filter condition in the comprehension:
# format: [element, element source, element filter condition]
t = tuple([i for i in range(10) if i % 2 == 0])
print(t)
print(type(t))

# Methods that work on lists generally also work on tuples.
print('--' * 22)
# length
print(len(t))
# count occurrences
print(t.count(2))
# index of a value
print(t.index(2))
# sorting is not available (tuples are immutable)
# t.sort()
# reversing in place is not available either
# t.reverse()
# element access uses indexing
print(t[1])
|
[
"[email protected]"
] | |
adda1694dfde2bc1d6bbc99f30c38673d8050ccf
|
bb150497a05203a718fb3630941231be9e3b6a32
|
/framework/api/paddlebase/test_fill_diagonal_.py
|
93120e73f50302acf34f960d7acf4c341ceb6a23
|
[] |
no_license
|
PaddlePaddle/PaddleTest
|
4fb3dec677f0f13f7f1003fd30df748bf0b5940d
|
bd3790ce72a2a26611b5eda3901651b5a809348f
|
refs/heads/develop
| 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 |
Python
|
UTF-8
|
Python
| false | false | 4,229 |
py
|
#!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test_fill_diagonal_
"""
import pytest
import numpy as np
import paddle
import paddle.device as device
# global params
# dtypes exercised by every test below
types = [np.float32, np.float64, np.int32, np.int64]
# NOTE(review): `is True` is redundant -- a plain truth test would suffice.
if device.is_compiled_with_cuda() is True:
    places = [paddle.CPUPlace(), paddle.CUDAPlace(0)]
else:
    # default
    places = [paddle.CPUPlace()]
def fill_diagonal_base(x, value, offset=0, warp=False):
    """
    Run paddle.Tensor.fill_diagonal_ for every (place, dtype) combination.

    Returns (outputs, gradients), one entry per place/dtype pair.
    NOTE(review): the keyword is spelled `warp` but Paddle's parameter is
    `wrap`; it is passed positionally, so behavior is unaffected.
    """
    outputs, gradients = [], []
    for place in places:
        for t in types:
            paddle.disable_static(place)
            y = x.astype(t)
            y = paddle.to_tensor(y)
            y.stop_gradient = False
            # `y * 2` makes y a computed (non-leaf) tensor; retain_grads()
            # keeps its gradient available after backward().
            y = y * 2
            y.retain_grads()
            out = paddle.Tensor.fill_diagonal_(y, value, offset, warp)
            outputs.append(out.numpy())
            loss = paddle.sum(out)
            loss.backward()
            gradients.append(y.grad.numpy())
    return outputs, gradients
@pytest.mark.api_base_fill_diagonal_vartype
def test_fill_diagonal_base():
    """
    base: filling the main diagonal of a 3x3 zeros matrix with 1; the
    gradient is 0 on filled cells and 1 elsewhere. Checked for every dtype.
    """
    x = np.zeros((3, 3))
    out, grad = fill_diagonal_base(x, 1)
    res_out = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    res_grad = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0]])
    length = len(out)
    for i in range(length):
        assert np.allclose(out[i], res_out)
        assert np.allclose(grad[i], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_0():
    """
    default: wrap = False -- on a tall (5x3) matrix only the top square
    block's diagonal is filled.
    """
    x = np.zeros((5, 3))
    out, grad = fill_diagonal_base(x, 1)
    res_out = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    res_grad = np.array([[0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
    assert np.allclose(out[0], res_out)
    assert np.allclose(grad[0], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_1():
    """
    offset = 1 (first super-diagonal), value = 4.
    """
    x = np.zeros((3, 3))
    out, grad = fill_diagonal_base(x, 4, offset=1)
    res_out = np.array([[0.0, 4.0, 0.0], [0.0, 0.0, 4.0], [0.0, 0.0, 0.0]])
    res_grad = np.array([[1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]])
    assert np.allclose(out[0], res_out)
    assert np.allclose(grad[0], res_grad)
# @pytest.mark.api_base_fill_diagonal_parameters
# def test_fill_diagonal_2():
# """
# offset = -1
# value = -4
# """
# x = np.zeros((3, 3))
# out, grad = fill_diagonal_base(x, -4, offset=-1)
# res_out = np.array([[0., 0., 0.],
# [-4., 0., 0.],
# [0., -4., 0.]])
# res_grad = np.array([[1., 1., 1.],
# [0., 1., 1.],
# [1., 0., 1.]])
#
# assert np.allclose(out[0], res_out)
# assert np.allclose(grad[0], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_3():
    """
    wrap = True -- on a tall (7x3) matrix the diagonal restarts after each
    full (N+1)-row block.
    """
    x = np.zeros((7, 3))
    out, grad = fill_diagonal_base(x, 4, warp=True)
    res_out = np.array(
        [
            [4.0, 0.0, 0.0],
            [0.0, 4.0, 0.0],
            [0.0, 0.0, 4.0],
            [0.0, 0.0, 0.0],
            [4.0, 0.0, 0.0],
            [0.0, 4.0, 0.0],
            [0.0, 0.0, 4.0],
        ]
    )
    res_grad = np.array(
        [
            [0.0, 1.0, 1.0],
            [1.0, 0.0, 1.0],
            [1.0, 1.0, 0.0],
            [1.0, 1.0, 1.0],
            [0.0, 1.0, 1.0],
            [1.0, 0.0, 1.0],
            [1.0, 1.0, 0.0],
        ]
    )
    assert np.allclose(out[0], res_out)
    assert np.allclose(grad[0], res_grad)
@pytest.mark.api_base_fill_diagonal_parameters
def test_fill_diagonal_4():
    """
    default: Multidimensional input --
    all dimensions of input must be of equal length.
    """
    x = np.zeros((2, 2, 2))
    out, grad = fill_diagonal_base(x, 1)
    res_out = np.array([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0]]])
    res_grad = np.array([[[0.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 0.0]]])
    assert np.allclose(out[0], res_out)
    assert np.allclose(grad[0], res_grad)
|
[
"[email protected]"
] | |
a1ba025fe318dd09aeeb95e54f2a655205cf044f
|
0466559817d3a1be9409da2c83db99c4db3bacfe
|
/hubcheck/pageobjects/widgets/members_profile_citizenship.py
|
ec9f66eaa2fd00b4a7f93c8527708680e75ad588
|
[
"MIT"
] |
permissive
|
ken2190/hubcheck
|
955cf9b75a1ee77e28256dfd3a780cfbc17de961
|
2ff506eb56ba00f035300862f8848e4168452a17
|
refs/heads/master
| 2023-03-20T15:17:12.949715 | 2015-09-29T16:11:18 | 2015-09-29T16:11:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,551 |
py
|
from hubcheck.pageobjects.basepageelement import Radio
from hubcheck.pageobjects.basepageelement import Select
from hubcheck.pageobjects.widgets.members_profile_element import MembersProfileElement
class MembersProfileCitizenship(MembersProfileElement):
    """Page object for the members-profile 'country of origin / citizenship'
    section: a yes/no US-citizen radio, a country select, and an access select.
    """

    def __init__(self, owner, locatordict={}):
        # NOTE(review): mutable default argument -- the same dict object is
        # shared by every call that omits locatordict; harmless only while
        # nothing mutates it.
        super(MembersProfileCitizenship,self).__init__(owner,locatordict)

        # load hub's classes
        MembersProfileCitizenship_Locators = self.load_class('MembersProfileCitizenship_Locators')

        # update this object's locator
        self.locators.update(MembersProfileCitizenship_Locators.locators)

        # update the locators with those from the owner
        self.update_locators_from_owner()

        # setup page object's components
        self.coriginus = Radio(self,{'Yes':'coriginus_yes','No':'coriginus_no'})
        self.corigin = Select(self,{'base':'corigin'})
        self.access = Select(self,{'base':'access'})

        # update the component's locators with this objects overrides
        self._updateLocators()

    def value(self):
        """return a dictionary with the values of coriginus, corigin, and access"""
        return {'coriginus' : self.coriginus.value(),
                'corigin' : self.corigin.value(),
                'access' : self.access.value()}

    def update(self,coriginus=None,corigin=None,access=None):
        """update the values of coriginus, corigin, and access"""
        # Only fields explicitly provided are written; `self.save` is
        # presumably created by the MembersProfileElement base -- TODO confirm.
        if coriginus != None:
            self.coriginus.value = coriginus
        if corigin != None:
            self.corigin.value = corigin
        if access != None:
            self.access.value = access
        self.save.click()
class MembersProfileCitizenship_Locators_Base(object):
    """CSS locator table for the MembersProfileCitizenship page object."""

    locators = {
        'base': "css=.profile-countryorigin",
        'coriginus_yes': "css=#corigin_usyes",
        'coriginus_no': "css=#corigin_usno",
        'corigin': "css=#corigin",
        'access': "css=.profile-countryorigin select[name='access[countryorigin]']",
        'sectionkey': "css=.profile-countryorigin .key",
        'sectionvalue': "css=.profile-countryorigin .value",
        'open': "css=.profile-countryorigin .edit-profile-section",
        'close': "css=.profile-countryorigin .edit-profile-section",
        'save': "css=.profile-countryorigin .section-edit-submit",
        'cancel': "css=.profile-countryorigin .section-edit-cancel",
    }
|
[
"[email protected]"
] | |
8d54b53bc1d6286ed1f8111eb5d4bbe6b2fe7523
|
3e0f16d967952cb30eb4375457f603164f44e906
|
/1장. 파이썬 데이터 모델/1.3 특별 메서드 개요.py
|
74906a0abfa2df4836a1bd83d527c64bff0ce1af
|
[] |
no_license
|
moon0331/FluentPython
|
6d8491d41a9cbdf5b2e528ebd4261693d11c9443
|
f307abc00b15fd747dff04e8ccbccf45da0897a9
|
refs/heads/master
| 2022-11-27T16:10:10.367276 | 2020-08-08T12:49:52 | 2020-08-08T12:49:52 | 286,024,890 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 98 |
py
|
# 데이터 모델
# 특별 메서드 이름
#https://docs.python.org/3/reference/datamodel.html
|
[
"[email protected]"
] | |
c5547a73de98204b49faad630d70600c0e27b32d
|
fd3c3ab6482c91e2ac6e497f89ed525eb93a7047
|
/tests/test_stimulus.py
|
6210872d3c6bd772e8d86955b420021640b855fd
|
[
"MIT"
] |
permissive
|
alex-vrv/pyABF
|
5c4d71f078f4b9bf6e95a1b58d10d9ca6510fed6
|
9ec95539f0130c307e6fa9b6edc980178b9cb6f7
|
refs/heads/master
| 2023-04-08T13:33:02.571622 | 2021-04-17T17:07:11 | 2021-04-17T17:07:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,427 |
py
|
"""
Tests related to locating and reading command waveforms from stimulus waveform
files. If the stimulus waveforms aren't found you can provide a search path
as an argument when instantiating pyabf.ABF()
"""
import sys
import pytest
import os
import numpy as np
import time
import warnings
try:
# this ensures pyABF is imported from this specific path
sys.path.insert(0, "src")
import pyabf
except:
raise ImportError("couldn't import local pyABF")
ABF_PATH = os.path.abspath("data/abfs/H19_29_150_11_21_01_0011.abf")
STIM_FOLDER = os.path.abspath("data/stimulusFiles")
def test_findStimulusFile_NansIfNotFound():
    """When the stimulus file isn't found the waveform should be all NANs."""
    # Silence the "stimulus file not found" warning pyABF emits in this case.
    warnings.simplefilter("ignore")
    abf = pyabf.ABF(ABF_PATH)
    stimulus = abf.stimulusByChannel[0]
    waveform = stimulus.stimulusWaveform(stimulusSweep=0)
    assert isinstance(waveform, np.ndarray)
    assert len(waveform) == len(abf.sweepY)
    assert np.isnan(waveform).all()
def test_findStimulusFile_foundIfPathGiven():
    """The user can tell pyABF where to look for stimulus files."""
    abf = pyabf.ABF(ABF_PATH, stimulusFileFolder=STIM_FOLDER)
    stimulus = abf.stimulusByChannel[0]
    waveform = stimulus.stimulusWaveform(stimulusSweep=0)
    assert isinstance(waveform, np.ndarray)
    assert not np.isnan(waveform).any()
    # BUG FIX: `assert pytest.approx(a, b)` is always truthy and never
    # compared anything; approx() must sit on one side of a comparison.
    assert waveform[100000] == pytest.approx(76.261, abs=1e-3)
def cachedStimulusSpeedBoost(useCaching):
    """Open an ABF/stimulus twice and return first-open/second-open time ratio.

    The first open is always uncached; the second uses *useCaching*, so the
    ratio measures the speedup the cache provides.
    """
    times = [None, None]
    useCaching = [False, useCaching]
    for i in range(2):
        t1 = time.perf_counter()
        abf = pyabf.ABF(
            ABF_PATH,
            stimulusFileFolder=STIM_FOLDER,
            cacheStimulusFiles=useCaching[i]
        )
        stimulus = abf.stimulusByChannel[0]
        waveform = stimulus.stimulusWaveform(stimulusSweep=0)
        # BUG FIX: `assert pytest.approx(a, b)` is always truthy and never
        # compared anything; approx() must sit on one side of a comparison.
        assert waveform[100000] == pytest.approx(76.261, abs=1e-3)
        times[i] = time.perf_counter() - t1
    speedBoost = times[0]/times[1]
    print(f"Caching: {useCaching[1]}, speed boost: {speedBoost}x")
    return speedBoost
def test_stimulus_caching():
# first try without caching
assert (cachedStimulusSpeedBoost(False) < 2)
# now use caching for a >10x speed boost
assert (cachedStimulusSpeedBoost(True) > 10)
# confirm not using caching is still slow
assert (cachedStimulusSpeedBoost(False) < 2)
|
[
"[email protected]"
] | |
be1bb4ed6662e9ff830d39528774f26d5040e745
|
94f304cb4c2ac2ad6ff1ee39725f46254c8838bc
|
/core/draw/Ui_draw_point.py
|
7ec948d966ec3c42e306b90ab1b088857653a980
|
[] |
no_license
|
kmolLin/python3_solve_dynamic
|
105bd70edaa5014e0ad76a9a3c66e43dc0fa5ad7
|
18f56e6958dd1816dfb7c26f4857aa3b41de9312
|
refs/heads/master
| 2021-06-03T10:19:44.551240 | 2016-09-23T13:22:52 | 2016-09-23T13:22:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,397 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/ahshoe/Desktop/Pyslvs/core/draw/draw_point.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(377, 219)
Dialog.setMinimumSize(QtCore.QSize(377, 219))
Dialog.setMaximumSize(QtCore.QSize(377, 219))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/point.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Dialog.setWindowIcon(icon)
Dialog.setSizeGripEnabled(False)
Dialog.setModal(True)
self.horizontalLayout = QtWidgets.QHBoxLayout(Dialog)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setTextFormat(QtCore.Qt.RichText)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.label_4 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setTextFormat(QtCore.Qt.RichText)
self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_4.setObjectName("label_4")
self.verticalLayout.addWidget(self.label_4)
self.Point_num = QtWidgets.QTextBrowser(Dialog)
self.Point_num.setMaximumSize(QtCore.QSize(16777215, 30))
self.Point_num.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.Point_num.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.Point_num.setObjectName("Point_num")
self.verticalLayout.addWidget(self.Point_num)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 1, 1, 1)
self.X_coordinate = QtWidgets.QLineEdit(Dialog)
self.X_coordinate.setInputMethodHints(QtCore.Qt.ImhLowercaseOnly)
self.X_coordinate.setText("")
self.X_coordinate.setObjectName("X_coordinate")
self.gridLayout.addWidget(self.X_coordinate, 1, 0, 1, 1)
self.Y_coordinate = QtWidgets.QLineEdit(Dialog)
self.Y_coordinate.setText("")
self.Y_coordinate.setEchoMode(QtWidgets.QLineEdit.Normal)
self.Y_coordinate.setClearButtonEnabled(False)
self.Y_coordinate.setObjectName("Y_coordinate")
self.gridLayout.addWidget(self.Y_coordinate, 1, 1, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Vertical)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout_2.addWidget(self.buttonBox)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.Fix_Point = QtWidgets.QCheckBox(Dialog)
self.Fix_Point.setObjectName("Fix_Point")
self.verticalLayout_2.addWidget(self.Fix_Point)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "New Point"))
self.label_3.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-size:12pt;\">Setting Coordinates for the New Point.</span></p></body></html>"))
self.label_4.setText(_translate("Dialog", "<html><head/><body><p>Point Number</p></body></html>"))
self.Point_num.setWhatsThis(_translate("Dialog", "Name for next point."))
self.label.setText(_translate("Dialog", "x coordinate"))
self.label_2.setText(_translate("Dialog", "y coordinate"))
self.X_coordinate.setPlaceholderText(_translate("Dialog", "0.0"))
self.Y_coordinate.setPlaceholderText(_translate("Dialog", "0.0"))
self.Fix_Point.setText(_translate("Dialog", "&Fixed"))
import icons_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
3e3528b72cecdd0356d6df40b19ea4c3497bb400
|
914ca4921c114c917267214e0987ebecf30b3510
|
/Programming_Practice/Python/Base/Bigdata_day1010/LIST08.py
|
aab3742c91537c9b07e87bb9007499fe748b1f30
|
[] |
no_license
|
BurnFaithful/KW
|
52535030ea57f1489a0d108d599b66ffee50a1f4
|
15deb50449b8f902f623f20b97448c0f473a9342
|
refs/heads/master
| 2022-12-20T16:06:01.827398 | 2020-09-12T08:51:23 | 2020-09-12T08:51:23 | 294,897,186 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 731 |
py
|
# 리스트 조작 함수 그 외
mylist = [30, 10, 20]
print("현재 리스트 :", mylist)
mylist.append(40)
print("append(40) 후 :", mylist)
res = mylist.pop()
print("pop()으로 추출한 값 :", res)
print("현재 리스트 :", mylist)
mylist.sort()
print("sort() 후 :", mylist)
mylist.reverse()
print("reverse() 후 :", mylist)
val = mylist.index(20)
print("index(20) :", val)
mylist.insert(2, 222)
print("insert(2, 222) 후 :", mylist)
mylist.remove(222)
print("remove(222) 후 :", mylist)
mylist.extend([77, 88, 99])
print("extend([77, 88, 99]) 후 :", mylist)
cnt = mylist.count(77)
print("현재 리스트의 77 개수 :", cnt)
ary = [1, 3, 5, 6, 7, 4, 10, 66, 9, 99]
temp = sorted(ary)
print(ary)
print(temp)
|
[
"[email protected]"
] | |
45098d49bfd0204d1df0e2b2fdf4155f7bc7261a
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/Leetcode/Algorithm/python/1000/00944-Delete Columns to Make Sorted.py
|
01c17945b8dcf7591fc445ab750d72a8edfe98a0
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null |
UTF-8
|
Python
| false | false | 314 |
py
|
class Solution(object):
def minDeletionSize(self, A):
n = len(A[0])
st = []
for i in xrange(n):
st.append(''.join(map(lambda x: x[i], A)))
res = 0
for s in st:
if s != ''.join(sorted(s)):
res += 1
return res
|
[
"[email protected]"
] | |
4f2ab5a122e4427a2b0bc316047d8a326960d0b6
|
cf7e071e387be8713ba9cb66fb9e02d65f24c13e
|
/tests/adapters/test_redirects.py
|
94e5a745db77de995f8af1df62a917b382a9cbae
|
[
"BSD-3-Clause"
] |
permissive
|
yunstanford/httpcore
|
5169b74a3d266e71b4fdfe9fdf2fac76ea97321d
|
84b9691f3ff3184a9627eee4d088982bbb295f23
|
refs/heads/master
| 2020-05-21T15:32:27.287463 | 2019-05-09T09:57:23 | 2019-05-09T09:57:23 | 186,093,284 | 0 | 0 | null | 2019-05-11T05:47:55 | 2019-05-11T05:47:55 | null |
UTF-8
|
Python
| false | false | 8,820 |
py
|
import json
from urllib.parse import parse_qs
import pytest
from httpcore import (
URL,
Adapter,
RedirectAdapter,
RedirectBodyUnavailable,
RedirectLoop,
Request,
Response,
TooManyRedirects,
codes,
)
class MockDispatch(Adapter):
def prepare_request(self, request: Request) -> None:
pass
async def send(self, request: Request, **options) -> Response:
if request.url.path == "/redirect_301":
status_code = codes.moved_permanently
headers = {"location": "https://example.org/"}
return Response(status_code, headers=headers, request=request)
elif request.url.path == "/redirect_302":
status_code = codes.found
headers = {"location": "https://example.org/"}
return Response(status_code, headers=headers, request=request)
elif request.url.path == "/redirect_303":
status_code = codes.see_other
headers = {"location": "https://example.org/"}
return Response(status_code, headers=headers, request=request)
elif request.url.path == "/relative_redirect":
headers = {"location": "/"}
return Response(codes.see_other, headers=headers, request=request)
elif request.url.path == "/no_scheme_redirect":
headers = {"location": "//example.org/"}
return Response(codes.see_other, headers=headers, request=request)
elif request.url.path == "/multiple_redirects":
params = parse_qs(request.url.query)
count = int(params.get("count", "0")[0])
redirect_count = count - 1
code = codes.see_other if count else codes.ok
location = "/multiple_redirects"
if redirect_count:
location += "?count=" + str(redirect_count)
headers = {"location": location} if count else {}
return Response(code, headers=headers, request=request)
if request.url.path == "/redirect_loop":
headers = {"location": "/redirect_loop"}
return Response(codes.see_other, headers=headers, request=request)
elif request.url.path == "/cross_domain":
headers = {"location": "https://example.org/cross_domain_target"}
return Response(codes.see_other, headers=headers, request=request)
elif request.url.path == "/cross_domain_target":
headers = dict(request.headers.items())
content = json.dumps({"headers": headers}).encode()
return Response(codes.ok, content=content, request=request)
elif request.url.path == "/redirect_body":
await request.read()
headers = {"location": "/redirect_body_target"}
return Response(codes.permanent_redirect, headers=headers, request=request)
elif request.url.path == "/redirect_body_target":
content = await request.read()
body = json.dumps({"body": content.decode()}).encode()
return Response(codes.ok, content=body, request=request)
return Response(codes.ok, content=b"Hello, world!", request=request)
@pytest.mark.asyncio
async def test_redirect_301():
client = RedirectAdapter(MockDispatch())
response = await client.request("POST", "https://example.org/redirect_301")
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_redirect_302():
client = RedirectAdapter(MockDispatch())
response = await client.request("POST", "https://example.org/redirect_302")
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_redirect_303():
client = RedirectAdapter(MockDispatch())
response = await client.request("GET", "https://example.org/redirect_303")
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_disallow_redirects():
client = RedirectAdapter(MockDispatch())
response = await client.request(
"POST", "https://example.org/redirect_303", allow_redirects=False
)
assert response.status_code == codes.see_other
assert response.url == URL("https://example.org/redirect_303")
assert len(response.history) == 0
response = await response.next()
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_relative_redirect():
client = RedirectAdapter(MockDispatch())
response = await client.request("GET", "https://example.org/relative_redirect")
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_no_scheme_redirect():
client = RedirectAdapter(MockDispatch())
response = await client.request("GET", "https://example.org/no_scheme_redirect")
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_fragment_redirect():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/relative_redirect#fragment"
response = await client.request("GET", url)
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/#fragment")
assert len(response.history) == 1
@pytest.mark.asyncio
async def test_multiple_redirects():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/multiple_redirects?count=20"
response = await client.request("GET", url)
assert response.status_code == codes.ok
assert response.url == URL("https://example.org/multiple_redirects")
assert len(response.history) == 20
@pytest.mark.asyncio
async def test_too_many_redirects():
client = RedirectAdapter(MockDispatch())
with pytest.raises(TooManyRedirects):
await client.request("GET", "https://example.org/multiple_redirects?count=21")
@pytest.mark.asyncio
async def test_too_many_redirects_calling_next():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/multiple_redirects?count=21"
response = await client.request("GET", url, allow_redirects=False)
with pytest.raises(TooManyRedirects):
while response.is_redirect:
response = await response.next()
@pytest.mark.asyncio
async def test_redirect_loop():
client = RedirectAdapter(MockDispatch())
with pytest.raises(RedirectLoop):
await client.request("GET", "https://example.org/redirect_loop")
@pytest.mark.asyncio
async def test_redirect_loop_calling_next():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/redirect_loop"
response = await client.request("GET", url, allow_redirects=False)
with pytest.raises(RedirectLoop):
while response.is_redirect:
response = await response.next()
@pytest.mark.asyncio
async def test_cross_domain_redirect():
client = RedirectAdapter(MockDispatch())
url = "https://example.com/cross_domain"
headers = {"Authorization": "abc"}
response = await client.request("GET", url, headers=headers)
data = json.loads(response.content.decode())
assert response.url == URL("https://example.org/cross_domain_target")
assert data == {"headers": {}}
@pytest.mark.asyncio
async def test_same_domain_redirect():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/cross_domain"
headers = {"Authorization": "abc"}
response = await client.request("GET", url, headers=headers)
data = json.loads(response.content.decode())
assert response.url == URL("https://example.org/cross_domain_target")
assert data == {"headers": {"authorization": "abc"}}
@pytest.mark.asyncio
async def test_body_redirect():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/redirect_body"
data = b"Example request body"
response = await client.request("POST", url, data=data)
data = json.loads(response.content.decode())
assert response.url == URL("https://example.org/redirect_body_target")
assert data == {"body": "Example request body"}
@pytest.mark.asyncio
async def test_cannot_redirect_streaming_body():
client = RedirectAdapter(MockDispatch())
url = "https://example.org/redirect_body"
async def streaming_body():
yield b"Example request body"
with pytest.raises(RedirectBodyUnavailable):
await client.request("POST", url, data=streaming_body())
|
[
"[email protected]"
] | |
ccc0db47bb35b02bf55b2a3b7b26bd606bcd75a7
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/log-20190927/132.230.102.123-10.21.11.20/1569575874.py
|
5de66d09d31823420aabdb67b028801eeaf683e5
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,761 |
py
|
import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def divisior(n: int) -> list:
"""Eine Funktion, die alle Dividenten einer positiven,
ganzen Zahl in einer Liste wiedergibt
"""
j = [n]
for d in range(n+1): #loop bis n
d > 0
if abs(n) % int(d) == 0:
j.append(str(d))
return j
else:
return j
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(6))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(n):
nonlocal covered, count
if n <= 0:
covered.add(0)
if n == 1:
covered.add(1)
r = func (n)
lenr = len (r)
if lenr == 1:
covered.add(2)
if lenr == 2:
covered.add(3)
if (lenr > 2) and ( lenr % 2 == 0):
covered.add(4)
if lenr > 2 and lenr % 2 == 1:
covered.add(5)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
if func.__doc__:
wrapper.__doc__ = func.__doc__
wrapper.__hints__ = typing.get_type_hints (func)
return wrapper
return coverage
coverage = mk_coverage()
try:
divisors = coverage(divisors)
except:
pass
## Lösung Teil 2. (Tests)
def test_divisior():
assert divisior(6) == ["1","2","3","6"]
assert divisior(3) == ["3"]
assert divisior(-3) == ["3"]
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_divisors (self):
assert divisors
assert 'n' in getfullargspec(divisors).args
class TestGrades:
def test_docstring_present(self):
assert divisors.__doc__ is not None
def test_typing_present(self):
assert divisors.__hints__ == typing.get_type_hints(self.divisors_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def divisors_oracle(self, n:int)->list:
return [ d for d in range (1, n + 1) if n % d == 0 ]
def check_divisors (self, x):
assert set(divisors (x)) == set(self.divisors_oracle (x))
def test_correctness(self):
for i in range (100):
self.check_divisors (i)
n = random.randrange (10000)
self.check_divisors (n)
|
[
"[email protected]"
] | |
3a04f547f002847a7ded45264a9b924b04ad80c2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/2604.py
|
17221a93a4af173a541d76a67dca1f017c864d35
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,419 |
py
|
f = open('A-small-attempt5.in')
lines = f.readline()
inputlist = f.readlines()
linewords =[]
TranslatedWord = ''
TranslatedWords = []
TranslatedSentence = ''
outputlist=[]
tongues = {}
tongues['a']='y'
tongues['b']='h'
tongues['c']='e'
tongues['d']='s'
tongues['e']='o'
tongues['f']='c'
tongues['g']='v'
tongues['h']='x'
tongues['i']='d'
tongues['j']='u'
tongues['k']='i'
tongues['l']='g'
tongues['m']='l'
tongues['n']='b'
tongues['o']='k'
tongues['p']='r'
tongues['q']='z'
tongues['r']='t'
tongues['s']='n'
tongues['t']='w'
tongues['u']='j'
tongues['v']='p'
tongues['w']='f'
tongues['x']='m'
tongues['y']='a'
tongues['z']='q'
for i in inputlist:
linewords = i.split( )
#print linewords
for j in linewords:
for letters in j:
TranslatedWord = TranslatedWord + tongues[letters]
TranslatedWords.append(TranslatedWord)
TranslatedWord = ''
#print TranslatedWords
for word in TranslatedWords:
TranslatedSentence = TranslatedSentence + ' ' + word
x = len(outputlist)
outputlist.append('Case #' + str(x+1) + ':' + TranslatedSentence + '\n')
#print TranslatedSentence
TranslatedSentence = ''
TranslatedWords=[]
#Now lets print results
results = open('output.txt', 'w')
results.writelines(outputlist)
for outputSentence in outputlist:
print outputSentence
results.close()
|
[
"[email protected]"
] | |
05f1f72fb9d2533b3aba6ac889a694e0e1edc5f1
|
3088dc21f3e5eeb31575704712a695d71772495f
|
/torch/_C/_lazy.pyi
|
e86b80837d5898fd477b7f63c5e7ad387a2a65de
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
ezyang/pytorch
|
8e82444c78025ff12fa605a62a18acfc43b176b8
|
6fc64500d1af9ee1306c7695ab0a2ff01852ff00
|
refs/heads/master
| 2023-09-02T01:56:28.108400 | 2022-06-07T19:13:21 | 2022-06-07T19:13:21 | 101,798,885 | 3 | 0 |
NOASSERTION
| 2022-08-23T22:10:07 | 2017-08-29T19:28:39 |
C++
|
UTF-8
|
Python
| false | false | 878 |
pyi
|
from typing import List
from torch import Tensor
#defined in torch/csrc/lazy/python/init.cpp
def _mark_step(device: str, devices: List[str], wait: bool): ...
def _wait_device_ops(devices: List[str]): ...
def _reset_metrics(): ...
def _counter_names() -> List[str]: ...
def _counter_value(name: str) -> int: ...
def _get_graph_hash(tensors: List[Tensor]) -> str: ...
def _sync_multi(tensors: List[Tensor], devices: List[str], wait: bool = True, sync_ltc_data: bool = True): ...
def _get_tensor_id(tensor: Tensor) -> int: ...
def _get_tensors_text(tensors: List[Tensor]) -> str: ...
def _get_tensors_dot(tensors: List[Tensor]) -> str: ...
def _get_tensors_backend(tensors: List[Tensor]) -> str: ...
def _get_force_fallback() -> str: ...
def _set_force_fallback(newval: str): ...
def _clear_ir_cache(): ...
def _dump_ir_cache(filename: str): ...
def _set_reuse_ir(val: bool): ...
|
[
"[email protected]"
] | |
eddda169430ed0378114c2ea9afbf5730a434155
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02984/s103576149.py
|
4b0ac84178304706c240d4938f971696c04a987b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
n=int(input())
a=list(map(int,input().split()))
s=sum(a)
ans=[]
s1=0
for i in range(n):
if i%2==1:
s1+=a[i]
ans.append(str(s-s1*2))
for j in range(n-1):
ans.append(str(a[j]*2-int(ans[-1])))
print(' '.join(ans))
|
[
"[email protected]"
] | |
b973456b943c3feb54008e7162ec38a69daa125a
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2008-EOL/programming/languages/perl/perl-Clone/actions.py
|
a535f852d7ba91e57c9e618fdef343f9b88e8c49
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 586 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2007,2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import perlmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "%s-%s" % (get.srcNAME()[5:], get.srcVERSION())
def setup():
perlmodules.configure()
def build():
perlmodules.make()
def check():
perlmodules.make("test")
def install():
perlmodules.install()
pisitools.dodoc("Changes", "MANIFEST")
|
[
"[email protected]"
] | |
1bb3a9f8dfa507a3d76443d3a52f62ed9e64ba3a
|
eac7ae395c4832ac394087054ab014d1d6a9f6a6
|
/python_experiments/data_analysis/figures_icde19/parse_others/parse_hidx.py
|
b9b210f1b1b13d1f29e2f0c48b1ac00e6f5122c1
|
[
"MIT"
] |
permissive
|
mexuaz/AccTrussDecomposition
|
21be22007e1c50ca4b7df6fbbad1dfbf4c2fffae
|
15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4
|
refs/heads/master
| 2022-12-14T03:41:05.133564 | 2020-09-03T00:35:33 | 2020-09-03T00:35:33 | 291,565,779 | 0 | 0 |
MIT
| 2020-08-30T22:57:55 | 2020-08-30T22:57:55 | null |
UTF-8
|
Python
| false | false | 1,989 |
py
|
from data_analysis.util.read_file_utils_updated import *
from config import *
from exec_utilities import exec_utils
import json
others_time_hidx = 'h-idx'
def fetch_statistics(root_dir, dataset_lst, t_num, algorithm, json_file_path):
# Dataset -> Detailed Time Info
my_dict = dict()
for dataset in dataset_lst:
file_path = os.sep.join([root_dir, dataset, t_num, algorithm + '.log'])
logger.info(file_path)
lines = get_file_lines(file_path)
time_iter = list(filter(lambda l: 'Total time' in l, lines)) if lines is not None else None
if time_iter is None or len(list(time_iter)) == 0:
my_dict[dataset] = 0
else:
tmp = time_iter[0]
print(tmp)
my_dict[dataset] = eval(tmp.split(':')[-1].replace('secs', ''))
with open(json_file_path, 'w') as ofs:
ofs.write(json.dumps(my_dict, indent=4))
if __name__ == '__main__':
base_dir = '/home/yche/'
os.system('mkdir -p {}logs/'.format(base_dir))
my_res_log_file_folder = 'exp-2019-10-07-hidx'
log_path = my_res_log_file_folder + '.log'
logger = exec_utils.get_logger('{}logs/'.format(base_dir) + log_path, __name__)
hostname = 'ustgpu2'
root_dir = '{}mnt/ustgpu2/workspace/yche/git-repos/' \
'OutOfCoreSCAN/python_experiments/exp_results/{}/ustgpu2'.format(base_dir, my_res_log_file_folder)
os.system('mkdir -p {}'.format(others_time_hidx))
for my_md_algorithm_name in ['pnd-2300', 'hidx-org-2300']:
json_file_path = './{}/{}.json'.format(others_time_hidx, my_md_algorithm_name)
# Dataset -> Detailed Time Info
dataset_lst = [
'snap_orkut', 'webgraph_uk', 'webgraph_webbase',
'webgraph_eu', 'webgraph_it', 'webgraph_twitter']
reorder_tag = 'org'
fetch_statistics(root_dir=root_dir, dataset_lst=dataset_lst, t_num='40',
algorithm=my_md_algorithm_name, json_file_path=json_file_path)
|
[
"[email protected]"
] | |
3ac4f8b2a990c75001a1b676c0dcdbdcfaa0819c
|
59b0ebc4249f20edd0e87dc63784c6e8c138c7fd
|
/.history/roman_20180615012149.py
|
1f4d4bae42ab05f4189eed06aa87328cc9a2eeb8
|
[] |
no_license
|
Los4U/first_python_programs
|
f397da10be3ef525995f3f220e3b60012a6accaa
|
c3fc33a38c84abd292cb2e86de63e09434fc7fc4
|
refs/heads/master
| 2020-03-22T08:09:40.426118 | 2018-07-04T17:17:58 | 2018-07-04T17:17:58 | 139,748,883 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 687 |
py
|
number = 568
print("1000", number//1000)
print("900", number//900)
print("500", number//500)
print("400", number//400)
print("100", number//100)
print("90", number//90)
print("50", number//50)
print("40", number//40)
print("10", number//10)
print("9", number//9)
print("5", number//5)
print("4", number//4)
print("1", number//1)
change = [[1000, 'M'], [900, 'CM'], [500, 'D'], [400, 'CD'],
[ 100, 'C'], [ 90, 'XC'], [ 50, 'L'], [ 40, 'XL'],
[ 10, 'X'], [ 9, 'IX'], [ 5, 'V'], [ 4, 'IV'],
[ 1, 'I']]
result = ''
for denom, roman_digit in conv:
result += roman_digit*(number//denom)
number %= denom
print(number , " - ", result)
|
[
"[email protected]"
] | |
12623598889120e6503dc91e9996fe6e19049188
|
eb9e5f950f567458deb7ac6a958e9e07eec8211c
|
/Python/Projects/mysite/im/views.py
|
d6a2010f3c6fe4d3720effe6c19e7f063374e179
|
[] |
no_license
|
hyteer/ytest
|
b32402f4a85af2cba298729b81ae73ccedbe6013
|
98234f88e923a705ce08673a269904ca81117f03
|
refs/heads/master
| 2020-01-23T21:47:40.100472 | 2017-01-23T10:12:21 | 2017-01-23T10:12:21 | 74,676,200 | 0 | 0 | null | 2017-01-23T10:12:22 | 2016-11-24T13:34:34 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,691 |
py
|
# encoding: utf-8
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views import generic
from .forms import RoomLabelForm
from .models import Message, Room
# Create your views here.
def index(req):
#return HttpResponse("hi...")
if req.method == 'POST':
form = RoomLabelForm(req.POST)
if form.is_valid():
room_label = form.cleaned_data['room_label']
return HttpResponseRedirect('/im/room/%s' % room_label)
else:
form = RoomLabelForm()
return render(req, 'im/index.html', {"form": form})
def room(request, label):
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = reversed(room.messages.order_by('-time')[:50])
return render(request, "im/room.html", {
'room': room,
'messages': messages,
})
class RoomList(generic.ListView):
template_name = 'im/roomlist.html'
context_object_name = 'room_list'
def get_queryset(self):
"""Return the last five published questions."""
return Room.objects.all()[:5]
'''
def room_new(request, label):
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = reversed(room.messages.order_by('-timestamp')[:50])
return render(request, "realtime/room_new.html", {
'room': room,
'messages': messages,
})
'''
|
[
"[email protected]"
] | |
f8a8e65317639d4af42307e7fb7372570acb74ba
|
9cb4b0753f1392b488547395c43a8a6df5789a7a
|
/test/test_print_formats.py
|
c83cd4697133d482824149cf907251f9e8e95df5
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ZryletTC/archapp
|
db20cbcbf8135cc364d7dc0dda00a4409ff05800
|
68299fa3e35c292cff33bba55a3a75e9ae568815
|
refs/heads/master
| 2021-07-23T00:16:10.902137 | 2018-09-17T18:05:33 | 2018-09-17T18:05:33 | 233,143,295 | 0 | 0 |
NOASSERTION
| 2020-01-10T23:30:32 | 2020-01-10T22:50:21 | null |
UTF-8
|
Python
| false | false | 319 |
py
|
import unittest
from archapp.util import print_formats
class PrintFormatsTestCase(unittest.TestCase):
def test_print_list_no_crash(self):
print_formats.list_print([], do_print=False)
print_formats.list_print(["text"], do_print=False)
print_formats.list_print(["text"] * 50, do_print=False)
|
[
"[email protected]"
] | |
909fdf9e128bce0236a62e8ff7811d35593840e1
|
97a4d29863d1ce96f366554fdd985c3ce580bb5d
|
/038.py
|
0992e125a2c74680dc26c628fe36a109f19fe972
|
[] |
no_license
|
Everfighting/Python-Algorithms
|
5c3a102fed3a29858f3112d657c69e077efc7e28
|
235e9b4c66602035be39a8d3b3ad9cf016aebbb9
|
refs/heads/master
| 2021-01-20T22:19:18.902687 | 2018-03-02T05:38:27 | 2018-03-02T05:38:27 | 61,302,323 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 313 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
a = []
sum = 0.0
for i in range(3):
for j in range(3):
a.append(float(raw_input("input num:\n")))
for i in range(3):
sum += a[3*i+i]
#比原来的解法更加容易理解!
print sum
|
[
"[email protected]"
] | |
2f05dd68471f8c482bab11750f30469d447bc5fd
|
5de646fb3ecf10ecb45e05018a23b6345fb9ca53
|
/kickstart/2020 Round A/workout.py
|
3228eaf2dddec7155a79d9a7818a00d038790e31
|
[] |
no_license
|
PPinto22/LeetCode
|
5590d6ca87efcd29f9acd2eaed1bcf6805135e29
|
494a35542b61357c98c621202274d774e650a27c
|
refs/heads/master
| 2022-04-29T20:37:31.085120 | 2022-04-02T12:02:30 | 2022-04-02T12:02:30 | 201,478,904 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,270 |
py
|
import math
def solve(N, K, sessions):
diffs = [sessions[i + 1] - sessions[i] for i in range(N - 1)]
return min_binary_search(1, max(diffs), constraints, K, diffs)
# Validate if it is possible to achieve the given difficulty
# with less than 'max_splits' splits
def constraints(difficulty, max_splits, diffs):
splits = sum(get_splits(diffs, difficulty))
return splits <= max_splits
# Find the minimum value between lower and upper
# that meets the given constraints
def min_binary_search(lower, upper, constraints, *args):
while lower <= upper:
middle = (lower + upper) // 2
if constraints(middle, *args):
upper = middle - 1
else:
lower = middle + 1
return lower
# Lists how many additional sessions must be added between
# each session get a the difficulty of 'target' (at most)
def get_splits(diffs, target):
return [math.ceil(diff / target) - 1 for diff in diffs]
if __name__ == '__main__':
T = int(input())
for Ti in range(1, T + 1):
# N: Sessions; K: Additional sessions
N, K = map(int, input().split())
sessions = list(map(int, input().split()))
result = solve(N, K, sessions)
print('Case #{}: {}'.format(Ti, result), flush=True)
|
[
"[email protected]"
] | |
0a34f9a13d7367f74a7cb32c527c6e70ca44d90c
|
a4c2c6e12e8f79a1e96df2364a304a39413c61ca
|
/codingame.py
|
e0e9b774680edac154a6326df5f5b734129ba8f2
|
[] |
no_license
|
ilmoi/challenges
|
43c416f82bf5fa6b357a9b8f17caa17763c1031f
|
4e90f675ee7965d63af75d642217d64517bc434f
|
refs/heads/master
| 2022-07-17T17:35:42.177809 | 2020-05-14T16:49:24 | 2020-05-14T16:49:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 42 |
py
|
import random
print(random.randint(0, 1))
|
[
"[email protected]"
] | |
a7fe54623cf7e91b74619370ecdf197fd332ce39
|
83727bce4680f56234b7ca35ab3fe99cd0cb0d3e
|
/lib/nark/__init__.py
|
c0748f97d6f7c712e456b91d78fa6e0469f45dfb
|
[] |
no_license
|
onethousandfaces/rpg
|
62647b2dd6ad0c253ed363f3bcd340706f075082
|
219b3f865c99d5619ec0a63f5e18ac1f0f064413
|
refs/heads/master
| 2021-01-15T16:16:11.037991 | 2013-04-07T07:41:20 | 2013-04-07T07:41:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 146 |
py
|
from Enum import *
import log
import ioc
from Log import Log
from Register import Register
__all__ = [ enum, bitflags, log, Log, Register, ioc ]
|
[
"[email protected]"
] | |
93aa7788b41ca27080912ddcf5f42c76086fa66a
|
f7ae3a193cf672f2c7edf27518f6d3871f635bce
|
/tools/gen_daily_areas_scotland.py
|
dd1391d07c85027df83bc161ea627e10c7746a1b
|
[] |
no_license
|
lozenge0/covid-19-uk-data
|
262c158f27734dc0d8b0e3d28c21be613465eebe
|
e9f2c927e0be2e4301921d423108160e4a272ade
|
refs/heads/master
| 2021-03-26T01:50:55.964597 | 2020-03-15T21:58:24 | 2020-03-15T21:58:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,516 |
py
|
#!/usr/bin/env python
# Extract local authority case data (England) or health board data (Scotland) from an HTMLpage and save in CSV format.
from bs4 import BeautifulSoup
import csv
import pandas as pd
import re
import sys
html_file = sys.argv[1]
csv_file = sys.argv[2]
# Get upper tier local authority name to code mapping.
# Note that this does not include Scotland, but that's OK as Scotland areas are health boards, not local authorities.
la_mapping = pd.read_csv(
"data/raw/Lower_Tier_Local_Authority_to_Upper_Tier_Local_Authority_April_2019_Lookup_in_England_and_Wales.csv"
)
la_name_to_code = dict(zip(la_mapping["UTLA19NM"], la_mapping["UTLA19CD"]))
la_name_to_code["Cornwall and Isles of Scilly"] = la_name_to_code["Cornwall"]
la_name_to_code["Hackney and City of London"] = la_name_to_code["Hackney"]
m = re.match(".+-(.+)-(\d{4}-\d{2}-\d{2})\.html", html_file)
country = m.group(1).title()
date = m.group(2)
html = open(html_file).read()
soup = BeautifulSoup(html, features="html.parser")
table = soup.find_all("table")[-1]
output_rows = [["Date", "Country", "AreaCode", "Area", "TotalCases"]]
for table_row in table.findAll("tr"):
columns = table_row.findAll("td")
if len(columns) == 0:
continue
output_row = [date, country, la_name_to_code.get(columns[0].text, "")]
for column in columns:
output_row.append(column.text)
output_rows.append(output_row)
with open(csv_file, "w") as csvfile:
writer = csv.writer(csvfile)
writer.writerows(output_rows)
|
[
"[email protected]"
] | |
ddea386e7d4b21095806b4773a9d65d07e26e84f
|
d5d9996c55414561fe77a2630ad7e0cfff0735ad
|
/pddm/statstics_anlysis/bootstrapped/__init__.py
|
f83d651288bd9cb793be2ee6023a791a593a03fa
|
[
"Apache-2.0"
] |
permissive
|
kyo-kutsuzawa/EEI_Analysis_model_based_rl
|
4b704875619be0045b6f3b1ad12e86bd1041347c
|
d83ad7a7da936672a05ccacc6846d16c33421b96
|
refs/heads/main
| 2023-01-11T22:36:49.348156 | 2020-11-10T04:54:21 | 2020-11-10T04:54:21 | 313,227,869 | 0 | 0 |
Apache-2.0
| 2020-11-16T07:55:31 | 2020-11-16T07:55:30 | null |
UTF-8
|
Python
| false | false | 295 |
py
|
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
|
[
"[email protected]"
] | |
76fa13ebb809c0d96b5283163e49de9cd30ee7b8
|
a1a934e69050884560074a633dfe33d21a8acfcb
|
/examples/scikit-learn/wine-quality/pyfunc_predict.py
|
7c172de9a2b0fc2d98a8ae608c1cb2eceea55122
|
[] |
no_license
|
SkilledMinds/mlflow-fun
|
977a8bf0e052e72f2b98ee8a17ed017034e6a9a2
|
3caa0e5f61739357733cc165338c1d5a3c93f456
|
refs/heads/master
| 2020-04-29T00:56:28.995924 | 2019-03-13T23:57:14 | 2019-03-13T23:57:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 863 |
py
|
# Serve predictions with mlflow.pyfunc.load_pyfunc()
from __future__ import print_function
import sys
import mlflow
import mlflow.pyfunc
import mlflow.tracking
import util
if __name__ == "__main__":
if len(sys.argv) < 1:
println("ERROR: Expecting RUN_ID PREDICTION_FILE")
sys.exit(1)
print("MLflow Version:", mlflow.version.VERSION)
run_id = sys.argv[1]
data_path = sys.argv[2] if len(sys.argv) > 2 else "wine-quality.csv"
print("data_path:",data_path)
print("run_id:",run_id)
client = mlflow.tracking.MlflowClient()
model_uri = client.get_run(run_id).info.artifact_uri + "/model"
print("model_uri:",model_uri)
model = mlflow.pyfunc.load_pyfunc(model_uri)
print("model:",model)
df = util.read_prediction_data(data_path)
predictions = model.predict(df)
print("predictions:",predictions)
|
[
"[email protected]"
] | |
af7495869c9a3fb0198ffbf102eb36b70a3ba9c1
|
52efcaacf23e2345d09a1de61610a74df457057f
|
/auto_derby/scenes/single_mode/aoharu_battle_confirm.py
|
435e426f1990e5313b1c92abcfc2746f849e3917
|
[
"MIT"
] |
permissive
|
debi-derby/auto-derby
|
78bc726e8243c8a25ddc13b364b7289f322caaaa
|
c2e5c138125cac6dc13dbd74045161ca03f6e5cf
|
refs/heads/master
| 2023-09-03T09:03:35.305321 | 2021-11-02T16:18:45 | 2021-11-02T16:18:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,371 |
py
|
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
from PIL.Image import Image
from auto_derby import constants, template
from typing import Any, Dict, Iterator, Text, Tuple
from ... import action, templates, mathtools
from ...scenes import Scene
from ..scene import Scene, SceneHolder
def _recognize_predictions(
screenshot: Image,
) -> Iterator[Tuple[constants.RaceType, constants.RacePrediction]]:
rp = mathtools.ResizeProxy(screenshot.width)
bbox_list = (
(constants.RaceType.SPRINT, rp.vector4((31, 505, 113, 533), 540)),
(constants.RaceType.MILE, rp.vector4((136, 505, 199, 533), 540)),
(constants.RaceType.INTERMEDIATE, rp.vector4((230, 505, 309, 533), 540)),
(constants.RaceType.LONG, rp.vector4((331, 505, 405, 533), 540)),
(constants.RaceType.DART, rp.vector4((429, 505, 505, 533), 540)),
)
predition_templates = (
(constants.RacePrediction.HONNMEI, templates.PREDICTION_DOUBLE_CIRCLE),
(constants.RacePrediction.TAIKOU, templates.PREDICTION_CIRCLE_OUTLINE),
# TODO: add template for this
# (constants.RacePrediction.TANNANA, templates.PREDICTION_TRIANGLE),
(constants.RacePrediction.RENNSHITA, templates.PREDICTION_TRIANGLE_OUTLINE),
)
for t, bbox in bbox_list:
img = screenshot.crop(bbox)
for p, tmpl in predition_templates:
try:
next(
template.match(
img,
tmpl,
)
)
yield t, p
except StopIteration:
continue
class AoharuBattleConfirmScene(Scene):
def __init__(self) -> None:
super().__init__()
self.predictions: Dict[constants.RaceType, constants.RacePrediction] = {}
def to_dict(self) -> Dict[Text, Any]:
return {
"predictions": self.predictions,
}
@classmethod
def name(cls):
return "single-mode-aoharu-battle-confirm"
@classmethod
def _enter(cls, ctx: SceneHolder) -> Scene:
action.wait_image_stable(
templates.SINGLE_MODE_AOHARU_BATTLE_CONFIRM_TITLE, duration=0.2
)
return cls()
def recognize_predictions(self) -> None:
self.predictions = dict(_recognize_predictions(template.screenshot()))
|
[
"[email protected]"
] | |
4266fac216ad1d316fc296b75728ee21f701d3c9
|
509823ea14f04d5791486b56a592d7e7499d7d51
|
/parte11/11.1_intro_funciones.py
|
ee051acf132ed8184e654c6fdfe566647698afbb
|
[] |
no_license
|
Fhernd/Python-CursoV2
|
7613144cbed0410501b68bedd289a4d7fbefe291
|
1ce30162d4335945227f7cbb875f99bc5f682b98
|
refs/heads/master
| 2023-08-08T05:09:44.167755 | 2023-08-05T19:59:38 | 2023-08-05T19:59:38 | 239,033,656 | 64 | 38 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,212 |
py
|
# Introducción a las funciones - Unidades de reutilización y encapsulación de información:
# 1. Creación de una función:
print('1. Creación de una función:')
def sumar(numero_1, numero_2):
"""
Suma dos números (sean enteros o punto flotante).
Parameters:
numero_1: primer valor a sumar.
numero_2: segundo valor a sumar.
Returns:
Suma de dos números (enteros o reales).
"""
suma = numero_1 + numero_2
return suma
x = 2
y = 3
resultado = sumar(x, y)
print('El resultado de sumar {} y {} es igual a {}.'.format(x, y, resultado))
print()
# 2. Invocación de una función:
resultado = sumar(2, 3)
print('El resultado de sumar {} y {} es igual a {}.'.format(x, y, resultado))
print()
# 3. Obtener documentación/ayuda de una función:
print('3. Obtener documentación/ayuda de una función:')
print()
help(sumar)
print()
help(print)
print()
# 4. Creación de una función para alternar los valores de dos variables:
print('4. Creación de una función para intercambiar los valores de dos variables:')
# a = 2, b = 3
# a = 3, b = 2
# auxiliar = 2
# a = 3
# b = 2
def intercambiar_valores(a, b):
"""
Intercambia los valores de dos variables.
Parameters:
a: primer valor.
b: segundo valor.
Returns:
Los valores de a y b intercambiados.
"""
auxiliar = a
a = b
b = auxiliar
return a, b
x = 2
b = 3
print('Valores de las variables `x` e `y` antes del intercambio:')
print(f'x = {x} - y = {y}')
resultado = intercambiar_valores(x, b)
x = resultado[0]
y = resultado[1]
print('Valores de las variables `x` e `y` después del intercambio:')
print(f'x = {x} - y = {y}')
print()
# 5. Uso de funcionalidad que provee en su defecto (incorporado) el lenguaje de programación:
print('5. Uso de funcionalidad que provee en su defecto (incorporado) el lenguaje de programación:')
x = 2
y = 3
resultado = x + y
print('El resultado de sumar {} y {} es igual a {}.'.format(x, y, resultado))
print()
print('Valores de las variables `x` e `y` antes del intercambio:')
print(f'x = {x} - y = {y}')
x, y = y, x
print('Valores de las variables `x` e `y` antes del intercambio:')
print(f'x = {x} - y = {y}')
|
[
"[email protected]"
] | |
6ecc8fecb6fb0874525588a2bd17ddb89ac54107
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/flask-admin-examples/layout_bootstrap3/.venv/bin/flask
|
ceac5d8fcd84d5ec7c11f2c2125baa483fdaae5c
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null |
UTF-8
|
Python
| false | false | 287 |
#!/root/NetBeansProjects/fedora/python/flask-admin-examples/layout_bootstrap3/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
4b8b01a2cbf64cf1df9453d4779d4dae791226e7
|
d33bb5d51c432058d2c8efa0882f24c8dad2bb4f
|
/setup.py
|
31ab4adf42f3560f9bdf10a9780f3d01f23951c9
|
[
"Apache-2.0"
] |
permissive
|
anuragarnab/fvcore
|
351e8a50fe27993646f774823f09331f62d161ae
|
da9d3658590c9f672998850542817acecd98facc
|
refs/heads/master
| 2023-03-08T23:21:27.825110 | 2021-02-24T22:30:32 | 2021-02-24T22:30:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,782 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
from os import path
from setuptools import find_packages, setup
def get_version():
init_py_path = path.join(
path.abspath(path.dirname(__file__)), "fvcore", "__init__.py"
)
init_py = open(init_py_path, "r").readlines()
version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
version = version_line.split("=")[-1].strip().strip("'\"")
# Used by CI to build nightly packages. Users should never use it.
# To build a nightly wheel, run:
# BUILD_NIGHTLY=1 python setup.py sdist
if os.getenv("BUILD_NIGHTLY", "0") == "1":
from datetime import datetime
date_str = datetime.today().strftime("%Y%m%d")
# pip can perform proper comparison for ".post" suffix,
# i.e., "1.1.post1234" >= "1.1"
version = version + ".post" + date_str
new_init_py = [l for l in init_py if not l.startswith("__version__")]
new_init_py.append('__version__ = "{}"\n'.format(version))
with open(init_py_path, "w") as f:
f.write("".join(new_init_py))
return version
setup(
name="fvcore",
version=get_version(),
author="FAIR",
license="Apache 2.0",
url="https://github.com/facebookresearch/fvcore",
description="Collection of common code shared among different research "
"projects in FAIR computer vision team",
python_requires=">=3.6",
install_requires=[
"numpy",
"yacs>=0.1.6",
"pyyaml>=5.1",
"tqdm",
"termcolor>=1.1",
"Pillow",
"tabulate",
"iopath>=0.1.2",
],
extras_require={"all": ["shapely"]},
packages=find_packages(exclude=("tests",)),
)
|
[
"[email protected]"
] | |
900590a83b7d3240581c458f12d539d50dab438f
|
e3ec7260806c1e2b045a0de93a150a5c3fc1b9df
|
/test/ResultsAndPrizes/top-3/test_top_3_winning_numbers_of_the_last_4_draws.py
|
e44b5657bb3991bf27e30b9fabd015acfc7b59ef
|
[
"Apache-2.0"
] |
permissive
|
FearFactor1/SPA
|
58a21c9ec7a72a78f5ff50214e58faac43a3059d
|
a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7
|
refs/heads/master
| 2021-07-07T04:25:12.525595 | 2020-11-16T14:35:33 | 2020-11-16T14:35:33 | 204,684,720 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 624 |
py
|
# Топ-3 + Выигрышные номера последних 4 тиражей
def test_top_3_winning_numbers_last_4_draws(app):
app.ResultAndPrizes.open_page_results_and_prizes()
app.ResultAndPrizes.click_game_top_3()
app.ResultAndPrizes.click_winning_numbers_of_the_last_4_draws()
app.ResultAndPrizes.button_get_report_winners()
app.ResultAndPrizes.parser_report_text_winners()
assert "ВЫИГРЫШНЫЕ НОМЕРА" in app.ResultAndPrizes.parser_report_text_winners()
app.ResultAndPrizes.message_id_33_top_3_winning_numbers_4_last_draw()
app.ResultAndPrizes.comeback_main_page()
|
[
"[email protected]"
] | |
339b0f19ded0667fb6df857d1218b8eef24f7bde
|
4415f0a06536b66d4e7425b3995c4009516c180d
|
/World1/Challenge026.py
|
0091692c182ee489a19bb63c2c350ef621f2301b
|
[] |
no_license
|
AndreisSirlene/Python-Exercises-Curso-em-Video-World-1-2-and-3
|
c73c2df958f5b83744af6288d26bb270aa30f8fd
|
62f59383eee9b8ab43ff78495cf30eb390638013
|
refs/heads/master
| 2023-03-30T11:34:12.672180 | 2021-03-24T23:34:17 | 2021-03-24T23:34:17 | 290,814,841 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
phrase = str(input('Type any phrase:')).upper().strip()
print('In this phrase the letter I appears {} times'.format(phrase.count('I')))
print('The first letter I appears in the position {}'.format(phrase.find('I')+1))
print('The last position that letter I appears is {}'.format(phrase.rfind('I')+1))
|
[
"[email protected]"
] | |
2f2f1517049db9c6fdfb700fba9d62aacad883c3
|
c9aa34a74e9f095c4dec21acf0f63ce33ea54757
|
/UndocumentedScripts/CheckPnRDistanceAndTime.py
|
1258464347152b538a8564eff4fc80b7360d2030
|
[] |
no_license
|
dvrpc/TIM3AnalysisScripts
|
d4a5308987279da421d4e9e76ca3ff6efe4e6490
|
cb8d50eead676b2950051a6d527be7a13de19b9f
|
refs/heads/main
| 2023-07-30T01:20:52.457588 | 2021-10-01T15:34:47 | 2021-10-01T15:34:47 | 372,885,969 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,827 |
py
|
import pandas as pd
import numpy as np
import VisumPy.helpers as h
def log(msg):
Visum.Log(20480, msg)
log('Reading Skims')
zones = np.array(h.GetMulti(Visum.Net.Zones, "No"))
def read_skim_tofrom_pnr(matno):
global zones
skim = pd.DataFrame(h.GetMatrix(Visum, matno), zones, zones)
return skim
skims= {'time': {'0000': read_skim_tofrom_pnr(301),
'0600': read_skim_tofrom_pnr(302),
'1000': read_skim_tofrom_pnr(303),
'1500': read_skim_tofrom_pnr(304),
'1900': read_skim_tofrom_pnr(305)},
'dist': {'0000': read_skim_tofrom_pnr(401),
'0600': read_skim_tofrom_pnr(402),
'1000': read_skim_tofrom_pnr(403),
'1500': read_skim_tofrom_pnr(404),
'1900': read_skim_tofrom_pnr(405)}}
def get_skims(args):
global skims
o = args[0] #Origin
d = args[1] #Destination
t = args[2] #Time Period
r = args[3] #Direction
return (skims['time'][t].loc[o, d], skims['dist'][t].loc[o, d])
def classify_time(args):
if args[2] == 1:
t = args[0]
else:
t = args[1]
if t < 360:
return '0000'
elif t < 600:
return '0600'
elif t < 900:
return '1000'
elif t < 1140:
return '1500'
else:
return '1900'
log('Reading maz2taz correspondence')
maz2taz_file = r'D:\TIM3.1\DaySimLab\scenario\inputs\parcels_buffered.dat'
maz2taz = pd.read_csv(maz2taz_file, ' ', index_col = 0)
log('Reading Trip Data')
trip_file = r'D:\TIM3.1\DaySimLab\scenario\Output\05201612\_trip_2.dat'
trip = pd.read_csv(trip_file, '\t')
trip['otaz'] = trip['opcl'].map(maz2taz['taz_p'])
trip['dtaz'] = trip['dpcl'].map(maz2taz['taz_p'])
log('Reading Tour Data')
tour_file = trip_file.replace('trip', 'tour')
tour = pd.read_csv(tour_file, '\t')
log('Merging and Querying')
trip = tour.merge(trip, on = ['hhno', 'pno', 'day', 'tour']) #Merge tour info to trip info
trip = trip.query('(opurp == 0 and dpurp == 10) or (opurp == 10 and dpurp == 0)') #From home to PnR or vice-versa
log('Setting Up Table')
trip['direction'] = np.where(trip['dpurp'] == 10, 'ToPnR', 'FromPnR')
trip['tod_args'] = list(zip(trip['arrtm'], trip['deptm'], trip['half']))
trip['tod'] = trip['tod_args'].apply(classify_time)
log('Getting Skim Values')
trip['skim_args'] = list(zip(trip['otaz'], trip['dtaz'], trip['tod'], trip['direction']))
trip['skims'] = trip['skim_args'].apply(get_skims)
trip['skimtime'] = trip['skims'].apply(lambda x: x[0])
trip['skimdist'] = trip['skims'].apply(lambda x: x[1])
log('Comparing')
trip['timediff'] = trip['skimtime'] - trip['travtime']
trip['distdiff'] = trip['skimdist'] - trip['travdist']
log('Writing')
outfile = r'D:\TIM3\PnRTripsWithVISUMSkimsReclassifyZones.csv'
trip.to_csv(outfile)
log('Done')
|
[
"[email protected]"
] | |
aa90679fd2eaab72c0d8b81a0e0311920984962f
|
7db04177060d1b1d13bdf4e0f77cda83488d08bd
|
/backend/auth/email.py
|
d6bd5a020d88428569dc96d602dbacd15c3c499c
|
[] |
no_license
|
AlenAlic/mailserver
|
6ed5aa32e7f97b732e3630c1c435f233851a4a6a
|
a37f022feb2e3a2433bf7bf15c1cef0fc364901b
|
refs/heads/master
| 2023-02-02T09:17:00.192475 | 2019-11-08T21:12:11 | 2019-11-08T21:12:11 | 220,267,001 | 0 | 0 | null | 2023-01-09T22:33:19 | 2019-11-07T15:21:32 |
CSS
|
UTF-8
|
Python
| false | false | 689 |
py
|
from flask import render_template
from backend.email import send_email
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('Password reset', recipients=[user.email],
text_body=render_template('email/reset_password.txt', user=user, token=token),
html_body=render_template('email/reset_password.html', user=user, token=token))
def send_activation_email(user):
send_email('Activate account', recipients=[user.email],
text_body=render_template('email/activate_account.txt', user=user, token=token),
html_body=render_template('email/activate_account.html', user=user, token=token))
|
[
"[email protected]"
] | |
f0cedc5c68f50025aca88faedf6e41fdc603495d
|
00c6ded41b84008489a126a36657a8dc773626a5
|
/.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210715184119.py
|
d6e6d3f91a2163239eae7152a62d36fd304beac1
|
[] |
no_license
|
12libao/DEA
|
85f5f4274edf72c7f030a356bae9c499e3afc2ed
|
1c6f8109bbc18c4451a50eacad9b4dedd29682bd
|
refs/heads/master
| 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,591 |
py
|
# author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from icecream import ic
import math
"""
The unit use is IS standard
"""
class Design_Point_Select_Strategy:
"""This is a design point select strategy from constrains analysis"""
def __init__(self, altitude, velocity, beta, method=2, strategy_apply=0, propulsion_constrains=0, n=12):
"""
:param altitude: m x 1 matrix
:param velocity: m x 1 matrix
:param beta: P_motor/P_total m x 1 matrix
:param p_turbofan_max: maximum propulsion power for turbofan (threshold value)
:param p_motorfun_max: maximum propulsion power for motorfun (threshold value)
:param n: number of motor
:param method: if method = 1, it is Mattingly Method, otherwise is Gudmundsson Method
:param strategy_apply: if strategy_apply = 0, no strategy apply
:param propulsion_constrains: if propulsion_constrains = 0, no propulsion_constrains apply
the first group of condition is for stall speed
the stall speed condition have to use motor, therefore with PD
:return:
power load: design point p/w and w/s
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.n_motor = n
self.propulsion_constrains = propulsion_constrains
self.strategy_apply = strategy_apply
# initialize the p_w, w_s, hp, n, m
self.n = 100
self.m = altitude.size
self.hp = np.linspace(0, 1, self.n+1)
self.hp_threshold = 0.5
ic(self.hp)
# method = 1 = Mattingly_Method, method = 2 = Gudmundsson_Method
if method == 1:
self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
else:
self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
problem = self.method1(
self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
self.w_s = problem.allFuncs[0](problem)
def p_w_compute(self, p_w_turbofan_max, p_w_motorfun_max, pc):
p_w = np.zeros([self.m, len(self.hp)]) # m x (n+1) matrix
p_w_1 = np.zeros([self.m, len(self.hp)]) # m x (n+1) matrix
p_w_2 = np.zeros([self.m, len(self.hp)]) # m x (n+1) matrix
for i in range(1, 8):
for j in range(len(self.hp)):
problem1 = self.method1(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
problem2 = self.method2(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
if i >= 5:
p_w_1[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w_2[i, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w_1[i, j] = problem1.allFuncs[i](problem1)
p_w_2[i, j] = problem2.allFuncs[i](problem2)
if self.propulsion_constrains != 0 and pc != 0:
if p_w_1[i, j] > p_w_turbofan_max:
p_w_1[i, j] = 100000
elif p_w_2[i, j] > p_w_motorfun_max:
p_w_2[i, j] = 100000
p_w[i, j] = p_w_1[i, j] + p_w_2[i, j]
return p_w, p_w_1, p_w_2
def p_w_min(self, p_w):
#find the min p_w for difference hp for each flight condition:
p_w_min = np.amin(p_w, axis=1)
#find the index of p_w_min which is the hp
hp_p_w_min = np.zeros(8)
for i in range(1, 8):
for j in range(len(self.hp)):
if p_w[i, j] - p_w_min[i] < 0.001:
hp_p_w_min[i] = j * 0.01
p_w_1 = np.zeros(8)
p_w_2= np.zeros(8)
for i in range(1, 8):
problem1 = self.method1(
self.h[i], self.v[i], self.beta[i], self.w_s, hp_p_w_min[i])
problem2 = self.method2(
self.h[i], self.v[i], self.beta[i], self.w_s, hp_p_w_min[i])
if i >= 5:
p_w_1[i] = problem1.allFuncs[-1](
problem1, roc=15 - 5 * (i - 5))
p_w_2[i] = problem2.allFuncs[-1](
problem2, roc=15 - 5 * (i - 5))
else:
p_w_1[i] = problem1.allFuncs[i](problem1)
p_w_2[i] = problem2.allFuncs[i](problem2)
p_w_1_min = np.amax(p_w_1)
p_w_2_min = np.amax(p_w_2)
return p_w_1_min, p_w_2_min, p_w_min, hp_p_w_min
def strategy(self):
if self.strategy_apply == 0:
p_w_turbofan_max = 10000
p_w_motorfun_max = 10000
p_w, p_w_1, p_w_2 = Design_Point_Select_Strategy.p_w_compute(
self, p_w_turbofan_max, p_w_motorfun_max, pc=0)
p_w_min = p_w[:, 50]
p_w_1_min = np.array([self.w_s, np.amax(p_w_1[:, 50])])
p_w_2_min = np.array([self.w_s, np.amax(p_w_2[:, 50])])
hp_p_w_min = 0.5*np.ones(8)
else:
if self.propulsion_constrains == 0:
p_w_turbofan_max = 100000
p_w_motorfun_max = 100000
p_w, p_w_1, p_w_2 = Design_Point_Select_Strategy.p_w_compute(self, p_w_turbofan_max, p_w_motorfun_max, 0)
else:
p_w, _, _ = Design_Point_Select_Strategy.p_w_compute(self, 10000, 10000, pc=0)
p_w_1_min, p_w_2_min, _, _ = Design_Point_Select_Strategy.p_w_min(self, p_w)
p_w_turbofun_boundary = math.ceil(p_w_1_min)
p_w_motorfun_boundary = math.ceil(p_w_2_min)
ic(p_w_turbofun_boundary, p_w_motorfun_boundary)
# build p_w_design_point matrix, try p_w_max to find the best one
p_w_design_point = np.zeros([p_w_turbofun_boundary+1, p_w_motorfun_boundary+1])
for i in range(p_w_turbofun_boundary+1):
for j in range(p_w_motorfun_boundary+1):
p_w, _, _ = Design_Point_Select_Strategy.p_w_compute(self, i, j, 1)
#find the min p_w from hp: 0 --- 100 for each flight condition:
p_w_min = np.amin(p_w, axis=1)
p_w_design_point[i, j] = np.amax(p_w_min)
print(i)
p_w_turbofan_max = np.unravel_index(
p_w_design_point.argmin(), p_w_design_point.shape)[0]
p_w_motorfun_max = np.unravel_index(
p_w_design_point.argmin(), p_w_design_point.shape)[1]
p_w, p_w_1, p_w_2 = Design_Point_Select_Strategy.p_w_compute(
self, p_w_turbofan_max, p_w_motorfun_max, 1)
ic(p_w, p_w_1, p_w_2)
p_w_1_min, p_w_2_min, p_w_min, hp_p_w_min = Design_Point_Select_Strategy.p_w_min(self, p_w)
hp_p_w_min[0] = p_w_motorfun_max/(p_w_motorfun_max+p_w_turbofan_max)
return self.w_s, p_w_min, p_w_1_min, p_w_2_min, hp_p_w_min, p_w_turbofan_max, p_w_motorfun_max
if __name__ == "__main__":
constrains = np.array([[0, 80, 1, 0.2], [0, 68, 0.988, 0.5], [11300, 230, 0.948, 0.8],
[11900, 230, 0.78, 0.8], [3000, 100,
0.984, 0.8], [0, 100, 0.984, 0.5],
[3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.7]])
n = 250
w_s = np.linspace(100, 9000, n)
constrains_name = ['stall speed', 'take off', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m', 'feasible region-hybrid', 'feasible region-conventional']
color = ['k', 'c', 'b', 'g', 'y', 'plum', 'violet', 'm']
l_style = ['-', '--', '-.-']
methods = [ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric,
ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric]
strategy_apply = [0, 1, 1]
m = constrains.shape[0]
p_w = np.zeros([m, n, 8])
# plots
fig, ax = plt.subplots(3, 2, sharey=True, sharex=True, figsize=(10, 10))
ax = ax.flatten()
for z in range(3):
h = constrains[:, 0]
v = constrains[:, 1]
beta = constrains[:, 2]
if z == 0:
problem1 = Design_Point_Select_Strategy(h, v, beta, method=1, strategy_apply=0, propulsion_constrains=0)
problem2 = Design_Point_Select_Strategy(h, v, beta, method=2, strategy_apply=0, propulsion_constrains=0)
w_s, p_w_min, p_w_1_min, p_w_2_min, hp_p_w_min, p_w_turbofan_max, p_w_motorfun_max = problem1.strategy()
for k in range(6):
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
hp = hp_p_w_min[i]
# calculate p_w
if k < 4:
problem = methods[k](h, v, beta, w_s[j], hp)
if i >= 5:
p_w[i, j, k] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
else:
p_w[i, j, k] = problem.allFuncs[i](problem)
else:
if i == 0:
problem = methods[k-2](h, v, beta, w_s[j], hp)
p_w[i, j, k] = problem.allFuncs[i](problem)
else:
p_w[i, j, k] = p_w[i, j, k-4] + p_w[i, j, k-2]
# plot the lines
if i == 0:
ax[k].plot(p_w[i, :, k], np.linspace(0, 100, n),
linewidth=1, alpha=0.5, linestyle=l_style[z], label=constrains_name[i])
else:
ax[k].plot(w_s, p_w[i, :, k], color=color[i],
linewidth=1, alpha=0.5, linestyle=l_style[z], label=constrains_name[i])
# plot fill region
p_w[0, :, k] = 10 ** 10 * (w_s - p_w[0, 0, k])
ax[k].fill_between(w_s, np.amax(p_w[0:m, :, k], axis=0), 150, color='b', alpha=0.5, label=constrains_name[-2])
ax[k].grid()
ax[k-2].plot(6012, 72, 'r*', markersize=5, label='True Conventional')
handles, labels = plt.gca().get_legend_handles_labels()
fig.legend(handles, labels, bbox_to_anchor=(0.125, 0.02, 0.75, 0.25), loc="lower left",
mode="expand", borderaxespad=0, ncol=4, frameon=False)
hp = constrains[:, 3]
plt.xlim(200, 9000)
plt.ylim(0, 100)
plt.setp(ax[0].set_title(r'$\bf{Mattingly-Method}$'))
plt.setp(ax[1].set_title(r'$\bf{Gudmundsson-Method}$'))
plt.setp(ax[4:6], xlabel='Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.setp(ax[0], ylabel=r'$\bf{Turbofun}$''\n $P_{SL}$/$W_{TO}$ (W/N)')
plt.setp(ax[2], ylabel=r'$\bf{Motor}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
plt.setp(
ax[4], ylabel=r'$\bf{Turbofun+Motor}$' '\n' r'$\bf{vs.Conventional}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
plt.subplots_adjust(bottom=0.15)
plt.suptitle(r'$\bf{Component}$' ' ' r'$\bf{P_{SL}/W_{TO}}$' ' ' r'$\bf{Diagrams}$'
' ' r'$\bf{After}$' ' ' r'$\bf{Adjust}$' ' ' r'$\bf{Degree-of-Hybridization}$'
'\n hp: take-off=' +
str(hp[0]) + ' stall-speed=' +
str(hp[1]) + ' cruise=' +
str(hp[2]) + ' service-ceiling=' +
str(hp[3]) + '\n level-turn=@3000m' +
str(hp[4]) + ' climb@S-L=' +
str(hp[5]) + ' climb@3000m=' +
str(hp[6]) + ' climb@7000m=' + str(hp[7]))
plt.show()
|
[
"[email protected]"
] | |
8bad4dd94e501f270bd800aaa6a30a287cc857bf
|
f0cec246e2f30f6b4ee5656f1cb6406dd0f7879a
|
/thingsboard_client/models/entity_relations_query.py
|
3809b31c1629f825572c4525d74a841698f3feae
|
[] |
no_license
|
ascentio-tech/thingsboard-swagger-client
|
4e2f7c943e243ec8505c32dab0aa3d6cf1559105
|
1e8bf7664c281c29612fd5b44261f049ca7c44fd
|
refs/heads/master
| 2021-07-20T07:18:12.969459 | 2020-06-17T02:35:54 | 2020-06-17T02:35:54 | 184,322,192 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,072 |
py
|
# coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EntityRelationsQuery(object):
    """Query object pairing entity-type filters with relation search parameters.

    NOTE: This class is auto generated by the swagger code generator program.
    Review edit: the Python-2 compatibility call ``six.iteritems`` was replaced
    with the native ``dict.items`` (behaviourally identical on Python 3 and
    removes the third-party ``six`` dependency from this class).
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'filters': 'list[EntityTypeFilter]',
        'parameters': 'RelationsSearchParameters'
    }

    attribute_map = {
        'filters': 'filters',
        'parameters': 'parameters'
    }

    def __init__(self, filters=None, parameters=None):  # noqa: E501
        """EntityRelationsQuery - a model defined in Swagger"""  # noqa: E501
        self._filters = None
        self._parameters = None
        self.discriminator = None
        if filters is not None:
            self.filters = filters
        if parameters is not None:
            self.parameters = parameters

    @property
    def filters(self):
        """Gets the filters of this EntityRelationsQuery.  # noqa: E501

        :return: The filters of this EntityRelationsQuery.  # noqa: E501
        :rtype: list[EntityTypeFilter]
        """
        return self._filters

    @filters.setter
    def filters(self, filters):
        """Sets the filters of this EntityRelationsQuery.

        :param filters: The filters of this EntityRelationsQuery.  # noqa: E501
        :type: list[EntityTypeFilter]
        """
        self._filters = filters

    @property
    def parameters(self):
        """Gets the parameters of this EntityRelationsQuery.  # noqa: E501

        :return: The parameters of this EntityRelationsQuery.  # noqa: E501
        :rtype: RelationsSearchParameters
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this EntityRelationsQuery.

        :param parameters: The parameters of this EntityRelationsQuery.  # noqa: E501
        :type: RelationsSearchParameters
        """
        self._parameters = parameters

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes and recursively serialise nested
        # swagger models (anything exposing to_dict), lists and dicts thereof.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch for models that subclass dict (not the case
        # here, so this branch is inert).
        if issubclass(EntityRelationsQuery, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EntityRelationsQuery):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
7b6e3237909a5d784fdd92d6a36e1c9572ec482e
|
c88e476567f651de989840dd5f74354ee0a95826
|
/Day-30/Restaurant/Restaurant/settings.py
|
34c602354aa07b76604481face55fb61ff3b9b29
|
[] |
no_license
|
InduPriya-pokuri/Django-Internship-24-05-2021
|
5e5c2f4c004482938fbd196a6c07f381748eede5
|
d4daf040aaef484a1f9c3025441d66baa9c16e44
|
refs/heads/main
| 2023-06-13T06:07:51.968401 | 2021-07-12T12:45:52 | 2021-07-12T12:45:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,069 |
py
|
"""
Django settings for Restaurant project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and trivially guessable; load it from an
# environment variable before deploying anywhere non-local.
SECRET_KEY = 'asdasd'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG=True; must list served hostnames in production.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'RestApp',  # project app holding the restaurant views/models
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Restaurant.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],  # no project-level template dirs; app templates only
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Restaurant.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Default SQLite file next to the project; fine for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
04ce739cc903974beed995d84e696a06f1ca6a8c
|
a3366c118bb0b899cb436d89fc76f231d63366d2
|
/fc_toolbelt/tasks/project.py
|
7556d39a8d8e5356d343934d48baaa0b65a33e90
|
[] |
no_license
|
futurecolors/fc-toolbelt
|
cd25ac1f6fc3e2414d12d8235374220944e5136e
|
2f0d33f612a676bf9ff2f8e769c4d4638b5e9212
|
refs/heads/master
| 2021-01-10T19:55:36.062097 | 2013-10-31T10:56:51 | 2013-10-31T10:56:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,897 |
py
|
# coding: utf-8
import os
from functools import partial
from fabric.colors import green
from fabric.context_managers import cd, prefix
from fabric.operations import run, sudo
from fabric.state import env
from fabric.tasks import Task, execute
from fabric.utils import puts
from fc_toolbelt.tasks.gitlab import BaseGitlabTask
from fc_toolbelt.tasks.mysql import create_dev_db
from .writers import write_uwsgi, write_nginx, write_project
class OpenTin(BaseGitlabTask):
    """ Commit new Django project from template into repo"""

    def run(self, project_slug):
        # Build the project skeleton in a scratch directory, then push it to
        # the project's Gitlab repository as the initial commit.  The scratch
        # dir is wiped both before and after so reruns start clean.
        self.project_slug = project_slug
        tmpdir = '/tmp/fctools/'
        run('rm -rf /tmp/fctools/')
        run('mkdir -p %s' % tmpdir)
        self.create_folders_from_can(tmpdir)
        self.connect()
        repo_url = self.get_repo_url_by_path(project_slug)
        self.make_initial_commit(os.path.join(tmpdir, self.project_slug), repo_url)
        run('rm -rf /tmp/fctools/')

    def create_folders_from_can(self, dir):
        """ Clone project template, make virtualenv, custom startproject"""
        # A throwaway virtualenv is created just to run django-admin
        # startproject with the cloned template; the env itself is discarded
        # with the scratch directory.
        with cd(dir):
            env_name = 'canned_env'
            template_project_dirname = 'project_template'
            run('git clone %(repo)s %(dir)s' % {'repo': env.TEMPLATE_PROJECT_REPO,
                                                'dir': template_project_dirname})
            run('virtualenv %s' % env_name)
            with prefix('source %s/bin/activate' % env_name):
                run('pip install django')
                run('django-admin.py startproject %(project)s --template=%(template)s --extension=py,gitignore' % {
                    'project': self.project_slug,
                    'template': os.path.join(template_project_dirname, env.TEMPLATE_PROJECT_PACKAGE),
                })

    def make_initial_commit(self, project_dir, repo_url):
        """ Init git repo and push it as current user """
        # Commits as "<fabric user>@fctools" and force-pushes a fresh "dev"
        # branch alongside the default branch (push --all).
        with cd(project_dir):
            run('git init')
            run('git config user.email "%s@fctools"' % env.user)
            run('git config user.name "%s"' % env.user)
            run('git add .')
            run('git commit -m "Initial commit via fctools"')
            run('git remote add origin %s' % repo_url)
            run('git checkout -b dev')
            run('git push --all --force')


# Module-level task instance picked up by fabric's task discovery.
open_tin = OpenTin()
class AddDeveloper(BaseGitlabTask):
    """ Creates development project environment for developer"""
    name = 'add_developer'

    def run(self, project_slug, developer, uwsgi_config=None):
        # Orchestrates the per-developer setup: files, databases, then
        # nginx+uwsgi.  NOTE(review): `uwsgi_config` is accepted but never
        # used anywhere in this class — confirm whether it should be passed
        # down to setup_http.
        self.project_slug = project_slug
        self.developer = developer
        self.connect()
        repo_url = self.get_repo_url_by_path(project_slug)
        self.setup_files(repo_url)
        self.setup_databases()
        self.setup_http()
        puts(green('Congrats! Now visit: %s' % ('http://%s.%s' % (project_slug, developer))))

    def setup_files(self, repo_url):
        # Create the developer's project directory (as that developer via
        # sudo) and lay out the project checkout.  The double `%` applies two
        # successive substitutions: first the template constant, then the
        # user name.
        sudo_user = partial(sudo, user=self.developer)
        sudo_user('mkdir -p %s' % env.PROJECTS_PATH_TEMPLATE % {'user': self.developer})
        puts('Setting up new project "%s" for %s' % (self.project_slug, self.developer))
        execute(write_project, project_slug=self.project_slug,
                developer=self.developer,
                repo_url=repo_url)
        puts('Created project "%s" layout for %s' % (self.project_slug, self.developer))

    def setup_databases(self):
        # Per-developer development database for the project.
        execute(create_dev_db, self.project_slug, self.developer)
        puts('Setup of dev db "%s" for %s is finished' % (self.project_slug, self.developer))

    def setup_http(self):
        # Write uwsgi first, then the nginx vhost that proxies to it.
        execute(write_uwsgi, self.project_slug, self.developer)
        execute(write_nginx, self.project_slug, self.developer)
        puts('Nginx+uwsgi are set up for "%s" project, developer %s' % (self.project_slug, self.developer))


# Module-level task instance picked up by fabric's task discovery.
add_developer = AddDeveloper()
|
[
"[email protected]"
] | |
5bb63b411ea64c0b78942432783bb9e0c6f28e02
|
51e7336e8bb447187cbe6ede2910f40700316dc1
|
/simics/monitorCore/hapCleaner.py
|
221c8265c08cba073d1765b15bf20f581efc1275
|
[] |
no_license
|
hacker-steroids/RESim
|
69bac74a1b119c54d03b9ea0fda7a85cc45ea854
|
94498c699575f5078de415fac8c517d520cb2f94
|
refs/heads/master
| 2020-05-30T12:33:53.799610 | 2019-06-01T00:51:20 | 2019-06-01T00:51:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,388 |
py
|
'''
Structures for cleaning up stop haps used in reverse execution
'''
class HapCleaner():
    """Collects (hap type, hap handle) pairs that must be deleted when a
    reverse-execution stop completes."""
    # Class-level placeholder; every instance gets its own list in __init__.
    hlist = None

    def __init__(self, cpu):
        """Start with an empty hap list for the given cpu."""
        self.hlist = []
        self.cpu = cpu

    class HapType():
        """One recorded hap: its type string plus the hap handle itself."""
        def __init__(self, htype, hap):
            self.htype = htype
            self.hap = hap

    def add(self, htype, hap):
        """Record a hap (with its type) for later deletion."""
        entry = self.HapType(htype, hap)
        self.hlist.append(entry)
class StopAction():
    """Cleanup bundle executed when reverse execution stops.

    Holds: a HapCleaner listing haps to delete, breakpoints to delete,
    a list of functions (flist) to run in order, and the addresses of the
    breakpoints (break_addrs).

    Fix: ``break_addrs`` previously defaulted to a mutable ``[]`` shared by
    every instance created without the argument, so addresses appended to one
    StopAction leaked into all later ones.  A ``None`` sentinel now yields a
    fresh list per instance; the external interface is unchanged.
    """
    def __init__(self, hap_cleaner, breakpoints, flist=None, break_addrs=None):
        self.hap_clean = hap_cleaner
        # Fresh list per instance (mutable-default fix).
        self.break_addrs = break_addrs if break_addrs is not None else []
        self.exit_addr = None
        if breakpoints is not None:
            self.breakpoints = breakpoints
        else:
            self.breakpoints = []
        if flist is not None:
            self.flist = flist
        else:
            self.flist = []

    def run(self):
        ''' Process the functions in the flist '''
        # Pop the first function and hand it the remainder of the list so it
        # can chain to the next one when it finishes.
        if len(self.flist) > 0:
            fun = self.flist.pop(0)
            fun.run(self.flist)

    def getBreaks(self):
        """Return the recorded breakpoint addresses."""
        return self.break_addrs

    def setExitAddr(self, exit_addr):
        """Record the exit address associated with this stop."""
        self.exit_addr = exit_addr

    def getExitAddr(self):
        """Return the recorded exit address (None until set)."""
        return self.exit_addr

    def addFun(self, fun):
        """Append a function object (with a run(flist) method) to the chain."""
        self.flist.append(fun)
|
[
"[email protected]"
] | |
ecb0e899bf03e3016709f291990968cbabf79748
|
ad054cebf4198f25d6ca9b37b0eef4783762ac04
|
/Algorithm/개념정리/Memoization/Memoization_새꼼달꼼 장사.py
|
a40cd5afd48fe22ef6fc0fc0565a9c744e7da794
|
[] |
no_license
|
ges0531/TIL
|
4888d0bde5f84ad80caac63ffecf247d22daa0bf
|
54389b30e0a67f9c9a3329b1b59c43cdbb33a62c
|
refs/heads/master
| 2023-01-10T23:51:37.409124 | 2020-08-01T07:42:23 | 2020-08-01T07:42:23 | 195,916,245 | 0 | 0 | null | 2023-01-05T01:18:07 | 2019-07-09T02:17:43 |
Python
|
UTF-8
|
Python
| false | false | 1,470 |
py
|
def max_profit_memo(price_list, count, cache):
    """Return the maximum revenue from selling `count` items, allowing the
    sale to be split into smaller bundles priced by `price_list`.

    `price_list[i]` is the listed price for a bundle of i items; `cache`
    memoizes count -> best revenue so each subproblem is solved once.
    """
    # Selling 0 or 1 items cannot be split further: the listed price is final.
    if count < 2:
        cache[count] = price_list[count]
        return price_list[count]

    # Reuse a previously computed answer when available.
    if count in cache:
        return cache[count]

    # Baseline: the single-bundle price when one is listed, otherwise 0.
    best = price_list[count] if count < len(price_list) else 0

    # Try every split into two bundles (left, count - left); going up to
    # count // 2 covers all unordered pairs exactly once.
    for left in range(1, count // 2 + 1):
        candidate = (max_profit_memo(price_list, left, cache)
                     + max_profit_memo(price_list, count - left, cache))
        best = max(best, candidate)

    # Store the computed maximum before returning it.
    cache[count] = best
    return cache[count]
def max_profit(price_list, count):
    """Entry point: best revenue for `count` items, using a fresh memo cache."""
    memo = {}
    return max_profit_memo(price_list, count, memo)
# Ad-hoc smoke tests printing the computed maxima for a few price tables.
print(max_profit([0, 100, 400, 800, 900, 1000], 5))
print(max_profit([0, 100, 400, 800, 900, 1000], 10))
print(max_profit([0, 100, 400, 800, 900, 1000, 1400, 1600, 2100, 2200], 9))
|
[
"[email protected]"
] | |
2525c9a1cf70f389e645dc0637b9f0e5cb23f128
|
90bbeb45b900f1ccf57652d5439fc27be891f4c3
|
/Chapter 4/tests/test_plain_text_email.py
|
ce514bccc4dbd2a4fc127170ca723ae5ecbb9009
|
[] |
no_license
|
RancyChepchirchir/ThoughtfulMachineLearningWithPython-py38
|
cdab296c063c5764e4255105b7c627ed022fbf75
|
d83959b9c21179b2e1a9d1f85428bc8d18d1fb36
|
refs/heads/master
| 2022-11-19T07:48:07.008197 | 2020-07-20T17:42:21 | 2020-07-20T17:42:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 736 |
py
|
import unittest
import io
import re
from email_object import EmailObject
class TestPlaintextEmailObject(unittest.TestCase):
    # Blank-line separator between header block and body.
    # NOTE(review): the name CLRF suggests CR+LF but the value is two
    # newlines — confirm the intent before renaming.
    CLRF = "\n\n"

    def setUp(self):
        # Load the fixture both as raw text (for expectations) and as a
        # stream handed to EmailObject (rewound first via seek(0)).
        self.plain_file = 'fixtures/plain.eml'
        with io.open(self.plain_file, 'rb') as plaintext:
            self.text = plaintext.read().decode('utf-8')
            plaintext.seek(0)
            self.plain_email = EmailObject(plaintext)

    def test_parse_plain_body(self):
        # Everything after the first blank-line separator is the body.
        body = self.CLRF.join(self.text.split(self.CLRF)[1:])
        self.assertEqual(self.plain_email.body(), body)

    def test_parses_the_subject(self):
        # Subject extracted straight from the raw header text.
        subject = re.search("Subject: (.*)", self.text).group(1)
        self.assertEqual(self.plain_email.subject(), subject)
[
"[email protected]"
] | |
4ddd105253fe7cf2e3d05abbd6b48b0b249eb296
|
2da798f1b31c6482d8f47bce394d78ccfae9d279
|
/raw_data_processing/GSE155513/SRS7124070/scripts/add_uns.py
|
ff439b564ac76b82fb7bfd77e135d2333a4e4386
|
[] |
no_license
|
mariafiruleva/sc_athero_itmo_master
|
47378083201e0dbad327b98291bbf4e65d5d3cc5
|
e3c8c1b55d61b551957da13d109c8dfb56aa3173
|
refs/heads/main
| 2023-05-20T11:49:23.202549 | 2021-06-07T16:26:15 | 2021-06-07T16:26:15 | 373,524,898 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,597 |
py
|
import argparse
import re
from urllib.request import urlopen, Request
from xml.etree.ElementTree import parse
import numpy as np
import pandas as pd
import scanpy
def get_markers(markers_file: str) -> dict:
    """Load a tab-separated marker table and return it column-wise.

    Returns a dict mapping each column name to a numpy array of that
    column's values, in row order.
    """
    table = pd.read_csv(markers_file, sep='\t')
    column_dicts = table.to_dict()
    return {column: np.array(list(values.values()))
            for column, values in column_dicts.items()}


path = "/mnt/tank/scratch/mfiruleva/scn/data/GSE155513/SRS7124070"
def add_uns(h5: str, h5_out: str, s_d: str, summary_file: str, kallisto_script=None, technology=None) -> None:
    """Fill the .uns metadata of an AnnData h5ad file from the sample
    description, GEO/SRA web lookups and per-resolution marker tables, then
    write the annotated file to `h5_out` and append a row to `summary_file`.

    :param h5: input h5ad path (post-Seurat, no uns yet)
    :param h5_out: output h5ad path (gzip-compressed)
    :param s_d: CSV with one row describing the sample (GSE, accessions, ...)
    :param summary_file: CSV appended with dataset/sample/organism/technology/path
    :param kallisto_script: script whose text encodes the 10x chemistry (10xv2/3)
    :param technology: explicit technology override (used for panglao inputs)
    """
    file = scanpy.read_h5ad(h5)
    description = pd.read_csv(s_d).reset_index().to_dict("records")[0]
    file.uns["expType"] = "counts"
    file.uns["public"] = True
    file.uns["curated"] = False
    file.uns["gse"] = description['GSE']
    file.uns["geo"] = f"https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc={description['GSE']}"
    file.uns["study_accession"] = description['study_accession']
    file.uns["species"] = description['scientific_name']
    if isinstance(file.uns["species"], list):
        file.uns["species"] = file.uns["species"][0]
    # Technology: explicit override wins; otherwise take it from the sample
    # description, except for "10x" where the exact chemistry (10xv2/10xv3)
    # is recovered from the kallisto run script text.
    if technology:
        file.uns['technology'] = technology
    else:
        if description['technology'] != "10x":
            file.uns["technology"] = description['technology']
        else:
            with open(kallisto_script, 'r') as run_file:
                data = run_file.read().replace('\n', '')
            file.uns["technology"] = re.findall('10xv[0-9]*', data)[0]
    # Follow the GEO accession page redirect and scrape SRA accessions from it.
    link = Request(f'https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc={description["GSE"]}',
                   headers={'User-Agent': 'Mozilla/5.0'})
    link = urlopen(link)
    article = Request(link.url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urlopen(article).read()
    # NOTE(review): both entries use the 'SRP\d*' pattern — the 'PRJNA' entry
    # looks like it was meant to use 'PRJNA\d*'; as written the else branch
    # below can never see a different accession.  Also, findall(...)[0] will
    # raise IndexError when no accession is present on the page — confirm.
    acc_ids = {'SRP': re.findall('SRP\d*', response.decode('utf-8'))[0],
               'PRJNA': re.findall('SRP\d*', response.decode('utf-8'))[0]
               }
    if acc_ids['SRP']:
        var_url = urlopen(
            f'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&db=sra&rettype=runtable&term={acc_ids["SRP"]}')
    else:
        var_url = urlopen(
            f'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&db=sra&rettype=runtable&term={acc_ids["PRJNA"]}')
    # Parse the SRA run-table XML for title and abstract; "Overall design:"
    # prefixes are stripped from the description/design fields.
    xmldoc = parse(var_url)
    file.uns["title"] = xmldoc.findall('EXPERIMENT_PACKAGE/STUDY/DESCRIPTOR/STUDY_TITLE')[0].text
    study_des = xmldoc.findall('EXPERIMENT_PACKAGE/STUDY/DESCRIPTOR/STUDY_ABSTRACT')[0].text
    file.uns["description"] = re.sub('Overall design:\s*', '', study_des)
    file.uns["design"] = re.sub('Overall design:\s*', '', re.findall('Overall design:.*', study_des)[0])
    file.uns["token"] = description['secondary_sample_accession']
    file.uns["sra"] = f"https://www.ncbi.nlm.nih.gov/sra/{description['secondary_sample_accession']}"
    file.uns['processed_from_panglao'] = False
    # Append one summary row; `path` is the module-level dataset location.
    meta = {'dataset': file.uns['gse'], 'sample': file.uns['token'], 'organism': file.uns['species'],
            'technology': file.uns['technology'], 'path': path}
    pd.DataFrame.from_dict(meta, orient='index').T.to_csv(summary_file, mode='a', header=False, index=False)
    # Attach per-resolution marker tables (clustering resolutions 0.2 .. 1).
    file.uns['markers'] = dict()
    resolutions = re.sub('\s', '', "0.2, 0.4, 0.6, 0.8, 1").split(',')
    for res in resolutions:
        file.uns['markers'][f'markers{res}'] = get_markers(f'markers/SCT_snn_res.{res}/markers.tsv')
    file.write_h5ad(h5_out, compression='gzip')
if __name__ == '__main__':
    # CLI wrapper: collect the file paths / overrides and run add_uns once.
    parser = argparse.ArgumentParser(description="Define 10x version")
    parser.add_argument('--h5', type=str, required=True,
                        help='h5 input filename without uns after Seurat processing')
    parser.add_argument('--h5_out', type=str, required=True,
                        help='h5 output filename with filled uns')
    parser.add_argument('--kallisto_script', type=str, required=False, default=None,
                        help='Path to kallisto script')
    parser.add_argument('--s_d', type=str, required=True,
                        help='Path to sample description file')
    parser.add_argument('--summary_file', type=str, required=True,
                        help='Path to the summary file')
    parser.add_argument('--technology', type=str, required=False, default=None,
                        help='Name of used technology; this argument specified in case of panglao db')
    args = parser.parse_args()
    add_uns(h5=args.h5, h5_out=args.h5_out, kallisto_script=args.kallisto_script,
            s_d=args.s_d, summary_file=args.summary_file, technology=args.technology)
|
[
"[email protected]"
] | |
4bfff399a77f0d64ddfa5d94b522b8f764483c1e
|
95ef92ea4cafc8865268f38a7cb03dc078f39846
|
/content/migrations/0001_initial.py
|
5a939b52afa3ff5b4b53b3217f9a31501ee95989
|
[
"MIT"
] |
permissive
|
enterstudio/digihel
|
ec8ea7d714897c8b041f91ff3d0e89e2e9ec364a
|
b0f99a0be768df3b3a0cae20fe29a4018cd67ef7
|
refs/heads/master
| 2022-05-25T17:42:29.419062 | 2017-04-24T08:55:08 | 2017-04-24T08:55:08 | 89,243,603 | 1 | 0 |
MIT
| 2022-05-17T02:21:20 | 2017-04-24T13:30:06 |
CSS
|
UTF-8
|
Python
| false | false | 1,124 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-05 11:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ContentPage wagtail page
    # model with a StreamField body (heading / rich-text paragraph / image
    # blocks).  Do not edit by hand — migration state elsewhere depends on it.

    initial = True

    dependencies = [
        ('wagtailcore', '0028_merge'),
    ]

    operations = [
        migrations.CreateModel(
            name='ContentPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('body', wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())))),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
|
[
"[email protected]"
] | |
d70d778641730cf8a69624e5b36c83a7e9a896d1
|
c2a46158a91d3dd41e962230d182c80bfc88886e
|
/test/test_documents_builddoc_model.py
|
38d2e7aeecf21fdb98dcccb36ce2c9e117678aaf
|
[] |
no_license
|
martinsauve/doli-swagger-client-python
|
e5f4308b6a38c34c4c0bcc796f6863e983b6d7da
|
b2606e6f9f3064fe55f81ab90ec524921086a159
|
refs/heads/master
| 2023-08-14T00:04:02.228383 | 2021-10-06T15:34:02 | 2021-10-06T15:34:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 950 |
py
|
# coding: utf-8
"""
Restler API Explorer
Live API Documentation # noqa: E501
OpenAPI spec version: 1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.documents_builddoc_model import DocumentsBuilddocModel # noqa: E501
from swagger_client.rest import ApiException
class TestDocumentsBuilddocModel(unittest.TestCase):
    """DocumentsBuilddocModel unit test stubs"""
    # Auto-generated swagger test skeleton: the single test is a stub until
    # mandatory attributes with example values are filled in (see FIXME).

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testDocumentsBuilddocModel(self):
        """Test DocumentsBuilddocModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.documents_builddoc_model.DocumentsBuilddocModel()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"[email protected]"
] | |
c925fe272df4b0036fe730abb1694b9c22b93574
|
c24fa89450cccb48fcd481c3cfa475ee0e412e09
|
/InHomeMonitoringCode/room_estimator.py
|
83096f4c203214ac95e7d8eafdd6b4b0f5567f3e
|
[] |
no_license
|
PhoenixYanrongLi/CareEcoSystem_ServerCodeNew
|
e95d1c552cdcc70aac09482dfda63e253e01fcb0
|
b627484694863c425483a04391eedc2ec2ec1098
|
refs/heads/master
| 2021-01-01T04:34:51.858543 | 2016-04-14T17:57:30 | 2016-04-14T17:57:30 | 56,258,674 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 30,892 |
py
|
__author__ = 'gmn255'
# RoomEstimator provides an abstract class for room estimator classes to subclass
# It implements the core methods that are necessary for both the estimator training
# and the real time estimation based on the trained classifier
from parse import parse
import string as string
import numpy as np
from sys import maxint
class RoomEstimator(object):
def __init__(self):
# timearray = the array of time points collected from estimote scanning starting at 0
self.timearray = np.empty(0, dtype='i8')
# timeref = the actual time at which the first estimote measurement is taken and used as reference
self.timeref = 0
# roomlist specifies the dictionary mapping rooms to indices (i.e., not beacon minors but internal indices)
self.roomlist = []
# numrooms = the number of rooms
self.numrooms = 0
    def _get_time(self, timestamp):
        """Return `timestamp` as an offset from self.timeref.

        As of 6/9/15 timestamps arrive as long integers from the database;
        string inputs are converted to int first.  NOTE(review): uses the
        Python-2-only names ``basestring`` and ``long`` — this method will
        raise NameError on Python 3.
        """
        # 6/9/15: timestamp is now expected to be in long format when pulled from database
        # Convert the timestamp into time in seconds
        # the input is in format 15-04-01T08:12:33.000
        # and the output is a natural number based on a 5-year cycle
        # if isinstance(timestamp, basestring):
        #     # if timestamp is a string, perform appropriate conversion
        #     # use relative time since first data point as time index
        #     timeholder = parse("{:d}-{:d}-{:d}T{:d}:{:d}:{:d}.000", timestamp)
        #     t = (3110400*timeholder.fixed[0] + 1036800*timeholder.fixed[1] + 86400*timeholder.fixed[2] +
        #          3600*timeholder.fixed[3] + 60*timeholder.fixed[4] + timeholder.fixed[5]) - self.timeref
        # elif isinstance(timestamp, (int, long)):
        #     if timestamp is an int (or long), return as is
        #     t = timestamp - self.timeref
        if isinstance(timestamp, basestring):
            timestamp = long(timestamp)
        t = timestamp - self.timeref
        return t
def _get_rssi(self, rssiline):
# Convert one line strings from the dequeued array into a numpy array with
# indexes determined from roomlist mapping and values read from strings
rssiarray = np.empty(self.numrooms)
rssiarray.fill(-999)
for i in range(self.numrooms):
rssi = int(float(rssiline[2*i + 3]))
rssiarray[i] = rssi
return rssiarray
def _get_room(self, roomstring):
# translate a room string from the database (e.g., bedroom 1) to an index (e.g. 1)
roomstring = string.strip(roomstring, '"[]') # may contain quotes (") on each end
return roomstring
# # remove spaces from previous format
# rs = string.split(roomstring)
# rd = ""
# for c in rs:
# rd = rd + c
# return rd
    def read_rssi_list(self, rssilist):
        # read raw array and convert strings to an array of
        # usuable values and update self.timearray accordingly
        # Input:
        #   rssilist = 2d list of strings
        # Output:
        #   outputarray = dxr numpy array of d data points and r rssi values
        #   self.timearray = numpy array of timestamps used
        #
        # Side effects: sets self.timeref, self.numrooms and appends room
        # names to self.roomlist from the first row; the first sample's
        # relative time is 0 by construction.
        # NOTE(review): string.strip(module function) below is Python-2 only.
        estilist = []
        for i in range(len(rssilist)):
            if i == 0:
                # on first iteration need to define time reference, create roomlist, and read first entry
                # timeref is zeroed first so _get_time returns the absolute
                # timestamp, which then becomes the reference itself.
                self.timeref = 0
                self.timeref = self._get_time(rssilist[i][0])
                self.numrooms = int(rssilist[i][1])
                for j in range(self.numrooms):
                    room = string.strip(rssilist[0][2*j + 2], '[]')
                    self.roomlist.append(room)
                self.timearray = np.array(self._get_time(rssilist[i][0]))
                estilist.append(self._get_rssi(rssilist[i][:]))
                continue
            # Subsequent rows: relative timestamp plus the per-room RSSIs.
            time = self._get_time(rssilist[i][0])
            self.timearray = np.append(self.timearray, time)
            rssisubarray = self._get_rssi(rssilist[i][:])
            estilist.append(rssisubarray)
        #print np.array(estilist)
        outputarray = np.array(estilist) # return list as numpy array
        return outputarray
@staticmethod
def moving_average_erssi_filter(erssiarray):
# take an estimote RSSI matrix of mxn data points where m refers to time and n refers to room
# assumes sampling rate = 0.1 Hz, passband cutoff = 0.04 Hz, stopband attenuation = 60 dB (see LPF.m)
# return a filtered version of the raw data array
filter_coeff = [0.0525, 0.1528, 0.2947, 0.2947, 0.1528, 0.0525]
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
for i in range(erssiarray.shape[0]):
value = 0
for counter in range(len(filter_coeff)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= filter_coeff[counter] * 999
else:
value += filter_coeff[counter] * erssiarray[i-counter, j]
filtmat[i, j] = value
return filtmat
    @staticmethod
    def kalman_space_erssi_filter(erssiarray, A):
        # filter rssi data using Kalman filtering technique with Gaussian emission matrix and state transition matrix A
        # erssiarray = raw rssi data
        # A = state transition matrix
        # return a filtered version of the initial rssi array
        #
        # The state vector is the per-room RSSI (one entry per room); the
        # emission matrix C weights rooms by an assumed inter-room "distance".
        # NOTE(review): C is hard-coded 6x6, so this method only works when
        # there are exactly six rooms/beacons — confirm.
        st = 30  # soft thresholding parameter (thresholding code below is commented out)

        # emission matrix - Gaussian
        def gaussian(dist, sigma):
            return (1.0/(sigma*np.sqrt(2*np.pi)))*np.exp(-(dist**2)/(2*sigma**2))
        sig = 0.3
        C = np.array([
            [gaussian(0, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig), gaussian(2, sig), gaussian(2, sig)],
            [gaussian(1, sig), gaussian(0, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig)],
            [gaussian(1, sig), gaussian(1, sig), gaussian(0, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig)],
            [gaussian(1, sig), gaussian(1, sig), gaussian(1, sig), gaussian(0, sig), gaussian(2, sig), gaussian(2, sig)],
            [gaussian(2, sig), gaussian(1, sig), gaussian(1, sig), gaussian(2, sig), gaussian(0, sig), gaussian(1, sig)],
            [gaussian(2, sig), gaussian(1, sig), gaussian(1, sig), gaussian(2, sig), gaussian(1, sig), gaussian(0, sig)],
        ])
        # normalize each row (L1) so each row of C sums to 1
        for i in range(C.shape[0]):
            rownorm = np.linalg.norm(C[i, :], 1)
            for j in range(C.shape[1]):
                C[i, j] /= rownorm
        # print C
        sizeyfull = erssiarray.shape
        sizey = sizeyfull[1]  # number of rooms
        sizex = sizey
        length = sizeyfull[0]  # number of data points
        S = 0.1*np.eye(sizex)  # state error covariance (x_est=rssi filtered)
        R = 0.1*np.eye(sizey)  # measurement error covariance (erssiarray=rssi measured)
        G = np.eye(sizex)  # kalman gain matrix

        # initialize state estimate and info matrix
        x_est = np.zeros([sizex, length])
        for i in range(sizey):
            x_est[i, 0] = erssiarray[0, i]  # initial rssi is first measured
        P = 0.1*np.eye(sizex)  # initial info matrix

        # filter: standard Kalman predict (extrapolate) / update cycle per sample
        #A = np.eye(6)
        #C = np.eye(6)
        for i in range(0, length-1):
            x_est[:, i+1] = A.dot(x_est[:, i])  # state update extrapolation
            P = A.dot(P.dot(A.T)) + S  # info matrix extrapolation
            G = (P.dot(C.T)).dot(np.linalg.inv((C.dot(P.dot(C.T))+R)))  # kalman gain
            x_est[:, i+1] = x_est[:, i+1]+G.dot((erssiarray[i+1, :].T-C.dot(x_est[:, i+1])))  # state update
            P = (np.eye(sizex)-G.dot(C)).dot(P)  # error covariance update
            # apply soft thresholding (disabled)
            # for j in range(len(x_est[:, i+1])):
            #     if (x_est[j, i+1]+999) > st:
            #         x_est[j, i+1] -= st
            #     elif (x_est[j, i+1]+999) < -st:
            #         x_est[j, i+1] += st
            #     else:
            #         x_est[j, i+1] = -999
            #print erssiarray[i, :].T
            #print x_est[:, i]
            #print P
        return x_est.T  # filtered estimate (rows = time) to match the input layout
    @staticmethod
    def kalman_time_erssi_filter(erssiarray, A):
        # filter rssi data using Kalman filtering technique with Gaussian emission matrix and state transition matrix A
        # erssiarray = raw rssi data
        # A = state transition matrix
        # return a filtered version of the initial rssi array
        #
        # Unlike kalman_space_erssi_filter, the state here is a 6-tap time
        # window of one room's samples; each room/column is filtered
        # independently, warm-started with -999 before the first sample.
        filtlen = 6

        # emission matrix - Gaussian over tap "distance" within the window
        def gaussian(dist, sigma):
            return (1.0/(sigma*np.sqrt(2*np.pi)))*np.exp(-(dist**2)/(2*sigma**2))
        sig = 25
        C = np.array([
            [gaussian(0, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig), gaussian(2, sig), gaussian(2, sig)],
            [gaussian(1, sig), gaussian(0, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig)],
            [gaussian(1, sig), gaussian(1, sig), gaussian(0, sig), gaussian(1, sig), gaussian(1, sig), gaussian(1, sig)],
            [gaussian(1, sig), gaussian(1, sig), gaussian(1, sig), gaussian(0, sig), gaussian(2, sig), gaussian(2, sig)],
            [gaussian(2, sig), gaussian(1, sig), gaussian(1, sig), gaussian(2, sig), gaussian(0, sig), gaussian(1, sig)],
            [gaussian(2, sig), gaussian(1, sig), gaussian(1, sig), gaussian(2, sig), gaussian(1, sig), gaussian(0, sig)],
        ])
        # normalize each row (L1) so each row of C sums to 1
        for i in range(C.shape[0]):
            rownorm = np.linalg.norm(C[i, :], 1)
            for j in range(C.shape[1]):
                C[i, j] /= rownorm
        # print C
        S = 0.1*np.eye(filtlen)  # state error covariance (x_est=rssi filtered)
        R = 0.1*np.eye(filtlen)  # measurement error covariance (erssiarray=rssi measured)
        G = np.eye(filtlen)  # kalman gain matrix
        filtmat = np.empty(erssiarray.shape)  # filtered matrix
        for j in range(erssiarray.shape[1]):
            # do column by column
            # initialize state estimate and info matrix per column
            x_est = np.zeros([filtlen, 1])
            x_raw = np.zeros([filtlen, 1])
            P = 0.1*np.eye(filtlen)  # initial info matrix
            for i in range(erssiarray.shape[0]):
                # Build the length-6 measurement window ending at sample i.
                for k in range(filtlen):
                    if i-k < 0:
                        x_raw[k] = -999
                    else:
                        x_raw[k] = erssiarray[i-k, j]
                x_est = A.dot(x_raw)  # state update extrapolation
                P = A.dot(P.dot(A.T)) + S  # info matrix extrapolation
                G = (P.dot(C.T)).dot(np.linalg.inv((C.dot(P.dot(C.T))+R)))  # kalman gain
                x_est = x_est+G.dot((x_raw-C.dot(x_est)))  # state update
                P = (np.eye(filtlen)-G.dot(C)).dot(P)  # error covariance update
                # The newest tap of the window is the filtered output sample.
                filtmat[i, j] = x_est[0]
        return filtmat
    @staticmethod
    def adaptive_grad_erssi_filter(erssiarray):
        # learn coefficients with prior estimates for statistics
        # rdx = E[d*x] = d*E[x] = 999^2
        # Rx = E[x*x^H] = sigmax^2*I + mx^2 = 25^2*I + 999^2
        # constants
        #
        # Gradient-descent adaptation of a 6-tap FIR filter toward the Wiener
        # solution implied by the priors above; warm-starts with -999.
        filtlen = 6
        d = -999
        rdx = d**2*np.ones([filtlen, 1])
        Rx = 25**2*np.eye(filtlen) + d**2*np.ones([filtlen, filtlen])
        w = 0.1*np.ones([filtlen, 1])
        mu = 0.00000001  # gradient step size
        filtmat = np.empty(erssiarray.shape)  # filtered matrix
        for j in range(erssiarray.shape[1]):
            # do column by column
            for i in range(erssiarray.shape[0]):
                # Gradient step on the taps.
                # NOTE(review): '*' here is elementwise, not a matrix
                # product — for a textbook LMS update this would be
                # (I - mu*Rx).dot(w); confirm which is intended.
                w = (np.eye(filtlen) - mu*Rx)*w + mu*rdx
                value = 0
                for counter in range(len(w)):
                    if i-counter < 0:
                        # warm start with -999 values (i.e., no sensor detected)
                        value -= w[counter, 0] * 999
                    else:
                        value += w[counter, 0] * erssiarray[i-counter, j]
                filtmat[i, j] = value
        return filtmat
    @staticmethod
    def adaptive_momen_erssi_filter(erssiarray):
        # adding momentum to give Nesterov's optimal method
        # learn coefficients with prior estimates for statistics
        # rdx = E[d*x] = d*E[x] = 999^2
        # Rx = E[x*x^H] = sigmax^2*I + mx^2 = 25^2*I + 999^2
        # constants
        filtlen = 6
        d = -999
        b = 0.47  # momentum coefficient
        rdx = d**2*np.ones([filtlen, 1])
        Rx = 25**2*np.eye(filtlen) + d**2*np.ones([filtlen, filtlen])
        w = np.zeros([filtlen, 1])
        wp = np.zeros([filtlen, 1])   # previous taps (for the momentum term)
        wn = np.zeros([filtlen, 1])   # next taps
        mu = 0.00000001  # gradient step size
        filtmat = np.empty(erssiarray.shape)  # filtered matrix
        for j in range(erssiarray.shape[1]):
            # do column by column
            for i in range(erssiarray.shape[0]):
                # wn = w + mu*(rdx - (1+2*b-2*b**2)*Rx.dot(w) + b*(1+b)*(Rxn+Rxp).dot(w)) + b*(w-wp)
                # NOTE(review): the .T makes the middle term a (1,6) row, so
                # the sum broadcasts wn up to a (6,6) matrix; only column 0
                # (w[counter, 0]) is used below.  This looks like a misplaced
                # transpose — confirm the intended (6,1) update.
                wn = w + mu*rdx - mu*(w - b*w + b*wp).T.dot(Rx) + b*(w-wp)
                wp = w
                w = wn
                value = 0
                for counter in range(len(w)):
                    if i-counter < 0:
                        # warm start with -999 values (i.e., no sensor detected)
                        value -= w[counter, 0] * 999
                    else:
                        value += w[counter, 0] * erssiarray[i-counter, j]
                filtmat[i, j] = value
        return filtmat
@staticmethod
def adaptive_newt_erssi_filter(erssiarray):
# adding estimate of Hessian (P=Rx^-1) to improve convergence
# learn coefficients with prior estimates for statistics
# rdx = E[d*x] = d*E[x] = 999^2
# Rx = E[x*x^H] = sigmax^2*I + mx^2 = 25^2*I + 999^2
# constants
filtlen = 6
d = -999
rdx = d**2*np.ones([filtlen, 1])
Rx = 25**2*np.eye(filtlen) + d**2*np.ones([filtlen, filtlen])
P = np.linalg.inv(Rx)
w = P.dot(rdx) # note filter coefficients are constant because using prior and Newton's method gives
# convergence in one step for quadratic functions
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
for i in range(erssiarray.shape[0]):
value = 0
for counter in range(len(w)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= w[counter, 0] * 999
else:
value += w[counter, 0] * erssiarray[i-counter, j]
filtmat[i, j] = value
return filtmat
@staticmethod
def LMS_grad_erssi_filter(erssiarray):
d=-999
muinit = 0.0000000001
w = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) #initial filter coefficients
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
mucount = 0
munext = 2
mu = muinit
for i in range(erssiarray.shape[0]):
# update filter coefficients
x = np.zeros(len(w))
for k in range(len(w)):
if i-k < 0:
x[k] = -999
else:
x[k] = erssiarray[i-k, j]
if x[0] != x[1]:
mu = muinit
mucount = 0
munext = 2
else:
mucount += 1
if mucount == munext:
# approximate 1/k decrease in step size
mu = mu/2
munext = mucount*2
e = d - w.T.dot(x)
w = w + muinit*e*x
# apply filter
value = 0
for counter in range(len(w)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= w[counter] * 999
else:
value += w[counter] * erssiarray[i-counter, j]
filtmat[i, j] = value
return filtmat
@staticmethod
    def LMS_momen_erssi_filter(erssiarray, restarts=True):
        """LMS eRSSI smoothing with a Nesterov-style momentum term.

        The error is evaluated at the look-ahead point w + b*(w - wp); when
        ``restarts`` is True and the error magnitude grows, the momentum
        memory is reset (wp := wn) before continuing.

        :param erssiarray: 2-D array of eRSSI samples, one sensor per column
        :param restarts: reset momentum whenever the error magnitude increases
        :return: array of the same shape holding the filtered values
        """
        d=-999
        muinit = 0.000000001
        b = 0.38                     # momentum weight
        filtlen = 6
        w = np.zeros(filtlen) #initial filter coefficients
        w.fill(1.0/filtlen)
        wp = np.zeros(filtlen)
        wp.fill(1.0/filtlen)
        # NOTE(review): `maxint` is not defined in this view and does not
        # exist in Python 3 (`sys.maxint` was Python 2) — presumably imported
        # at module level; confirm, or replace with float('inf')/sys.maxsize.
        ep = maxint
        # epochs gates the 1/k step-size decay below; hard-wired off here.
        epochs = False
        filtmat = np.empty(erssiarray.shape) # filtered matrix
        for j in range(erssiarray.shape[1]):
            # do column by column
            mucount = 0
            munext = 2
            mu = muinit
            for i in range(erssiarray.shape[0]):
                # update filter coefficients
                # build the sliding input window (pre-history reads as -999)
                x = np.zeros(len(w))
                for k in range(len(w)):
                    if i-k < 0:
                        x[k] = -999
                    else:
                        x[k] = erssiarray[i-k, j]
                if epochs == True:
                    if x[0] != x[1]:
                        # new sample value: reset the decay schedule
                        mu = muinit
                        mucount = 0
                        munext = 2
                    else:
                        mucount += 1
                        if mucount == munext:
                            # approximate 1/k decrease in step size
                            mu = mu/2
                            munext = mucount*2
                else:
                    mu = muinit
                # error at the momentum look-ahead point
                e = (d-(w + b*(w-wp)).T.dot(x))
                wn = w + mu*e*(x.T) + b*(w-wp)
                if abs(e) > abs(ep) and restarts is True:
                    # restart momentum
                    wp = wn
                    w = wn
                else:
                    # perform regular update
                    wp = w
                    w = wn
                ep = e
                # apply filter
                value = 0
                for counter in range(len(w)):
                    if i-counter < 0:
                        # warm start with -999 values (i.e., no sensor detected)
                        value -= w[counter] * 999
                    else:
                        value += w[counter] * erssiarray[i-counter, j]
                filtmat[i, j] = value
        return filtmat
@staticmethod
def LMS_HB_erssi_filter(erssiarray):
d=-999
muinit = 0.000000001
b = 0.38
w = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) #initial filter coefficients
wp = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
epochs = False
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
mucount = 0
munext = 2
mu = muinit
for i in range(erssiarray.shape[0]):
# update filter coefficients
x = np.zeros(len(w))
for k in range(len(w)):
if i-k < 0:
x[k] = -999
else:
x[k] = erssiarray[i-k, j]
if epochs == True:
if x[0] != x[1]:
mu = muinit
mucount = 0
munext = 2
else:
mucount += 1
if mucount == munext:
# approximate 1/k decrease in step size
mu = mu/2
munext = mucount*2
else:
mu = muinit
wn = w + mu*(d-w.T.dot(x))*(x.T) + b*(w-wp)
wp = w
w = wn
# apply filter
value = 0
for counter in range(len(w)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= w[counter] * 999
else:
value += w[counter] * erssiarray[i-counter, j]
filtmat[i, j] = value
return filtmat
@staticmethod
    def LMS_newt_erssi_filter(erssiarray):
        """LMS/Newton eRSSI smoothing: the Hessian estimate P ~ Rx^-1 is
        refreshed per sample via the matrix inversion lemma and the
        coefficients are recomputed as w = d*P*x.

        :param erssiarray: 2-D array of eRSSI samples, one sensor per column
        :return: array of the same shape holding the filtered values
        """
        d=-999
        w = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) #initial filter coefficients
        # prior autocorrelation Rx = 25^2*I + d^2; P is its inverse
        Rx = 25**2*np.eye(len(w)) + d**2*np.ones([len(w), len(w)])
        P = np.linalg.inv(Rx)
        #P = 0.1*np.eye(len(w)) # initial inverse autocorrelation estimate
        filtmat = np.empty(erssiarray.shape) # filtered matrix
        for j in range(erssiarray.shape[1]):
            # do column by column
            for i in range(erssiarray.shape[0]):
                # prepare data vectors
                x = np.zeros([len(w), 1]) # new data at front of sliding window
                xp= np.zeros([len(w), 1]) # old data at back of sliding window
                for k in range(len(w)):
                    if i-k < 0:
                        x[k, 0] = -999
                        xp[k, 0]= -999
                    elif i-k-len(w)-1 < 0:
                        x[k, 0] = erssiarray[i-k, j]
                        xp[k, 0]= -999
                    else:
                        x[k, 0] = erssiarray[i-k, j]
                        xp[k, 0]= erssiarray[i-k-len(w)-1, j]
                # matrix inversion lemma to include new data
                # NOTE(review): only Pn is used below and P itself is never
                # updated (the downdate is commented out), so every step
                # applies the lemma to the same prior P — confirm intended.
                Pn = P - (P.dot(x.dot(x.T.dot(P)))) / (1+x.T.dot(P.dot(x)))
                # matrix inversion lemma to remove data from back of sliding window
                #P = Pn - (Pn.dot(xp.dot(xp.T.dot(Pn)))) / (1+xp.T.dot(Pn.dot(xp)))
                # attempt using Hayes, problem 9.4
                # P = (np.eye(len(w)) - x.dot(x.T)).dot(P) + np.eye(len(w))
                # calculate new filter coefficients
                w = d*Pn.dot(x)
                # apply filter
                value = 0
                for counter in range(len(w)):
                    if i-counter < 0:
                        # warm start with -999 values (i.e., no sensor detected)
                        # NOTE(review): w is (6, 1) here so w[counter] is a
                        # size-1 array; assigning it to filtmat[i, j] is
                        # deprecated in recent NumPy — confirm/scalarize.
                        value -= w[counter] * 999
                    else:
                        value += w[counter] * x[counter, 0]
                filtmat[i, j] = value
        #print filtmat
        #print erssiarray
        return filtmat
@staticmethod
def RLS_erssi_filter(erssiarray):
# sliding window RLS
d=-999
w = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) #initial filter coefficients
P = 0.1*np.eye(len(w)) # initial inverse autocorrelation estimate
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
for i in range(erssiarray.shape[0]):
# prepare data vectors
x = np.zeros([len(w), 1]) # new data at front of sliding window
xp= np.zeros([len(w), 1]) # old data at back of sliding window
for k in range(len(w)):
if i-k < 0:
x[k, 0] = -999
xp[k, 0]= -999
elif i-k-len(w) < 0:
x[k, 0] = erssiarray[i-k, j]
xp[k, 0]= -999
else:
x[k, 0] = erssiarray[i-k, j]
xp[k, 0]= erssiarray[i-k-len(w), j]
# introduce new data
z = P.dot(x)
g = z / (1+x.T.dot(z))
a = d - w.T.dot(x)
w = w + a*g
P = P - g.dot(z.T)
# remove stale data
z = P.dot(xp)
g = z / (1+xp.T.dot(z))
a = d - w.T.dot(xp)
w = w - a*g
P = P + g.dot(z.T)
# apply filter
value = 0
for counter in range(len(w)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= w[counter, 0] * 999
else:
value += w[counter, 0] * x[counter, 0]
filtmat[i, j] = value
return filtmat
@staticmethod
def LMS_l1_grad_erssi_filter(erssiarray):
d=-999
muinit = 0.0000000001
w = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) #initial filter coefficients
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
mucount = 0
munext = 2
mu = muinit
for i in range(erssiarray.shape[0]):
# update filter coefficients
x = np.zeros(len(w))
for k in range(len(w)):
if i-k < 0:
x[k] = -999
else:
x[k] = erssiarray[i-k, j]
if x[0] != x[1]:
mu = muinit
mucount = 0
munext = 2
else:
mucount += 1
if mucount == munext:
# approximate 1/k decrease in step size
mu = mu/2
munext = mucount*2
w = w + muinit*x
# apply filter
value = 0
for counter in range(len(w)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= w[counter] * 999
else:
value += w[counter] * erssiarray[i-counter, j]
filtmat[i, j] = value
return filtmat
@staticmethod
def LMS_l1_momen_erssi_filter(erssiarray):
d=-999
muinit = 0.000000001
b = 0.38
w = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) #initial filter coefficients
wp = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
epochs = False
filtmat = np.empty(erssiarray.shape) # filtered matrix
for j in range(erssiarray.shape[1]):
# do column by column
mucount = 0
munext = 2
mu = muinit
for i in range(erssiarray.shape[0]):
# update filter coefficients
x = np.zeros(len(w))
for k in range(len(w)):
if i-k < 0:
x[k] = -999
else:
x[k] = erssiarray[i-k, j]
if epochs == True:
if x[0] != x[1]:
mu = muinit
mucount = 0
munext = 2
else:
mucount += 1
if mucount == munext:
# approximate 1/k decrease in step size
mu = mu/2
munext = mucount*2
else:
mu = muinit
wn = w + mu*x + b*(w-wp)
wp = w
w = wn
# apply filter
value = 0
for counter in range(len(w)):
if i-counter < 0:
# warm start with -999 values (i.e., no sensor detected)
value -= w[counter] * 999
else:
value += w[counter] * erssiarray[i-counter, j]
filtmat[i, j] = value
return filtmat
# @staticmethod
# def KLMS(erssiarray, groundtruth=None, restarts=True,
# kernel=lambda x, y, s: np.exp((-np.linalg.norm(x-y)**2)/(2*s**2))):
# """
# This method implements NKLMS-NC or normalized KLMS with the novelty criterion
# :param erssiarray: the rssi data array
# :param groundtruth: the ground truth array if available
# :param restarts: if true, resets momentum term periodically
# :param kernel: the kernal function to be used. Gaussian kernel used by default
# """
# d=-999
# muinit = 0.000000001
# b = 0.38
# filtlen = 6
# w = np.zeros(filtlen) #initial filter coefficients
# w.fill(1.0/filtlen)
# wp = np.zeros(filtlen)
# wp.fill(1.0/filtlen)
# ep = maxint
# epochs = False
#
# filtmat = np.empty(erssiarray.shape) # filtered matrix
# for j in range(erssiarray.shape[1]):
# # do column by column
# mucount = 0
# munext = 2
# mu = muinit
#
# for i in range(erssiarray.shape[0]):
# # update filter coefficients
# x = np.zeros(len(w))
# for k in range(len(w)):
# if i-k < 0:
# x[k] = -999
# else:
# x[k] = erssiarray[i-k, j]
# if epochs == True:
# if x[0] != x[1]:
# mu = muinit
# mucount = 0
# munext = 2
# else:
# mucount += 1
# if mucount == munext:
# # approximate 1/k decrease in step size
# mu = mu/2
# munext = mucount*2
# else:
# mu = muinit
#
# e = (d-(w + b*(w-wp)).T.dot(x))
# wn = w + mu*e*(x.T) + b*(w-wp)
# if abs(e) > abs(ep) and restarts is True:
# # restart momentum
# wp = wn
# w = wn
# else:
# # perform regular update
# wp = w
# w = wn
# ep = e
#
# # apply filter
# value = 0
# for counter in range(len(w)):
# if i-counter < 0:
# # warm start with -999 values (i.e., no sensor detected)
# value -= w[counter] * 999
# else:
# value += w[counter] * erssiarray[i-counter, j]
# filtmat[i, j] = value
# return filtmat
|
[
"[email protected]"
] | |
b8377b2e248859ff49ac04507a18cd900f528562
|
d8edd97f8f8dea3f9f02da6c40d331682bb43113
|
/networks1147.py
|
4f426f1b851598c40bd8914d2ca5ee2a706e642e
|
[] |
no_license
|
mdubouch/noise-gan
|
bdd5b2fff3aff70d5f464150443d51c2192eeafd
|
639859ec4a2aa809d17eb6998a5a7d217559888a
|
refs/heads/master
| 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,187 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Module version tag; model classes record it in self.version.
__version__ = 205
# Number of continuous features (E, t, dca)
n_features = 3
# Size of the per-hit geometry vector fed to the discriminator.
geom_dim = 2
class Gen(nn.Module):
    """Generator: maps a latent vector to a sequence of continuous hit
    features plus a one-hot (Gumbel-softmax) wire selection.
    """
    def __init__(self, ngf, latent_dims, seq_len, encoded_dim, n_wires):
        # ngf, seq_len and encoded_dim are stored/unused below except as
        # attributes — presumably kept for checkpoint compatibility.
        super().__init__()
        self.latent_dims = latent_dims
        self.ngf = ngf
        self.seq_len = seq_len
        self.version = __version__
        # Input: (B, latent_dims, 1)
        self.act = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        # NOTE(review): n512 is set to 128, not 512 — the derived n256..n16
        # names no longer match their values; confirm intended.
        n512 = 128
        self.n512 = n512
        n256 = n512 // 2
        n128 = n512 // 4
        n64 = n512 // 8
        n32 = n512 // 16
        n16 = n512 // 32
        class Simple(nn.Module):
            # ConvTranspose1d -> BatchNorm -> ReLU building block.
            def __init__(self, in_c, out_c, *args, **kwargs):
                super().__init__()
                self.conv = nn.ConvTranspose1d(in_c, out_c, *args, **kwargs)
                self.norm = nn.BatchNorm1d(out_c)
                self.act = nn.ReLU()
            def forward(self, x):
                return self.act(self.norm(self.conv(x)))
        class Res(nn.Module):
            # Residual upsampling block: three Simple stages plus a conv,
            # with an upsample+1x1-conv shortcut when channel counts differ.
            def __init__(self, in_c, out_c, k_s, stride, *args, **kwargs):
                super().__init__()
                self.s1 = Simple(in_c, out_c, k_s, stride, *args, **kwargs)
                self.s2 = Simple(out_c, out_c, 3, 1, 1)
                self.s3 = Simple(out_c, out_c, 3, 1, 1)
                self.conv4 = nn.ConvTranspose1d(out_c, out_c, 3, 1, 1)
                self.norm4 = nn.BatchNorm1d(out_c)
                if in_c != out_c:
                    self.convp = nn.ConvTranspose1d(in_c, out_c, 1, 1, 0)
                else:
                    self.convp = nn.Identity()
                self.interp = nn.Upsample(scale_factor=stride, mode='linear')
                self.act = nn.ReLU()
            def forward(self, x):
                y0 = self.convp(self.interp(x))
                y = self.s1(x)
                y = self.s2(y)
                y = self.s3(y)
                y = self.act(self.norm4(y0 + self.conv4(y)))
                return y
        # latent -> (B, 128, 128), then four stride-2 residual upsamples.
        self.lin0 = nn.Linear(latent_dims, 128 * 128)
        self.s1 = Res(128, 128, 3, 2, 1, output_padding=1)
        self.s2 = Res(128, 128, 3, 2, 1, output_padding=1)
        self.s3 = Res(128, 128, 3, 2, 1, output_padding=1)
        self.s4 = Res(128, 64, 3, 2, 1, output_padding=1)
        # Output heads: continuous features and per-wire logits.
        self.convp = nn.ConvTranspose1d(64, n_features, 1, 1, 0, bias=True)
        self.convw = nn.ConvTranspose1d(64, n_wires, 1, 1, 0, bias=True)
        self.out = nn.Tanh()
        # Gumbel-softmax temperature anneal state (gen_it is advanced
        # externally — presumably by the training loop; confirm).
        self.max_its = 3000
        self.temp_min = 1.0
        self.gen_it = 3000
    def forward(self, z):
        #print('latent space %.2e %.2e' % (z.mean().item(), z.std().item()))
        # z: random point in latent space
        x = self.act(self.lin0(z).reshape(-1, 128, 128))
        x = self.s1(x)
        x = self.s2(x)
        x = self.s3(x)
        x = self.s4(x)
        w = self.convw(x)
        # temperature anneals from 1/temp_min toward temp_min over max_its
        tau = 1. / ((1./self.temp_min)**(self.gen_it / self.max_its))
        # hard one-hot wire choice with straight-through gradients
        wg = F.gumbel_softmax(w, dim=1, hard=True, tau=tau)
        p = self.convp(x)
        return self.out(p), wg
class Disc(nn.Module):
    """Discriminator: scores a (features, geometry, wire one-hot) sequence
    with a stack of residual downsampling conv blocks.
    """
    def __init__(self, ndf, seq_len, encoded_dim, n_wires):
        # ndf, seq_len and encoded_dim are accepted but unused below —
        # presumably kept for constructor-signature compatibility.
        super().__init__()
        self.version = __version__
        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)
        n512 = 512
        n256 = n512//2
        n128 = n512//4
        n64 = n512//8
        n32 = n512//16
        # wire one-hot is projected down to nproj channels before use
        nproj = 4
        class Simple(nn.Module):
            # Conv1d -> LeakyReLU building block (no normalization).
            def __init__(self, in_c, out_c, *args, **kwargs):
                super().__init__()
                self.conv = nn.Conv1d(in_c, out_c, *args, **kwargs)
                self.act = nn.LeakyReLU(0.2)
            def forward(self, x):
                return self.act(self.conv(x))
        class Res(nn.Module):
            # Residual downsampling block: three Simple stages plus a
            # strided conv, with an avg-pool+1x1-conv shortcut.
            def __init__(self, in_c, out_c, k_s, stride, *args, **kwargs):
                super().__init__()
                self.s1 = Simple(in_c, in_c, 3, 1, 1)
                self.s2 = Simple(in_c, in_c, 3, 1, 1)
                self.s3 = Simple(in_c, in_c, 3, 1, 1)
                self.conv4 = nn.Conv1d(in_c, out_c, k_s, stride, *args, **kwargs)
                self.act = nn.LeakyReLU(0.2)
                if in_c != out_c:
                    self.convp = nn.Conv1d(in_c, out_c, 1, 1, 0)
                else:
                    self.convp = nn.Identity()
                self.interp = nn.AvgPool1d(stride)
            def forward(self, x):
                y0 = self.convp(self.interp(x))
                y = self.s1(x)
                y = self.s2(y)
                y = self.s3(y)
                y = self.act(y0 + self.conv4(y))
                return y
        self.convw0 = nn.Conv1d(n_wires, nproj, 1, 1, 0, bias=False)
        # input channels: occupancy scalar + features + wire proj + geometry
        self.s1 = Res(1+n_features+nproj+geom_dim, 64, 3, 1, 1, padding_mode='circular')
        self.s2 = Res(64, 128, 3, 2, 1)
        self.s3 = Res(128, 128, 3, 2, 1)
        self.s4 = Res(128, 128, 3, 2, 1)
        self.lin0 = nn.Linear(128, 1)
        self.out = nn.Identity()
    def forward(self, x_, xy_, w_):
        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        seq_len = x_.shape[2]
        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)
        p = x_
        #xy = x[:,n_features:n_features+geom_dim]
        wg = w_
        #xy = torch.tensordot(wg, wire_sphere+torch.randn_like(wire_sphere) * 0.01, dims=[[1], [1]]).permute(0,2,1)
        xy = xy_
        # scalar summary of the wire-hit distribution, broadcast over time
        occupancy = wg.sum(dim=2).var(dim=1).unsqueeze(1).unsqueeze(2)
        # NOTE(review): leftover debug print — prints every forward pass;
        # consider removing or routing through logging.
        print(occupancy.mean().item())
        w0 = self.convw0(wg)
        x = torch.cat([w0, xy, p, occupancy.expand(-1, 1, seq_len)], dim=1)
        x = self.s1(x)
        x = self.s2(x)
        x = self.s3(x)
        x = self.s4(x)
        # global average pool over time, then a single score per sample
        x = self.lin0(x.mean(2)).squeeze()
        return self.out(x)
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        total += param.reshape(-1).shape[0]
    return total
|
[
"[email protected]"
] | |
6b52b6ec4e126f452e972de1e72f08164c5c6e7a
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_RelativeDifference/trend_ConstantTrend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_ConstantTrend_30_12_0.py
|
d0ff3e622857b909a929bdfb980a69ece6beb1b9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 279 |
py
|
# Generated benchmark case: 1024 daily points, constant trend, cycle length
# 30, AR order 12, relative-difference transform, no noise, no exogenous
# variables.  NOTE(review): tsds appears unused here — presumably imported
# for its dataset registration side effects; confirm.
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 12);
|
[
"[email protected]"
] | |
39f835403646b080d5decc274d92fe1f6d778d3e
|
9cedfd5b519f2ef7608313b5b0f1b361aeac8cc6
|
/resources/onelinelog.py
|
c9de6b2a8eea995b7052359e31ab3c538d2b9536
|
[] |
no_license
|
fdev31/archx
|
50a5bb6e8525ef87048345e40928a05e735cb758
|
5345bee7cc1a9cb0b98490749d9bbb5969b9a8f9
|
refs/heads/master
| 2020-05-21T03:08:18.532179 | 2019-12-16T21:23:25 | 2019-12-16T21:23:25 | 50,664,593 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,126 |
py
|
#!/usr/bin/env python
import sys
import codecs
import shutil
import fileinput
import time
# Prefix shown before every rewritten status line.
PROMPT='> '
# Non-zero: pass input through verbatim instead of one-line rewriting.
DEBUG=0
def univ_file_read(name, mode):
    """fileinput openhook: open *name* in universal-newline text mode.

    The *mode* fileinput passes in is deliberately ignored; input is always
    read as text.
    """
    # 'rU' was deprecated since Python 3.4 and removed in 3.11 (ValueError);
    # plain text mode already performs universal-newline translation.
    return open(name, 'r')
# Main loop: stream stdin/file input, log every raw line to stdout.log, and
# repaint a single status line on the terminal (overwriting the previous one
# with '\r').
linelen=0  # printed width of the previous status line (0 = nothing yet)
twidth = shutil.get_terminal_size()[0]
logfile=codecs.open('stdout.log', 'w+', encoding='utf-8')
for line in fileinput.input(openhook=univ_file_read):
    if DEBUG:
        # debug mode: passthrough, no one-line rewriting
        sys.stdout.write(line)
        continue
    if linelen:
        # blank out the previous status line before overwriting it
        try:
            sys.stdout.write(' '*linelen+'\r')
        except Exception as e:
            print(e)
    # always keep the full, unmodified line in the log file
    try:
        logfile.write(line)
    except Exception as e:
        print(e)
    line = line.strip().replace('\n', '_')
    if not line:
        continue
    if 'Dload' in line:
        # curl progress header: collapse to a fixed message
        line = 'Downloading...'
    elif 'db.lck.' in line:
        # pacman-style lock file message: surface the path and back off
        print('DATA BASE LOCKED, rm '+(line.rsplit(' ', 1)[-1][:-1]))
        time.sleep(5)
    # truncate to the terminal width, keeping room for the ellipsis
    if len(line)+1 > twidth :
        line = PROMPT + line[:twidth-10] + '...\r'
    else:
        line = PROMPT + line + '\r'
    sys.stdout.write(line)
    sys.stdout.flush()
    linelen = len(line) + 1
|
[
"[email protected]"
] | |
79ffe575c9f5608527ae2bbd1b2642208f7b21da
|
af1e8fd6bf305cce661262a4289df74ab886425e
|
/Troubleshooting and Debugging Techniques/examples/binary_search.py
|
c173182fb2fc1aba41f2418902e078a744676067
|
[] |
no_license
|
Nahid-Hassan/online-learning
|
95bf80f205ed33b5071da63c1939baa5e08f13d5
|
aee087bc42cba60ef2c3129fb8e96f68de1f44b4
|
refs/heads/main
| 2023-02-25T23:11:58.008425 | 2021-02-02T09:37:34 | 2021-02-02T09:37:34 | 306,249,918 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
def binary_search(list, key):
    """Return the index of *key* in *list*, or -1 if absent.

    *list* must already be sorted in ascending order.
    """
    lo, hi = 0, len(list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = list[mid]
        if candidate == key:
            return mid
        if candidate > key:
            hi = mid - 1     # key, if present, lies in the lower half
        else:
            lo = mid + 1     # key, if present, lies in the upper half
    return -1
return -1
# Demo: locate 'meem' in a sorted list of names.
name_list = ['nahid', 'hassan', 'mony', 'mahin', 'meem', 'bristy']
name_list.sort()  # binary_search requires sorted input
print(name_list)
idx = binary_search(name_list, 'meem')
print(idx, name_list[idx])
|
[
"[email protected]"
] | |
17fd2ad5ff1c32c21c6b8e6b7ec8a3bac988e23c
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/NMHFTCMqW6j8sXkNd_21.py
|
c5fbf2a525560148a2a09327ec38d12348248a53
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 502 |
py
|
"""
Create a function that takes a list of two numbers and checks if the **square
root** of the first number is equal to the **cube root** of the second number.
### Examples
check_square_and_cube([4, 8]) ➞ True
check_square_and_cube([16, 48]) ➞ False
check_square_and_cube([9, 27]) ➞ True
### Notes
* Remember to return either `True` or `False`.
* All lists contain _two positive numbers_.
"""
def check_square_and_cube(lst):
    """Return True if sqrt(lst[0]) equals the cube root of lst[1].

    sqrt(a) == cbrt(b)  <=>  a**3 == b**2, so the comparison is done in
    exact integer arithmetic.  The original `lst[1] == lst[0] ** 1.5` relied
    on float exponentiation, which loses precision for large inputs.
    """
    return lst[0] ** 3 == lst[1] ** 2
|
[
"[email protected]"
] | |
f3cdc9b4d2cdd55e81a5b35f33ac217dd6cc97a4
|
58a0ba5ee99ec7a0bba36748ba96a557eb798023
|
/Olympiad Solutions/URI/1059.py
|
a9a1b307c1fe96c5d3f1bca45a294bc8510db5d7
|
[
"MIT"
] |
permissive
|
adityanjr/code-DS-ALGO
|
5bdd503fb5f70d459c8e9b8e58690f9da159dd53
|
1c104c33d2f56fe671d586b702528a559925f875
|
refs/heads/master
| 2022-10-22T21:22:09.640237 | 2022-10-18T15:38:46 | 2022-10-18T15:38:46 | 217,567,198 | 40 | 54 |
MIT
| 2022-10-18T15:38:47 | 2019-10-25T15:50:28 |
C++
|
UTF-8
|
Python
| false | false | 240 |
py
|
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1059
# -*- coding: utf-8 -*-
'''
Write your solution here.
'''
# Print the even numbers from 2 to 100, one per line.
# BUG FIX: the original used Python 2's `xrange` and `print i` statement,
# which are a NameError/SyntaxError on Python 3.
for i in range(2, 101, 2):
    print(i)
|
[
"[email protected]"
] | |
7b1074e18796d2cef670d1ffbc15893dee538894
|
fc2d2163e790741de0c0e1aa337948cfeb5b6ba9
|
/tests/syntax/UnpackTwoStars32.py
|
3a8683cbd4fd3072ce5acf7dcabd04fe814e5bdc
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
nmoehrle/Nuitka
|
bcd20531f150ada82c8414620dca6c5424be64d1
|
317d1e4e49ef8b3bdfe2f80f2464040d644588b2
|
refs/heads/master
| 2023-06-22T09:56:23.604822 | 2017-11-29T14:10:01 | 2017-11-29T14:10:01 | 122,110,166 | 0 | 0 |
Apache-2.0
| 2018-02-19T19:29:05 | 2018-02-19T19:29:05 | null |
UTF-8
|
Python
| false | false | 809 |
py
|
# Copyright 2017, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Intentional SyntaxError fixture: two starred targets in one assignment are
# rejected by CPython.  This file exercises the compiler's error reporting,
# so the invalid line below must stay exactly as written.
foo, *bar, baz, *a, b = name.split('/')
|
[
"[email protected]"
] | |
8d417242c48459b312ed486523acbd941413aea9
|
4ef688b93866285bcc27e36add76dc8d4a968387
|
/tests/test_kms/test_model.py
|
523b1d748ffd9690156bc05d2f272747a9687192
|
[
"Apache-2.0"
] |
permissive
|
localstack/moto
|
cec77352df216cac99d5e0a82d7ada933950a0e6
|
b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff
|
refs/heads/localstack
| 2023-09-01T05:18:16.680470 | 2023-07-10T09:00:26 | 2023-08-07T14:10:06 | 118,838,444 | 22 | 42 |
Apache-2.0
| 2023-09-07T02:07:17 | 2018-01-25T00:10:03 |
Python
|
UTF-8
|
Python
| false | false | 1,183 |
py
|
import pytest
from moto.kms.models import KmsBackend
# Payload encrypted in every test case.
PLAINTEXT = b"text"
# Region the mocked KMS backend is created in.
REGION = "us-east-1"
@pytest.fixture(name="backend")
def fixture_backend():
    """Fresh mocked KMS backend for each test."""
    backend = KmsBackend(REGION)
    return backend
@pytest.fixture(name="key")
def fixture_key(backend):
    """Symmetric encrypt/decrypt key created on the backend fixture."""
    key = backend.create_key(
        None, "ENCRYPT_DECRYPT", "SYMMETRIC_DEFAULT", "Test key", None
    )
    return key
def test_encrypt_key_id(backend, key):
    """Encrypting by bare key id yields ciphertext and the key's ARN."""
    result, key_arn = backend.encrypt(key.id, PLAINTEXT, {})
    assert result is not None
    assert key_arn == key.arn
def test_encrypt_key_arn(backend, key):
    """Encrypting by full key ARN yields ciphertext and the same ARN."""
    result, key_arn = backend.encrypt(key.arn, PLAINTEXT, {})
    assert result is not None
    assert key_arn == key.arn
def test_encrypt_alias_name(backend, key):
    """Encrypting by alias name resolves to the aliased key."""
    backend.add_alias(key.id, "alias/test/test")
    result, key_arn = backend.encrypt("alias/test/test", PLAINTEXT, {})
    assert result is not None
    assert key_arn == key.arn
def test_encrypt_alias_arn(backend, key):
    """Encrypting by full alias ARN resolves to the aliased key."""
    backend.add_alias(key.id, "alias/test/test")
    alias_arn = f"arn:aws:kms:{REGION}:{key.account_id}:alias/test/test"
    result, key_arn = backend.encrypt(alias_arn, PLAINTEXT, {})
    assert result is not None
    assert key_arn == key.arn
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.