# Dataset dump: one Python source file per row. Per-row metadata columns:
# blob_id, directory_id, path, content_id, detected_licenses, license_type,
# repo_name, snapshot_id, revision_id, branch_name, visit_date, revision_date,
# committer_date, github_id, star_events_count, fork_events_count,
# gha_license_id, gha_event_created_at, gha_created_at, gha_language,
# src_encoding, language, is_vendor, is_generated, length_bytes, extension,
# content, authors, author.

# ===== FILE: /pj01.py  (repo: yvanesc/alarmPi, license: none) =====
import time
import RPi.GPIO as GPIO
import pygame, sys, os
import iniPi
import sqlPi
import ipPi
import timePi
import splashPi
import pagePi
from pygame.locals import *
from iniPi import *
os.putenv('SDL_FBDEV', '/dev/fb1')
pygame.init()
DISPLAYSURF = pygame.display.set_mode((320, 480))
#splashPi.disSplash(DISPLAYSURF)
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(27,GPIO.OUT)
fontSel=pygame.font.SysFont(iniPi.font, iniPi.font_size)
# DISPLAYSURF.fill(iniPi.WHITE)
# pygame.display.update()
GPIO.output(27,GPIO.HIGH)
pygame.mouse.set_visible(False)
while True:
os.system('clear')
#clkX+=1
#clkRect+=1
#def page+clkX + clkRect +
#calMenu = "n" +str(clkX)+ str(clkRect) +str(clkTri)+str(clkUp)+str(clkDown)
pagePi.askP (clkX, clkRect, clkTri, clkUp, clkDown, DISPLAYSURF)
#pagePi.n+str(clkX)+ str(clkRect) +str(clkTri)+str(clkUp)+str(clkDown)(DISPLAYSURF)
if (not GPIO.input(5)):
# X limit 1 level
if clkX==1:
clkX =0
else:
clkX+=1
# pygame.display.update()
if (not GPIO.input(22)):
# rect
if clkRect==1:
clkRect =0
else:
clkRect+=1
#pygame.display.update()
if (not GPIO.input(23)):
# O
#pygame.quit()
#sys.exit()
if clkRect==1:
clkRect =0
else:
clkRect+=1
if (not GPIO.input(24)):
# triangle
if clkTri==1:
clkTri =0
else:
clkTri+=1
#pygame.display.update()
if (not GPIO.input(4)):
#VOL LOW
if clkDown==1:
clkDown =0
else:
clkDown+=1
#GPIO.output(27,GPIO.HIGH)
if (not GPIO.input(17)):
#VOL HIGH
if clkUp==1:
clkUp =0
else:
clkUp+=1
#GPIO.output(27,GPIO.LOW)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
#sys.exit()
time.sleep(0.1)
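# ---------------------------------------------------------------------
# Hypothetical refactor sketch, not part of the original alarmPi code:
# each button handler above only flips a counter between 0 and 1, so the
# six near-identical if/else blocks inside the loop could collapse into
# one table-driven helper. The pin-to-counter mapping is read off the
# blocks above; the helper name and dicts are illustrative only.
flags = {"clkX": 0, "clkRect": 0, "clkTri": 0, "clkUp": 0, "clkDown": 0}
pin_to_flag = {5: "clkX", 22: "clkRect", 23: "clkRect",
               24: "clkTri", 17: "clkUp", 4: "clkDown"}

def poll_buttons():
    for pin, name in pin_to_flag.items():
        if not GPIO.input(pin):            # pressed (pull-up wiring reads low)
            flags[name] = 1 - flags[name]  # toggle 0 <-> 1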

# ===== FILE: /ax/modelbridge/tests/test_registry.py  (repo: cnheider/Ax, license: MIT) =====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from ax.modelbridge.discrete import DiscreteModelBridge
from ax.modelbridge.random import RandomModelBridge
from ax.modelbridge.registry import (
MODEL_KEY_TO_MODEL_SETUP,
Cont_X_trans,
Models,
Y_trans,
get_model_from_generator_run,
)
from ax.modelbridge.torch import TorchModelBridge
from ax.models.base import Model
from ax.models.discrete.eb_thompson import EmpiricalBayesThompsonSampler
from ax.models.discrete.thompson import ThompsonSampler
from ax.utils.common.kwargs import get_function_argument_names
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import (
get_branin_data,
get_branin_experiment,
get_branin_optimization_config,
get_factorial_experiment,
)
from torch import device as torch_device, float64 as torch_float64
class ModelRegistryTest(TestCase):
def test_enum_sobol_GPEI(self):
"""Tests Sobol and GPEI instantiation through the Models enum."""
exp = get_branin_experiment()
# Check that factory generates a valid sobol modelbridge.
sobol = Models.SOBOL(search_space=exp.search_space)
self.assertIsInstance(sobol, RandomModelBridge)
for _ in range(5):
sobol_run = sobol.gen(n=1)
self.assertEqual(sobol_run._model_key, "Sobol")
exp.new_batch_trial().add_generator_run(sobol_run).run()
# Check that factory generates a valid GP+EI modelbridge.
exp.optimization_config = get_branin_optimization_config()
gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
self.assertIsInstance(gpei, TorchModelBridge)
self.assertEqual(gpei._model_key, "GPEI")
botorch_defaults = "ax.models.torch.botorch_defaults"
# Check that the callable kwargs and the torch kwargs were recorded.
self.assertEqual(
gpei._model_kwargs,
{
"acqf_constructor": {
"is_callable_as_path": True,
"value": f"{botorch_defaults}.get_NEI",
},
"acqf_optimizer": {
"is_callable_as_path": True,
"value": f"{botorch_defaults}.scipy_optimizer",
},
"model_constructor": {
"is_callable_as_path": True,
"value": f"{botorch_defaults}.get_and_fit_model",
},
"model_predictor": {
"is_callable_as_path": True,
"value": "ax.models.torch.utils.predict_from_model",
},
"best_point_recommender": {
"is_callable_as_path": True,
"value": f"{botorch_defaults}.recommend_best_observed_point",
},
"refit_on_cv": False,
"refit_on_update": True,
"warm_start_refitting": True,
},
)
self.assertEqual(
gpei._bridge_kwargs,
{
"transform_configs": None,
"torch_dtype": torch_float64,
"torch_device": torch_device(type="cpu"),
"status_quo_name": None,
"status_quo_features": None,
"optimization_config": None,
"transforms": Cont_X_trans + Y_trans,
"fit_out_of_design": False,
"default_model_gen_options": None,
},
)
gpei = Models.GPEI(
experiment=exp, data=exp.fetch_data(), search_space=exp.search_space
)
self.assertIsInstance(gpei, TorchModelBridge)
def test_enum_model_kwargs(self):
"""Tests that kwargs are passed correctly when instantiating through the
Models enum."""
exp = get_branin_experiment()
sobol = Models.SOBOL(
search_space=exp.search_space, init_position=2, scramble=False, seed=239
)
self.assertIsInstance(sobol, RandomModelBridge)
for _ in range(5):
sobol_run = sobol.gen(1)
exp.new_batch_trial().add_generator_run(sobol_run).run()
def test_enum_factorial(self):
"""Tests factorial instantiation through the Models enum."""
exp = get_factorial_experiment()
factorial = Models.FACTORIAL(exp.search_space)
self.assertIsInstance(factorial, DiscreteModelBridge)
factorial_run = factorial.gen(n=-1)
self.assertEqual(len(factorial_run.arms), 24)
def test_enum_empirical_bayes_thompson(self):
"""Tests EB/TS instantiation through the Models enum."""
exp = get_factorial_experiment()
factorial = Models.FACTORIAL(exp.search_space)
self.assertIsInstance(factorial, DiscreteModelBridge)
factorial_run = factorial.gen(n=-1)
exp.new_batch_trial().add_generator_run(factorial_run).run()
data = exp.fetch_data()
eb_thompson = Models.EMPIRICAL_BAYES_THOMPSON(
experiment=exp, data=data, min_weight=0.0
)
self.assertIsInstance(eb_thompson, DiscreteModelBridge)
self.assertIsInstance(eb_thompson.model, EmpiricalBayesThompsonSampler)
thompson_run = eb_thompson.gen(n=5)
self.assertEqual(len(thompson_run.arms), 5)
def test_enum_thompson(self):
"""Tests TS instantiation through the Models enum."""
exp = get_factorial_experiment()
factorial = Models.FACTORIAL(exp.search_space)
self.assertIsInstance(factorial, DiscreteModelBridge)
factorial_run = factorial.gen(n=-1)
exp.new_batch_trial().add_generator_run(factorial_run).run()
data = exp.fetch_data()
thompson = Models.THOMPSON(experiment=exp, data=data)
self.assertIsInstance(thompson.model, ThompsonSampler)
def test_enum_uniform(self):
"""Tests uniform random instantiation through the Models enum."""
exp = get_branin_experiment()
uniform = Models.UNIFORM(exp.search_space)
self.assertIsInstance(uniform, RandomModelBridge)
uniform_run = uniform.gen(n=5)
self.assertEqual(len(uniform_run.arms), 5)
def test_view_defaults(self):
"""Checks that kwargs are correctly constructed from default kwargs +
standard kwargs."""
self.assertEqual(
Models.SOBOL.view_defaults(),
(
{
"seed": None,
"deduplicate": False,
"init_position": 0,
"scramble": True,
"generated_points": None,
},
{
"optimization_config": None,
"transforms": Cont_X_trans,
"transform_configs": None,
"status_quo_name": None,
"status_quo_features": None,
"fit_out_of_design": False,
},
),
)
self.assertTrue(
all(
kw in Models.SOBOL.view_kwargs()[0]
for kw in ["seed", "deduplicate", "init_position", "scramble"]
),
all(
kw in Models.SOBOL.view_kwargs()[1]
for kw in [
"search_space",
"model",
"transforms",
"experiment",
"data",
"transform_configs",
"status_quo_name",
"status_quo_features",
"fit_out_of_design",
]
),
)
def test_get_model_from_generator_run(self):
"""Tests that it is possible to restore a model from a generator run it
produced, if `Models` registry was used.
"""
exp = get_branin_experiment()
initial_sobol = Models.SOBOL(experiment=exp, seed=239)
gr = initial_sobol.gen(n=1)
# Restore the model as it was before generation.
sobol = get_model_from_generator_run(
generator_run=gr, experiment=exp, data=exp.fetch_data(), after_gen=False
)
self.assertEqual(sobol.model.init_position, 0)
self.assertEqual(sobol.model.seed, 239)
# Restore the model as it was after generation (to resume generation).
sobol_after_gen = get_model_from_generator_run(
generator_run=gr, experiment=exp, data=exp.fetch_data()
)
self.assertEqual(sobol_after_gen.model.init_position, 1)
self.assertEqual(sobol_after_gen.model.seed, 239)
self.assertEqual(initial_sobol.gen(n=1).arms, sobol_after_gen.gen(n=1).arms)
exp.new_trial(generator_run=gr)
# Check restoration of GPEI, to ensure proper restoration of callable kwargs
gpei = Models.GPEI(experiment=exp, data=get_branin_data())
# Punch GPEI model + bridge kwargs into the Sobol generator run, to avoid
# a slow call to `gpei.gen`.
gr._model_key = "GPEI"
gr._model_kwargs = gpei._model_kwargs
gr._bridge_kwargs = gpei._bridge_kwargs
gpei_restored = get_model_from_generator_run(
gr, experiment=exp, data=get_branin_data()
)
for key in gpei.__dict__:
self.assertIn(key, gpei_restored.__dict__)
original, restored = gpei.__dict__[key], gpei_restored.__dict__[key]
# Fit times are set in instantiation so not same and model compared below
if key in ["fit_time", "fit_time_since_gen", "model"]:
continue # Fit times are set in instantiation so won't be same
if isinstance(original, OrderedDict) and isinstance(restored, OrderedDict):
original, restored = list(original.keys()), list(restored.keys())
if isinstance(original, Model) and isinstance(restored, Model):
continue # Model equality is tough to compare.
self.assertEqual(original, restored)
for key in gpei.model.__dict__:
self.assertIn(key, gpei_restored.model.__dict__)
original, restored = (
gpei.model.__dict__[key],
gpei_restored.model.__dict__[key],
)
# Botorch model equality is tough to compare and training data
# is unnecessary to compare, because data passed to model was the same
if key in ["model", "warm_start_refitting", "Xs", "Ys"]:
continue
self.assertEqual(original, restored)
def test_ModelSetups_do_not_share_kwargs(self):
"""Tests that none of the preset model and bridge combinations share a
kwarg.
"""
for model_setup_info in MODEL_KEY_TO_MODEL_SETUP.values():
model_class = model_setup_info.model_class
bridge_class = model_setup_info.bridge_class
model_args = set(get_function_argument_names(model_class))
bridge_args = set(get_function_argument_names(bridge_class))
# Intersection of two sets should be empty
self.assertEqual(model_args & bridge_args, set())

# ===== FILE: /moha/system/basis/gaussian_orbital.py  (repo: ZhaoYilin/moha, license: MIT) =====
from scipy.special import factorial2 as fact2
import numpy as np
import copy
class GaussianOrbital(object):
"""Gaussian type orbital class.
Attributes
----------
n_number : int
Principal quantum number
shell : list
Angular momentum
exp : scalar
Primitive Gaussian exponent
coef : scalar
Primitive Gaussian coefficient
norm : scalar
Normalization factor
origin : list
Coordinate of the nuclei
Methods
-------
"""
def __init__(self,type,atom_index,origin,n_number,shell=(),exps=[],coefs=[]):
"""Initialize the instance.
Parameters
----------
n_number : int
Principal quantum number
shell : list
Angular momentum
exp : scalar
Primitive Gaussian exponent
coef : scalar
Primitive Gaussian coefficient
norm : scalar
Normalization factor
origin : list
Coordinate of the nuclei
"""
self.type = type
self.atom_index = atom_index
self.origin = origin
self.n_number = n_number
self.shell = shell
self.exps = exps
self.coefs = coefs
self.norm = self.normalize()
def normalize(self):
""" method to calculate the normalization factors
"""
l,m,n = self.shell
# self.norm is a list of length equal to number primitives
norm = np.sqrt(np.power(2,2*(l+m+n)+1.5)*
np.power(self.exps,l+m+n+1.5)/
fact2(2*l-1)/fact2(2*m-1)/
fact2(2*n-1)/np.power(np.pi,1.5))
return norm
@classmethod
def spatial(cls,atom_index,origin,n_number=0,shell=(),exps=[],coefs=[]):
orbital = cls('spatial',atom_index,origin,n_number,shell,exps,coefs)
return orbital
@classmethod
def spin(cls,atom_index,origin,spin,n_number=0,shell=(),exps=[],coefs=[]):
orbital = cls('spin',atom_index,origin,n_number,shell,exps,coefs)
orbital.spin = spin
return orbital
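# Illustrative usage sketch, not from the moha repo itself. The exponents
# and coefficients are the standard STO-3G hydrogen 1s contraction, used
# here only to show the call shape of spatial() and the per-primitive
# normalization array it produces.
if __name__ == '__main__':
    h_1s = GaussianOrbital.spatial(
        atom_index=0,
        origin=[0.0, 0.0, 0.0],
        n_number=1,
        shell=(0, 0, 0),  # s-type orbital: l = m = n = 0
        exps=[3.42525091, 0.62391373, 0.16885540],
        coefs=[0.15432897, 0.53532814, 0.44463454])
    print(h_1s.norm)  # three normalization factors, one per primitive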

# ===== FILE: /scrapy_crawler/scrapy_crawler/settings.py  (repo: jmzhao/cs838-data-science, license: none) =====
# -*- coding: utf-8 -*-
# Scrapy settings for scrapy_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapy_crawler'
SPIDER_MODULES = ['scrapy_crawler.spiders']
NEWSPIDER_MODULE = 'scrapy_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapy_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapy_crawler.middlewares.ScrapyCrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapy_crawler.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrapy_crawler.pipelines.ScrapyCrawlerPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# ===== FILE: /st01.Python기초/py08반복문/py08_08_정수2개사이합계1.py  (repo: eopr12/pythonmine, license: none) =====
# ▪ Flowchart 1
# Read an integer and store it in the 'start' variable.
# Read an integer and store it in the 'end' variable.
#
# If the start value is greater than the end value:
#     find the sum by increasing i by 1 from the end value to the start value.
# Otherwise:
#     find the sum by increasing i by 1 from the start value to the end value.
#
# Print the sum.
#
#
# ▪ Flowchart 2
# Read an integer and store it in the 'start' variable.
# Read an integer and store it in the 'end' variable.
#
# If the start value is greater than the end value:
#     put the end value into temp.
#     put the start value into the end value.
#     put temp into the start value.
# Otherwise: (nothing)
#
# Find the sum by increasing i by 1 from the start value to the end value.
#
# Print the sum.
# ▪ Example run
# Enter the start value. 1
# Enter the end value. 4
# The sum from 1 to 4 is 10
#
# ▪ Example run
# Enter the start value. 4
# Enter the end value. 1
# The sum from 1 to 4 is 10
# read the start number and the end number
# 2+3+4+5+6 = 20
start = int(input("Enter the start integer."))
end = int(input("Enter the end integer."))
sum = 0
for i in range(start, end+1, 1):
sum = sum + i
print(i, end=" ")
    # If i equals end, print "=";
    # otherwise, print "+".
if i == end:
print("=", end=" ")
else:
print("+", end=" ")
print(sum)
# Approach using string concatenation
start = int(input("Enter the start integer."))
end = int(input("Enter the end integer."))
sum = 0
str1 = "" #다시 처음부터 더하지말고 기존의 str1에서 더해야 되서 for문 밖에서 정의
for i in range(start, end+1, 1):
sum = sum + i
str1 = str1 + str(i)
if i == end:
str1 = str1 + "="
else:
str1 = str1 + "+"
str1 = str1 + str(sum)
print(str1)
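# The two versions above assume start <= end; Flowchart 2 above calls for a
# swap first, so an input like (4, 1) still sums 1..4. A minimal sketch of
# that variant (assumption: prompts and the output message simply mirror the
# example runs above):
start = int(input("Enter the start integer."))
end = int(input("Enter the end integer."))
if start > end:
    start, end = end, start  # the temp-swap from Flowchart 2, in Python style
total = sum(range(start, end + 1))
print("The sum from {} to {} is {}".format(start, end, total))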

# ===== FILE: /leetcode/searchA2dMatrixIi.py  (repo: baieric/hackerRank, license: none) =====
# Solution to https://leetcode.com/problems/search-a-2d-matrix-ii/
class Solution(object):
def search(self, matrix, target, top, bottom, left, right):
print(str(top) + " " + str(bottom) + " " + str(left) + " " + str(right))
if bottom < top or right < left:
return False
if bottom - top <= 1 and right - left <= 1:
return matrix[top][left] == target or matrix[top][right] == target or matrix[bottom][left] == target or matrix[bottom][right] == target
        midx = left + (right - left)//2  # floor division keeps indices ints on Python 3
        midy = top + (bottom - top)//2
mid = matrix[midy][midx]
print(mid)
if mid == target:
return True
if mid > target:
yOOB = midy - 1 < 0
xOOB = midx - 1 < 0
if not yOOB and not xOOB:
return self.search(matrix, target, top, bottom, left, midx-1) or self.search(matrix, target, top, midy-1, midx, right)
if yOOB:
return self.search(matrix, target, top, bottom, left, midx-1)
if xOOB:
return self.search(matrix, target, top, midy-1, midx, right)
return False
else:
yOOB = midy + 1 >= len(matrix)
xOOB = midx + 1 >= len(matrix[0])
if not yOOB and not xOOB:
return self.search(matrix, target, midy + 1, bottom, left, midx) or self.search(matrix, target, top, bottom, midx+1, right)
if yOOB:
return self.search(matrix, target, top, bottom, midx+1, right)
if xOOB:
return self.search(matrix, target, midy + 1, bottom, left, midx)
return False
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
top = 0
bottom = len(matrix) - 1
left = 0
right = len(matrix[0]) - 1
return self.search(matrix, target, top, bottom, left, right)
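# Minimal illustrative check (made-up inputs, not LeetCode's test data):
if __name__ == '__main__':
    matrix = [[1, 4, 7],
              [2, 5, 8],
              [3, 6, 9]]
    print(Solution().searchMatrix(matrix, 5))   # expected: True, 5 is present
    print(Solution().searchMatrix(matrix, 10))  # expected: False, 10 is absent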

# ===== FILE: /image_upload/image_upload/urls.py  (repo: IanWang24/image, license: none) =====
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.conf import settings
from imageupload.views import post,display_images,delete
urlpatterns = [
url(r'^upload/',post, name='file-upload'),
url(r'^display/',display_images, name='display'),
path('admin/', admin.site.urls),
url(r'^delete/(?P<pk>[0-9]+)/$', delete, name='delete')
]+ static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT )

# ===== FILE: /test/test.py  (repo: cloudeyes/TestOTL, license: none) =====
from dbtransfer import TestDB
def main():
test = TestDB('dsn=test.db')
test.insert()
test.select(10)
if __name__ == '__main__':
main()

# ===== FILE: /src/products/migrations/0035_auto_20160525_1459.py  (repo: jawadiqbal/GameShop, license: none) =====
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-25 08:59
from __future__ import unicode_literals
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0034_auto_20160525_1448'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.FileField(blank=True, null=True, upload_to=products.models.upload_location),
),
]

# ===== FILE: /stackoverflow/venv/lib/python3.6/site-packages/twisted/test/test_adbapi.py  (repo: wistbean/learn_python3_spider, license: MIT) =====
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.enterprise.adbapi.
"""
from twisted.trial import unittest
import os
import stat
from twisted.enterprise.adbapi import ConnectionPool, ConnectionLost
from twisted.enterprise.adbapi import Connection, Transaction
from twisted.internet import reactor, defer, interfaces
from twisted.python.failure import Failure
from twisted.python.reflect import requireModule
simple_table_schema = """
CREATE TABLE simple (
x integer
)
"""
class ADBAPITestBase(object):
"""
Test the asynchronous DB-API code.
"""
openfun_called = {}
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Set up the database and create a connection pool pointing at it.
"""
self.startDB()
self.dbpool = self.makePool(cp_openfun=self.openfun)
self.dbpool.start()
def tearDown(self):
d = self.dbpool.runOperation('DROP TABLE simple')
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def openfun(self, conn):
self.openfun_called[conn] = True
def checkOpenfunCalled(self, conn=None):
if not conn:
self.assertTrue(self.openfun_called)
else:
self.assertIn(conn, self.openfun_called)
def test_pool(self):
d = self.dbpool.runOperation(simple_table_schema)
if self.test_failures:
d.addCallback(self._testPool_1_1)
d.addCallback(self._testPool_1_2)
d.addCallback(self._testPool_1_3)
d.addCallback(self._testPool_1_4)
d.addCallback(lambda res: self.flushLoggedErrors())
d.addCallback(self._testPool_2)
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
d.addCallback(self._testPool_6)
d.addCallback(self._testPool_7)
d.addCallback(self._testPool_8)
d.addCallback(self._testPool_9)
return d
def _testPool_1_1(self, res):
d = defer.maybeDeferred(self.dbpool.runQuery, "select * from NOTABLE")
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_2(self, res):
d = defer.maybeDeferred(self.dbpool.runOperation,
"deletexxx from NOTABLE")
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_3(self, res):
d = defer.maybeDeferred(self.dbpool.runInteraction,
self.bad_interaction)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_1_4(self, res):
d = defer.maybeDeferred(self.dbpool.runWithConnection,
self.bad_withConnection)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_2(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0, "Interaction not rolled back")
self.checkOpenfunCalled()
d.addCallback(_check)
return d
def _testPool_3(self, res):
sql = "select count(1) from simple"
inserts = []
# add some rows to simple table (runOperation)
for i in range(self.num_iterations):
sql = "insert into simple(x) values(%d)" % i
inserts.append(self.dbpool.runOperation(sql))
d = defer.gatherResults(inserts)
def _select(res):
# make sure they were added (runQuery)
sql = "select x from simple order by x";
d = self.dbpool.runQuery(sql)
return d
d.addCallback(_select)
def _check(rows):
self.assertTrue(len(rows) == self.num_iterations,
"Wrong number of rows")
for i in range(self.num_iterations):
self.assertTrue(len(rows[i]) == 1, "Wrong size row")
self.assertTrue(rows[i][0] == i, "Values not returned.")
d.addCallback(_check)
return d
def _testPool_4(self, res):
# runInteraction
d = self.dbpool.runInteraction(self.interaction)
d.addCallback(lambda res: self.assertEqual(res, "done"))
return d
def _testPool_5(self, res):
# withConnection
d = self.dbpool.runWithConnection(self.withConnection)
d.addCallback(lambda res: self.assertEqual(res, "done"))
return d
def _testPool_6(self, res):
# Test a withConnection cannot be closed
d = self.dbpool.runWithConnection(self.close_withConnection)
return d
def _testPool_7(self, res):
# give the pool a workout
ds = []
for i in range(self.num_iterations):
sql = "select x from simple where x = %d" % i
ds.append(self.dbpool.runQuery(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
def _check(result):
for i in range(self.num_iterations):
self.assertTrue(result[i][1][0][0] == i, "Value not returned")
dlist.addCallback(_check)
return dlist
def _testPool_8(self, res):
# now delete everything
ds = []
for i in range(self.num_iterations):
sql = "delete from simple where x = %d" % i
ds.append(self.dbpool.runOperation(sql))
dlist = defer.DeferredList(ds, fireOnOneErrback=True)
return dlist
def _testPool_9(self, res):
# verify simple table is empty
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0,
"Didn't successfully delete table contents")
self.checkConnect()
d.addCallback(_check)
return d
def checkConnect(self):
"""Check the connect/disconnect synchronous calls."""
conn = self.dbpool.connect()
self.checkOpenfunCalled(conn)
curs = conn.cursor()
curs.execute("insert into simple(x) values(1)")
curs.execute("select x from simple")
res = curs.fetchall()
self.assertEqual(len(res), 1)
self.assertEqual(len(res[0]), 1)
self.assertEqual(res[0][0], 1)
curs.execute("delete from simple")
curs.execute("select x from simple")
self.assertEqual(len(curs.fetchall()), 0)
curs.close()
self.dbpool.disconnect(conn)
def interaction(self, transaction):
transaction.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = transaction.fetchone()
self.assertTrue(len(row) == 1, "Wrong size row")
self.assertTrue(row[0] == i, "Value not returned.")
self.assertIsNone(transaction.fetchone(), "Too many rows")
return "done"
def bad_interaction(self, transaction):
if self.can_rollback:
transaction.execute("insert into simple(x) values(0)")
transaction.execute("select * from NOTABLE")
def withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select x from simple order by x")
for i in range(self.num_iterations):
row = curs.fetchone()
self.assertTrue(len(row) == 1, "Wrong size row")
self.assertTrue(row[0] == i, "Value not returned.")
finally:
curs.close()
return "done"
def close_withConnection(self, conn):
conn.close()
def bad_withConnection(self, conn):
curs = conn.cursor()
try:
curs.execute("select * from NOTABLE")
finally:
curs.close()
class ReconnectTestBase(object):
"""
Test the asynchronous DB-API code with reconnect.
"""
if interfaces.IReactorThreads(reactor, None) is None:
skip = "ADB-API requires threads, no way to test without them"
def extraSetUp(self):
"""
Skip the test if C{good_sql} is unavailable. Otherwise, set up the
database, create a connection pool pointed at it, and set up a simple
schema in it.
"""
if self.good_sql is None:
raise unittest.SkipTest('no good sql for reconnect test')
self.startDB()
self.dbpool = self.makePool(cp_max=1, cp_reconnect=True,
cp_good_sql=self.good_sql)
self.dbpool.start()
return self.dbpool.runOperation(simple_table_schema)
def tearDown(self):
d = self.dbpool.runOperation('DROP TABLE simple')
d.addCallback(lambda res: self.dbpool.close())
d.addCallback(lambda res: self.stopDB())
return d
def test_pool(self):
d = defer.succeed(None)
d.addCallback(self._testPool_1)
d.addCallback(self._testPool_2)
if not self.early_reconnect:
d.addCallback(self._testPool_3)
d.addCallback(self._testPool_4)
d.addCallback(self._testPool_5)
return d
def _testPool_1(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_2(self, res):
# reach in and close the connection manually
list(self.dbpool.connections.values())[0].close()
def _testPool_3(self, res):
sql = "select count(1) from simple"
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: None)
return d
def _testPool_4(self, res):
sql = "select count(1) from simple"
d = self.dbpool.runQuery(sql)
def _check(row):
self.assertTrue(int(row[0][0]) == 0, "Table not empty")
d.addCallback(_check)
return d
def _testPool_5(self, res):
self.flushLoggedErrors()
sql = "select * from NOTABLE" # bad sql
d = defer.maybeDeferred(self.dbpool.runQuery, sql)
d.addCallbacks(lambda res: self.fail('no exception'),
lambda f: self.assertFalse(f.check(ConnectionLost)))
return d
class DBTestConnector(object):
"""
A class which knows how to test for the presence of
and establish a connection to a relational database.
To enable test cases which use a central, system database,
you must create a database named DB_NAME with a user DB_USER
and password DB_PASS with full access rights to database DB_NAME.
"""
TEST_PREFIX = None # used for creating new test cases
DB_NAME = "twisted_test"
DB_USER = 'twisted_test'
DB_PASS = 'twisted_test'
DB_DIR = None # directory for database storage
nulls_ok = True # nulls supported
trailing_spaces_ok = True # trailing spaces in strings preserved
can_rollback = True # rollback supported
test_failures = True # test bad sql?
escape_slashes = True # escape \ in sql?
good_sql = ConnectionPool.good_sql
early_reconnect = True # cursor() will fail on closed connection
can_clear = True # can try to clear out tables when starting
num_iterations = 50 # number of iterations for test loops
# (lower this for slow db's)
def setUp(self):
self.DB_DIR = self.mktemp()
os.mkdir(self.DB_DIR)
if not self.can_connect():
raise unittest.SkipTest('%s: Cannot access db' % self.TEST_PREFIX)
return self.extraSetUp()
def can_connect(self):
"""Return true if this database is present on the system
and can be used in a test."""
raise NotImplementedError()
def startDB(self):
"""Take any steps needed to bring database up."""
pass
def stopDB(self):
"""Bring database down, if needed."""
pass
def makePool(self, **newkw):
"""Create a connection pool with additional keyword arguments."""
args, kw = self.getPoolArgs()
kw = kw.copy()
kw.update(newkw)
return ConnectionPool(*args, **kw)
def getPoolArgs(self):
"""Return a tuple (args, kw) of list and keyword arguments
that need to be passed to ConnectionPool to create a connection
to this database."""
raise NotImplementedError()
class SQLite3Connector(DBTestConnector):
"""
Connector that uses the stdlib SQLite3 database support.
"""
TEST_PREFIX = 'SQLite3'
escape_slashes = False
num_iterations = 1 # slow
def can_connect(self):
if requireModule('sqlite3') is None:
return False
else:
return True
def startDB(self):
self.database = os.path.join(self.DB_DIR, self.DB_NAME)
if os.path.exists(self.database):
os.unlink(self.database)
def getPoolArgs(self):
args = ('sqlite3',)
kw = {'database': self.database,
'cp_max': 1,
'check_same_thread': False}
return args, kw
class PySQLite2Connector(DBTestConnector):
"""
Connector that uses pysqlite's SQLite database support.
"""
TEST_PREFIX = 'pysqlite2'
escape_slashes = False
num_iterations = 1 # slow
def can_connect(self):
if requireModule('pysqlite2.dbapi2') is None:
return False
else:
return True
def startDB(self):
self.database = os.path.join(self.DB_DIR, self.DB_NAME)
if os.path.exists(self.database):
os.unlink(self.database)
def getPoolArgs(self):
args = ('pysqlite2.dbapi2',)
kw = {'database': self.database,
'cp_max': 1,
'check_same_thread': False}
return args, kw
class PyPgSQLConnector(DBTestConnector):
TEST_PREFIX = "PyPgSQL"
def can_connect(self):
try: from pyPgSQL import PgSQL
except: return False
try:
conn = PgSQL.connect(database=self.DB_NAME, user=self.DB_USER,
password=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('pyPgSQL.PgSQL',)
kw = {'database': self.DB_NAME, 'user': self.DB_USER,
'password': self.DB_PASS, 'cp_min': 0}
return args, kw
class PsycopgConnector(DBTestConnector):
TEST_PREFIX = 'Psycopg'
def can_connect(self):
try: import psycopg
except: return False
try:
conn = psycopg.connect(database=self.DB_NAME, user=self.DB_USER,
password=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('psycopg',)
kw = {'database': self.DB_NAME, 'user': self.DB_USER,
'password': self.DB_PASS, 'cp_min': 0}
return args, kw
class MySQLConnector(DBTestConnector):
TEST_PREFIX = 'MySQL'
trailing_spaces_ok = False
can_rollback = False
early_reconnect = False
def can_connect(self):
try: import MySQLdb
except: return False
try:
conn = MySQLdb.connect(db=self.DB_NAME, user=self.DB_USER,
passwd=self.DB_PASS)
conn.close()
return True
except:
return False
def getPoolArgs(self):
args = ('MySQLdb',)
kw = {'db': self.DB_NAME, 'user': self.DB_USER, 'passwd': self.DB_PASS}
return args, kw
class FirebirdConnector(DBTestConnector):
TEST_PREFIX = 'Firebird'
test_failures = False # failure testing causes problems
escape_slashes = False
good_sql = None # firebird doesn't handle failed sql well
can_clear = False # firebird is not so good
num_iterations = 5 # slow
def can_connect(self):
if requireModule('kinterbasdb') is None:
return False
try:
self.startDB()
self.stopDB()
return True
except:
return False
def startDB(self):
import kinterbasdb
self.DB_NAME = os.path.join(self.DB_DIR, DBTestConnector.DB_NAME)
os.chmod(self.DB_DIR, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
sql = 'create database "%s" user "%s" password "%s"'
sql %= (self.DB_NAME, self.DB_USER, self.DB_PASS);
conn = kinterbasdb.create_database(sql)
conn.close()
def getPoolArgs(self):
args = ('kinterbasdb',)
kw = {'database': self.DB_NAME, 'host': '127.0.0.1',
'user': self.DB_USER, 'password': self.DB_PASS}
return args, kw
def stopDB(self):
import kinterbasdb
conn = kinterbasdb.connect(database=self.DB_NAME,
host='127.0.0.1', user=self.DB_USER,
password=self.DB_PASS)
conn.drop_database()
def makeSQLTests(base, suffix, globals):
"""
Make a test case for every db connector which can connect.
@param base: Base class for test case. Additional base classes
will be a DBConnector subclass and unittest.TestCase
@param suffix: A suffix used to create test case names. Prefixes
are defined in the DBConnector subclasses.
"""
connectors = [PySQLite2Connector, SQLite3Connector, PyPgSQLConnector,
PsycopgConnector, MySQLConnector, FirebirdConnector]
tests = {}
for connclass in connectors:
name = connclass.TEST_PREFIX + suffix
class testcase(connclass, base, unittest.TestCase):
__module__ = connclass.__module__
testcase.__name__ = name
if hasattr(connclass, "__qualname__"):
testcase.__qualname__ = ".".join(
connclass.__qualname__.split()[0:-1] + [name])
tests[name] = testcase
globals.update(tests)
# PySQLite2Connector SQLite3ADBAPITests PyPgSQLADBAPITests
# PsycopgADBAPITests MySQLADBAPITests FirebirdADBAPITests
makeSQLTests(ADBAPITestBase, 'ADBAPITests', globals())
# PySQLite2Connector SQLite3ReconnectTests PyPgSQLReconnectTests
# PsycopgReconnectTests MySQLReconnectTests FirebirdReconnectTests
makeSQLTests(ReconnectTestBase, 'ReconnectTests', globals())
class FakePool(object):
"""
A fake L{ConnectionPool} for tests.
@ivar connectionFactory: factory for making connections returned by the
C{connect} method.
@type connectionFactory: any callable
"""
reconnect = True
noisy = True
def __init__(self, connectionFactory):
self.connectionFactory = connectionFactory
def connect(self):
"""
Return an instance of C{self.connectionFactory}.
"""
return self.connectionFactory()
def disconnect(self, connection):
"""
Do nothing.
"""
class ConnectionTests(unittest.TestCase):
"""
Tests for the L{Connection} class.
"""
def test_rollbackErrorLogged(self):
"""
If an error happens during rollback, L{ConnectionLost} is raised but
the original error is logged.
"""
class ConnectionRollbackRaise(object):
def rollback(self):
raise RuntimeError("problem!")
pool = FakePool(ConnectionRollbackRaise)
connection = Connection(pool)
self.assertRaises(ConnectionLost, connection.rollback)
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
class TransactionTests(unittest.TestCase):
"""
Tests for the L{Transaction} class.
"""
def test_reopenLogErrorIfReconnect(self):
"""
If the cursor creation raises an error in L{Transaction.reopen}, it
reconnects but log the error occurred.
"""
class ConnectionCursorRaise(object):
count = 0
def reconnect(self):
pass
def cursor(self):
if self.count == 0:
self.count += 1
raise RuntimeError("problem!")
pool = FakePool(None)
transaction = Transaction(pool, ConnectionCursorRaise())
transaction.reopen()
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
class NonThreadPool(object):
def callInThreadWithCallback(self, onResult, f, *a, **kw):
success = True
try:
result = f(*a, **kw)
except Exception:
success = False
result = Failure()
onResult(success, result)
class DummyConnectionPool(ConnectionPool):
"""
A testable L{ConnectionPool};
"""
threadpool = NonThreadPool()
def __init__(self):
"""
Don't forward init call.
"""
self._reactor = reactor
class EventReactor(object):
"""
Partial L{IReactorCore} implementation with simple event-related
methods.
@ivar _running: A C{bool} indicating whether the reactor is pretending
to have been started already or not.
@ivar triggers: A C{list} of pending system event triggers.
"""
def __init__(self, running):
self._running = running
self.triggers = []
def callWhenRunning(self, function):
if self._running:
function()
else:
return self.addSystemEventTrigger('after', 'startup', function)
def addSystemEventTrigger(self, phase, event, trigger):
handle = (phase, event, trigger)
self.triggers.append(handle)
return handle
def removeSystemEventTrigger(self, handle):
self.triggers.remove(handle)
class ConnectionPoolTests(unittest.TestCase):
"""
Unit tests for L{ConnectionPool}.
"""
def test_runWithConnectionRaiseOriginalError(self):
"""
If rollback fails, L{ConnectionPool.runWithConnection} raises the
original exception and log the error of the rollback.
"""
class ConnectionRollbackRaise(object):
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
def raisingFunction(connection):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
d = pool.runWithConnection(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_closeLogError(self):
"""
L{ConnectionPool._close} logs exceptions.
"""
class ConnectionCloseRaise(object):
def close(self):
raise RuntimeError("problem!")
pool = DummyConnectionPool()
pool._close(ConnectionCloseRaise())
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
def test_runWithInteractionRaiseOriginalError(self):
"""
If rollback fails, L{ConnectionPool.runInteraction} raises the
original exception and log the error of the rollback.
"""
class ConnectionRollbackRaise(object):
def __init__(self, pool):
pass
def rollback(self):
raise RuntimeError("problem!")
class DummyTransaction(object):
def __init__(self, pool, connection):
pass
def raisingFunction(transaction):
raise ValueError("foo")
pool = DummyConnectionPool()
pool.connectionFactory = ConnectionRollbackRaise
pool.transactionFactory = DummyTransaction
d = pool.runInteraction(raisingFunction)
d = self.assertFailure(d, ValueError)
def cbFailed(ignored):
errors = self.flushLoggedErrors(RuntimeError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.args[0], "problem!")
d.addCallback(cbFailed)
return d
def test_unstartedClose(self):
"""
If L{ConnectionPool.close} is called without L{ConnectionPool.start}
having been called, the pool's startup event is cancelled.
"""
reactor = EventReactor(False)
pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)
# There should be a startup trigger waiting.
self.assertEqual(reactor.triggers, [('after', 'startup', pool._start)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)
def test_startedClose(self):
"""
If L{ConnectionPool.close} is called after it has been started, but
not by its shutdown trigger, the shutdown trigger is cancelled.
"""
reactor = EventReactor(True)
pool = ConnectionPool('twisted.test.test_adbapi', cp_reactor=reactor)
# There should be a shutdown trigger waiting.
self.assertEqual(reactor.triggers,
[('during', 'shutdown', pool.finalClose)])
pool.close()
# But not anymore.
self.assertFalse(reactor.triggers)

# ===== FILE: /Flask/server.py  (repo: tural327/nltk_clustering-app_with_SQL, license: none) =====
import pandas as pd
import mysql.connector as sql
def add_server(msg,time,group):
connection = sql.connect(
host = "127.0.0.1",
user="root",
password='add your code',
auth_plugin = "mysql_native_password"
)
    cursor = connection.cursor()
text = "INSERT INTO `my_schema`.`table` (`message`, `time`, `group`) VALUES ('{}', '{}', '{}');".format(msg,time,group)
cursor.execute(text)
connection.commit()
table = pd.read_sql_query("SELECT * FROM my_schema.table",connection)
cursor.close()
connection.close()
return table
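# Note: add_server() above builds its INSERT with str.format(), which is
# SQL-injection-prone. A minimal parameterized sketch of the same insert,
# reusing the hypothetical schema/table names above and taking an already
# open connection instead of creating one:
def add_server_safe(msg, time, group, connection):
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO `my_schema`.`table` (`message`, `time`, `group`) "
        "VALUES (%s, %s, %s)",
        (msg, time, group))  # driver-side escaping, no string formatting
    connection.commit()
    cursor.close()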

# ===== FILE: /sdk/lusid/models/compliance_parameter.py  (repo: finbourne/lusid-sdk-python, license: MIT) =====
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 1.0.463
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class ComplianceParameter(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'compliance_parameter_type': 'str'
}
attribute_map = {
'compliance_parameter_type': 'complianceParameterType'
}
required_map = {
'compliance_parameter_type': 'required'
}
discriminator_value_class_map = {
'BoolComplianceParameter': 'BoolComplianceParameter',
'AddressKeyComplianceParameter': 'AddressKeyComplianceParameter',
'DecimalComplianceParameter': 'DecimalComplianceParameter',
'DateTimeComplianceParameter': 'DateTimeComplianceParameter',
'PortfolioGroupIdComplianceParameter': 'PortfolioGroupIdComplianceParameter',
'PortfolioGroupIdListComplianceParameter': 'PortfolioGroupIdListComplianceParameter',
'StringListComplianceParameter': 'StringListComplianceParameter',
'PropertyKeyListComplianceParameter': 'PropertyKeyListComplianceParameter',
'PortfolioIdListComplianceParameter': 'PortfolioIdListComplianceParameter',
'DateTimeListComplianceParameter': 'DateTimeListComplianceParameter',
'PropertyKeyComplianceParameter': 'PropertyKeyComplianceParameter',
'AddressKeyListComplianceParameter': 'AddressKeyListComplianceParameter',
'DecimalListComplianceParameter': 'DecimalListComplianceParameter',
'PortfolioIdComplianceParameter': 'PortfolioIdComplianceParameter',
'BoolListComplianceParameter': 'BoolListComplianceParameter',
'StringComplianceParameter': 'StringComplianceParameter'
}
def __init__(self, compliance_parameter_type=None, local_vars_configuration=None): # noqa: E501
"""ComplianceParameter - a model defined in OpenAPI"
:param compliance_parameter_type: The parameter type. The available values are: BoolComplianceParameter, StringComplianceParameter, DecimalComplianceParameter, DateTimeComplianceParameter, PropertyKeyComplianceParameter, AddressKeyComplianceParameter, PortfolioIdComplianceParameter, PortfolioGroupIdComplianceParameter, StringListComplianceParameter, BoolListComplianceParameter, DateTimeListComplianceParameter, DecimalListComplianceParameter, PropertyKeyListComplianceParameter, AddressKeyListComplianceParameter, PortfolioIdListComplianceParameter, PortfolioGroupIdListComplianceParameter (required)
:type compliance_parameter_type: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._compliance_parameter_type = None
self.discriminator = 'compliance_parameter_type'
self.compliance_parameter_type = compliance_parameter_type
@property
def compliance_parameter_type(self):
"""Gets the compliance_parameter_type of this ComplianceParameter. # noqa: E501
The parameter type. The available values are: BoolComplianceParameter, StringComplianceParameter, DecimalComplianceParameter, DateTimeComplianceParameter, PropertyKeyComplianceParameter, AddressKeyComplianceParameter, PortfolioIdComplianceParameter, PortfolioGroupIdComplianceParameter, StringListComplianceParameter, BoolListComplianceParameter, DateTimeListComplianceParameter, DecimalListComplianceParameter, PropertyKeyListComplianceParameter, AddressKeyListComplianceParameter, PortfolioIdListComplianceParameter, PortfolioGroupIdListComplianceParameter # noqa: E501
:return: The compliance_parameter_type of this ComplianceParameter. # noqa: E501
:rtype: str
"""
return self._compliance_parameter_type
@compliance_parameter_type.setter
def compliance_parameter_type(self, compliance_parameter_type):
"""Sets the compliance_parameter_type of this ComplianceParameter.
The parameter type. The available values are: BoolComplianceParameter, StringComplianceParameter, DecimalComplianceParameter, DateTimeComplianceParameter, PropertyKeyComplianceParameter, AddressKeyComplianceParameter, PortfolioIdComplianceParameter, PortfolioGroupIdComplianceParameter, StringListComplianceParameter, BoolListComplianceParameter, DateTimeListComplianceParameter, DecimalListComplianceParameter, PropertyKeyListComplianceParameter, AddressKeyListComplianceParameter, PortfolioIdListComplianceParameter, PortfolioGroupIdListComplianceParameter # noqa: E501
:param compliance_parameter_type: The compliance_parameter_type of this ComplianceParameter. # noqa: E501
:type compliance_parameter_type: str
"""
if self.local_vars_configuration.client_side_validation and compliance_parameter_type is None: # noqa: E501
raise ValueError("Invalid value for `compliance_parameter_type`, must not be `None`") # noqa: E501
allowed_values = ["BoolComplianceParameter", "StringComplianceParameter", "DecimalComplianceParameter", "DateTimeComplianceParameter", "PropertyKeyComplianceParameter", "AddressKeyComplianceParameter", "PortfolioIdComplianceParameter", "PortfolioGroupIdComplianceParameter", "StringListComplianceParameter", "BoolListComplianceParameter", "DateTimeListComplianceParameter", "DecimalListComplianceParameter", "PropertyKeyListComplianceParameter", "AddressKeyListComplianceParameter", "PortfolioIdListComplianceParameter", "PortfolioGroupIdListComplianceParameter"] # noqa: E501
if self.local_vars_configuration.client_side_validation and compliance_parameter_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `compliance_parameter_type` ({0}), must be one of {1}" # noqa: E501
.format(compliance_parameter_type, allowed_values)
)
self._compliance_parameter_type = compliance_parameter_type
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ComplianceParameter):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ComplianceParameter):
return True
return self.to_dict() != other.to_dict()
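# Illustrative sketch of how the discriminator resolves a subclass name; the
# payload is made up, and 'complianceParameterType' is the wire-format key
# from attribute_map above:
if __name__ == '__main__':
    payload = {'complianceParameterType': 'StringComplianceParameter'}
    parent = ComplianceParameter(
        compliance_parameter_type='StringComplianceParameter')
    print(parent.get_real_child_model(payload))  # -> 'StringComplianceParameter'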

# ===== FILE: /assessment3.py  (repo: ShinsakuOkazaki/PythonPractice, license: none) =====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 00:02:52 2018
@author: sinsakuokazaki
"""
x = 9
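# Note: x goes 9 -> 4 -> -1 -> -6 -> ...; it never equals 0, so the
# while condition stays truthy and this loop never terminates.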
while x:
x -= 5
    print(x, end=',')

# ===== FILE: /Detect/urls.py  (repo: JackInTaiwan/Internet-of-Cars, license: none) =====
from django.urls import path
import Detect.views
urlpatterns = [
path("detect", Detect.views.detect),
]

# ===== FILE: /count_bin/countbin.py  (repo: wolfsonliu/biotools, license: none) =====
#! /usr/bin/python3
#-*-coding:utf-8-*-
#####main file to calculate the distribution
import argparse
import time
import os
import bincount
workdir = os.getcwd() + '/'  # current working directory (more robust than parsing `pwd`)
#####argument settings
parser = argparse.ArgumentParser(prog = "countbin", #using %(prog)s to get the program name later.
                                 description = "Count numbers of reads in bins")
#parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2],default=0
# help="increase output verbosity")
#action="store_true" "count"
#group = parser.add_mutually_exclusive_group()
#group.add_argument("-v", "--verbose", action="store_true")
#group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("--situation",
type = str,
default = "countbin.out",
help = "run situation file, default to be countbin.out.")
parser.add_argument("-S", "--sam",
type = str,
help = "setting input sam file.")
parser.add_argument("-G", "--genome",
type = str,
default = "genome.csv",
help = "setting input genome file, default is genome.csv")
parser.add_argument("-d", "--directory",
type = str,
default = workdir,
help = "setting work directory, default is current working directory.")
parser.add_argument("-b", "--bin",
type = str,
default = '1000',
help = "setting bin size, use ',' to seperate different bin size without space, default is 1000.")
args = parser.parse_args()
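# Example invocation (file names and bin sizes are illustrative only):
#     python countbin.py -S sample.sam -G genome.csv -b 1000,10000 -d ./work/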
#####program initiation
situationfile = open(args.situation, 'w+') #open situation file to output running situation
binsize = [int(i) for i in args.bin.split(',')] #transform type to int
####setting work directory
if args.sam[0:2] == './': # explicitly relative path: use it as-is
    workdirectory = ''
elif args.sam[0] == '/': # absolute path: use it as-is
    workdirectory = ''
else:
    workdirectory = args.directory
dist = {} #initiate dist dict
#####run count bin reads numbers
for bin_size in binsize: # renamed from `bin` to avoid shadowing the builtin
    samfile = workdirectory + args.sam
    genomefile = workdirectory + args.genome
    outfile = args.directory + str(bin_size) + ".csv"
    dist[str(bin_size)] = bincount.Distribution(genomefile, bin = bin_size) #initiate genome setting
    dist[str(bin_size)].calculate(samfile) #count bin reads
    time.sleep(60) #wait
    dist[str(bin_size)].writefile(outfile) #output result
    situationfile.write('{0} is over.\n'.format(bin_size)) #situation file output
    time.sleep(10)
situationfile.close()
#####EOF
| [
"[email protected]"
] | |
ecdd644e145bd1d3ecd8442edcc3db63a37dfb39 | b3256db9a97fef697e77852ca8358760a4b491b9 | /aix_server_env.py | 667df33361426f263863dcc3144686a84f588959 | [] | no_license | angrek/dashboard | f6e36f6f1ad3ec0b2231e22b1a6be5c4c1843935 | 40f6602aeb569e49dfb68cbe99b5b207da1fd430 | refs/heads/master | 2020-04-05T14:07:07.025675 | 2016-09-16T19:07:43 | 2016-09-16T19:07:43 | 23,161,596 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | #!/home/wrehfiel/ENV/bin/python2.7
#########################################################################
#
# Script to check for env/server.env
#
# Boomer Rehfield - 2/24/2015
#
#########################################################################
import os
from multiprocessing import Pool
from paramiko import SSHClient
# these are needed in django 1.7 and up, as opposed to the django settings command
from django.utils import timezone
import django
# settings must be configured before any models are imported
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dashboard.settings')
django.setup()
from server.models import AIXServer
import utilities
def update_server(server):
if utilities.ping(server):
client = SSHClient()
if utilities.ssh(server, client):
stdin, stdout, stderr = client.exec_command(' [ -f /etc/env/server.env ] && echo 1 || echo 0')
test = stdout.readlines()
print "------------------------------------------------------------"
if int(test[0]) == 0:
print "server " + server.name + " env files does NOT exist*********************"
AIXServer.objects.filter(name=server).update(server_env=0)
elif int(test[0]) == 1:
print "server " + server.name + " is good."
AIXServer.objects.filter(name=server).update(server_env=1)
stdin, stdout, stderr = client.exec_command('cat /etc/env/server.env')
test = stdout.readlines()
output = ''
for line in test:
output = output + line
print output
AIXServer.objects.filter(name=server).update(server_env_text=output)
            else:
                print "server " + server.name + " has no idea what it's doing."
        client.close()
if __name__ == '__main__':
print "Checking for server.env files..."
starting_time = timezone.now()
server_list = AIXServer.objects.filter(decommissioned=False)
pool = Pool(10)
pool.map(update_server, server_list)
elapsed_time = timezone.now() - starting_time
print "Elapsed time: " + str(elapsed_time)
| [
"[email protected]"
] | |
5e5de9a7316307b921d645e0d9a9913b84936458 | d1e1af5f517f2f971cc2a6398390054c86f13f98 | /medianblr.py | 1c2cdcae1ed141d98eee6b96b4fca58bef166bce | [] | no_license | vikasK1640A02/opencv | c5b5bb54bced5cc01cbc72b9cf45bb14e01b7952 | ba6f9c50c66d21ed0863b013e85f463b24cdbd86 | refs/heads/master | 2020-05-18T14:06:52.367193 | 2019-05-01T18:28:01 | 2019-05-01T18:28:01 | 184,461,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | import cv2
img=cv2.imread('img/love.jpg')
kernal=3
median=cv2.medianBlur(img,kernal)
cv2.imshow('median',median)
cv2.waitKey(0)
cv2.destroyAllWindow() | [
"[email protected]"
] | |
b69c847e357e91c7e4874f40aedb89e9703aa92b | 990b5b545215e622c8b3bb614126a96945582e4e | /web_scrapper/code_challenge/day12/Day12_answer/scrapper.py | de67253d6aadb307e40d31edf099e8521a1a4330 | [] | no_license | WONILLISM/Study | 07448811d318ed0607c10531c693fe6f5bd24d9e | 0af974f2b28604a55f3b996a4e7f024182d940bc | refs/heads/master | 2023-02-15T06:59:29.571722 | 2020-07-10T07:57:09 | 2020-07-10T07:57:09 | 265,529,543 | 0 | 0 | null | 2021-01-06T10:46:58 | 2020-05-20T10:24:48 | Python | UTF-8 | Python | false | false | 1,727 | py | import requests
from bs4 import BeautifulSoup
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'}
def extract_post(html, subreddit):
votes = html.find("div", {"class":"_1rZYMD_4xY3gRcSS3p8ODO"})
if votes:
votes = votes.string
title = html.find("h3", {"class":"_eYtD2XCVieq6emjKBH3m"})
if title:
title = title.string
link = html.find("a", {"class":"SQnoC3ObvgnGjWt90zD9Z _2INHSNB8V5eaWp4P0rY_mE"})
if link:
link = link['href']
if votes and title and link:
return {'votes':int(votes), 'title':title, 'link':link, 'subreddit':subreddit}
else:
return None
def scrape_subreddit(subreddit):
all_posts = []
try:
url = f"https://www.reddit.com/r/{subreddit}/top/?t=month"
request = requests.get(url, headers=headers)
soup = BeautifulSoup(request.text, "html.parser")
post_container = soup.find("div", {"class":"rpBJOHq2PR60pnwJlUyP0"})
if post_container:
posts = post_container.find_all("div", {"class": None}, recursive=False)
for post in posts:
exctracted_post = extract_post(post, subreddit)
if exctracted_post:
all_posts.append(exctracted_post)
except Exception:
pass
return all_posts
def aggregate_subreddits(subreddits):
aggregated = []
for subreddit in subreddits:
posts = scrape_subreddit(subreddit)
aggregated = aggregated + posts
return aggregated
def check_subreddit(to_check):
try:
check_request = requests.get(f"https://reddit.com/r/{to_check}", headers=headers)
if check_request.status_code == 200:
return True
else:
return False
except Exception as e:
return False | [
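# Minimal usage sketch (subreddit names are placeholders, not from the original file):
# subs = [s for s in ("learnpython", "django") if check_subreddit(s)]
# top = sorted(aggregate_subreddits(subs), key=lambda p: p["votes"], reverse=True)
# print(top[:5])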
"[email protected]"
] | |
f74fcde2553e9b77a315cd756af4dea80d595451 | bf61a24e89a5a21b3fb356a4acd3252d18b63d93 | /select_image_gui.py | d19a7c13b90bae5fcf68a35c0d15bd088fcfb7fc | [] | no_license | kaizen123/Mulimg_viewer | b2f5d5b803f1d42bcf78800726877513661dede2 | 357c08c04dd99eb3ab18fc864ac8120f3110b294 | refs/heads/master | 2022-11-29T22:41:56.755477 | 2020-08-14T08:04:15 | 2020-08-14T08:04:15 | 287,480,838 | 1 | 0 | null | 2020-08-14T08:15:21 | 2020-08-14T08:15:20 | null | UTF-8 | Python | false | false | 12,307 | py | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class SelectImgFrameGui
###########################################################################
class SelectImgFrameGui ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Mulimg viewer", pos = wx.DefaultPosition, size = wx.Size( 1000,445 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
fgSizer1 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer1.SetFlexibleDirection( wx.BOTH )
fgSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
gbSizer1 = wx.GridBagSizer( 0, 0 )
gbSizer1.SetFlexibleDirection( wx.BOTH )
gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_panel1 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
bSizer1_1 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText1 = wx.StaticText( self.m_panel1, wx.ID_ANY, u"Control", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText1.Wrap( -1 )
bSizer1_1.Add( self.m_staticText1, 0, wx.ALL, 5 )
self.m_button1 = wx.Button( self.m_panel1, wx.ID_ANY, u"Next", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1_1.Add( self.m_button1, 0, wx.ALL, 5 )
self.m_button2 = wx.Button( self.m_panel1, wx.ID_ANY, u"Last", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1_1.Add( self.m_button2, 0, wx.ALL, 5 )
self.m_button3 = wx.Button( self.m_panel1, wx.ID_ANY, u"Save", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1_1.Add( self.m_button3, 0, wx.ALL, 5 )
bSizer1.Add( bSizer1_1, 1, wx.EXPAND, 5 )
bSizer1_2 = wx.BoxSizer( wx.HORIZONTAL )
self.m_slider1 = wx.Slider( self.m_panel1, wx.ID_ANY, 0, 0, 100, wx.DefaultPosition, wx.Size( 215,-1 ), 0 )
bSizer1_2.Add( self.m_slider1, 0, wx.ALL, 5 )
self.slider_value = wx.StaticText( self.m_panel1, wx.ID_ANY, u"0", wx.DefaultPosition, wx.DefaultSize, 0 )
self.slider_value.Wrap( -1 )
bSizer1_2.Add( self.slider_value, 0, wx.ALL, 5 )
self.m_button4 = wx.Button( self.m_panel1, wx.ID_ANY, u"refresh", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1_2.Add( self.m_button4, 0, wx.ALL, 5 )
bSizer1.Add( bSizer1_2, 1, wx.EXPAND, 5 )
self.m_panel1.SetSizer( bSizer1 )
self.m_panel1.Layout()
bSizer1.Fit( self.m_panel1 )
gbSizer1.Add( self.m_panel1, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.EXPAND |wx.ALL, 5 )
self.m_panel2 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
wSizer2 = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
wSizer2.SetMinSize( wx.Size( 650,-1 ) )
fgSizer2 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer2.SetFlexibleDirection( wx.BOTH )
fgSizer2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText3 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"Setting", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText3.Wrap( -1 )
fgSizer2.Add( self.m_staticText3, 0, wx.ALL, 5 )
m_choice1Choices = [ u"Each img", u"Stitch img", u"Each + Stitch" ]
self.m_choice1 = wx.Choice( self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.Size( 120,-1 ), m_choice1Choices, 0 )
self.m_choice1.SetSelection( 0 )
fgSizer2.Add( self.m_choice1, 0, wx.ALL, 5 )
self.auto_save_all = wx.CheckBox( self.m_panel2, wx.ID_ANY, u"Auto save all !", wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer2.Add( self.auto_save_all, 0, wx.ALL, 5 )
wSizer2.Add( fgSizer2, 1, wx.EXPAND, 5 )
fgSizer3 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer3.SetFlexibleDirection( wx.BOTH )
fgSizer3.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText8 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"num per row", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText8.Wrap( -1 )
fgSizer3.Add( self.m_staticText8, 0, wx.ALL, 5 )
self.img_num_per_row = wx.TextCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer3.Add( self.img_num_per_row, 0, wx.ALL, 5 )
self.checkBox_orientation = wx.CheckBox( self.m_panel2, wx.ID_ANY, u"Vertical", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer3.Add( self.checkBox_orientation, 0, wx.ALL, 5 )
wSizer2.Add( fgSizer3, 1, wx.EXPAND, 5 )
fgSizer4 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer4.SetFlexibleDirection( wx.BOTH )
fgSizer4.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText6 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"num per img", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText6.Wrap( -1 )
fgSizer4.Add( self.m_staticText6, 0, wx.ALL, 5 )
self.num_per_img = wx.TextCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer4.Add( self.num_per_img, 0, wx.ALL, 5 )
self.auto_layout = wx.CheckBox( self.m_panel2, wx.ID_ANY, u"Auto layout", wx.DefaultPosition, wx.DefaultSize, 0 )
self.auto_layout.SetValue(True)
fgSizer4.Add( self.auto_layout, 0, wx.ALL, 5 )
wSizer2.Add( fgSizer4, 1, wx.EXPAND, 5 )
fgSizer5 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer5.SetFlexibleDirection( wx.BOTH )
fgSizer5.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText5 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"num per column", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
fgSizer5.Add( self.m_staticText5, 0, wx.ALL, 5 )
self.img_num_per_column = wx.TextCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
fgSizer5.Add( self.img_num_per_column, 0, wx.ALL, 5 )
self.m_colourPicker1 = wx.ColourPickerCtrl( self.m_panel2, wx.ID_ANY, wx.BLACK, wx.DefaultPosition, wx.DefaultSize, wx.CLRP_DEFAULT_STYLE )
fgSizer5.Add( self.m_colourPicker1, 0, wx.ALL, 5 )
wSizer2.Add( fgSizer5, 1, wx.EXPAND, 5 )
fgSizer6 = wx.FlexGridSizer( 0, 1, 0, 0 )
fgSizer6.SetFlexibleDirection( wx.BOTH )
fgSizer6.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText81 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"gap(pixel)", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText81.Wrap( -1 )
fgSizer6.Add( self.m_staticText81, 0, wx.ALL, 5 )
self.gap = wx.TextCtrl( self.m_panel2, wx.ID_ANY, u"5", wx.DefaultPosition, wx.DefaultSize, 0 )
fgSizer6.Add( self.gap, 0, wx.ALL, 5 )
self.m_staticText9 = wx.StaticText( self.m_panel2, wx.ID_ANY, u"Gap fill color", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText9.Wrap( -1 )
fgSizer6.Add( self.m_staticText9, 0, wx.ALL, 5 )
wSizer2.Add( fgSizer6, 1, wx.EXPAND, 5 )
self.m_panel2.SetSizer( wSizer2 )
self.m_panel2.Layout()
wSizer2.Fit( self.m_panel2 )
gbSizer1.Add( self.m_panel2, wx.GBPosition( 0, 3 ), wx.GBSpan( 1, 1 ), wx.EXPAND |wx.ALL, 5 )
fgSizer1.Add( gbSizer1, 1, wx.EXPAND, 5 )
self.m_staticText7 = wx.StaticText( self, wx.ID_ANY, u"Image show area", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText7.Wrap( -1 )
fgSizer1.Add( self.m_staticText7, 0, wx.ALL, 5 )
self.m_scrolledWindow1 = wx.ScrolledWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 100,100 ), wx.HSCROLL|wx.VSCROLL )
self.m_scrolledWindow1.SetScrollRate( 5, 5 )
img_Sizer = wx.GridBagSizer( 0, 0 )
img_Sizer.SetFlexibleDirection( wx.BOTH )
img_Sizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_scrolledWindow1.SetSizer( img_Sizer )
self.m_scrolledWindow1.Layout()
fgSizer1.Add( self.m_scrolledWindow1, 1, wx.EXPAND |wx.ALL, 5 )
self.SetSizer( fgSizer1 )
self.Layout()
self.m_menubar1 = wx.MenuBar( 0 )
self.MyMenu = wx.Menu()
self.m_menu1 = wx.Menu()
self.m_menu21 = wx.Menu()
self.m_menuItem1 = wx.MenuItem( self.m_menu21, wx.ID_ANY, u"Auto", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu21.Append( self.m_menuItem1 )
self.m_menuItem2 = wx.MenuItem( self.m_menu21, wx.ID_ANY, u"manual", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu21.Append( self.m_menuItem2 )
self.m_menu1.AppendSubMenu( self.m_menu21, u"One dir mul subdir" )
self.m_menuItem3 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"One dir mul img", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu1.Append( self.m_menuItem3 )
self.MyMenu.AppendSubMenu( self.m_menu1, u"Input path" )
self.m_menuItem4 = wx.MenuItem( self.MyMenu, wx.ID_ANY, u"Out path", wx.EmptyString, wx.ITEM_NORMAL )
self.MyMenu.Append( self.m_menuItem4 )
self.m_menubar1.Append( self.MyMenu, u"File" )
self.m_menu2 = wx.Menu()
self.menu_next = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"Next\tCtrl+N", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_next )
self.menu_last = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"Last\tCtrl+L", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_last )
self.menu_save = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"Save\tCtrl+S", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_save )
self.menu_up = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"Up", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_up )
self.menu_down = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"Down", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_down )
self.menu_right = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"Right", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_right )
self.menu_left = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"left", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu2.Append( self.menu_left )
self.m_menubar1.Append( self.m_menu2, u"Edit" )
self.m_menu3 = wx.Menu()
self.menu_about = wx.MenuItem( self.m_menu3, wx.ID_ANY, u"About", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu3.Append( self.menu_about )
self.m_menubar1.Append( self.m_menu3, u"Help" )
self.SetMenuBar( self.m_menubar1 )
self.m_statusBar1 = self.CreateStatusBar( 4, wx.STB_SIZEGRIP, wx.ID_ANY )
self.Centre( wx.BOTH )
# Connect Events
self.m_button1.Bind( wx.EVT_BUTTON, self.next_img )
self.m_button2.Bind( wx.EVT_BUTTON, self.last_img )
self.m_button3.Bind( wx.EVT_BUTTON, self.save_img )
self.m_slider1.Bind( wx.EVT_SCROLL, self.skip_to_n_img )
self.m_button4.Bind( wx.EVT_BUTTON, self.refresh )
self.m_colourPicker1.Bind( wx.EVT_COLOURPICKER_CHANGED, self.colour_change )
self.Bind( wx.EVT_MENU, self.one_dir_mul_dir_auto, id = self.m_menuItem1.GetId() )
self.Bind( wx.EVT_MENU, self.one_dir_mul_dir_manual, id = self.m_menuItem2.GetId() )
self.Bind( wx.EVT_MENU, self.one_dir_mul_img, id = self.m_menuItem3.GetId() )
self.Bind( wx.EVT_MENU, self.out_path, id = self.m_menuItem4.GetId() )
self.Bind( wx.EVT_MENU, self.next_img, id = self.menu_next.GetId() )
self.Bind( wx.EVT_MENU, self.last_img, id = self.menu_last.GetId() )
self.Bind( wx.EVT_MENU, self.save_img, id = self.menu_save.GetId() )
self.Bind( wx.EVT_MENU, self.up_img, id = self.menu_up.GetId() )
self.Bind( wx.EVT_MENU, self.down_img, id = self.menu_down.GetId() )
self.Bind( wx.EVT_MENU, self.right_img, id = self.menu_right.GetId() )
self.Bind( wx.EVT_MENU, self.left_img, id = self.menu_left.GetId() )
self.Bind( wx.EVT_MENU, self.about_gui, id = self.menu_about.GetId() )
def __del__( self ):
pass
# Virtual event handlers, overide them in your derived class
def next_img( self, event ):
event.Skip()
def last_img( self, event ):
event.Skip()
def save_img( self, event ):
event.Skip()
def skip_to_n_img( self, event ):
event.Skip()
def refresh( self, event ):
event.Skip()
def colour_change( self, event ):
event.Skip()
def one_dir_mul_dir_auto( self, event ):
event.Skip()
def one_dir_mul_dir_manual( self, event ):
event.Skip()
def one_dir_mul_img( self, event ):
event.Skip()
def out_path( self, event ):
event.Skip()
def up_img( self, event ):
event.Skip()
def down_img( self, event ):
event.Skip()
def right_img( self, event ):
event.Skip()
def left_img( self, event ):
event.Skip()
def about_gui( self, event ):
event.Skip()
| [
"[email protected]"
] | |
daa233e4b8cb4ce70c932fcb7552e17fb17cdb67 | 41d7f856e255fa4fb85e68c64103006c7d0cdca7 | /codings/DeltaAdaptiveEncoder.py | bbc7bf801bf5660c92785a26b894995ce7d305d0 | [] | no_license | GeKeShi/CASQ | e7c1674f3b11664866ee6b54bf96824a0fbaa026 | 2e2407f6943d349850d7de40ba58e540acefc19f | refs/heads/main | 2023-05-03T23:21:34.814199 | 2021-05-25T12:30:03 | 2021-05-25T12:30:03 | 370,684,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # -*- coding: utf-8 -*
class DeltaAdaptiveEncoder(object):
def encode(self, index):
pass
def decode(self, encoded_index):
pass | [
"[email protected]"
] | |
0554e1bd885c3a3f07dedb5cb3a41585cf78b610 | 0951c90e90febdc9d7e5cfbc63af7c78a0206269 | /common/desired_caps.py | 9eeb37a3e625f87013fde0d34806c2d4f3e3a933 | [] | no_license | iliun/uiTest | b860a278ccfcc17839f98c4f9f697d91ffcba4f0 | 3211b734b72cb21ba86db6312dd2ee65b935ca2c | refs/heads/master | 2022-09-13T13:39:00.568370 | 2020-06-03T07:27:19 | 2020-06-03T07:27:19 | 268,772,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | #coding=utf-8
from appium import webdriver
import os
import yaml
import logging
import logging.config
CON_LOG_PATH = os.path.join(os.path.dirname(__file__), '..', 'config', 'log.conf')
logging.config.fileConfig(CON_LOG_PATH)
logging = logging.getLogger()
def appium_desired():
with open("../config/caps.yaml",'r',encoding='utf-8') as file:
data = yaml.load(file,Loader=yaml.FullLoader)
    desired_caps = {}
    desired_caps['platformName'] = data['platformName']
    desired_caps['platformVersion'] = str(data['platformVersion'])
    desired_caps['deviceName'] = data['deviceName']
    desired_caps['udid'] = data['udid']
    dir_name = os.path.dirname(os.path.dirname(__file__))
    app_path = os.path.join(dir_name, 'app', data['app'])
    desired_caps['app'] = app_path
    desired_caps['appActivity'] = data['appActivity']
    desired_caps['appPackage'] = data['appPackage']
    desired_caps['noReset'] = data['noReset']
    desired_caps['unicodeKeyboard'] = data['unicodeKeyboard']
    desired_caps['resetKeyboard'] = data['resetKeyboard']
    desired_caps['ip'] = data['ip']
    desired_caps['port'] = data['port']
    logging.info("Launching the app")
driver = webdriver.Remote('http://'+str(data['ip'])+':'+str(data['port'])+'/wd/hub',desired_caps)
return driver
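# Illustrative shape of ../config/caps.yaml (the keys match those read above;
# all values are placeholders, not from the original project):
# platformName: Android
# platformVersion: 9
# deviceName: emulator-5554
# udid: emulator-5554
# app: demo.apk
# appPackage: com.example.app
# appActivity: .ui.MainActivity
# noReset: true
# unicodeKeyboard: true
# resetKeyboard: true
# ip: 127.0.0.1
# port: 4723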
if __name__ == '__main__':
appium_desired() | [
"[email protected]"
] | |
191b66f611aca8aa39a4d080bbf6434c8c04c134 | a95c5504bf4698d80050a923ce6d5f4ed85f6e19 | /01_NLRM_s_prirozene_konjugovanou_apriorni_hustotou/cv03_pr01_CAPM.py | 1a4feb71f7901fb17630bdf2434f20b3a86e8975 | [] | no_license | JanMelicharik/baan_python | e0ff0e772c293e529433dbce58534d0519edc78f | 0d50f4047c4c1aae716de9dc531d6c7991f2fbb9 | refs/heads/master | 2023-03-07T15:25:24.554842 | 2021-02-20T19:03:16 | 2021-02-20T19:03:16 | 303,178,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,478 | py | # Nastaveni cesty do domovske slozky
import sys
root_dir = "/".join(sys.argv[0].split("/")[:-2])
sys.path.insert(1, root_dir + "/support")
# Importy podpurnych funkci ze slozky Support
from my_nlrm import my_nlrm
from gamm_rnd_koop import gamm_rnd_koop
from norm_rnd import norm_rnd
from progress_info import progress_bar
from tabulate import tabulate
from math import exp
from scipy.stats import t as student
from numpy import ones, transpose as t
import pandas as pd
import numpy as np
# ! Note on this script: !
# The variables beta_0 and cov_beta_0 must be supplied as elements of a 2-D
# matrix even when they are only scalars (i.e., vectors of length 1).
# Elements of a 2-D matrix are written as a list of lists --> [[1]]
# Two ways of loading the data (for a CSV file, the column delimiter
# and the decimal-separator character must be specified).
data = pd.read_excel(root_dir + "/data/capm2_data.xlsx")
# data = pd.read_csv(root_dir + "/data/capm2_data.csv", delimiter=";", decimal=",")
# ===== 1. Model estimation =====
y_0 = pd.DataFrame({"y": data.GM - data.RKFREE})
x_0 = pd.DataFrame({"const": 1, "x": data.MKT - data.RKFREE})
y_0 = np.array(y_0)
x_0 = np.array(x_0)
# Setting the prior hyperparameters
# beta|h ~ N(beta_0,V_0)
# h ~ G(h_0,nu_0)
s2_0 = 0.2 ** 2 # scalar
h_0 = 1 / s2_0 # scalar
nu_0 = 10 # scalar
beta_0 = np.array([[0],[1]]) # vector (a 2x1 matrix)
cov_beta_0 = np.diag([0.05 ** 2, 0.5 ** 2]) # matrix
v_0 = cov_beta_0 * (nu_0 - 2) / nu_0 * h_0 # matrix
res_gm = my_nlrm(y_0, x_0, beta_0, v_0, h_0, nu_0)
headers = ["Parametr", "Prior", "Prior std.", "Posterior", "Posterior std."]
table_1 = [
[
"Alpha",
res_gm["beta_0"][0][0], # prvni [0] bere prvni prvek vektoru
res_gm["b0_std"][0][0], # druha [0] rozbaluje list
round(res_gm["beta_1"][0][0], 4), # (jde pouze o vizualni stranku)
round(res_gm["b1_std"][0][0], 4) # (odstranuje hranate zavorky)
],
[
"Beta",
res_gm["beta_0"][1][0],
res_gm["b0_std"][1][0],
round(res_gm["beta_1"][1][0], 4),
round(res_gm["b1_std"][1][0], 4)
],
[
"h",
round(res_gm["h_0"], 4),
round(res_gm["h0_std"], 4),
round(res_gm["h_1"][0][0], 4),
round(res_gm["h1_std"], 4)
]
]
print("Odhad NLRM s NCP:")
print(tabulate(table_1, headers, tablefmt="pretty"), "\n")
# ===== 2. Test of the hypothesis that beta = 1 =====
y_1 = np.array(pd.DataFrame({"y": data.GM - data.MKT}))
x_1 = ones(y_1.shape)
# inputs to the my_nlrm function must be matrices, see the note above
beta_0_rest = np.array([beta_0[0]])
v_0_rest = np.array([[v_0[0][0]]])
res_gm_rest = my_nlrm(y_1, x_1, beta_0_rest, v_0_rest, h_0, nu_0)
table_2 = [
[
"Alpha",
res_gm_rest["beta_0"][0][0],
res_gm_rest["b0_std"][0][0],
round(res_gm_rest["beta_1"][0][0], 4),
round(res_gm_rest["b1_std"][0][0], 4)
],
[
"h",
round(res_gm_rest["h_0"], 4),
round(res_gm_rest["h0_std"], 4),
round(res_gm_rest["h_1"][0][0], 4),
round(res_gm_rest["h1_std"], 4)
]
]
print("Omezeny model (beta = 1):")
print(tabulate(table_2, headers, tablefmt="pretty"), "\n")
log_bf = res_gm_rest["log_ml"] - res_gm["log_ml"]
bf = exp(log_bf) # Bayesuv faktor (odlogaritmujeme predchozi vyraz)
print("Bayesuv faktor porovnavajici omezeny a neomezeny model:")
print(f"BF = {round(bf, 4)}", "\n")
# ===== 3. Hypoteza, ze beta > 1 =====
mc = 100_000 # pocet simulaci (zvyste napr. na 10_000)
beta_sim = np.array([[],[]])
print("Vypocet pravd., ze beta > 1 pomoci simulace:")
for i in range(mc):
h_sim = float(gamm_rnd_koop(res_gm["h_1"], res_gm["nu_1"], (1,1)))
new_column = norm_rnd(1/h_sim * res_gm["v_1"]) + res_gm["beta_1"]
beta_sim = np.append(beta_sim, new_column, axis=1)
progress_bar(i, mc)
# Vypocet pravd. beta > 1
pr_beta = sum(t(beta_sim > 1))[1] / mc
print(f"Pravdepodobnost, ze beta > 1:")
print(f"Pr. = {round(pr_beta, 4)}")
# Analyticky vypocet pravdepodobnost
# a) standardizace skalovaneho t-rozdeleni (p(beta|y)) pro druhy prvek vektoru parametru beta
zscore = float((1 - res_gm["beta_1"][1]) / res_gm["b1_std"][1])
# b) vypocet odpovidajiciho kvantilu ze standardizovaneho centrovaneho t-rozdeleni
pr_beta_analyticky = 1 - student.cdf(zscore, res_gm["nu_1"])
print(f"Pr. = {round(pr_beta_analyticky, 4)} (analyticky)")
| [
"[email protected]"
] | |
3f7ce7cb74b715c3703912a7e9ead4849add7895 | b1c713eeac247133de57808789f371f1f7607786 | /Jogos/migrations/0004_alter_jogo_jogo_data.py | f1f80b35e23e1725276bd1d9dead3be5a3ba3205 | [] | no_license | welligtonlins/Tesi_2 | 6dd55ac61de20c10b292818e456b1f7973b7c0b0 | b6575ca832bd4ea06b5723693a3c8f1d5d9f9dae | refs/heads/main | 2023-09-01T20:03:52.260577 | 2021-10-05T22:03:25 | 2021-10-05T22:03:25 | 413,992,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # Generated by Django 3.2.7 on 2021-10-04 22:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Jogos', '0003_alter_jogo_jogo_data'),
]
operations = [
migrations.AlterField(
model_name='jogo',
name='Jogo_Data',
field=models.DateField(),
),
]
| [
"[email protected]"
] | |
d6125a922b7ce93233cdb821efa9fe109e52fa2f | 77cd4237596251cd591da28e4a2a7128ac15570f | /metrics.py | 67c58643a48971217438a6ca13b6b04136ff201c | [] | no_license | Huy-LichHoang/SNA2002 | 31ab56b55a518733423d85f1ab0f4b4a80f72339 | ecc732fbf094a3690966b06860d16e7182405452 | refs/heads/main | 2023-03-29T20:29:24.793334 | 2021-01-22T17:09:55 | 2021-01-22T17:09:55 | 354,713,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | import math
from collections import defaultdict
import numpy as np
from util import *
from sklearn.metrics import pairwise_distances_argmin
from scipy.stats import wasserstein_distance
def compute_centrality(graph):
return nx.eigenvector_centrality_numpy(graph, weight='weight')
def find_node_roles(graph: nx.Graph, attribute='weight'):
# Build dictionary mapping node_ids to adjacent edge weights
edge_weights = defaultdict(list)
    # Loop through all nodes and collect the (log-transformed) weights of all adjacent edges
for node in graph.nodes():
for neighbor in graph.neighbors(node):
weight = graph.get_edge_data(node, neighbor)[attribute]
if weight > 0:
if attribute == 'weight':
edge_weights[node].append(math.log(weight))
else:
edge_weights[node].append(weight)
    # Find the global min and max weights (used as a common histogram range)
min_weight = min([min(weights) for node, weights in edge_weights.items()])
max_weight = max([max(weights) for node, weights in edge_weights.items()])
# Build histograms
edge_weights_his = {}
for node, weights in edge_weights.items():
hist = np.histogram(weights, bins=13, range=(min_weight, max_weight))
edge_weights_his[node] = list(hist[0])
# Finish
nodes, histograms = zip(*edge_weights_his.items())
centers, labels = find_clusters(np.array(histograms), 5)
node_roles = dict(zip(nodes, labels))
# Return
return node_roles
def find_clusters(X, n_clusters, seed=2):
# 1. Randomly choose clusters
rng = np.random.RandomState(seed)
i = rng.permutation(X.shape[0])[:n_clusters]
centers = X[i]
while True:
# print(centers)
# 2a. Assign labels based on closest center
labels = pairwise_distances_argmin(X, centers, metric=wasserstein_distance)
# 2b. Find new centers from means of points
new_centers = np.array([X[labels == i].mean(0) for i in range(n_clusters)])
# 2c. Check for convergence
if np.all(centers == new_centers): break
centers = new_centers
# Break if values are invalid
if np.any(np.isnan(centers)) or np.any(np.isinf(centers)): break
return centers, labels
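# Minimal usage sketch for the role assignment above (the toy graph is
# illustrative, not part of the original project):
# import networkx as nx
# G = nx.les_miserables_graph()   # ships with integer 'weight' edge attributes
# roles = find_node_roles(G)      # node -> one of 5 histogram-based roles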
def reachability(G: nx.Graph):
return nx.floyd_warshall(G, 'duration_seconds')
def walkability(G: nx.Graph, n):
return sorted(nx.connected_components(G), key=len, reverse=True)[:n]
| [
"[email protected]"
] | |
ef11ab855087e5da864942444ca80ac57c439d7c | faa458873e01cc29623452c02b936720d4568b01 | /postbar_test.py | ca78d1f3fef92503760b19c7e3990bd514ac8db9 | [] | no_license | mafumaru/webstorm-flask | a16812212dcc78726ca18acba2bba70ab26f55d3 | cd31f3639dd93f569078f2615797084ffbd2937d | refs/heads/master | 2021-06-13T02:06:25.254133 | 2019-06-07T10:47:42 | 2019-06-07T10:47:42 | 190,702,840 | 0 | 0 | null | 2021-06-01T23:49:51 | 2019-06-07T07:12:54 | HTML | UTF-8 | Python | false | false | 395 | py | import pymongo
from framework.module.tieba.helper import TiebaPostbarHelper
from framework.module.tieba.postbar import Postbar
b = Postbar('bilibili')
b.debug = True
b.get_lists()
h = TiebaPostbarHelper()
h.write_to_db(b.datacore)
# conn = pymongo.MongoClient()
# db = conn['tieba_postbar']
# db['postbar'].insert(
# {"postbar": "bilibili", "id_list": [], "mark_id_list": []})
# print('')
| [
"[email protected]"
] | |
1bbfd42506f9595618291c45e80888f2bde04db1 | 3eef8e65164d5c6b4580b4ea26a5fd9ac49302f7 | /exchange/currency_parser.py | 4139410ba36081b8cc3d2ec22debcf4d143eccfc | [] | no_license | EugeneBilenko/neuroflas_test | bcccfce7fd70f03eeda4cf4422b68ecf6480da09 | 41a060470e370fee1511cad768e32c80c34a87b5 | refs/heads/master | 2022-12-12T21:39:22.171003 | 2019-01-08T09:13:49 | 2019-01-08T09:13:49 | 164,609,342 | 0 | 0 | null | 2022-12-08T01:31:33 | 2019-01-08T09:09:09 | Python | UTF-8 | Python | false | false | 1,348 | py | import urllib3
from bs4 import BeautifulSoup
class CurrencyParser:
def __init__(self, debug=False):
self.url = "https://www.ecb.europa.eu/stats/policy_and_exchange_rates/euro_reference_exchange_rates/html/index.en.html"
self.debug_file = "test_scrapped_data.txt.txt"
self.debug = debug
self.source_page = None
def run(self) -> list:
self.source_page = self.get_source_info()
return self.prepare_codes()
def get_source_info(self) -> bytes:
if self.debug:
with open(self.debug_file, "rb") as f:
data = f.read()
else:
http = urllib3.PoolManager()
page = http.request('GET', self.url)
if int(page.status) != 200:
print("error occured")
data = page.data
return data
def prepare_codes(self) -> list:
soup = BeautifulSoup(self.source_page, 'html.parser')
rows = soup.find('table', attrs={'class': 'ecb-forexTable fullWidth'}).find("tbody").find_all("tr")
currency_data = []
for row in rows:
tmp = {}
tmp['code'] = row.find("td", attrs={'class': 'currency'}).text
tmp['value'] = float(row.find("span", attrs={'class': 'rate'}).text)
currency_data.append(tmp)
return currency_data
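# Minimal usage sketch (assumes the ECB page layout targeted above):
# if __name__ == "__main__":
#     rates = CurrencyParser().run()
#     print(rates[:3])  # e.g. [{'code': 'USD', 'value': ...}, ...]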
| [
"[email protected]"
] | |
c1b86c9bab71398a763c9bd986c8bc20596c8308 | 38da215b9b50d0743f34cd0ef38e75db557deeff | /swagger_client/api/clubs_api.py | 53a01f198682ed1fca54842dada603ca7db0d966 | [
"MIT"
] | permissive | HalestormAI/stravaio | 0742dc5749d90840ef6c4638ca4c3ee3040d57ce | 9d99179eb70bf5219ab4d2c7d7b5d3617457ae9e | refs/heads/master | 2020-09-07T14:57:59.836372 | 2020-02-29T00:03:57 | 2020-02-29T00:03:57 | 220,817,328 | 0 | 0 | MIT | 2020-02-29T00:03:47 | 2019-11-10T16:29:45 | null | UTF-8 | Python | false | false | 20,879 | py | # coding: utf-8
"""
Strava API v3
Strava API # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class ClubsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_club_activities_by_id(self, id, **kwargs): # noqa: E501
"""List Club Activities # noqa: E501
Retrieve recent activities from members of a specific club. The authenticated athlete must belong to the requested club in order to hit this endpoint. Pagination is supported. Athlete profile visibility is respected for all activities. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_activities_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryActivity]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_club_activities_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_club_activities_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_club_activities_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""List Club Activities # noqa: E501
Retrieve recent activities from members of a specific club. The authenticated athlete must belong to the requested club in order to hit this endpoint. Pagination is supported. Athlete profile visibility is respected for all activities. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_activities_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryActivity]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'per_page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_club_activities_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_club_activities_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'per_page' in params:
query_params.append(('per_page', params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['strava_oauth'] # noqa: E501
return self.api_client.call_api(
'/clubs/{id}/activities', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SummaryActivity]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_club_admins_by_id(self, id, **kwargs): # noqa: E501
"""List Club Administrators. # noqa: E501
Returns a list of the administrators of a given club. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_admins_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryAthlete]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_club_admins_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_club_admins_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_club_admins_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""List Club Administrators. # noqa: E501
Returns a list of the administrators of a given club. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_admins_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryAthlete]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'per_page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_club_admins_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_club_admins_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'per_page' in params:
query_params.append(('per_page', params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['strava_oauth'] # noqa: E501
return self.api_client.call_api(
'/clubs/{id}/admins', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SummaryAthlete]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_club_by_id(self, id, **kwargs): # noqa: E501
"""Get Club # noqa: E501
Returns a given club using its identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:return: DetailedClub
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_club_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_club_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_club_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Get Club # noqa: E501
Returns a given club using its identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:return: DetailedClub
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_club_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_club_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['strava_oauth'] # noqa: E501
return self.api_client.call_api(
'/clubs/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DetailedClub', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_club_members_by_id(self, id, **kwargs): # noqa: E501
"""List Club Members # noqa: E501
Returns a list of the athletes who are members of a given club. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_members_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryAthlete]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_club_members_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_club_members_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_club_members_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""List Club Members # noqa: E501
Returns a list of the athletes who are members of a given club. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_club_members_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the club. (required)
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryAthlete]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page', 'per_page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_club_members_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_club_members_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'per_page' in params:
query_params.append(('per_page', params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['strava_oauth'] # noqa: E501
return self.api_client.call_api(
'/clubs/{id}/members', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SummaryAthlete]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_logged_in_athlete_clubs(self, **kwargs): # noqa: E501
"""List Athlete Clubs # noqa: E501
Returns a list of the clubs whose membership includes the authenticated athlete. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_logged_in_athlete_clubs(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryClub]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_logged_in_athlete_clubs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_logged_in_athlete_clubs_with_http_info(**kwargs) # noqa: E501
return data
def get_logged_in_athlete_clubs_with_http_info(self, **kwargs): # noqa: E501
"""List Athlete Clubs # noqa: E501
Returns a list of the clubs whose membership includes the authenticated athlete. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_logged_in_athlete_clubs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Page number.
:param int per_page: Number of items per page. Defaults to 30.
:return: list[SummaryClub]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_logged_in_athlete_clubs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'per_page' in params:
query_params.append(('per_page', params['per_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['strava_oauth'] # noqa: E501
return self.api_client.call_api(
'/athlete/clubs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SummaryClub]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
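# Usage sketch (the token and pagination values are placeholders):
#   from swagger_client import ApiClient, Configuration
#   cfg = Configuration()
#   cfg.access_token = "STRAVA_ACCESS_TOKEN"
#   clubs = ClubsApi(ApiClient(cfg)).get_logged_in_athlete_clubs(per_page=10)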
| [
"[email protected]"
] | |
8e42ea28f707628f9911dc9bffc3f74240670caf | 7c7b8e3a83a610dedbdef9377a9074b680fa19e3 | /evaluation/models/frozen_models/export_stylegan.py | 04b84053a3619ba6f75b0f9f1481c7907ba2d301 | [] | no_license | CorneliusHagmeister/AttentionSampling | fcf9d11d5dd7fa49d051dac0623797516e5ef06e | 1ff439af98cbb984810db0e81c7e853199ec68c4 | refs/heads/master | 2022-12-29T14:02:23.079880 | 2020-10-11T13:54:13 | 2020-10-11T13:54:13 | 261,579,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,187 | py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Minimal script for generating an image using pre-trained StyleGAN generator."""
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
import tensorflow as tf
def export_graphdef(filename):
from tensorflow.python.tools import freeze_graph
graph = tf.get_default_session().graph
sess = tf.get_default_session()
graph_def = graph.as_graph_def()
# fix batch norm nodes
for node in graph_def.node:
if node.op == 'RefSwitch':
node.op = 'Switch'
for index in range(len(node.input)):
if 'moving_' in node.input[index]:
node.input[index] = node.input[index] + '/read'
elif node.op == 'AssignSub':
node.op = 'Sub'
if 'use_locking' in node.attr: del node.attr['use_locking']
elif node.op == 'AssignAdd':
node.op = 'Add'
if 'use_locking' in node.attr: del node.attr['use_locking']
        # drop control-dependency inputs (names starting with '^');
        # iterate over a copy so that removal does not skip entries
        for inp in list(node.input):
            if inp.startswith('^'):
                node.input.remove(inp)
saver_path = tf.train.Saver().save(sess, 'cache/karras2019stylegan-ffhq-1024x1024.ckpt')
converted_graph = tf.graph_util.convert_variables_to_constants(sess, graph_def, ['Gs/images_out'])
tf.train.write_graph(converted_graph, 'cache', f'{filename}_converted.pb', as_text=False)
graph_path = tf.train.write_graph(converted_graph, 'cache', f'{filename}.pbtxt')
print('Freezing graph')
freeze_graph.freeze_graph(
input_graph=graph_path,
input_saver='',
input_binary=False,
input_checkpoint=saver_path,
output_node_names=['Gs/images_out'],
restore_op_name='',
filename_tensor_name='',
output_graph=f'cache/frozen_{filename}.pb',
clear_devices=False,
initializer_nodes='',
variable_names_whitelist="",
variable_names_blacklist="",
input_meta_graph=None,
input_saved_model_dir=None
)
def main():
# Initialize TensorFlow.
tflib.init_tf()
# Load pre-trained network.
url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
_G, _D, Gs = pickle.load(f)
# _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
# _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
# Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
# Print network details.
Gs.print_layers()
export_graphdef('karras2019stylegan-ffhq-1024x1024')
if __name__ == "__main__":
main()
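# Loading the frozen graph afterwards (a sketch; the file name matches the export above):
# with tf.gfile.GFile('cache/frozen_karras2019stylegan-ffhq-1024x1024.pb', 'rb') as f:
#     gd = tf.GraphDef()
#     gd.ParseFromString(f.read())
# tf.import_graph_def(gd, name='')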
| [
"[email protected]"
] | |
679f1526559941affa64666c6d28e4bbf5986a38 | d79e3f30b4969c805bdb3b16f8e32c28ffbbdd1d | /apps/exam/views.py | a8368c16c519f48c2855efa331021015c553c50e | [] | no_license | michaelquon/python_exam | e7aaf76c1a8d9b3074533c3d87721c405715c849 | 3cfb2dfb9075a05e4203cb24ab0c10635b97f151 | refs/heads/master | 2021-01-24T16:28:26.751474 | 2018-02-27T22:53:12 | 2018-02-27T22:53:12 | 123,198,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,488 | py | from __future__ import unicode_literals
from django.shortcuts import render, redirect, HttpResponse
from models import User, Wish
from django.contrib import messages
import re
def index(request):
return render(request, 'exam/index.html')
def register(request):
return redirect('/')
def login(request):
# if User.objects.filter(username=postData['username']).exists() and User.objects.filter(password=postData['password']).exists():
# return redirect('/')
# else:
return redirect('/dashboard')
def create(request):
errors = User.objects.validateUser(request.POST)
if len(errors):
for tag, error in errors.iteritems():
messages.error(request, error, extra_tags=tag)
return redirect('/')
else:
new_user = User.objects.create(
name = request.POST['name'],
username = request.POST['username'],
password = request.POST['password'],
date_hired = request.POST['date_hired'])
        request.session['user_id'] = new_user.id
return redirect('/dashboard')
def dashboard(request):
my_wish = Wish.objects.filter(wishers = User.objects.get(id = request.session['user_id']))
all_wish = Wish.objects.all()
user_name = User.objects.get(id=request.session['user_id']).name
all_other_wishes = all_wish.difference(my_wish)
context = {
'my_wish' : my_wish,
'other_wishes' : all_other_wishes,
'user_name' : user_name
}
return render(request,'exam/dashboard.html', context)
def add(request):
return render(request, 'exam/add.html')
def addWish(request):
response = Wish.objects.validateWish(request.POST, request.session['user_id'])
return redirect('/dashboard', response)
def show(request, wish_id):
thisWish = Wish.objects.get(id=wish_id)
# others = thisWish.wishers.all().exclude()
others = User.objects.filter(wish_joined=thisWish).exclude(added_joined=thisWish)
context = {
'thisWish' : thisWish,
'others' : others
}
return render(request, 'exam/show.html', context)
def createJoin(request, wish_id):
user = User.objects.get(id = request.session['user_id'])
wish = Wish.objects.get(id = wish_id)
wish.wishers.add(user)
return redirect('/dashboard')
def delete(request):
return redirect('/dashboard')
def destroy(request, wish_id):
Wish.objects.filter(id = wish_id).delete()
return redirect('/dashboard')
| [
"[email protected]"
] | |
f633f49359304d391a8c5e5dc74279db7bffa100 | 40bbfbf840d696024b8e0aaa24b7e4124b57ad42 | /build/robot17/gscam/catkin_generated/pkg.installspace.context.pc.py | 2f1d0e4db87da72e1ca739409e7fc1ea59282b69 | [] | no_license | brunamdb/Projeto-Final | a6d490ae4b5f759fb81c969b82f7b37328be4baf | f531797e11b440b6369ecf34d67d22af4ad06cf0 | refs/heads/master | 2020-03-30T13:46:00.204841 | 2018-10-02T16:25:26 | 2018-10-02T16:25:26 | 151,286,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/parallels/catkin_ws/install/include;/usr/include/gstreamer-1.0;/usr/lib/x86_64-linux-gnu/gstreamer-1.0/include;/usr/include/glib-2.0;/usr/lib/x86_64-linux-gnu/glib-2.0/include".split(';') if "/home/parallels/catkin_ws/install/include;/usr/include/gstreamer-1.0;/usr/lib/x86_64-linux-gnu/gstreamer-1.0/include;/usr/include/glib-2.0;/usr/lib/x86_64-linux-gnu/glib-2.0/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;image_transport;sensor_msgs;camera_calibration_parsers;camera_info_manager".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgscam;-lgstapp-1.0;-lgstbase-1.0;-lgstreamer-1.0;-lgobject-2.0;-lglib-2.0".split(';') if "-lgscam;-lgstapp-1.0;-lgstbase-1.0;-lgstreamer-1.0;-lgobject-2.0;-lglib-2.0" != "" else []
PROJECT_NAME = "gscam"
PROJECT_SPACE_DIR = "/home/parallels/catkin_ws/install"
PROJECT_VERSION = "0.1.3"
| [
"[email protected]"
] | |
3867bb299e08aa05c4840df5b8945158058367b5 | 9b5dc68ccfcb7163935d96ebb2aca33eed0a26ce | /Tiny.py | cb76e2ad856283cf987aaf8aaa565506b0007fcc | [] | no_license | tiffanyelston/Python-Scripts | cc89f4cd7016548e0984ea7e21ac3086da204db7 | 86907f19e807abf1171ac2d17d01aafe3a640ad7 | refs/heads/master | 2021-08-30T21:29:28.948481 | 2017-12-15T17:26:20 | 2017-12-15T17:26:20 | 114,761,794 | 0 | 0 | null | 2017-12-19T13:36:23 | 2017-12-19T12:18:49 | Python | UTF-8 | Python | false | false | 85 | py | #!/usr/bin/env python3
# Print a shortened URL
print("http://tinyurl.com/noeujfu")
| [
"[email protected]"
] | |
dead32bd92d6308b06af3f1529784bfc0c916c2e | d93cd56d58401f1d4d50b2df08d1d56bd889efef | /venv/Scripts/pip3.7-script.py | 901443af4d40b8cd15ef5d6b74de8fd76d67875d | [] | no_license | TUnveiled/DatabasesProject | d3a3c60ce22b24361133d46d0dd59d6f291cf1b5 | e61fccbdebfe14474547b2758507329c0984d6a0 | refs/heads/master | 2020-04-23T18:49:22.701187 | 2019-03-30T02:49:31 | 2019-03-30T02:49:31 | 171,381,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | #!C:\Users\const\PycharmProjects\DatabasesProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
96b637dbf12f2cf705b79914d6633eba9ce6e99f | fb5e390f62c3764dc90333b6b70ed1e7d36faf31 | /Python/5kyu-simple pig latin.py | e5145ae393bf8574f77129a0805d88407062839c | [
"Unlicense"
] | permissive | jangoertzen/Codewars | b40f3f05c7004714b374baa236c2196cdbbd8bce | 34d13c34b6526a1a1050948c1ac2fb977aba6583 | refs/heads/master | 2020-12-13T05:58:29.024965 | 2020-03-11T14:35:27 | 2020-03-11T14:35:27 | 234,329,518 | 0 | 0 | null | 2020-01-16T13:49:06 | 2020-01-16T13:45:08 | null | UTF-8 | Python | false | false | 473 | py | """
Move the first letter of each word to the end of it, then add "ay" to the end of the word. Leave punctuation marks untouched.
"""
def pig_it(text):
words = text.split(' ')
pigworte = []
for i in words:
if i.isalpha():
if len(i) > 1:
pigworte.append(i[1:]+i[0]+'ay')
else:
pigworte.append(i + 'ay')
else:
pigworte.append(i)
result = ' '.join(pigworte)
return result | [
"[email protected]"
] | |
2b4ed78b6e90b3a2f42759218b0092e0d0bd3fad | 26cc3a937c172f600f8fe8d0dcf222f8ba99ed32 | /manage.py | 9d25cd84acf0b5874557843f2f2eb62c508cb851 | [] | no_license | rafaelbcerri/hype | 15c5ac7247fc848f2880d608c16a7ffca9e8d32a | 9f7669b598879587f8d315644244367bde0aecd9 | refs/heads/master | 2022-12-10T00:53:19.448488 | 2019-05-28T12:58:45 | 2019-05-28T12:58:45 | 183,120,416 | 1 | 0 | null | 2022-12-08T05:01:16 | 2019-04-24T01:09:41 | Python | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hype.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
782dadfb3c5b097c52b291e702ab3c4f7f78c21f | 4c7e536f185c02878a0a6f7a602d9a85f84544fa | /doc.teste.py | 5c93d5bab7830077dee88cea7e27a706714e24e7 | [] | no_license | GOM001/Treino | 3959121f0711f47dbb24747923998f859c0de708 | 11de869d86467998a92c2c1081cbef67a9654565 | refs/heads/master | 2020-07-02T22:20:35.085969 | 2019-08-10T22:10:47 | 2019-08-10T22:10:47 | 201,685,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | def dormir(dia_semana, feriado):
if feriado:
return True
elif dia_semana:
return False
return True
def alunos_problema(a_sorri, b_sorri):  # "problem students": True iff both or neither smile
if a_sorri and b_sorri:
return True
elif a_sorri or b_sorri:
return False
return True
def soma_dobro(a, b):  # sum of a and b, doubled when a == b
if a==b:
return 2*(a+b)
return a+b
def diff21(n):
if abs(n)<=21:
return 21-abs(n)
return (abs(21-n))*2
def apaga(s, n):  # 'apaga' (delete): drop every character equal to s[n]
b=''
for a in range(len(s)):
if s[a]!=s[n]:
b=b+s[a]
else:
b=b+""
return b
def troca(s):  # 'troca' (swap): exchange the first and last characters
if len(s)<=1:
return s
else:
b=s[len(s)-1]+s[1:(len(s)-1)]+s[0]
return b
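# A few hedged sanity checks (my reading of the exercises above; adjust if the
# intended semantics differ):
if __name__ == '__main__':
    assert dormir(False, True) is True      # holiday: sleeping in is allowed
    assert alunos_problema(True, True) is True
    assert soma_dobro(2, 2) == 8 and soma_dobro(2, 3) == 5
    assert diff21(19) == 2 and diff21(25) == 8
    assert apaga('banana', 1) == 'bnn'
    assert troca('abcd') == 'dbca'
    print('all checks passed')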
| [
"[email protected]"
] | |
2cb25dd12c9b3d5d9d2da9c17a17a398c5bdb8dd | 751f306dcf7566dbfbbc2ef122fb3abdd720ed76 | /addons/source-python/plugins/battle_royal/commands/group_command.py | e99383d10cb4610c07548e4badadaf97e35b9b2b | [] | no_license | VenomzGaming/Sp-Battle-royal | d07865eb2318a6ba7a6e6d59c45d81b2d42eeb69 | 3948678adbc07844c4fc74e4def01de7190db05b | refs/heads/master | 2021-01-20T00:13:42.611165 | 2017-06-06T09:55:43 | 2017-06-06T09:55:43 | 89,097,578 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,543 | py | ## IMPORTS
import re
from commands import CommandReturn
from commands.typed import TypedSayCommand
from messages import SayText2
from players.entity import Player
from .filter import Filter
from ..entity.battleroyal import _battle_royal
from ..entity.group import BattleRoyalGroup
from ..menus.group import group_menu
## CLASS COMMAND GROUP
class CommandGroup:
'''
    CommandGroup manages all group commands
    executed by a player.
:param Object command_info:
TypedSayCommand info
    :param bool use_filter: (default=False)
Use filter to get player.
'''
def __init__(self, command_info, use_filter=False):
self.caller = _battle_royal.get_player(Player(command_info.index)) if command_info.index is not None else None
if len(command_info.command) >= 2:
self.type, self.filter, self.args = self._parse_command(command_info)
if not use_filter:
self.args = self.filter
self.filter = None
else:
self.target = self._get_player()
@staticmethod
def _parse_command(command_info):
'''Method used to parse the command info.'''
command = list(command_info.command)
command_name = re.sub(r'(!|/)', '', command[0])
command_filter = command[1]
args = ','.join(command[2:])
return (command_name, command_filter, args)
def _get_player(self):
'''Get filter player'''
find = None
players = [user for user in Filter(self.filter, self.caller)]
if len(players) == 0:
if self.caller is not None:
SayText2('Not Found').send(self.caller.index)
else:
print('Player not found.')
else:
find = players
return find
def _check_group(self):
        '''Check whether the caller already has a group.'''
if self.caller.group == None:
            SayText2('You do not have a group').send()
return False
return True
def _check_owner(self):
'''Check if player is group's owner.'''
if self.caller.group.owner.userid != self.caller.userid:
SayText2('You must be the owner to manage the group').send()
return False
return True
def create(self):
'''Create a group.'''
if self._check_group():
SayText2('You have already a group.').send()
return
if self.args not in _battle_royal.teams:
group = BattleRoyalGroup(self.caller, self.args)
_battle_royal.add_team(group)
SayText2('Group ' + group.name + ' created').send()
else:
            SayText2('Group already exists').send()
def delete(self):
'''Delete a group.'''
if not self._check_owner():
return
if self.caller.group.name in _battle_royal.teams:
team = self.caller.group
_battle_royal.remove_team(team)
for player in team:
player.group = None
SayText2('Group deleted').send()
else:
SayText2('Group does not exist').send()
def leave(self):
'''Leave a group.'''
        if not self._check_group():  # call the check; the bare method reference was always truthy
return False
group = self.caller.group
group.remove_player(self.caller)
if len(group.players) == 0 and group.name in _battle_royal.teams:
_battle_royal.remove_team(group)
del group
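    # leave() above also disbands the group once its last member departs.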
def add_player(self):
'''Add player to group.'''
        if not self._check_group() or not self._check_owner():  # require a group AND ownership; short-circuit skips the owner check without a group
return False
if not isinstance(self.target, list):
            SayText2('More than one player').send()
else:
if self.target is not None:
group_menu.sender = self.caller
group_menu.send(self.target.index)
else:
SayText2('Player with ' + self.filter + ' is not found').send()
def remove_player(self):
'''Remove player from group'''
        if not self._check_group() or not self._check_owner():  # require a group AND ownership; short-circuit skips the owner check without a group
return False
if not isinstance(self.target, list):
            SayText2('More than one player').send()
else:
br_player = _battle_royal.get_player(self.target)
self.caller.group.remove_player(br_player)
SayText2('Removed ' + br_player.name + ' from the group').send()
## GROUP COMMANDS
@TypedSayCommand('/create')
@TypedSayCommand('!create')
def _create_group(command_info, group_name:str):
command = CommandGroup(command_info)
command.create()
return CommandReturn.BLOCK
@TypedSayCommand('/delete')
@TypedSayCommand('!delete')
def _delete_group(command_info):  # renamed: previously shadowed _create_group above
command = CommandGroup(command_info)
command.delete()
return CommandReturn.BLOCK
@TypedSayCommand('/invit')
@TypedSayCommand('!invit')
@TypedSayCommand('/group')
@TypedSayCommand('!group')
def _invit_to_group(command_info, filter_value:str):
command = CommandGroup(command_info, True)
command.add_player()
return CommandReturn.BLOCK
@TypedSayCommand('/leave')
@TypedSayCommand('!leave')
def _leave_group(command_info):  # renamed: previously shadowed _invit_to_group above
command = CommandGroup(command_info)
command.leave()
return CommandReturn.BLOCK
@TypedSayCommand('/remove')
@TypedSayCommand('!remove')
def _remove_to_group(command_info, filter_value:str):
command = CommandGroup(command_info, True)
command.remove_player()
return CommandReturn.BLOCK | [
"[email protected]"
] | |
2c9f8fc03dcfe948913b1f8ec476e69c72fb5fed | 6d94ddbe6bb313f95e4193efb14197b055d36d31 | /code/python/oscillo/__init__.py | f9d241875a6b5735c2b12cb1ac99507fc055a8df | [] | no_license | alejandrorosas/trowelproject | 340d292815207e355e0f50adc15b6e1f51940f97 | 46eef4bbb75486b39f5bbf41b6a8db36a982d1a4 | refs/heads/master | 2021-01-10T14:23:44.458570 | 2010-06-07T08:56:10 | 2010-06-07T08:56:10 | 46,488,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | from oscillo import *
| [
"arthur.vuillard@16d9f100-46de-11de-89ec-3d9467717a5a"
] | arthur.vuillard@16d9f100-46de-11de-89ec-3d9467717a5a |
094d0e3e7f5d443efdac75291542fcf38e1fc5de | eae62368c5a1d0eed52e8130cc9fbfda3883998f | /project_euler_10.py | 18baaf3e2a8f69b2325aee18714e91e23d284c22 | [] | no_license | malga94/Project_Euler | 04ff1fddcf358f7ac7329aadd808cfca76901506 | d77e4dc8416fdd26aa9e70551deba67f527c40d8 | refs/heads/master | 2021-11-10T23:49:28.821710 | 2021-11-03T18:12:37 | 2021-11-03T18:12:37 | 247,135,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | #!/usr/bin/env python3
import math
def slow_sieve(max_num):
top_val = int(math.ceil(math.sqrt(max_num)))
primes = []
for i in range(1, max_num):
primes.append(i)
lista = [2]
for val in range(3, top_val, 2):
lista.append(val)
for x in lista:
for y in primes:
if y%x == 0 and y>x:
primes.remove(y)
primes.remove(1)
return primes
def sieve(max_num):  # odd-only sieve of Eratosthenes; returns the SUM of primes below max_num
marked = [0]*max_num
val = 3
s = 2
    while val < max_num:  # strict bound: marked has indices 0..max_num-1
if marked[val] == 0:
s+=val
i = val**2
while i < max_num:
marked[i] = 1
i += val
val += 2
return s
def isprime(i):
top_val = int(math.ceil(math.sqrt(i)))
for x in range(2, top_val+1):
if i%x == 0:
return False
return True
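# Hedged sanity values for sieve() (classic Project Euler #10 figures):
#   sieve(10)      == 17             # 2 + 3 + 5 + 7
#   sieve(2000000) == 142913828922   # the original problem's answer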
def main():
maxval = 1000000000
# somma = 2
# for i in range(3, maxval, 2):
# if isprime(i):
# somma += i
#print(somma)
# primes = slow_sieve(20000)
# print(sum(primes))
somma = sieve(maxval)
print(somma)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
94aa48dab808e3d25c7e8e57168a5497e0979285 | 3eafbf2bf3cc346a31a6400dee9a2181cf68817a | /envv/bin/pip3 | e50bc6b5dddf852f83170fd83bf856787bfafe16 | [] | no_license | promaroy/django1 | 100dc4a8246dcad0bff868100a0e53597330927a | 39b9027aea71c1bf10703239e739c4d6d8309177 | refs/heads/master | 2020-05-05T07:57:11.910800 | 2019-04-22T08:01:53 | 2019-04-22T08:01:53 | 179,844,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/home/proma/Desktop/django/envv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
f62873626373188cf2a3addff0682298dc88f6ab | 02dbb4326db68dc2f9d9630274b784023cb4ed01 | /main/migrations/0006_question_points.py | 0d7c51091fbd2eb06a3aee34ac431d762755fc5e | [] | no_license | valenpino/django_app | c7f2956927b7cece363c7ae0f648fc85152368fe | b406de65e21dc8fce6449ef6524686f6b61f6804 | refs/heads/main | 2023-03-25T16:06:27.286417 | 2021-03-21T00:27:14 | 2021-03-21T00:27:14 | 349,594,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 3.0.5 on 2020-04-18 20:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005_question_answers_count'),
]
operations = [
migrations.AddField(
model_name='question',
name='points',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
374b7473299fc7cfc2fab43c8d3d1f0f9f8c9b60 | 92f22b717cd3b804b7fab83d519b79a31b2844e8 | /serv.py | d423e1eb669f53f0ee2ba9eafe877a1067a7d9d4 | [] | no_license | VexelB/game | 614d8a378874cc957120cc0a924ccef33e48b3d8 | 15021f60a74c3fe093b4da483e07264afa0e9a02 | refs/heads/master | 2020-04-24T22:38:59.141440 | 2019-12-26T08:40:26 | 2019-12-26T08:40:26 | 172,319,085 | 1 | 1 | null | 2019-03-26T10:32:39 | 2019-02-24T09:30:19 | Python | UTF-8 | Python | false | false | 13,492 | py | import socket
import engine
import time
def buletmove(q):
for bullet in bullets[q//2]:
x, y = int(bullet.x*len(engine.maps[q//2])/engine.win_height), int(bullet.y*len(engine.maps[q//2][0])/engine.win_width)
if x >= 9:
x -= 1
if y >= 9:
y -= 1
if engine.maps[q//2][x][y] != 0:
if engine.units[q].x == x and engine.units[q].y == y:
bullets[q//2].pop(bullets[q//2].index(bullet))
engine.units[q].destroy(q//2)
if engine.units[q+1].x == x and engine.units[q+1].y == y:
engine.units[q+1].destroy(q//2)
bullets[q//2].pop(bullets[q//2].index(bullet))
for wall in engine.walls[q//2]:
if wall.x == x and wall.y == y:
wall.destroy(q//2)
bullets[q//2].pop(bullets[q//2].index(bullet))
if bullet.x < engine.win_height and bullet.x > 0 and bullet.y < engine.win_width and bullet.y > 0:
bullet.move()
else:
bullets[q//2].pop(bullets[q//2].index(bullet))
def reinit(j):
j = j//2
j = j*2
if engine.units[j].reload == 'yes' and engine.units[j+1].reload == 'yes':
engine.map = [[0 for i in range(9)] for j in range(9)]
engine.gen()
engine.maps[j//2] = engine.map
bullets[j//2] = []
engine.units[j].__init__(j//2)
engine.units[j+1].__init__(j//2)
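# reinit() above: once both players in a pair have toggled reload ('r'),
# regenerate the map, drop all bullets and re-initialise both units.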
def parser(data1, j):
dataset = data1.split('/')
for data in dataset:
if engine.units[j].helth != 0:
if data == 'up' and engine.units[j].y > 0:
engine.units[j].move(engine.units[j].x, engine.units[j].y-1, j//2)
elif data == 'down' and engine.units[j].y < 8:
engine.units[j].move(engine.units[j].x, engine.units[j].y+1, j//2)
elif data == 'left' and engine.units[j].x > 0:
engine.units[j].move(engine.units[j].x-1, engine.units[j].y, j//2)
elif data == 'right' and engine.units[j].x < 8:
engine.units[j].move(engine.units[j].x+1, engine.units[j].y, j//2)
elif data == 'w':
engine.units[j].orient = 'up'
elif data == 's':
engine.units[j].orient = 'down'
elif data == 'a':
engine.units[j].orient = 'left'
elif data == 'd':
engine.units[j].orient = 'right'
elif data == 'space':
if engine.units[j].bullet == 'reload':
if engine.units[j].bullets == 5:
engine.units[j].bullet = 'no'
else:
engine.units[j].bullets += 1
if engine.units[j].bullets >= 0 and engine.units[j].bullet != 'reload':
engine.units[j].bullets -= 1
if engine.units[j].orient == 'up':
if engine.units[j].y != 0:
bullets[(j)//2].append(engine.Bullet(int(engine.win_height / len(engine.maps[(j)//2]) * engine.units[j].x + engine.units[j].width//2) + 5, int(engine.win_height / len(engine.maps[(j)//2][0]) * engine.units[j].y), engine.units[j].orient, (255, 255, 0)))
elif engine.units[j].orient == 'down':
if engine.units[j].y !=8:
bullets[(j)//2].append(engine.Bullet(int(engine.win_height / len(engine.maps[(j)//2]) * engine.units[j].x + engine.units[j].width//2) + 5, int(engine.win_height / len(engine.maps[(j//2)][0]) * engine.units[j].y + engine.units[j].height + 10), engine.units[j].orient, (255, 255, 0)))
elif engine.units[j].orient == 'left':
if engine.units[j].x != 0:
bullets[j//2].append(engine.Bullet(int(engine.win_height / len(engine.maps[(j)//2]) * engine.units[j].x), int(engine.win_height / len(engine.maps[(j)//2][0]) * engine.units[j].y + engine.units[j].width//2) + 5, engine.units[j].orient, (255, 255, 0)))
elif engine.units[j].orient == 'right':
if engine.units[j].x != 8:
bullets[j//2].append(engine.Bullet(int(engine.win_height / len(engine.maps[(j)//2]) * engine.units[j].x + engine.units[j].width) + 10, int(engine.win_height / len(engine.maps[(j)//2][0]) * engine.units[j].y + engine.units[j].width//2) + 5, engine.units[j].orient, (255, 255, 0)))
if engine.units[j].bullets < 1:
engine.units[j].bullet = 'reload'
if data == 'r':
if engine.units[j].reload == 'no':
engine.units[j].reload = 'yes'
else:
engine.units[j].reload = 'no'
reinit(j)
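# Note: parser()/sender() implement a tiny ad-hoc text protocol. Clients send
# '/'-separated tokens ('up', 'w', 'space', 'r', ...); sender() replies with
# '/'-separated records whose trailing ','-field names the record type
# ('orient red', 'bullet', 'score', 'hp', 'delay', ...).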
def sender(conn, conn1, q):
ab = int(engine.win_height / len(engine.maps[q//2]) * engine.units[q+1].x)
bb = int(engine.win_height / len(engine.maps[q//2][0]) * engine.units[q+1].y)
ar = int(engine.win_height / len(engine.maps[q//2]) * engine.units[q].x)
br = int(engine.win_height / len(engine.maps[q//2][0]) * engine.units[q].y)
sendata = 'map'
map1 = ''
for i in engine.maps[q//2]:
for j in i:
map1 += str(j)
sendata += map1 + '/'
if engine.units[q].helth != 0:
if engine.units[q].orient == 'up':
sendata += str(ar + engine.units[q].width//2 + 3)+',' + str(br)+','
sendata += 'orient red/'
elif engine.units[q].orient == 'down':
sendata += str(ar + engine.units[q].width//2 + 3)+',' + str(br + engine.units[q].height + 5)+','
sendata += 'orient red/'
elif engine.units[q].orient == 'left':
sendata += str(ar)+',' + str(br + engine.units[q].width//2 + 3)+','
sendata += 'orient red/'
elif engine.units[q].orient == 'right':
sendata += str(ar + engine.units[q].width + 5)+',' + str(br + engine.units[q].width//2 + 3)+','
sendata += 'orient red/'
if engine.units[q+1].helth != 0:
if engine.units[q+1].orient == 'up':
sendata += str(ab + engine.units[q+1].width//2 + 3)+',' + str(bb)+','
sendata += 'orient blue/'
elif engine.units[q+1].orient == 'down':
sendata += str(ab + engine.units[q+1].width//2 + 3)+',' + str(bb + engine.units[q+1].height + 5)+','
sendata += 'orient blue/'
elif engine.units[q+1].orient == 'left':
sendata += str(ab)+',' + str(bb + engine.units[q+1].width//2 + 3)+','
sendata += 'orient blue/'
elif engine.units[q+1].orient == 'right':
sendata += str(ab + engine.units[q+1].width + 5)+',' + str(bb + engine.units[q+1].width//2 + 3)+','
sendata += 'orient blue/'
for bullet in bullets[q//2]:
sendata += str(bullet.x) + ',' + str(bullet.y) + ',' + 'bullet/'
sendata += engine.units[q].reload + ',red re/'
sendata += engine.units[q+1].reload + ',blue re/'
sendata += str(engine.score[q//2][0]) + ',' + str(engine.score[q//2][1]) + ',' + 'score/'
sendata += str(engine.units[q+1].bullets) + ',' + engine.units[q+1].bullet + ',' + 'blue bul1/'
sendata += str(engine.units[q].bullets) + ',' + engine.units[q].bullet + ',' + 'red bul1/'
sendata += str(engine.units[q].helth) + ',' + 'red helth/'
sendata += str(engine.units[q+1].helth) + ',' + 'blue helth/'
if engine.units[q].helth != 0:
a, b, c, d = int(engine.win_height / len(engine.maps[q//2]) * engine.units[q].x)+5, int(engine.win_height / len(engine.maps[q//2][0]) * engine.units[q].y) + engine.units[q].height // 2, engine.units[q].width, 10
a1, b1, c1, d1 = int(engine.win_height / len(engine.maps[q//2]) * engine.units[q].x)+5, int(engine.win_height / len(engine.maps[q//2][0]) * engine.units[q].y) + engine.units[q].height // 2 + 2, engine.UnitRed.width * engine.units[q].helth // 5, 6
sendata += str(a) + ',' + str(b) + ',' + str(c) + ',' + str(d) + ',' + str(a1) + ',' + str(b1) + ',' + str(c1) + ',' + str(d1) + ',' + engine.units[q].name + ',hp/'
if engine.units[q+1].helth != 0:
a, b, c, d = int(engine.win_height / len(engine.maps[q//2]) * engine.units[q+1].x)+5, int(engine.win_height / len(engine.maps[q//2][0]) * engine.units[q+1].y) + engine.units[q+1].height // 2, engine.units[q+1].width, 10
a1, b1, c1, d1 = int(engine.win_height / len(engine.maps[q//2]) * engine.units[q+1].x)+5, int(engine.win_height / len(engine.maps[q//2][0]) * engine.units[q+1].y) + engine.units[q+1].height // 2 + 2, engine.UnitBlue.width * engine.units[q+1].helth // 5, 6
sendata += str(a) + ',' + str(b) + ',' + str(c) + ',' + str(d) + ',' + str(a1) + ',' + str(b1) + ',' + str(c1) + ',' + str(d1) + ',' + engine.units[q+1].name + ',hp/'
sendata += f"{time.time()},delay/"
conn.send(sendata.encode())
conn1.send(sendata.encode())
i = 0
conns = []
addrs = []
bullets = []
sock = socket.socket()
sock.bind(('', 9090))
sock.listen(2)
lcon = time.time()
lcon1 = time.time()
while True:
sock.settimeout(0.0000001)
log = open("log.txt", "a")
try:
#if (lcon - lcon1) >= 300:
# conns[i].close()
# conns.pop(i)
# addrs.pop(i)
# engine.maps.pop(i//2)
# engine.units.pop(i)
# engine.score.pop(i//2)
# bullets.pop(i//2)
# i -= 1
conn, addr = sock.accept()
if i%2 == 0:
bullets.append([])
engine.score.append([0, 0])
engine.map = [[0 for i in range(9)] for j in range(9)]
engine.walls.append([])
engine.gen(i//2)
engine.maps.append(engine.map)
engine.units.append(engine.UnitRed(i//2))
#try:
engine.units[i].name = conn.recv(512).decode()
#if len(engine.units[i].name) > 6:
# try:
# conn.close()
# engine.maps.pop(i//2)
# engine.units.pop(i)
# engine.score.pop(i//2)
# bullets.pop(i//2)
# i -= 1
# except:
# pass
#except:
# conn.close()
# engine.maps.pop(i//2)
# engine.units.pop(i)
# engine.score.pop(i//2)
# bullets.pop(i//2)
# i -= 1
#if engine.units[i].name == '|bot':
# conns.append(conn)
# addrs.append(addr)
# i += 1
# engine.units.append(engine.UnitBlue(i//2))
# engine.units[i].name = '|bot'
# log.write(time.ctime(time.time())+ " Bot Conn:"+conns[i]+'\n')
conns.append(conn)
addrs.append(addr)
log.write(f"{time.ctime(time.time())} New Conn: i = {i} units = {len(engine.units)}\n {addrs[i]} name = {engine.units[i].name} \n")
lcon = time.time()
elif i%2 == 1:
engine.units.append(engine.UnitBlue(i//2))
engine.units[i].name = conn.recv(512).decode()
conns.append(conn)
addrs.append(addr)
conns[i].send(str(i%2).encode())
conns[i-1].send(str((i-1)%2).encode())
log.write(f"{time.ctime(time.time())} Conns: i = {i} units = {len(engine.units)}\n")
j = 0
while j < len(conns) - 1:
log.write(f" {j//2}: {addrs[j]} {str(engine.units[j])[8:13:]} |{j}| name = '{engine.units[j].name}' {addrs[j+1]} {str(engine.units[j+1])[8:13:]} |{j+1}| name = '{engine.units[j+1].name}'\n")
j += 2
lcon1 = time.time()
i += 1
except Exception as e:
if type(e) != socket.timeout:
log.write(f"{time.ctime(time.time())} There is an error with conectiong: i={i} {str(e)} \n")
if len(conns) > 1:
j = 0
m = 0
while j < len(conns) - 1:
sock.settimeout(0)
try:
buletmove(j)
except:
pass
#start_time=time.time()
try:
if addrs[j] != addrs[j+1]:
parser(conns[j+1].recv(512).decode(), j+1)
parser(conns[j].recv(512).decode(), j)
if addrs[j] != addrs[j+1]:
parser(conns[j+1].recv(512).decode(), j+1)
parser(conns[j].recv(512).decode(), j)
sender(conns[j], conns[j+1], j)
except Exception as e:
log.write(f"{time.ctime(time.time())} Closed Conn: {str(e)}\n")
log.write(f" {j//2}: score: {engine.score[j//2]}\n")
log.write(f" users: {engine.units[j].name} {addrs[j]} {engine.units[j+1].name} {addrs[j+1]} \n")
conns[j].close()
conns.pop(j)
conns[j].close()
conns.pop(j)
addrs.pop(j)
addrs.pop(j)
engine.maps.pop(j//2)
engine.units.pop(j)
engine.units.pop(j)
engine.score.pop(j//2)
bullets.pop(j//2)
i -= 2
#delay = time.time() - start_time
#if delay > 0.3:
# log.write(f"Dellay is {delay} \n")
            j += 2  # advance to the next connection pair (was commented out, stalling the loop on the first pair)
log.close()
| [
"[email protected]"
] | |
a859752d38280713419b523ced15594d0446574e | 3c6f0ac56a30ffb561f15c645f62ec1e5884f331 | /nematus/util.py | 347120687283db9b588462e09098c719eefdb587 | [] | no_license | zhengzx-nlp/past-and-future-nmt | 7735c700bcf6af93436c552790b444513e069bdd | dd7e4f8de8e629f5203f8038db3003bbd8af5d8e | refs/heads/master | 2021-09-09T13:29:46.994988 | 2018-03-16T13:51:53 | 2018-03-16T13:51:53 | 110,409,940 | 19 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | '''
Utility functions
'''
import sys
import json
import cPickle as pkl  # Python 2; on Python 3 use the stdlib 'pickle' module instead
#json loads strings as unicode; we currently still work with Python 2 strings, and need conversion
def unicode_to_utf8(d):
return dict((key.encode("UTF-8"), value) for (key,value) in d.items())
def load_dict(filename):
try:
with open(filename, 'rb') as f:
return unicode_to_utf8(json.load(f))
except:
with open(filename, 'rb') as f:
return pkl.load(f)
def load_config(basename):
try:
with open('%s.json' % basename, 'rb') as f:
return json.load(f)
except:
try:
with open('%s.pkl' % basename, 'rb') as f:
return pkl.load(f)
except:
sys.stderr.write('Error: config file {0}.json is missing\n'.format(basename))
sys.exit(1)
def seqs2words(seq, inverse_target_dictionary, join=True):
words = []
for w in seq:
if w == 0:
break
if w in inverse_target_dictionary:
words.append(inverse_target_dictionary[w])
else:
words.append('UNK')
return ' '.join(words) if join else words
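# Example (assuming an id->token dictionary):
#   seqs2words([5, 9, 0, 3], {5: 'hello', 9: 'world'})  ->  'hello world'
#   (0 acts as end-of-sequence; ids missing from the dictionary become 'UNK')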
| [
"[email protected]"
] | |
522da58245141da37e495ac174ae806e5876a53c | a83bafc38b514a0339a5991be15870551ac49681 | /bimdata_api_client/model/ifc_merge_request.py | da1ae22f0cde4f3b8a4e1c129295ede5ae081f8c | [] | no_license | bimdata/python-api-client | 4ec2f81e404ef88d3a7e4d08e18965b598c567a2 | c9b6ea0fbb4729b2a1c10522bdddfe08d944739d | refs/heads/master | 2023-08-17T13:38:43.198097 | 2023-08-09T12:48:12 | 2023-08-09T12:48:12 | 131,603,315 | 0 | 4 | null | 2022-10-10T15:21:26 | 2018-04-30T14:06:15 | Python | UTF-8 | Python | false | false | 12,336 | py | """
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1 (v1)
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from bimdata_api_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from bimdata_api_client.exceptions import ApiAttributeError
class IfcMergeRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('ifc_ids',): {
'min_items': 2,
},
('export_name',): {
'max_length': 512,
'min_length': 1,
},
('floating_point_reduction',): {
'inclusive_maximum': 15,
'inclusive_minimum': 6,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'ifc_ids': ([int],), # noqa: E501
'export_name': (str,), # noqa: E501
'floating_point_reduction': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'ifc_ids': 'ifc_ids', # noqa: E501
'export_name': 'export_name', # noqa: E501
'floating_point_reduction': 'floating_point_reduction', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, ifc_ids, export_name, *args, **kwargs): # noqa: E501
"""IfcMergeRequest - a model defined in OpenAPI
Args:
ifc_ids ([int]):
export_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            floating_point_reduction (int): Precision of geometries. 6 is micrometre, 9 is nanometre, etc. [optional] if omitted the server will use the default value of 9  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.ifc_ids = ifc_ids
self.export_name = export_name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, ifc_ids, export_name, *args, **kwargs): # noqa: E501
"""IfcMergeRequest - a model defined in OpenAPI
Args:
ifc_ids ([int]):
export_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            floating_point_reduction (int): Precision of geometries. 6 is micrometre, 9 is nanometre, etc. [optional] if omitted the server will use the default value of 9  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.ifc_ids = ifc_ids
self.export_name = export_name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
38bd0417d50449425352bcf8bad30cf100dab3a2 | a32a2261c5044d1ecb4b411de8a20702102b3514 | /ca_bidding_28990/wsgi.py | a3393fdf1d7529c100b32b6c2c7109611b19f385 | [] | no_license | crowdbotics-apps/ca-bidding-28990 | 311ad817f573966afbee6e8d84e54d315e7bbd78 | ded02db6683659c34496aed13955b7bb17703dcc | refs/heads/master | 2023-06-26T17:44:15.305575 | 2021-07-20T19:59:42 | 2021-07-20T19:59:42 | 387,900,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for ca_bidding_28990 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ca_bidding_28990.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
5bc713f3ae2ab2b81e4b175884f540d23c648192 | 89fc7d29f2ca7549b4a67d3fc7e63e95ce66f482 | /backend/venv/lib/python3.6/encodings/koi8_r.py | ae991956f7bd4373a166a458b95bb8ef65ff9f20 | [] | no_license | Daul89/simple_todoapp_flask | 54f4be62eb1040bcfba0fce633ee43a493a107d2 | ac73d811c791fac1316f5c86dc61c1b04deca2d0 | refs/heads/master | 2020-04-01T12:40:08.362760 | 2019-03-26T08:55:08 | 2019-03-26T08:55:08 | 153,217,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | /usr/local/var/pyenv/versions/3.6.4/lib/python3.6/encodings/koi8_r.py | [
"[email protected]"
] | |
bbcdc1844d97f559edeb426e83156f6881993669 | 78f96b321dc541026f43a6c8505228f83685fb8e | /app/mod_interaction/resources/PassengerResource.py | 3a9e9f1bbf894b8cad0a964f27193d57580b3401 | [] | no_license | xiaofud/syllabus_backend | a54e1844b7b8b84dd77cbfb549b8f9686da3cd84 | 076f2a6ed334f8a96b741d0c5c9d268f3716c8b3 | refs/heads/master | 2021-06-30T13:15:50.569168 | 2017-09-20T13:34:29 | 2017-09-20T13:34:29 | 54,321,524 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,122 | py | # coding=utf-8
__author__ = 'smallfly'
from flask_restful import Resource, fields, marshal
from flask_restful.reqparse import RequestParser
from app.mod_interaction.database_operations import common
from app.mod_interaction import models
from app.mod_interaction.resources import helpers
from app import db
import time
# 返回的JSON结构
PASSENGER_STRUCTURE = {
# # 主键
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
#
# # 发起拼车的童鞋
# uid = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
#
# # 拼车信息id
# carpool_id = db.Column(db.Integer, db.ForeignKey("carpools.id"), nullable=False)
#
# # join time
# join_time = db.Column(db.TIMESTAMP, default=None)
#
# # 自己的联系方式(用json方式存储)
# contact = db.Column(db.VARCHAR(200), nullable=True)
"id": fields.Integer,
"uid": fields.Integer,
"carpool_id": fields.Integer,
"join_time": fields.String,
"contact": fields.String
}
class PassengerResource(Resource):
GET_PARSER = RequestParser(trim=True)
POST_PARSER = RequestParser(trim=True)
PUT_PARSER = RequestParser(trim=True)
DELETE_PARSER = RequestParser(trim=True)
def get(self):
"""
获取拼车的人的信息
API请求地址:
/interaction/api/v2/passenger
方法: GET
参数:
必选参数:
id 乘客的id
"""
self.GET_PARSER.add_argument("id", required=True, type=int, location="args")
args = self.GET_PARSER.parse_args()
id_ = args["id"]
passenger = common.query_single_by_id(models.Passenger, id_)
if passenger is None:
return {"error": "not found"}, 404
return marshal(passenger, PASSENGER_STRUCTURE)
def post(self):
"""
加入某个拼车
API请求地址:
/interaction/api/v2/passenger
方法: POST
参数: 参数位置为form
必选参数:
carpool_id 已经存在的某个拼车id
uid 用户id
token 用户token
contact 用户自己的联系信息, 存储JSON字符串, 和iOS端沟通好结构
例: {"wechat": "xxx", "phone": xxx} 等, 方便用于复制联系信息到剪贴板
"""
self.POST_PARSER.add_argument("contact", required=True, location="form")
# self.POST_PARSER.add_argument("id", type=int, required=True, location="form")
self.POST_PARSER.add_argument("carpool_id", type=int, required=True, location="form")
self.POST_PARSER.add_argument("uid", type=int, required=True, location="form")
self.POST_PARSER.add_argument("token", required=True, location="form")
args = self.POST_PARSER.parse_args()
# 检查token
if not common.check_token(args):
return {"error": "wrong token"}, 401
del args["token"]
# 检查carpool存不存在
carpool = common.query_single_by_id(models.Carpool, args["carpool_id"])
if carpool is None:
return {"error": "carpool not exists"}, 404
# 不允许加入几次拼车
passenger = models.Passenger.query.filter_by(uid=args["uid"]).filter_by(carpool_id=carpool.id).first()
if passenger is not None:
return {"error": "already in this carpool"}, 400
# 加入时间戳
args["join_time"] = helpers.timestamp_to_string(int(time.time()))
passenger = models.Passenger(**args)
count = carpool.people_count + 1
if count > carpool.max_people:
return {"error": "people overflows"}, 400
carpool.people_count = count
if common.add_to_db(db, passenger) == True and common.add_to_db(db, carpool) == True:
return {"id": common.get_last_inserted_id(models.Passenger)}, 200
else:
return {"error": "Internal Server Error"}, 500
def put(self):
"""
修改自己的联系方式
API请求地址:
/interaction/api/v2/passenger
方法: PUT
参数: 参数位置为form
必选参数:
id 乘客id
carpool_id 已经存在的某个拼车id
uid 用户id
token 用户token
contact 用户自己的联系信息, 存储JSON字符串, 和iOS端沟通好结构
例: {"wechat": "xxx", "phone": xxx} 等, 方便用于复制联系信息到剪贴板
"""
# 用于更新信息, 只允许修改contact信息
self.PUT_PARSER.add_argument("id", type=int, required=True, location="form")
# self.PUT_PARSER.add_argument("carpool_id", type=int, required=True, location="form")
self.PUT_PARSER.add_argument("uid", type=int, required=True, location="form")
self.PUT_PARSER.add_argument("token", required=True, location="form")
self.PUT_PARSER.add_argument("contact", required=True, location="form")
args = self.PUT_PARSER.parse_args()
# 检查token
if not common.check_token(args):
return {"error": "wrong token"}, 401
# passenger = models.Passenger.query.filter_by(uid=args["uid"]).filter_by(carpool_id=args["carpool_id"]).first()
passenger = models.Passenger.query.filter_by(id=args["id"]).first()
# 并未上车
if passenger is None:
return {"error": "passenger not exists"}, 404
passenger.contact = args["contact"]
if common.add_to_db(db, passenger) == True:
return {"status": "updated"}, 200
else:
return {"error": "Internal Server Error"}, 500
def delete(self):
"""
退出某个拼车
API请求地址:
/interaction/api/v2/passenger
方法: DELETE
参数: 位于请求报头
必选参数:
id 乘客id
uid 用户id
token 用户token
"""
self.DELETE_PARSER.add_argument("id", type=int, required=True, location="headers")
self.DELETE_PARSER.add_argument("uid", type=int, required=True, location="headers")
self.DELETE_PARSER.add_argument("token", required=True, location="headers")
args = self.DELETE_PARSER.parse_args()
# 检查token
if not common.check_token(args):
return {"error": "wrong token"}, 401
# passenger = models.Passenger.query.filter_by(id=args["id"]).first()
# # 并未上车
# if passenger is None:
# return {"error": "passenger not exists"}, 404
status = common.delete_from_db(db, models.Passenger, args["id"], args["uid"])
if status == True:
return {"status": "deleted"}
else:
code = status[1]
if code == common.ERROR_NOT_FOUND:
return {"error": "not found"}, 404
elif code == common.ERROR_USER_ID_CONFLICT:
return {"error": "forbidden"}, 403
elif code == common.ERROR_COMMIT_FAILED:
return {"error": "Internal Server Error"}, 500 | [
"[email protected]"
] | |
a3b8b0802c60c4bc05c19ff5c1d221b00d1ce01a | 126be7c907249f116a3740c7f00871c86ce52ad5 | /CustomerSupport/CustomerSupport/settings.py | 9c2d3a58419ffbf2f706e1db8222e8813960f05f | [
"Apache-2.0"
] | permissive | noohshaikh/customer-support-application | 3b2ee06cdc5673cc11b22d568ceb8a2b7fb12b75 | 23f1c665c27e199de40d4d9d9450fe84a9138ed0 | refs/heads/main | 2023-03-30T12:20:14.344931 | 2021-03-31T18:45:47 | 2021-03-31T18:45:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,060 | py | """
Django settings for CustomerSupport project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# host url
CURRENT_HOST = 'http://localhost:3000'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n_v1a*&b@9p#x)#%dpxbol=x#$f8a(!r9^z^@=m0x98@v*2&(e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# enable/disable localhost email server
USE_EMAIL_LOCALHOST = True
# feedback email delay value minutes
EMAIL_DELAY = 60
# Review url validity time in minutes
REVIEW_URL_VALIDITY = 30
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'support'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CustomerSupport.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CustomerSupport.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'customersupportdb.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Use localhost as email server for development
# for localhost with python smtp do the following
# > open terminal/cmd and execute 'python -m smtpd -n -c DebuggingServer localhost:1025'
# > the email is printed on the console itself
if USE_EMAIL_LOCALHOST:
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
DEFAULT_FROM_EMAIL = '[email protected]'
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = '' # SMTP host e.g. smtp.gmail.com
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = '' # email username
EMAIL_HOST_PASSWORD = '' # email password
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
271c9116999b9388019fccd3cc8b82c5f804ffee | eb83cbf4550efc35bb74d4da3e040de8a4844493 | /venv/bin/easy_install-3.6 | 13bbe128dfaa9d9c2824f63e59c893641da4a083 | [] | no_license | iamabhishek0/DivineCareWeb | f2450d041b656ca2f8a638d60036ad0763aaeb0f | 1cf324e8a2fd5bcaaa76265b0289327a08678b44 | refs/heads/master | 2020-05-30T20:21:57.495482 | 2019-06-02T20:34:31 | 2019-06-02T20:34:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | 6 | #!/home/sid/PycharmProjects/DivineCareWeb/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.6')()
)
| [
"[email protected]"
] | |
d5652b32df071041fcf039675e6e869d4c131a69 | 9073635cd2aa4181060abaa43328f66e4d7cbc34 | /python/Flatmates-Bill-Generator/.idea/.name | 8ad429783a53f76c0aa368065ed63b208e004320 | [
"MIT"
] | permissive | smv1999/hacktoberfest_2021 | 45e5c5d8f2879040e4dd4449f1fb2b7dc6145682 | 245e7f5b7eed3fb5143bb7f870c62135e9289f3d | refs/heads/main | 2023-08-27T20:38:11.520723 | 2021-10-17T05:38:00 | 2021-10-17T05:38:00 | 414,287,739 | 0 | 0 | MIT | 2021-10-06T16:25:08 | 2021-10-06T16:25:07 | null | UTF-8 | Python | false | false | 10 | name | reports.py | [
"[email protected]"
] | |
a00cdc03efedff09e5feff5426901ef0c709d3cd | e354fd9a70376849f16b4a1cfac23210ef578975 | /code/plot_velocity.py | 625a19456013ba5a8c28648dd908b32b2ce856e3 | [] | no_license | QsingularityAi/polar-pfc-master_active-crystel | dc316438ad1e30ac3f4a9236c42f312e41c82af4 | 9be6ba1c4a0dd80877c2830119fab58b75c8c1e1 | refs/heads/main | 2023-06-11T07:03:24.428858 | 2021-06-22T22:01:06 | 2021-06-22T22:01:06 | 379,266,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | #!/usr/bin/python
import sys
import os
import glob
import fnmatch
import csv
import numpy as np
import matplotlib.pyplot as plt
from matplotlib2tikz import save as tikz_save
# R=80
v0 = [0.1, 0.2, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.425, 0.45]
v0_ = ['R80_0.1', 'R80_0.2', 'R80_0.3', 'R80_0.31', 'R80_0.32', 'R80_0.33', 'R80_0.34', 'R80_0.35', 'R80_0.36', 'R80_0.37', 'R80_0.38', 'R80_0.39', 'R80_0.4', 'R80_0.425', 'R80_0.45']
v0_idx = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
# R=100
#v0 = [0.1, 0.2, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.425, 0.45, 0.475, 0.8, 0.9]
#v0_ = ['0.1', '0.2', '0.3', '0.31', '0.32', '0.33', '0.34', '0.35', '0.36', '0.37', '0.38', '0.39', '0.4', '0.4_b', '0.425_b', '0.45', '0.45_b', '0.475_b', '0.8', '0.9']
#v0_idx = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 15, 16, 17]
mean_velocity = [0]*len(v0)
n_mean_velocity = [0]*len(v0)
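# Accumulate run-averaged velocities per v0 entry; n_mean_velocity counts how
# many runs landed on each index (the commented R=100 lists map several runs
# to one index), so the loop further below can take a proper mean.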
for v, idx in zip(v0_, v0_idx):
timesteps= []
#polar = []
#nematic = []
velocity = []
with open('time_series_' + str(v) + '.csv','r') as in_file:
reader = csv.DictReader(in_file, delimiter=',')
tol=1.0e-2
for row in reader:
t = float(row['t'])
#p = float(row['polar'])
#n = float(row['nematic'])
            v = float(row['velocity'])  # NB: shadows the outer loop variable 'v' (the run key); harmless here but fragile
timesteps.append(t)
#polar.append(p)
#nematic.append(n)
velocity.append(v)
min_time = 500
mean_vel = 0.0
n_vel = 0
for t,vel in zip(timesteps, velocity):
if t >= min_time:
mean_vel += vel
n_vel += 1
mean_velocity[idx] += mean_vel / n_vel;
n_mean_velocity[idx] += 1
for i in range(0, len(mean_velocity)):
mean_velocity[i] *= 1.0/n_mean_velocity[i]
# plot timeseries
plt.plot(v0, mean_velocity,'-*', v0,v0,'--k')
plt.xlabel('activity $v_0$')
plt.legend(['velocity $\\|\\bar{\\mathbf{v}}(v_0)\\|$', 'activity $v_0$'], loc='lower right')
tikz_save('mean_velocity.tikz',
figureheight = '\\figureheight',
figurewidth = '\\figurewidth')
plt.show()
| [
"[email protected]"
] | |
433837f6d0a6410fe773f21255403a4bed9867f5 | b80d90ed1365e559353ec3b05c81dacf32ee5ace | /IITGSearch/spiders/ZeroSpider.py | 93cd11fb068da0a0942a1fc7113d33d565da3bed | [
"MIT"
] | permissive | rajan-garg/Search_engine | 61fad0e0b6f5e6c946352f5f63bdd948a72a5933 | 521d2f4969e6163fae9448cb21c64250a6aedc42 | refs/heads/master | 2021-01-11T03:59:29.765283 | 2016-10-18T16:18:52 | 2016-10-18T16:18:52 | 71,263,320 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | import os
import scrapy
import w3lib.url
from scrapy.http import Request
import re
import urlparse
import mimetypes
from IITGSearch.items import IITGSearchItem
class ZeroSpider(scrapy.Spider):
name = "ZeroSpider"
allowed_domains = ["iitg.ernet.in"]
start_urls = [
"http://intranet.iitg.ernet.in",
"http://local.iitg.ernet.in"
]
if not os.path.exists("Links/Zero"):
os.makedirs("Links/Zero")
crawledLinks = set()
link_file = open(r'Links/Zero/links.txt', 'a+')
unaccepted_url_file = open(r'Links/Zero/unaccepted_links.txt', 'a+')
all_url_file = open(r'Links/Zero/all_links.txt', 'a+')
def parse(self, response):
if response.status == 200:
ZeroSpider.link_file.write(response.url + '\n')
# IITGSpider.link_file.write(
# response.url + " type: " +
# str(mimetypes.guess_extension(response.headers['content-type'])) + "\n")
links = response.xpath("//a/@href").extract()
for link in links:
_link = self.abs_url(link, response)
link_clean = w3lib.url.url_query_cleaner(_link)
ZeroSpider.all_url_file.write(str(link_clean) + '\n')
# If it is a proper link and is not checked yet, yield it to the Spider
if link_clean not in ZeroSpider.crawledLinks and self.desired_link(link_clean):
ZeroSpider.crawledLinks.add(link_clean)
yield Request(link_clean, self.parse)
def abs_url(self, url, response):
base = response.xpath('//head/base/@href').extract()
if base:
base = base[0]
else:
base = response.url
return urlparse.urljoin(base, url)
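    # e.g. abs_url('/about', <response for http://local.iitg.ernet.in/x>)
    #   -> 'http://local.iitg.ernet.in/about', honouring any <base href> tag.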
def desired_link(self, url):
events = re.compile(
".*intranet\.iitg\.ernet\.in/cclrs/.*|.*csea\.iitg\.ernet\.in/csea/Public/web_new/index\.php/activities/.*|.*intranet\.iitg\.ernet\.in/eventcal/.*|.*shilloi\.iitg\.ernet\.in/~hss/reservation/.*|.*intranet\.iitg\.ernet\.in/news/user/login\?.*|.*local\.iitg\.ernet\.in/node/46/.*|.*jatinga\.iitg\.ernet\.in/~dppc/.*")
if events.match(url):
ZeroSpider.unaccepted_url_file.write("Returning False: " + url + '\n')
return False
return True
| [
"[email protected]"
] | |
7e65d5dde72b67f17eeb459cbf00b00b7e4d666d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02843/s458829649.py | 88e61af86bd661cdf4ee943bb4568478740beac7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | x = int(input())
li = [0]*100105  # li[s] == 1 iff amount s can be formed from pack sizes 100..105
li[0] = 1
for i in range(x):
if li[i] == 1:
for j in range(6):
li[i+j+100] = 1
print(li[x]) | [
"[email protected]"
] | |
186529a0ee4a15d170cb4b5be3ee8f65b2be010c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02881/s868917213.py | 00c5e1e2ec8b27ee34d07f71120ba33fb7e0b8db | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from math import sqrt
N=int(input())
K=int(sqrt(N)+2)
ans=N  # upper bound; tightened below over divisor pairs i*j == N with cost (i-1)+(j-1)
for i in range(1,K):
if N%i==0:
j=N//i
ans=min(ans,(i-1) + (j-1))
print(ans) | [
"[email protected]"
] | |
8b067a1ff95250e3105b66869fc7cafdc9711dfa | 11ba942f2deba9063fcd3d91ce5e67507cc96bfa | /Server/youtube_api.py | 893ed0d890b7cbcedfa7e662d4cc7c61f8288432 | [] | no_license | z0dz0dz0dz0d/csn | 614c1dea7bff2de203e82e65f6f19701d03e4e59 | fabbded190ff6cd59af1a25cac8a6cf95a4e748a | refs/heads/master | 2023-08-15T06:03:25.632874 | 2021-10-15T16:57:10 | 2021-10-15T16:57:10 | 417,571,865 | 0 | 0 | null | 2021-10-15T16:57:11 | 2021-10-15T16:47:43 | TypeScript | UTF-8 | Python | false | false | 6,330 | py | from types import new_class
import requests
import numpy as np
token = 'AIzaSyBrVRgddachzL32XdZT8mWf8eix3mF3udw'
api = 'https://content-youtube.googleapis.com/youtube/v3'
operation = '/search'
safe_search = 'strict'
youtube_api_topics_mapping = {
"Adventures": "/m/02jjt", # Entertainment
"Alphabet": "/m/01k8wb", # Knowledge
"Animals": "/m/068hy", # Pets
"Arts & Crafts": "/m/05qjc", # Performing arts
"Baby Shark": "/m/02jjt", # Entertainment
"Business": "/m/09s1f", # Business
"Cars & Trucks": "/m/07yv9", # Vehicles
"Challenge": "/m/02jjt", # Entertainment
"Colors": "/m/01k8wb", # Knowledge
"Comedy": "/m/09kqc", # Humor
"Construction": "/m/02jjt", # Entertainment
"Cooking & Baking": "/m/02wbm", # Food
"Counting": "/m/09s1f", # Business
"Daily Routine": "/m/0kt51", # Health
"Dinosaurs": "/m/068hy", # Pets,
"Disney": "/m/02jjt", # Entertainment
"Dragons": "/m/02jjt", # Entertainment
"Emotions / Feelings": "/m/098wr", # Society
"English as Second Language": "/m/01k8wb", # Knowledge
"Excursions & Travel": "/m/07bxq", # Tourism
"Fairy Tales/Fables": "/m/02jjt", # Entertainment
"Family": "/m/098wr", # Society
"Fantasy": "/m/02jjt", # Entertainment
"Finger Family": "/m/02jjt", # Entertainment
"Food & Drink": "/m/02wbm", # Food
"Foreign Language": "/m/01k8wb", # Knowledge
"Friendship": "/m/098wr", # Society
"Games & Puzzles": "/m/04q1x3q", # Puzzle video game
"History": "/m/01k8wb", # Knowledge,
"Hobbies": "/m/03glg", # Hobby
"Identity": "/m/098wr", # Society
"Insects & Spiders": "/m/068hy", # Pets
"Institutional Channel": "/m/098wr", # Society
"Jobs & Professions": "/m/098wr", # Society
"Johnny Johnny": "/m/02jjt", # Entertainment
"Jokes/Pranks": "/m/09kqc", # Humor
"Lullaby": "/m/02jjt", # Entertainment
"Math": "/m/01k8wb", # Knowledge
"Mindfulness & Yoga": "/m/0kt51", # Health
"Movement & Dance": "/m/05qjc", # Performing arts
"Music": "/m/04rlf", # Music
"Nature": "/m/07bxq", # Tourism
"Nursery Rhymes": "/m/02jjt", # Entertainment
"Personality": "/m/098wr", # Society
"Phonics": "/m/01k8wb", # Knowledge
"Poetry": "/m/01k8wb", # Knowledge
"Puppets": "/m/02jjt", # Entertainment
"Read-along": "/m/01k8wb", # Knowledge
"Religion": "/m/06bvp", # Religion
"Robots": "/m/07c1v", # Technology
"Safety": "/m/0kt51", # Health
"School": "/m/01k8wb", # Knowledge
"Science": "/m/01k8wb", # Knowledge
"Seasonal Holidays": "/m/07bxq", # Tourism
"Shapes": "/m/01k8wb", # Knowledge
"Sports": "/m/06ntj", # Sports
"Super Heroes": "/m/02jjt", # Entertainment
"Taste Test": "/m/02wbm", # Food
"Theatre Arts": "/m/05qjc", # Performing arts
"Time Travel": "/m/02jjt", # Entertainment
"Toy Playtime": "/m/02jjt", # Entertainment
"Transportation": "/m/07yv9", # Vehicles
"Trick Shots": "/m/02jjt", # Entertainment
"Unboxing": "/m/02jjt", # Entertainment
"Unicorns": "/m/02jjt", # Entertainment
"Video Games": "/m/0bzvm2", # gaming,
"Vocabulary": "/m/01k8wb", # Knowledge
}
# deprecated
def get_youtube_topics(topics):
youtube_topics = []
for topic in topics:
youtube_topic = youtube_api_topics_mapping[topic]
if not youtube_topic in youtube_topics:
youtube_topics.append(youtube_topic)
return ','.join(youtube_topics)
def generate_channel(entry, csn_channel_ids):
channel_id = entry['snippet']['channelId']
new_to_csn = False if channel_id in csn_channel_ids else True
return {
'channel_url': f'https://www.youtube.com/channel/{channel_id}',
'channel_id': channel_id,
'title': entry['snippet']['channelTitle'],
'description': entry['snippet']['description'],
'thumbnail': entry['snippet']['thumbnails']['default']['url'],
'new_to_csn': new_to_csn
}
def generate_video(entry, for_kids_mapping):
channel_id = entry['snippet']['channelId']
video_id = entry['id']
keywords = entry['snippet']['tags'] if 'tags' in entry['snippet'].keys() else None
for_kids = for_kids_mapping.get(channel_id)
return {
'channel_url': f'https://www.youtube.com/channel/{channel_id}',
'channel_id': channel_id,
'video_url': f'https://www.youtube.com/watch?v={video_id}',
'video_id': video_id,
'title': entry['snippet']['title'],
'description': entry['snippet']['description'],
'keywords': keywords,
'for_kids': for_kids
}
def construct_search_request(type, part, order, region_code, keywords, language, max_results):
keywords_str = '|'.join(keywords)
keywords_str = keywords_str.replace('#', '')
return f'{api}{operation}?type={type}&part={part}&order={order}®ionCode={region_code}&maxResults={max_results}&q={keywords_str}&safeSearch={safe_search}&relevanceLanguage={language}&key={token}'
def dispatch_request(query):
req = requests.get(query)
return req.json()['items']
def construct_metadata_request(part, ids):
ids_str = ','.join(ids)
return f'https://youtube.googleapis.com/youtube/v3/videos?part={part}&id={ids_str}&key={token}'
def construct_channels_request(part, ids):
ids_str = ','.join(ids)
return f'https://youtube.googleapis.com/youtube/v3/channels?part={part}&id={ids_str}&key={token}'
def find_youtube_resources(keywords, max_results, resource_type, order_by, topics, csn_channel_ids):
query = construct_search_request(resource_type, 'snippet', order_by, 'US', np.concatenate((topics, keywords)), 'en', max_results)
items = dispatch_request(query)
if resource_type == 'video':
video_ids = list(map(lambda x: x['id']['videoId'], items))
channel_ids = list(map(lambda x: x['snippet']['channelId'], items))
metadata_query = construct_metadata_request('snippet', video_ids)
items_metadata = dispatch_request(metadata_query)
channels_query = construct_channels_request('status', channel_ids)
items_channels = dispatch_request(channels_query)
for_kids_mapping = {channel['id']:channel['status']['madeForKids'] if 'madeForKids' in channel['status'].keys() else False for channel in items_channels}
return list(map(lambda x: generate_video(x, for_kids_mapping), items_metadata))
else:
return list(map(lambda x: generate_channel(x, csn_channel_ids), items)) | [
"-"
] | - |
020c8ffd28b11ae026ef3465dc90b806b52b8ecf | d57876ae7629202f8464f05f4be925351b78cc51 | /tundra/tundra/wsgi.py | da18d65cf1e2d25bb5ad0b24635b4a96554ee252 | [
"MIT"
] | permissive | TundraStorm/TundraStorm.github.io | bd0f699c37af1d491a2bfa5c88e2a43b7954df54 | b9c298d6b917965b78a1fc2204393489a1817d35 | refs/heads/master | 2020-12-30T12:12:41.077652 | 2017-05-16T08:16:30 | 2017-05-16T08:16:30 | 91,419,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for tundra project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tundra.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
08c4444e805d53294233737b2cc9896028f204f4 | fd48fba90bb227017ac2da9786d59f9b9130aaf0 | /digsby/src/msn/SOAP/MSNSecurityTokenService/__init__.py | 993066248fe1783def5e3f25e15a073e1b84b8e9 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | niterain/digsby | bb05b959c66b957237be68cd8576e3a7c0f7c693 | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | refs/heads/master | 2021-01-18T10:07:10.244382 | 2013-11-03T02:48:25 | 2013-11-03T02:48:25 | 5,991,568 | 1 | 0 | null | 2013-11-03T02:48:26 | 2012-09-28T02:24:50 | Python | UTF-8 | Python | false | false | 83 | py | from SecurityTokenService_types import *
from SecurityTokenService_client import *
| [
"[email protected]"
] | |
7867c46f92c0901c0257a0fe845951ab4e638072 | 7f078dd24f1d0beaf8cc212853d7dd1dabc241df | /bc4py/chain/checking/checktx.py | be7dd9d809ec49c6908f8cffb5a1162561c57d7b | [
"MIT"
] | permissive | kumacoinproject/bc4py | 44db46475b3748d339df5dce57046bed0af8b3ae | 6484d356096261d0d57e9e1f5ffeae1f9a9865f3 | refs/heads/develop | 2022-12-24T18:33:41.827064 | 2020-09-13T16:01:27 | 2020-09-13T16:01:27 | 135,440,859 | 6 | 4 | MIT | 2020-03-19T16:51:51 | 2018-05-30T12:40:07 | Python | UTF-8 | Python | false | false | 7,068 | py | from bc4py.config import C, V, BlockChainError
from bc4py.chain.tx import TX
from bc4py.chain.block import Block
from bc4py.chain.checking.tx_reward import *
from bc4py.chain.checking.tx_mintcoin import *
from bc4py.chain.checking.utils import *
from logging import getLogger
from time import time
import hashlib
log = getLogger('bc4py')
def check_tx(tx, include_block):
# TXの正当性チェック
f_inputs_origin_check = True
f_amount_check = True
f_signature_check = True
f_size_check = True
f_minimum_fee_check = True
payfee_coin_id = 0
# 共通検査
if include_block:
# tx is included block
if tx not in include_block.txs:
raise BlockChainError('Block not include the tx')
elif not (tx.time <= include_block.time <= tx.deadline):
raise BlockChainError('block time isn\'t include in TX time-deadline. [{}<={}<={}]'.format(
tx.time, include_block.time, tx.deadline))
if 0 == include_block.txs.index(tx):
if tx.type not in (C.TX_POS_REWARD, C.TX_POW_REWARD):
raise BlockChainError('tx index is zero, but not proof tx')
elif tx.type in (C.TX_POS_REWARD, C.TX_POW_REWARD):
raise BlockChainError('{} index is not 0 idx:{}'.format(tx, include_block.txs.index(tx)))
# 各々のタイプで検査
if tx.type == C.TX_GENESIS:
return
elif tx.type == C.TX_POS_REWARD:
f_amount_check = False
f_minimum_fee_check = False
# TODO: POS tx need Multisig? f_signature_check
if include_block.flag == C.BLOCK_COIN_POS:
check_tx_pos_reward(tx=tx, include_block=include_block)
elif include_block.flag == C.BLOCK_CAP_POS:
f_signature_check = False
f_inputs_origin_check = False
check_tx_poc_reward(tx=tx, include_block=include_block)
elif include_block.flag == C.BLOCK_FLK_POS:
raise BlockChainError("unimplemented")
else:
raise BlockChainError('Unknown block type {}'.format(include_block.flag))
elif tx.type == C.TX_POW_REWARD:
f_amount_check = False
f_signature_check = False
f_minimum_fee_check = False
check_tx_pow_reward(tx=tx, include_block=include_block)
elif tx.type == C.TX_TRANSFER:
if not (0 < len(tx.inputs) < 256 and 0 < len(tx.outputs) < 256):
raise BlockChainError('Input and output is 1~256')
# payCoinFeeID is default 0, not only 0
_address, payfee_coin_id, _amount = tx.outputs[0]
elif tx.type == C.TX_MINT_COIN:
f_amount_check = False
f_minimum_fee_check = False
f_signature_check = False
check_tx_mint_coin(tx=tx, include_block=include_block)
else:
raise BlockChainError('Unknown tx type "{}"'.format(tx.type))
# Inputs origin チェック
if f_inputs_origin_check:
inputs_origin_check(tx=tx, include_block=include_block)
# 残高移動チェック
if f_amount_check:
amount_check(tx=tx, payfee_coin_id=payfee_coin_id, include_block=include_block)
# 署名チェック
if f_signature_check:
signature_check(tx=tx, include_block=include_block)
# hash-locked check
if tx.message_type == C.MSG_HASHLOCKED:
check_hash_locked(tx=tx)
else:
if tx.R != b'':
raise BlockChainError('Not hash-locked tx R={}'.format(tx.R))
# message type check
if tx.message_type not in C.msg_type2name:
raise BlockChainError('Not found message type {}'.format(tx.message_type))
# Feeチェック
if f_minimum_fee_check:
if tx.gas_amount < tx.size + C.SIGNATURE_GAS * len(tx.signature):
raise BlockChainError('Too low fee [{}<{}+{}]'.format(tx.gas_amount, tx.size,
C.SIGNATURE_GAS * len(tx.signature)))
# TX size チェック
if f_size_check:
if tx.size > C.SIZE_TX_LIMIT:
raise BlockChainError('TX size is too large. [{}>{}]'.format(tx.size, C.SIZE_TX_LIMIT))
if include_block:
log.debug("check success {}".format(tx))
else:
log.info("check unconfirmed tx hash={}".format(tx.hash.hex()))
def check_tx_time(tx):
# For unconfirmed tx
now = int(time()) - V.BLOCK_GENESIS_TIME
if tx.time > now + C.ACCEPT_MARGIN_TIME:
raise BlockChainError('TX time too early. {}>{}+{}'.format(tx.time, now, C.ACCEPT_MARGIN_TIME))
if tx.deadline < now - C.ACCEPT_MARGIN_TIME:
raise BlockChainError('TX time is too late. [{}<{}-{}]'.format(tx.deadline, now, C.ACCEPT_MARGIN_TIME))
# common check
if tx.deadline - tx.time < 10800:
raise BlockChainError('TX acceptable spam is too short. {}-{}<{}'.format(tx.deadline, tx.time, 10800))
if tx.deadline - tx.time > 3600 * 24 * 30: # 30days
raise BlockChainError('TX acceptable spam is too long. {}-{}>{}'.format(tx.deadline, tx.time,
3600 * 24 * 30))
def check_hash_locked(tx):
if len(tx.R) == 0:
raise BlockChainError('R of Hash-locked is None type')
if len(tx.R) > 64:
raise BlockChainError('R is too large {}bytes'.format(len(tx.R)))
size = len(tx.message)
if size == 20:
if hashlib.new('ripemd160', tx.R).digest() != tx.message:
raise BlockChainError('Hash-locked check RIPEMD160 failed')
elif size == 32:
if hashlib.new('sha256', tx.R).digest() != tx.message:
raise BlockChainError('Hash-locked check SHA256 failed')
else:
raise BlockChainError('H of Hash-locked is not correct size {}'.format(size))
def check_unconfirmed_order(best_block, ordered_unconfirmed_txs):
if len(ordered_unconfirmed_txs) == 0:
return None
s = time()
dummy_proof_tx = TX()
dummy_proof_tx.type = C.TX_POW_REWARD,
dummy_block = Block()
dummy_block.height = best_block.height + 1
dummy_block.previous_hash = best_block.hash
dummy_block.txs.append(dummy_proof_tx) # dummy for proof tx
dummy_block.txs.extend(ordered_unconfirmed_txs)
tx = None
try:
for tx in ordered_unconfirmed_txs:
if tx.type == C.TX_GENESIS:
pass
elif tx.type == C.TX_POS_REWARD:
pass
elif tx.type == C.TX_POW_REWARD:
pass
elif tx.type == C.TX_TRANSFER:
pass
elif tx.type == C.TX_MINT_COIN:
check_tx_mint_coin(tx=tx, include_block=dummy_block)
else:
raise BlockChainError('Unknown tx type "{}"'.format(tx.type))
else:
log.debug('Finish unconfirmed order check {}mSec'.format(int((time() - s) * 1000)))
return None
except Exception as e:
log.warning(e, exc_info=True)
# return errored tx
return tx
__all__ = [
"check_tx",
"check_tx_time",
"check_hash_locked",
"check_unconfirmed_order",
]
| [
"[email protected]"
] | |
fc622226959b4aeda42963de88accea859cd282b | bab1a70f0b843949bad9588642ecf026642bb7c3 | /mock_interviews/microsoft/ip_address.py | 2fa4bffbf999c84b1201333f6499eea75ddfe5b6 | [] | no_license | makhmudislamov/leetcode_problems | 6fc270255ad684acd0863189e7702e3c6654e26f | fcbae727d19960efdd16a1ebff9dcaafa3b5e65b | refs/heads/master | 2021-01-16T07:03:55.367430 | 2020-05-05T21:43:43 | 2020-05-05T21:43:43 | 243,018,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | """Given a string containing only digits, restore it by returning all possible valid IP address combinations.
Example:
Input: "25525511135"
Output: ["255.255.11.135", "255.255.111.35"]"""
class Solution:
def restoreIpAddresses(self, s: str) -> List[str]:
| [
"[email protected]"
] | |
170d8d414353b3f720ffdb5d0b0c9948640f0a48 | aadad415f425b9f45fed14290235488a46687a4f | /2010/cms/zeescripts/removeDirectoriesFromCastor.py | 6ab58804007d2ab6cdf42004e85ab02662158ce9 | [] | no_license | enucatl-university/lab-unipd | c1fdae198ccc3af3f75ad07554e148427a9cc096 | c197bb92f479913c1183375fa22fd1619e6bbad4 | refs/heads/master | 2023-08-15T01:59:55.502505 | 2016-11-11T19:20:13 | 2016-11-11T19:20:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,985 | py | #!/usr/bin/env python
import commands
import re
import os
def removeFiles(paths, sampleName, sampleWeight):
for path in paths:
output = commands.getoutput("nsls "+path)
outFiles = re.split(r'\n',output)
for fname in outFiles:
if re.search("pat_fromAOD_PF2PAT",fname)!=None:
rm = "rfrm "+path+fname
print rm
os.system(rm)
def createFiles(paths, sampleName, sampleWeight):
newDir = "/castor/cern.ch/user/b/bianchi/CMSSW370p2/"
newDir2 = "/castor/cern.ch/user/b/bianchi/CMSSW370p2/patLayer/"
#os.system("rfmkdir "+newDir)
#os.system("rfmkdir "+newDir2)
mkdir = "rfmkdir "+newDir2+sampleName
chmod = "rfchmod +775 "+newDir2+sampleName
print mkdir,chmod
os.system(mkdir)
os.system(chmod)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/May6thPDSkim2_SD_EG_TauRel/",
"/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/SD_EG_TauRel"]
#createFiles(paths,"Data_ee",1)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Run2010AJul6thReReco"]
createFiles(paths,"Data",1)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Zee/"]
createFiles(paths,"Zee",1300.)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Ztautau/"]
createFiles(paths,"Ztautau",1300.)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/WJets-madgraph/"]
createFiles(paths,"WJets-madgraph",24170.)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Wenu/"]
createFiles(paths,"Wenu",6153.)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/TTbar/"]
createFiles(paths,"TTbar",149.8 )
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/QCDEM2030/"]
createFiles(paths,"QCDEM2030",0.2355*1000000000*0.0073)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/QCDEM3080/"]
createFiles(paths,"QCDEM3080",0.0593*1000000000*0.059)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/QCDEM80170/"]
createFiles(paths,"QCDEM80170",0.906*1000000*0.148)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/BctoE2030/"]
createFiles(paths,"BctoE2030",0.2355*1000000000*0.00046)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/BctoE3080/"]
createFiles(paths,"BctoE3080",0.0593*1000000000*0.00234)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/BctoE80170/"]
createFiles(paths,"BctoE80170",0.906*1000000*0.0104)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet015/"]
createFiles(paths,"PhotonJet015",8446*10000000)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet1520/"]
createFiles(paths,"PhotonJet1520",1.147*100000)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet2030/"]
createFiles(paths,"PhotonJet2030",5.718*10000)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet3050/"]
createFiles(paths,"PhotonJet3050",1.652*10000)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet5080/"]
createFiles(paths,"PhotonJet5080",2.723*1000)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet80120/"]
createFiles(paths,"PhotonJet80120",4.462*100)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet120170/"]
createFiles(paths,"PhotonJet120170",8.443*10)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet170300/"]
createFiles(paths,"PhotonJet170300",2.255*10)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/PhotonJet300500/"]
createFiles(paths,"PhotonJet300500",1.545)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Comm10May27thSkim_SD_EG/"]
createFiles(paths,"DataComm10May",1.)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Run2010AMay27thReReco/"]
createFiles(paths,"DataRunMay27",1.)
paths = ["/castor/cern.ch/user/b/bianchi/CMSSW361p2/patLayer/Run2010APromptReco/"]
createFiles(paths,"DataRunPrompt",1.)
| [
"[email protected]"
] | |
d892af3a80505f3f58b69da3b62744ddbcf37977 | 04eae3251d6abb6a81016a38f2f2124c50f4d6c8 | /tests/fields/test_float_field.py | f088d7597c92ab91da2aa0b5750c3155245e3ce9 | [] | no_license | ilex/aiomotorengine | d8f9272a919b07370c63ea510aa20a7074a29348 | d3b2eb7ce382ed83a5d02f94883bd7d1d2d1b9a1 | refs/heads/master | 2021-01-10T09:43:56.326421 | 2017-04-20T08:46:54 | 2017-04-20T08:46:54 | 45,910,234 | 31 | 13 | null | 2017-04-18T10:44:27 | 2015-11-10T12:23:48 | Python | UTF-8 | Python | false | false | 1,661 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from preggy import expect
from aiomotorengine import FloatField
from tests import AsyncTestCase
class TestFloatField(AsyncTestCase):
def test_create_float_field(self):
field = FloatField(db_field="test", min_value=10.5, max_value=200.6)
expect(field.db_field).to_equal("test")
expect(field.min_value).to_equal(10.5)
expect(field.max_value).to_equal(200.6)
def test_to_son(self):
field = FloatField()
expect(field.to_son(10.0230)).to_equal(10.023)
expect(field.to_son("10.56")).to_equal(10.56)
def test_from_son(self):
field = FloatField()
expect(field.from_son(10.0230)).to_equal(10.023)
expect(field.from_son("10.56")).to_equal(10.56)
def test_validate_enforces_floats(self):
field = FloatField()
expect(field.validate(1.0)).to_be_true()
expect(field.validate("1.5")).to_be_true()
expect(field.validate("qwe")).to_be_false()
expect(field.validate(None)).to_be_true()
def test_validate_is_empty(self):
field = FloatField()
expect(field.is_empty(None)).to_be_true()
def test_validate_enforces_min_value(self):
field = FloatField(min_value=5.4)
expect(field.validate(1)).to_be_false()
expect(field.validate(5.5)).to_be_true()
expect(field.validate("5.5")).to_be_true()
def test_validate_enforces_max_value(self):
field = FloatField(max_value=5.4)
expect(field.validate(5.3)).to_be_true()
expect(field.validate(5.5)).to_be_false()
expect(field.validate("5.2")).to_be_true()
| [
"[email protected]"
] | |
7959ba0c0a930b24339c35601ac8f75c41807863 | c6402c24176a30becbd6de24defcb12915500473 | /manage_web/apps.py | 2a2c0e203e780c89d779fd35b89b071b13e35410 | [] | no_license | yiwenDing/fake_platform | d874ad72702939bc261d4caebef652ea11265acb | fc24bd128ac2172d3a4e7f5074a2620dc4ec084f | refs/heads/master | 2020-04-06T06:58:49.921153 | 2016-08-23T12:23:39 | 2016-08-23T12:23:39 | 65,884,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class ManageWebConfig(AppConfig):
name = 'manage_web'
| [
"[email protected]"
] | |
5f41abc1d57e47f11f01856efa27099ade69507b | a3e55fcad5eec28c119a82ee030f50a29244af40 | /app.py | e3a97ec78aab4d484cf2164175c8aae420525257 | [] | no_license | RebeccaNacshon/Swagger-in-python-using-flask | 591cd8c89c2b3bc1eb4aee01f6b1d6149c0e1da3 | 7ec0415b4829de2f63cfae4812e689d649bc12ea | refs/heads/master | 2022-06-20T05:38:00.893238 | 2020-05-09T07:39:39 | 2020-05-09T07:39:39 | 262,517,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | import argparse
import os
from flask import Flask, jsonify, make_response
from flask_cors import CORS
from flask_swagger_ui import get_swaggerui_blueprint
import request_api
APP = Flask(__name__)
### swagger specific ###
SWAGGER_URL = '/swagger'
API_URL = '/static/swagger.json'
SWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config={
'app_name': "Rebecca-Flask-Rest-UserManagement"
}
)
APP.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)
### end swagger specific ###
APP.register_blueprint(request_api.get_blueprint())
@APP.errorhandler(400)
def handle_400_error(_error):
"""Return a http 400 error to client"""
return make_response(jsonify({'error': 'Misunderstood'}), 400)
@APP.errorhandler(401)
def handle_401_error(_error):
"""Return a http 401 error to client"""
return make_response(jsonify({'error': 'Unauthorised'}), 401)
@APP.errorhandler(404)
def handle_404_error(_error):
"""Return a http 404 error to client"""
return make_response(jsonify({'error': 'Not found'}), 404)
@APP.errorhandler(500)
def handle_500_error(_error):
"""Return a http 500 error to client"""
return make_response(jsonify({'error': 'Server error'}), 500)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description="Rebecca-Flask-Rest-UserManagement")
PARSER.add_argument('--debug', action='store_true',
help="Use flask debug/dev mode with file change reloading")
ARGS = PARSER.parse_args()
PORT = int(os.environ.get('PORT', 5000))
if ARGS.debug:
print("Running in debug mode")
CORS = CORS(APP)
APP.run(host='127.0.0.1', port=PORT, debug=True)
else:
APP.run(host='127.0.0.1', port=PORT, debug=False)
| [
"[email protected]"
] | |
495cbeb447c3103bf74656f80d880833a9b80326 | eba8ac6028fc0307b2ded3322e2d700521d9a14d | /wavelet_transform.py | 94c4ec637dac3fc1bb13b114f3291e1bd35b737d | [] | no_license | orvergon/detector_ensemble_deepfake | 7698ba8e06c189b42ecb611a2870d4eddba21ec1 | d206d2626cd6cefeee152702e6d43164341f626a | refs/heads/main | 2023-08-02T08:46:29.562622 | 2021-10-06T06:58:23 | 2021-10-06T06:58:23 | 414,052,716 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py | import pywt
import cv2
from video_data import VideoData
import numpy
from math import sqrt
from numpy.lib.stride_tricks import as_strided
import image_utils
import blur_wavelet
treshold = 35
def get_bouding_box(landmarks:list[tuple[int, int]]):
max_x, max_y = (0, 0)
min_x, min_y = (10**100, 10**100)
for x, y in landmarks:
max_x = max(x, max_x)
max_y = max(y, max_y)
min_x = min(x, min_x)
min_y = min(y, min_y)
return ((max_x, max_y), (min_x, min_y))
def flat_pontos_faciais(pontos_faciais):
flat = list()
for face in pontos_faciais:
for frame in face:
flat.append(frame)
return flat
def analisa_video_rodenas(video_data:VideoData):
flat_landmarks = flat_pontos_faciais(video_data.pontos_faciais)
blur_ext_list_face = list()
blur_per_list_face = list()
blur_ext_list_frame = list()
blur_per_list_frame = list()
for i, landmarks in enumerate(flat_landmarks):
try:
frame = numpy.copy(video_data.frames_originais[i])
except:
return 1
bounding_box = get_bouding_box(landmarks)
face_crop = numpy.copy(frame[bounding_box[1][1]:bounding_box[0][1], bounding_box[1][0]:bounding_box[0][0]])
frame[bounding_box[1][1]:bounding_box[0][1], bounding_box[1][0]:bounding_box[0][0]] = 0
blur_face = blur_wavelet.blur_detect(face_crop, treshold)
blur_frame = blur_wavelet.blur_detect(frame, treshold)
blur_per_list_frame.append(blur_frame[0])
blur_ext_list_frame.append(blur_frame[1])
blur_per_list_face.append(blur_face[0])
blur_ext_list_face.append(blur_face[1])
blur_frame = sum(blur_ext_list_frame)/len(blur_ext_list_frame)
blur_face = sum(blur_ext_list_face)/len(blur_ext_list_face)
return (blur_face/blur_frame)
def analisa_video(video_data:VideoData):
for frame in video_data.frames_originais:
haar1, haar2, haar3 = processa_haar_recursivo(frame, 3)
print(haar1)
def mapa_edge(LH, HL, HH):
return numpy.sqrt(numpy.power(LH, 2)+numpy.power(HL, 2)+numpy.power(HH, 2))
def processa_haar_recursivo(imagem, profundidade=3):
if profundidade == 0:
return []
LL, (LH, HL, HH) = pywt.dwt2(imagem, "haar")
return [mapa_edge(LH, HL, HH), *processa_haar_recursivo(LL, profundidade-1)]
| [
"[email protected]"
] | |
f67012ecc90a56a469f02d570044e3665c770dc0 | 13a2a06303bf65dd5de11436297b99ca064a5b2f | /emissions.py | 5593859a6ecae6a0413208e708e6f6c6fe17e8fb | [] | no_license | malfahad/carbon-data | 1c25095de7839287a86d387cf6f5526b147b326c | 9936f3983cb0107cac96d9a3f1f2faf34df30ec9 | refs/heads/main | 2023-04-02T19:21:54.261747 | 2021-04-06T11:56:24 | 2021-04-06T11:56:24 | 355,171,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,368 | py |
import requests
from datetime import datetime, timedelta
class API:
base_url = None
def call_api(self):
if self.request_url:
r = requests.get(self.request_url)
return r.json()
class CarbonIntesnsity(API):
def __init__(self):
self.base_url = 'https://api.carbonintensity.org.uk'
def _parse_time(self, obj):
if not obj:
return None
if not isinstance(obj, datetime):
raise Exception('time should be a valid date time object')
return obj.strftime('%Y-%m-%dT%H:%MZ')
def get_endpoint(self, endpoint, start_from=None, to=None):
no_filter = start_from == None and to == None
start_from = self._parse_time(start_from)
to = self._parse_time(to)
self.request_url = f'{self.base_url}/{endpoint}' if no_filter else f'{self.base_url}/{endpoint}/{start_from}/{to}'
return self.call_api()
def get_intensity_last_half_hour(self):
return self.get_endpoint('intensity')
def get_intensity_between_days(self, start_from, to):
return self.get_endpoint('intensity', start_from=start_from, to=to)
def get_generation_last_half_hour(self):
return self.get_endpoint('generation')
def get_generation_between_dates(self, start_from, to):
return self.get_endpoint('generation', start_from=start_from, to=to)
def print_data(title, data):
rows = {}
for item in data:
for key in item.keys():
existing_val = rows.get(key,'')
rows[key] = f'{existing_val}\t{item[key]}\t'
print(title)
print("=======================")
for key in rows.keys():
print(f'{key} : {rows[key]}')
print("--------------")
print("\n")
def check_emissions():
"""
calls the carpon emissions api and prints the response on teh console in tabular format
Sample response:
Generation mix from 2021-04-06T14:00Z to 2021-04-06T14:30Z
=======================
fuel : biomass coal imports gas nuclear other hydro solar wind
perc : 4 0 5.8 14.6 14 0 1.8 13 46.8
--------------
Cabon Intensity, last 30 minutes
=======================
forecast : 84
actual : 90
index : low
--------------
"""
api = CarbonIntesnsity()
# get generation in last half hour
result = api.get_generation_last_half_hour()
if result:
print_data('Generation mix, last 30 minutes', result['data']['generationmix'])
# get generation in last 2 days
today = datetime.now()
days_ago = today - timedelta(days=2)
result = api.get_generation_between_dates(days_ago, today)
if result:
for data in result['data']:
title = f'Generation mix from {data["from"]} to {data["to"]}'
print_data(title, data['generationmix'])
# get intensity in last half hour
result = api.get_intensity_last_half_hour()
if result:
intensity_result = [result['data'][0]['intensity']]
print_data('Cabon Intensity, last 30 minutes', intensity_result)
if __name__ == '__main__':
check_emissions() | [
"[email protected]"
] | |
ece30aa5c9bd507575818d05b717f1a0e72a4def | 7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14 | /airbyte-integrations/connectors/source-zendesk-talk/source_zendesk_talk/client.py | 668969a8a5f1426ced4cfdb5943e381143d75333 | [
"MIT",
"Elastic-2.0"
] | permissive | Velocity-Engineering/airbyte | b6e1fcead5b9fd7c74d50b9f27118654604dc8e0 | 802a8184cdd11c1eb905a54ed07c8732b0c0b807 | refs/heads/master | 2023-07-31T15:16:27.644737 | 2021-09-28T08:43:51 | 2021-09-28T08:43:51 | 370,730,633 | 0 | 1 | MIT | 2021-06-08T05:58:44 | 2021-05-25T14:55:43 | Java | UTF-8 | Python | false | false | 2,690 | py | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Any, Mapping, Tuple
from base_python import BaseClient
from requests import HTTPError
from .api import (
API,
AccountOverviewStream,
AddressesStream,
AgentsActivityStream,
AgentsOverviewStream,
CallLegsStream,
CallsStream,
CurrentQueueActivityStream,
GreetingCategoriesStream,
GreetingsStream,
IVRMenusStream,
IVRRoutesStream,
IVRsStream,
PhoneNumbersStream,
)
class Client(BaseClient):
"""Zendesk client, provides methods to discover and read streams"""
def __init__(self, start_date: str, subdomain: str, access_token: str, email: str, **kwargs):
self._start_date = start_date
self._api = API(subdomain=subdomain, access_token=access_token, email=email)
common_params = dict(api=self._api, start_date=self._start_date)
self._apis = {
"phone_numbers": PhoneNumbersStream(**common_params),
"addresses": AddressesStream(**common_params),
"greeting_categories": GreetingCategoriesStream(**common_params),
"greetings": GreetingsStream(**common_params),
"ivrs": IVRsStream(**common_params),
"ivr_menus": IVRMenusStream(**common_params),
"ivr_routes": IVRRoutesStream(**common_params),
"account_overview": AccountOverviewStream(**common_params),
"agents_activity": AgentsActivityStream(**common_params),
"agents_overview": AgentsOverviewStream(**common_params),
"current_queue_activity": CurrentQueueActivityStream(**common_params),
"calls": CallsStream(**common_params),
"call_legs": CallLegsStream(**common_params),
}
super().__init__(**kwargs)
def _enumerate_methods(self) -> Mapping[str, callable]:
return {name: api.list for name, api in self._apis.items()}
def stream_has_state(self, name: str) -> bool:
"""Tell if stream supports incremental sync"""
return hasattr(self._apis[name], "state")
def get_stream_state(self, name: str) -> Any:
"""Get state of stream with corresponding name"""
return self._apis[name].state
def set_stream_state(self, name: str, state: Any):
"""Set state of stream with corresponding name"""
self._apis[name].state = state
def health_check(self) -> Tuple[bool, str]:
alive = True
error_msg = None
try:
_ = list(self._apis["phone_numbers"].list(fields=[]))
except HTTPError as error:
alive = False
error_msg = repr(error)
return alive, error_msg
| [
"[email protected]"
] | |
989cf604278c236e8ba41bd6840f43091a111504 | d8bc4d108baa770455ea294498be87b5f51f0b23 | /stock_base/stock_backtest_function.py | eb00df89b798810a65e6e48cd64dbf55c06a1d5f | [] | no_license | dxcv/pratice_project | 852ccde684a05ff4f17f5d17bea0cfddc4c3015c | f4b78bf9f6ed196ff926036a51bfb638bb240369 | refs/heads/master | 2020-05-26T19:01:42.983442 | 2018-11-23T08:36:43 | 2018-11-23T08:36:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py | # -*- coding: utf-8 -*-
"""
# 用于储存计算股票持有收益的部分函数
Fri 2018/09/07
@author: Tracy Zhu
"""
# 导入系统库
import sys
# 导入用户库
sys.path.append("..")
from stock_base.stock_data_api import *
def stock_holding_profit(stock_code, trading_day, holding_period):
"""
股票从当天选出,以收盘价持有n天的收益;
:param stock_code:
:param trading_day:
:param holding_period:
:return:
"""
start_date = get_next_trading_day_stock(trading_day, 1)
end_date = get_next_trading_day_stock(trading_day, holding_period)
stock_df = get_stock_df(stock_code, start_date, end_date)
stock_df = stock_df[stock_df.PCT_CHG > -10]
cumprod_series = Series((stock_df.PCT_CHG / 100 + 1).cumprod().values, index=stock_df.time)
return cumprod_series.values[-1]
def trans_cumprod_pct(pct_chg_series):
"""
将日收益率序列转化成累计收益率序列
"""
cumprod_pct = (1 + pct_chg_series / 100).cumprod()
return cumprod_pct
def calc_evaluation_index(holding_cumprod_pct):
"""
根据策略收益率序列计算出策略的参数:夏普比,最大回撤,年化收益率
年化收益率, 夏普比仅适用于日线数据,最大回撤可以应用到其他数据;
:param holding_cumprod_pct:
:return:
"""
trading_days = len(holding_cumprod_pct)
annulized_return = float(holding_cumprod_pct.values[-1] + 1) ** (float(252) / float(trading_days)) - 1
sharpe_ratio = holding_cumprod_pct.mean() / (holding_cumprod_pct.std() + 1e-8) * float(252) ** 0.5
res = pd.DataFrame()
max_value = -999
for index_name in holding_cumprod_pct.index[1:]:
if not np.isnan(holding_cumprod_pct.loc[index_name]):
max_value = max(max_value, holding_cumprod_pct.loc[index_name])
res.loc[index_name, 'drawback'] = float(holding_cumprod_pct.loc[index_name]) / float(max_value) - 1
max_drowback = res['drawback'].min()
print("annulized return is {}%".format(annulized_return * 100))
print("sharp ratio is {}".format(sharpe_ratio))
print("max drawback is {}%".format(max_drowback * 100))
return annulized_return, sharpe_ratio, max_drowback
| [
"[email protected]"
] | |
ab9bb7381b1c6c995e3ab0d2d98413ffb89750ea | d1de9fdc4a444ff1c322e09c684ccb5247c22164 | /Analysis.py | 697fa13f96c73ef672c7c3fa929875337730197e | [] | no_license | AntoineValera/SynaptiQs | a178ddf5aa3269fe677afa68f6838db219763a65 | b44a27ba01262e68d74488f98502083c9d681eb6 | refs/heads/master | 2021-01-18T21:12:16.543581 | 2016-05-12T14:52:27 | 2016-05-12T14:52:27 | 40,709,264 | 0 | 0 | null | 2015-10-07T11:32:01 | 2015-08-14T10:17:16 | Python | UTF-8 | Python | false | false | 85,641 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 21 23:50:11 2013
@author: Antoine Valera
"""
from PyQt4 import QtCore, QtGui
import numpy
import scipy
from OpenElectrophy import AnalogSignal,SpikeTrain,gui,sql
from matplotlib import pyplot,numpy
import warnings
warnings.filterwarnings("ignore")
#TODO :
#Remove_a_Leak still contains some manual parameters
class Analysis(object):
"""
    This class contains the integrated SynaptiQs analysis functions
"""
def __init__(self):
self.__name__="Analysis"
self.UseUserDefinedColorset = False
self.Color = 'k'
self.SuperimposedOnAverage = True #if False, default Average button will not show superimposed traces
self.SuperImposeSpikesOnScalogram = False
def _all(self,All=False):
List=[]
i=self.__name__
for j in dir(eval(i)):
if All==False and j[:2] == '__':
pass
else:
List.append(i+'.'+j)
for i in List:
print i
def Remove_a_Leak(self,Signal):#,Leak_Removing_Interval_Start=0,Leak_Removing_Interval_End=-1):
#TODO ugly manual value to check here
#SealTestStart=2500
#SealTestStop=3600
SealTestStart=2500
SealTestStop=2501
        # self.Measurement_Interval is set by the measurement routines; ticking
        # the checkbox triggers them when it is missing (was hasattr(Main, ...),
        # but the attribute is stored on this Analysis instance)
        if hasattr(self, 'Measurement_Interval') == False:
Main.Display_Measures_Button.setCheckState(2)
StimStart=self.Measurement_Interval[2]
mask=numpy.zeros(len(Signal))
mask[SealTestStart:SealTestStop]=1
mask[StimStart:]=1
Max = numpy.ma.masked_array(Signal, mask)
if Mapping.CM.Types_of_Events_to_Measure == 'Negative':
self.Current_Leak=scipy.stats.scoreatpercentile(Max, 85)
elif Mapping.CM.Types_of_Events_to_Measure == 'Positive':
self.Current_Leak=scipy.stats.scoreatpercentile(Max, 15)
#self.Current_Leak=numpy.median(Signal[Leak_Removing_Interval_Start:Leak_Removing_Interval_End]) Median Filtering
#self.Current_Leak=scipy.stats.scoreatpercentile(Signal, 75)
#self.Current_Leak=scipy.stats.scoreatpercentile(scipy.signal.decimate(Navigate.si,10), 75) #Even faster
Signal=Signal-self.Current_Leak
return Signal
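    # Note on the leak estimate above: a high/low percentile of the masked trace
    # is used instead of a mean, which makes it robust to transient events.
    # Illustrative sketch (values are arbitrary):
    #   scipy.stats.scoreatpercentile([0., 0., 0., -50.], 85) is ~0, whereas
    #   the mean (-12.5) would be dragged down by the -50 event.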
def Measure_Local_Extremum(self,Signal,loc,meth):
bgn=str("Main."+str(loc[0])+".text()")
end=str("Main."+str(loc[1])+".text()")
size=str("Main."+str(loc[2])+".text()")
meth=str("Main."+str(meth)+".currentText()")
#We extract the wanted time for the ROI
#There is always a possibility to fall between two points,
# in which case we take the floor value
ROIBgn=int(float(eval(bgn))*Navigate.Points_by_ms)
ROIEnd=int(float(eval(end))*Navigate.Points_by_ms)
#Small bug correction if ROIBgn=ROIEnd
if ROIBgn == ROIEnd:
ROIEnd+=1
        #Small bug correction if ROIBgn is already the last point
if ROIBgn >= len(Signal):
ROIBgn = len(Signal)-1
ROIEnd = len(Signal)
return ROIBgn,ROIEnd
# msgBox = QtGui.QMessageBox()
# msgBox.setText(
# """
# <b>Measure range is wrong</b>
# <p>You can measure beyond the last point
# %s ignored
# """ %(loc))
# msgBox.exec_()
# return
#Location of the extremum position
if eval(meth) == 'Max':
ExtremumLocation = numpy.argmax(Signal[ROIBgn:ROIEnd])
elif eval(meth) == 'Min':
ExtremumLocation = numpy.argmin(Signal[ROIBgn:ROIEnd])
#Now, we create a range around that point corresponding to 'length'
#The average value will be the average of the range
LeftPointOfMeasurementWindow = int(ExtremumLocation-float(eval(size))*Navigate.Points_by_ms/2+float(eval(bgn))*Navigate.Points_by_ms)
        #If the measurement window touches the beginning of the signal, we crop it at 0
if LeftPointOfMeasurementWindow<0:
LeftPointOfMeasurementWindow=0
        #Not sure when this applies
if LeftPointOfMeasurementWindow<float(eval(bgn))*Navigate.Points_by_ms:
LeftPointOfMeasurementWindow=int(float(eval(bgn))*Navigate.Points_by_ms)
        #If the measurement window touches the end of the signal, we crop it at len(Signal)
RightPointOfMeasurementWindow = int(ExtremumLocation+float(eval(size))*Navigate.Points_by_ms/2+float(eval(bgn))*Navigate.Points_by_ms)
if RightPointOfMeasurementWindow>len(Signal):
RightPointOfMeasurementWindow=len(Signal)
        #Not sure when this applies
if RightPointOfMeasurementWindow>float(eval(end))*Navigate.Points_by_ms:
RightPointOfMeasurementWindow=int(float(eval(end))*Navigate.Points_by_ms)
return LeftPointOfMeasurementWindow,RightPointOfMeasurementWindow
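    # Minimal usage sketch for Measure_Local_Extremum (hypothetical call; the
    # ROI bounds are read from the GUI fields named in `loc`, mirroring how
    # Measure_on_Average uses Main.listofcoord below):
    #   left, right = Analysis.Measure_Local_Extremum(
    #       signal, ["Baseline1_begin", "Baseline1_end", "Baseline1_size"],
    #       "Baseline1_meth")
    #   value = numpy.mean(signal[left:right])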
def Measure_on_Average(self,List_of_Ids=None,Measure_All_from_Baseline1=False,Display_Superimposed_Traces=False,Rendering=True,Position=(None,None),Origin=None,All_from_Zero=False,ProgressDisplay=True,Channel=None,Color='k'):
"""
        This function measures the average trace of a given list of AnalogSignal ids (by default the tagged traces in Requete.Analogsignal_ids)
        It calculates mean amplitudes 1, 2 and 3 and mean charges 1, 2 and 3 from the SynaptiQs settings. These values are also returned (in this order)
        The function creates an Analysis.mean trace (which is also returned)
        If Measure_All_from_Baseline1 is True, mean amplitudes 1, 2 and 3 and mean charges 1, 2 and 3 are calculated from the baseline 1 value
        If All_from_Zero is True, mean amplitudes 1, 2 and 3 and mean charges 1, 2 and 3 are calculated from 0 after leak subtraction
        Color can be a string, or a vector
"""
##scipy.signal.decimate could accelerate the display
if List_of_Ids == None:
List_of_Ids = Requete.Analogsignal_ids
if Channel == None:
Channels=range(Requete.NumberofChannels)
else:
if type(Channel) == list:
Channels=Channel
else:
print 'channel parameter must be a list'
return
self.Currently_Used_Sweep_nb_for_Local_Average=[]#[[numpy.NaN]*Requete.NumberofChannels]*len(List_of_Ids)
NumberofChannels=len(Channels)
for idx,n in enumerate(Channels):
self.Check_Measuring_Parameters_Validity()
# if Main.SQLTabWidget.currentIndex() == 0 or Main.SQLTabWidget.currentIndex() == 1:
# sig = AnalogSignal().load(List_of_Ids[0],session=Requete.Global_Session)
# elif Main.SQLTabWidget.currentIndex() == 2:
# sig = eval("Analysis.RecordA"+str(Requete.Current_Sweep_Number))
#TODO : Temporary implementation
if self.UseUserDefinedColorset == True:
Color=self.Color
if type(Color) == str:
Color=[Color]*len(List_of_Ids)
#self.Check_Measuring_Parameters_Validity()
self.mean = numpy.zeros(len(Navigate.si[n]))
counter=0
self.List_of_Averaged_Sweeps=[]
for i in range(len(List_of_Ids)):
if ProgressDisplay==True:
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(List_of_Ids)-1)
Main.progress.setValue(i)
if Main.SQLTabWidget.currentIndex() == 2: # if Local file only
Requete.Current_Sweep_Number=i
if ((List_of_Ids is Requete.Analogsignal_ids) and (i >= int(Main.From.text())) and (i <= int(Main.To.text())) and (Requete.tag["Selection"][i][n] == 1)) or (List_of_Ids is not Requete.Analogsignal_ids):
counter+=1
self.List_of_Averaged_Sweeps.append(i)
if Main.Analyze_Filtered_Traces_Button.checkState() == 0:
Navigate.Load_This_Trace(List_of_Ids[i])
self.mean = self.mean+Navigate.si[n]
elif Main.Analyze_Filtered_Traces_Button.checkState() == 2:
Navigate.Load_This_Trace(List_of_Ids[i])
self.mean = self.mean+Navigate.Filtered_Signal[n]
Info_Message="It's an average of "+str(counter)+" Sweeps"
Main.status_text.setText(Info_Message)
self.mean/=counter
# if all form zero is true, we need to substract leak, so the checkbox must be ticked
LeakSubstractionIgnored=False
if All_from_Zero == True or Main.Measure_From_Zero_Button.checkState() == 2:
All_from_Zero = True
Measure_All_from_Baseline1 = False #cant be True at the same time
if Main.Remove_Leak_Button.checkState() == 0:
Main.Remove_Leak_Button.setCheckState(2)
else:
if Main.Remove_Leak_Button.checkState() == 2:
Main.Remove_Leak_Button.setCheckState(0)
LeakSubstractionIgnored=True
# If we have to
# if Main.Remove_Leak_Button.checkState() == 2:
#if All_from_Zero == False and Main.Remove_Leak_Button.checkState() == 2:
# Main.Remove_Leak_Button.setCheckState(0)
#leaktemporaryremoved=True
#else:
#leaktemporaryremoved=False
self.Ampvalues = range(6)
self.Surfacevalues = range(6)
self.Measurement_Interval = range(6)
self.left = range(6)
listofmeth=["Baseline1_meth","Peak1_meth",
"Baseline2_meth","Peak2_meth",
"Baseline3_meth","Peak3_meth"]
compteur=0
for loc in Main.listofcoord:
leftpnt,rightpnt = self.Measure_Local_Extremum(self.mean,loc,listofmeth[compteur])
avalue = numpy.mean(self.mean[leftpnt:rightpnt])
self.Ampvalues[compteur]=avalue
self.Measurement_Interval[compteur]=rightpnt-leftpnt
self.left[compteur]=leftpnt
compteur+=1
if Main.Measure_From_Baseline1_Button.checkState() == 0 :
self.Mean_Amplitude_1=(self.Ampvalues[1]-self.Ampvalues[0])
self.Mean_Amplitude_2=(self.Ampvalues[3]-self.Ampvalues[2])
self.Mean_Amplitude_3=(self.Ampvalues[5]-self.Ampvalues[4])
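                # Charge below = per-point sum over the peak window divided by
                # (Points_by_ms*1000), i.e. the integral in amplitude*seconds,
                # minus the baseline level times the window duration in seconds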
self.Mean_Charge_1=sum(self.mean[int(float(Main.Peak1_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak1_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-self.Ampvalues[0]*float(len(self.mean[int(float(Main.Peak1_begin.text())):int(float(Main.Peak1_end.text()))]))/1000
self.Mean_Charge_2=sum(self.mean[int(float(Main.Peak2_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak2_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-self.Ampvalues[2]*float(len(self.mean[int(float(Main.Peak2_begin.text())):int(float(Main.Peak2_end.text()))]))/1000
self.Mean_Charge_3=sum(self.mean[int(float(Main.Peak3_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak3_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-self.Ampvalues[4]*float(len(self.mean[int(float(Main.Peak3_begin.text())):int(float(Main.Peak3_end.text()))]))/1000
elif Main.Measure_From_Baseline1_Button.checkState() == 2 or Measure_All_from_Baseline1 == True:
self.Mean_Amplitude_1=(self.Ampvalues[1]-self.Ampvalues[0])
self.Mean_Amplitude_2=(self.Ampvalues[3]-self.Ampvalues[0])
self.Mean_Amplitude_3=(self.Ampvalues[5]-self.Ampvalues[0])
                # int() casts added: slice indices must be integers (matches the branch above)
                self.Mean_Charge_1=sum(self.mean[int(float(Main.Peak1_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak1_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-self.Ampvalues[0]*float(len(self.mean[int(float(Main.Peak1_begin.text())):int(float(Main.Peak1_end.text()))]))/1000
                self.Mean_Charge_2=sum(self.mean[int(float(Main.Peak2_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak2_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-self.Ampvalues[0]*float(len(self.mean[int(float(Main.Peak2_begin.text())):int(float(Main.Peak2_end.text()))]))/1000
                self.Mean_Charge_3=sum(self.mean[int(float(Main.Peak3_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak3_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-self.Ampvalues[0]*float(len(self.mean[int(float(Main.Peak3_begin.text())):int(float(Main.Peak3_end.text()))]))/1000
elif All_from_Zero == True:
self.Mean_Amplitude_1=self.Ampvalues[1]
self.Mean_Amplitude_2=self.Ampvalues[3]
self.Mean_Amplitude_3=self.Ampvalues[5]
self.baseline=numpy.zeros(int(len(self.Amplitudes_1)+2))
                # int() casts added: slice indices must be integers (matches the first branch)
                self.Mean_Charge_1=sum(self.mean[int(float(Main.Peak1_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak1_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)
                self.Mean_Charge_2=sum(self.mean[int(float(Main.Peak2_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak2_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)
                self.Mean_Charge_3=sum(self.mean[int(float(Main.Peak3_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak3_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)
if LeakSubstractionIgnored == True:
Main.Remove_Leak_Button.setCheckState(2)
if Rendering == True: #Still some pb if called from outside
print 'rendering On'
#Info_Message="Amp1 = "+str(self.Mean_Amplitude_1)+" Amp2 = "+str(self.Mean_Amplitude_2)+" Amp3 = "+str(self.Mean_Amplitude_3)
#Main.status_text.setText(Info_Message)
#Creating measurements labels
self.Base1 = numpy.ones(self.Measurement_Interval[0])*self.Ampvalues[0]
self.Base1_coord = numpy.array(range(len(self.Base1)))+self.left[0]
self.Peak1 = numpy.ones(self.Measurement_Interval[1])*self.Ampvalues[1]
self.Peak1_coord = numpy.array(range(len(self.Peak1)))+self.left[1]
self.Base2 = numpy.ones(self.Measurement_Interval[2])*self.Ampvalues[2]
self.Base2_coord = numpy.array(range(len(self.Base2)))+self.left[2]
self.Peak2 = numpy.ones(self.Measurement_Interval[3])*self.Ampvalues[3]
self.Peak2_coord = numpy.array(range(len(self.Peak2)))+self.left[3]
self.Base3 = numpy.ones(self.Measurement_Interval[4])*self.Ampvalues[4]
self.Base3_coord = numpy.array(range(len(self.Base3)))+self.left[4]
self.Peak3 = numpy.ones(self.Measurement_Interval[5])*self.Ampvalues[5]
self.Peak3_coord = numpy.array(range(len(self.Peak3)))+self.left[5]
# if Main.Measure_From_Zero_Button.checkState() == 2 or All_from_Zero == True:
# self.Base1-=self.Ampvalues[0]
# self.Peak1-=self.Ampvalues[0]
# self.Base2-=self.Ampvalues[0]
# self.Peak2-=self.Ampvalues[0]
# self.Base3-=self.Ampvalues[0]
# self.Peak3-=self.Ampvalues[0]
# elif Main.Measure_From_Baseline1_Button.checkState() == 2 or Measure_All_from_Baseline1 == True:
# self.Base1-=self.Ampvalues[0]
# self.Peak1-=self.Ampvalues[0]
# self.Base2-=self.Ampvalues[2]
# self.Peak2-=self.Ampvalues[2]
# self.Base3-=self.Ampvalues[4]
# self.Peak3-=self.Ampvalues[4]
#Only Once
if QtCore.QObject().sender().__class__.__name__ == 'QCheckBox':
if idx == 0:
self.Wid.canvas.axes.clear()
else:
if idx == 0:
#For the first trace, we create the widget
self.Wid = MyMplWidget(title = 'Averaged Trace',subplots=[NumberofChannels,1,idx+1])
self.Wid.canvas.Superimpose_Used_Traces_Button = QtGui.QCheckBox()
self.Wid.canvas.Superimpose_Used_Traces_Button.setText("Superimpose")
if Main.Superimpose_Used_Traces == False or Display_Superimposed_Traces == False:
self.Wid.canvas.Superimpose_Used_Traces_Button.setCheckState(0)
if Main.Superimpose_Used_Traces == True or Display_Superimposed_Traces == True :
self.Wid.canvas.Superimpose_Used_Traces_Button.setCheckState(2)
self.Wid.toolbar.addWidget(self.Wid.canvas.Superimpose_Used_Traces_Button)
QtCore.QObject.connect(self.Wid.canvas.Superimpose_Used_Traces_Button,QtCore.SIGNAL('stateChanged(int)'),self.Wid.canvas.Update_Superimpose)
else:
#For the next ones we do just add subplots
self.Wid.canvas.axes = self.Wid.canvas.fig.add_subplot(NumberofChannels,1,idx+1)
#This can be optimized
if Main.Superimpose_Used_Traces == True or Display_Superimposed_Traces == True and self.SuperimposedOnAverage == True:
self.Wid.canvas.Object_Selection_Mode = 'Trace'
for i,j in enumerate(List_of_Ids):
if ((List_of_Ids is Requete.Analogsignal_ids) and (i >= int(Main.From.text())) and (i <= int(Main.To.text())) and (Requete.tag["Selection"][i][n] == 1)) or (List_of_Ids is not Requete.Analogsignal_ids):
if Main.SQLTabWidget.currentIndex() == 2:
Requete.Current_Sweep_Number=i
Navigate.Load_This_Trace(i)
else:
Navigate.Load_This_Trace(j)
if Main.Analyze_Filtered_Traces_Button.checkState() == 0:
locals()["Displayed_"+str(i)]=Navigate.si[n]
elif Main.Analyze_Filtered_Traces_Button.checkState() == 2:
locals()["Displayed_"+str(i)]=Navigate.Filtered_Signal[n]
if List_of_Ids is Requete.Analogsignal_ids:
#i is the sweepnumber
#print i,self.Currently_Used_Sweep_nb_for_Local_Average[i]
self.Currently_Used_Sweep_nb_for_Local_Average.append(i+n)#[i][n]=i
else:
#j is the analogsignal id
self.Currently_Used_Sweep_nb_for_Local_Average.append(j)#[i][n]=j
self.Wid.canvas.axes.plot(Requete.timescale,eval("Displayed_"+str(i)),'k',alpha=0.3,picker=1)
self.Wid.Status.setText("It's an average of "+str(counter)+" Sweeps"+" at position "+str(Position)+
"<p>"+"Average A1 = "+str(self.Mean_Amplitude_1)+"\t Average C1 = "+str(self.Mean_Charge_1)+
"<p>"+"Average A2 = "+str(self.Mean_Amplitude_2)+"\t Average C2 = "+str(self.Mean_Charge_2)+
"<p>"+"Average A3 = "+str(self.Mean_Amplitude_3)+"\t Average C3 = "+str(self.Mean_Charge_3)+
"<p>"+"Sweep "+str(self.Currently_Used_Sweep_nb_for_Local_Average)+" were used")
else:
self.Wid.Status.setText("It's an average of "+str(counter)+" Sweeps"+
"<p>"+"Average A1 = "+str(self.Mean_Amplitude_1)+"\t Average C1 = "+str(self.Mean_Charge_1)+
"<p>"+"Average A2 = "+str(self.Mean_Amplitude_2)+"\t Average C2 = "+str(self.Mean_Charge_2)+
"<p>"+"Average A3 = "+str(self.Mean_Amplitude_3)+"\t Average C3 = "+str(self.Mean_Charge_3)+
"<p>"+"Sweep "+str(self.List_of_Averaged_Sweeps)+" were used")
self.Wid.canvas.axes.plot(Requete.timescale,self.mean,picker=1,lw=2,c='r')
self.Wid.canvas.axes.plot(self.Base1_coord/Navigate.Points_by_ms,self.Base1,'r',linewidth=3)
self.Wid.canvas.axes.plot(self.Peak1_coord/Navigate.Points_by_ms,self.Peak1,'r',linewidth=3)
self.Wid.canvas.axes.plot(self.Base2_coord/Navigate.Points_by_ms,self.Base2,'r',linewidth=3)
self.Wid.canvas.axes.plot(self.Peak2_coord/Navigate.Points_by_ms,self.Peak2,'r',linewidth=3)
self.Wid.canvas.axes.plot(self.Base3_coord/Navigate.Points_by_ms,self.Base3,'r',linewidth=3)
self.Wid.canvas.axes.plot(self.Peak3_coord/Navigate.Points_by_ms,self.Peak3,'r',linewidth=3)
self.Wid.canvas.axes.set_xlabel("Time (ms)")
self.Wid.canvas.axes.set_ylabel("Amplitude")
if Rendering == True:
if QtCore.QObject().sender().__class__.__name__ == 'QCheckBox':
self.Wid.canvas.draw()
else:
self.Wid.show()
#Requete.Current_Sweep_Number=int(Main.Sweep_Number_Input_Field.text())
return self.Mean_Amplitude_1,self.Mean_Amplitude_2,self.Mean_Amplitude_3,self.Mean_Charge_1,self.Mean_Charge_2,self.Mean_Charge_3, self.mean, List_of_Ids
def Display_Superimposed_Traces(self,Traces_to_Display=None,color='k',alpha=0.3):
"""
        This function displays all the tagged traces if Traces_to_Display == None, or the AnalogSignal ids given in the Traces_to_Display list
        You can subtract the leak by checking the Main.Remove_Leak_Button checkbox
"""
#TODO merge with Average
self.Wid = MyMplWidget(title = 'SuperImposed Traces')
self.Wid.canvas.Object_Selection_Mode = 'Trace'
if Traces_to_Display == None: #SynaptiQs internal Call
Traces_to_Display = Requete.Analogsignal_ids
Number_of_Superimposed_Traces=0
if Main.Analyze_Filtered_Traces_Button.checkState() == 0:
SignalMode="Navigate.si[n]"
else:
SignalMode="Navigate.Filtered_Signal[n]"
for n in range(Requete.NumberofChannels):
for i in range(len(Traces_to_Display)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Traces_to_Display)-1)
Main.progress.setValue(i)
if i >= int(Main.From.text()):
if i <= int(Main.To.text()):
if Requete.tag["Selection"][i][n] == 1:
if Main.SQLTabWidget.currentIndex() == 2: # if Local file only
Requete.Current_Sweep_Number=i
Navigate.Load_This_Trace(Traces_to_Display[i])
Signal=eval(SignalMode)
#Signal=scipy.signal.decimate(Signal,10)
#Timescale=scipy.signal.decimate(Requete.timescale,10)
#Timescale=Requete.timescale
self.Wid.canvas.axes.plot(Requete.timescale,Signal,color=color,alpha=alpha)
Number_of_Superimposed_Traces+=1
else: #Plugin Call
Number_of_Superimposed_Traces=0
for i in range(len(Traces_to_Display)):
temp_event=AnalogSignal.load(Traces_to_Display[i],session=Requete.Global_Session)
temp_signal=temp_event.signal
temp_time_scale=numpy.array(range(len(temp_signal)))/(temp_event.sampling_rate/1000)
Navigate.Load_This_Trace(Traces_to_Display[i])
self.Wid.canvas.axes.plot(temp_time_scale,temp_signal,color=color,alpha=alpha)
Number_of_Superimposed_Traces+=1
self.Wid.canvas.axes.set_xlabel("Time")
self.Wid.canvas.axes.set_ylabel("Amplitude")
self.Wid.show()
Info_Message="It's a superposition of "+str(Number_of_Superimposed_Traces)+" sweeps"
Main.status_text.setText(Info_Message)
def PeakDetection(self,Signal , delta, x = None):
        '''
        Threshold-based peak detector, adapted from
        http://baccuslab.github.io/pyret/_modules/spiketools.html
        Returns two arrays of (position, value) pairs for the detected maxima
        and minima; `delta` is the minimum excursion required between a peak
        and the surrounding valleys before it is accepted.
        '''
maxtab = []
mintab = []
if x is None:
x = numpy.arange(len(Signal))
        Signal = numpy.asarray(Signal)
        if delta<0:
            # work on an inverted copy so the caller's array is left untouched
            Signal = -Signal
            delta*=-1
mn, mx = numpy.Inf, -numpy.Inf
mnpos, mxpos = numpy.NaN, numpy.NaN
lookformax = True
for i in numpy.arange(len(Signal)):
this = Signal[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
        # when delta<0 the detection ran on an inverted copy of the signal, so
        # the returned values are those of the inverted trace (callers such as
        # DetectSpikesOnLocalFile compensate with event[1]*-1)
        return numpy.array(maxtab), numpy.array(mintab)
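    # PeakDetection usage sketch (illustrative threshold): detect events of at
    # least 5 units, with positions expressed on the trace timescale:
    #   maxtab, mintab = Analysis.PeakDetection(trace, 5.0, x=timescale)
    #   peak_times, peak_amps = maxtab[:, 0], maxtab[:, 1]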
    def FastPeakDetection(self,Signal , delta, x = None):
        '''
        Faster alternative to PeakDetection (scipy.signal.argrelextrema); the
        `delta` threshold is not applied: every local extremum is returned.
        '''
        import scipy.signal
        if x is None:
            x = numpy.arange(len(Signal))
        Signal = numpy.asarray(Signal)
        maxima = scipy.signal.argrelextrema(Signal, numpy.greater)[0] # local maxima indices
        minima = scipy.signal.argrelextrema(Signal, numpy.less)[0] # local minima indices
        return numpy.column_stack((x[maxima], Signal[maxima])), numpy.column_stack((x[minima], Signal[minima]))
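    # FastPeakDetection usage sketch: since no threshold is applied, expect many
    # more candidates than with PeakDetection on a noisy trace:
    #   maxtab, mintab = Analysis.FastPeakDetection(trace, 0)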
def DetectSpikesOnLocalFile(self,Thr):
Source = Requete.Analogsignal_ids
counter=0
#for n in range(Requete.NumberofChannels):
n=int(Mapping.CurrentChannel)
for i in range(len(Source)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Source)-1)
Main.progress.setValue(i)
Requete.Current_Sweep_Number=i
Navigate.Load_This_Trace(i)
if i >= int(Main.From.text()) and i <= int(Main.To.text()) and Requete.tag["Selection"][i][n] == 1:
Max,Min=self.PeakDetection(Navigate.si[n], Thr, x = Navigate.timescale)
Current_Spike_Times=[]
Amplitude_At_Spike_Time=[]
if Thr>0:
for event in Max:
Current_Spike_Times.append(event[0])
Amplitude_At_Spike_Time.append(event[1])
else:
for event in Max:
Current_Spike_Times.append(event[0])
Amplitude_At_Spike_Time.append(event[1]*-1)
Requete.SpikeTrainfromLocal[str(i)+'_'+str(n)]=Current_Spike_Times
Requete.AmpSpikeTrainfromLocal[str(i)+'_'+str(n)]=Amplitude_At_Spike_Time
counter+=1
return
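    # DetectSpikesOnLocalFile usage sketch (illustrative threshold): a negative
    # Thr detects downward events, a positive one upward events; the results are
    # stored per sweep/channel in Requete.SpikeTrainfromLocal:
    #   Analysis.DetectSpikesOnLocalFile(-20.)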
def MeasureNoise(self):
Bsl_bgn=float(Main.Baseline1_begin.text())/1000
Bsl_end=float(Main.Baseline1_end.text())/1000
Mes_bgn=float(Main.Peak1_begin.text())/1000
Mes_end=float(Main.Peak1_end.text())/1000
Source = Requete.Spiketrain_ids
Maximal_Frequency=[numpy.NaN]*len(Requete.Spiketrain_ids)
Events=[numpy.NaN]*len(Requete.Spiketrain_ids)
n=0 #temporary channel
counter=0
for i in range(len(Source)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Source)-1)
Main.progress.setValue(i)
if Main.SQLTabWidget.currentIndex() == 2: # if Local file only
Requete.Current_Sweep_Number=i
if i >= int(Main.From.text()) and i <= int(Main.To.text()) and Requete.tag["Selection"][i][n] == 1:
if Main.SQLTabWidget.currentIndex() != 2:
sptr=SpikeTrain.load(Source[i][n],session=Requete.Global_Session)
#first we could calculate the baseline number of events, not done
subspikes=[]
                    #Second, we count the events in the measurement range
for j in sptr._spike_times-sptr.t_start:
if j >Mes_bgn and j <Mes_end:
subspikes.append(j)
elif Main.SQLTabWidget.currentIndex() == 2:
sptr=Requete.SpikeTrainfromLocal[str(i)]
#first we could calculate the baseline number of events, not done
subspikes=[]
                    #Second, we count the events in the measurement range
for j in sptr:
if j/1000. >Mes_bgn and j/1000. <Mes_end:
subspikes.append(j)
Events[i]=len(subspikes)
temp=[]
                if len(subspikes)>1:
                    # use a separate index k to avoid shadowing the sweep index i
                    for k in range(len(subspikes)-1):
                        if subspikes[k+1]-subspikes[k]!=0: #Usually due to a bug with duplicate point
                            temp.append(subspikes[k+1]-subspikes[k])
                    Maximal_Frequency[i]=1/numpy.min(temp)
elif len(subspikes) == 1:
Maximal_Frequency[i]=0
elif len(subspikes) == 0:
Maximal_Frequency[i]=numpy.NaN
counter+=1
return numpy.nanmean(Events),numpy.nanstd(Events)
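    # MeasureNoise returns (mean, s.d.) of the per-sweep event counts inside the
    # Peak1 window, which can serve as a baseline noise level when thresholding
    # evoked responses, e.g. (arbitrary 2-sigma criterion):
    #   noise_mean, noise_sd = Analysis.MeasureNoise()
    #   threshold = noise_mean + 2*noise_sd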
def Count_All_Events(self):#,Rendering=True,Range=None,Silent=False):
Source = Requete.Spiketrain_ids
self.Events=[numpy.NaN]*len(Requete.Spiketrain_ids)
self.Maximal_Frequency=[numpy.NaN]*len(Requete.Spiketrain_ids)
counter=0
for i in range(len(Source)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Source)-1)
Main.progress.setValue(i)
if Main.SQLTabWidget.currentIndex() == 2: # if Local file only
Requete.Current_Sweep_Number=i
if i >= int(Main.From.text()) and i <= int(Main.To.text()) and Requete.tag["Selection"][i] == 1:
sptr=SpikeTrain.load(Source[i],session=Requete.Global_Session)
self.Events[i]=len(sptr._spike_times)
temp=[]
                if len(sptr._spike_times)>1:
                    # use a separate index k to avoid shadowing the sweep index i
                    for k in range(len(sptr._spike_times)-1):
                        if sptr._spike_times[k+1]-sptr._spike_times[k]!=0: #Usually due to a bug with duplicate point
                            temp.append(sptr._spike_times[k+1]-sptr._spike_times[k])
                    self.Maximal_Frequency[i]=1/numpy.min(temp)
elif len(sptr._spike_times) == 1:
self.Maximal_Frequency[i]=0
elif len(sptr._spike_times) == 0:
self.Maximal_Frequency[i]=numpy.NaN
counter+=1
return self.Events,self.Events,self.Events,self.Maximal_Frequency,self.Maximal_Frequency,self.Maximal_Frequency
def Count_Events(self):#,Rendering=True,Range=None,Silent=False):
Bsl_bgn=float(Main.Baseline1_begin.text())/1000
Bsl_end=float(Main.Baseline1_end.text())/1000
Mes_bgn=float(Main.Peak1_begin.text())/1000
Mes_end=float(Main.Peak1_end.text())/1000
Source = Requete.Spiketrain_ids
self.Events=[numpy.NaN]*len(Requete.Spiketrain_ids)
self.Maximal_Frequency=[numpy.NaN]*len(Requete.Spiketrain_ids)
n=0 #temporary channel
counter=0
for i in range(len(Source)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Source)-1)
Main.progress.setValue(i)
if Main.SQLTabWidget.currentIndex() == 2: # if Local file only
Requete.Current_Sweep_Number=i
if i >= int(Main.From.text()) and i <= int(Main.To.text()) and Requete.tag["Selection"][i][n] == 1:
if Main.SQLTabWidget.currentIndex() != 2:
sptr=SpikeTrain.load(Source[i][n],session=Requete.Global_Session)
#first we could calculate the baseline number of events, not done
subspikes=[]
                    #Second, we count the events in measurement range
for j in sptr._spike_times-sptr.t_start:
if j >Mes_bgn and j <Mes_end:
subspikes.append(j)
elif Main.SQLTabWidget.currentIndex() == 2:
sptr=Requete.SpikeTrainfromLocal[str(i)]
#first we could calculate the baseline number of events, not done
subspikes=[]
                    #Second, we count the events in measurement range
for j in sptr:
if j/1000. >Mes_bgn and j/1000. <Mes_end:
subspikes.append(j)
self.Events[i]=len(subspikes)
temp=[]
if len(subspikes)>1:
                    for k in range(len(subspikes)-1): #use k here so the sweep index i is not shadowed
                        if subspikes[k+1]-subspikes[k]!=0: #Usually due to a bug with duplicate points
                            temp.append(subspikes[k+1]-subspikes[k])
                    self.Maximal_Frequency[i]=1/numpy.min(temp)
elif len(subspikes) == 1:
self.Maximal_Frequency[i]=0
elif len(subspikes) == 0:
self.Maximal_Frequency[i]=numpy.NaN
counter+=1
return self.Events,self.Events,self.Events,self.Maximal_Frequency,self.Maximal_Frequency,self.Maximal_Frequency
def Measure(self,Rendering=True,Measure_Filtered=False,Measure_All_from_Baseline1=False,Silent=False,All_from_Zero=False,Channel=None):
"""
Mesure les amplitudes et la charge entre
-Baseline1 et Peak1
-Baseline2 et Peak2
-Baseline3 et Peak3
Sachant que
Baseline1 = moyenne entre Analysis.Baseline1_begin et Analysis.Baseline1_end sur Analysis.Baseline1_size(en ms)*Navigate.Points_by_ms
et que
Baseline1_meth definit si c'est un maximum ou un minimum que l'on cherche (courant + ou -)
enfin, selon si Main.Analyze_Filtered_Traces_Button est coché ou non, on analyse sur la trace brute ou sur la trace filtrée
La fonction retourne 3 Listes pour les amplitudes:
Analysis.Amplitudes_1, Analysis.Amplitudes_2, Analysis.Amplitudes_3 qui sont les liste des amplitudes 1,2 et 3 respectivement
La fonction retourne 3 Listes pour les charges:
Analysis.Charges_1, Analysis.Charges_2, Analysis.Charges_3 qui sont les liste des amplitudes 1,2 et 3 respectivement
La fonction sort aussi 2*3 tables de valeurs et fait 2*3 plots
Les points non tagguées sont des Nan
if Rendering=False, theanalysis doesn't show the final figure and value tables
"""
if Main.Measure_From_Zero_Button.checkState() == 2:
            All_from_Zero = True
if All_from_Zero == True:
Main.Remove_Leak_Button.setCheckState(2)
Measure_All_from_Baseline1 = False
if Main.Remove_Leak_Button.checkState() == 2:
if All_from_Zero == False:
pass
#Main.Remove_Leak_Button.setCheckState(0)
leaktemporaryremoved=True
else:
leaktemporaryremoved=False
if Main.Analyze_Filtered_Traces_Button.checkState() == 0:
si = Navigate.si
elif Main.Analyze_Filtered_Traces_Button.checkState() == 2 or Measure_Filtered == True:
si = Navigate.Filtered_Signal
FullPopupList=[]
if Channel == None:
Channel=range(Requete.NumberofChannels)
else:
if type(Channel) == int or type(Channel) == float:
Channel = [Channel]
for n in Channel:
            #import the signal
self.Check_Measuring_Parameters_Validity()
Ampvalues = range(6)
Chargevalues = range(6)
self.Amplitudes_1=range(len(Requete.Analogsignal_ids))
self.Amplitudes_2=range(len(Requete.Analogsignal_ids))
self.Amplitudes_3=range(len(Requete.Analogsignal_ids))
self.Charges_1=range(len(Requete.Analogsignal_ids))
self.Charges_2=range(len(Requete.Analogsignal_ids))
self.Charges_3=range(len(Requete.Analogsignal_ids))
compteur2=0
listofmeth= ["Baseline1_meth","Peak1_meth",
"Baseline2_meth","Peak2_meth",
"Baseline3_meth","Peak3_meth"]
for i,j in enumerate(Requete.Analogsignal_ids):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Requete.Analogsignal_ids)-1)
Main.progress.setValue(compteur2)
if Requete.tag["Selection"][compteur2][n] == 1 and compteur2 >= int(Main.From.text()) and compteur2 <= int(Main.To.text()): #On n'analyse que les amplitudes sur les sweeps taggués
if Main.SQLTabWidget.currentIndex() == 2:
Requete.Current_Sweep_Number=i
Navigate.Load_This_Trace(i)
else:
Navigate.Load_This_Trace(j)
if Main.Analyze_Filtered_Traces_Button.checkState() == 0:
si = Navigate.si[n]
elif Main.Analyze_Filtered_Traces_Button.checkState() == 2:
si = Navigate.Filtered_Signal[n]
compteur=0
for loc in Main.listofcoord:
                        #loc[0] is the start of the range
                        #loc[1] is the end of the range
                        #loc[2] is the size of the range used to compute the mean
                        leftpnt,rightpnt = self.Measure_Local_Extremum(si,loc,listofmeth[compteur])
                        avalue = numpy.mean(si[leftpnt:rightpnt]) #avalue is the amplitude
Ampvalues[compteur]=avalue
compteur+=1
else:
for a in range(6):
Ampvalues[a]=numpy.NaN
Chargevalues[a]=numpy.NaN
if Main.Measure_From_Baseline1_Button.checkState() == 0:
self.Amplitudes_1[compteur2]=(Ampvalues[1]-Ampvalues[0])
self.Amplitudes_2[compteur2]=(Ampvalues[3]-Ampvalues[2])
self.Amplitudes_3[compteur2]=(Ampvalues[5]-Ampvalues[4])
self.baseline=numpy.zeros(int(len(self.Amplitudes_1)+2))
self.Charges_1[compteur2]=sum(si[int(float(Main.Peak1_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak1_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-Ampvalues[0]*float(len(si[int(float(Main.Peak1_begin.text())):int(float(Main.Peak1_end.text()))]))/1000
self.Charges_2[compteur2]=sum(si[int(float(Main.Peak2_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak2_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-Ampvalues[2]*float(len(si[int(float(Main.Peak2_begin.text())):int(float(Main.Peak2_end.text()))]))/1000
self.Charges_3[compteur2]=sum(si[int(float(Main.Peak3_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak3_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-Ampvalues[4]*float(len(si[int(float(Main.Peak3_begin.text())):int(float(Main.Peak3_end.text()))]))/1000
elif Main.Measure_From_Baseline1_Button.checkState() == 2 or Measure_All_from_Baseline1 == True:
self.Amplitudes_1[compteur2]=(Ampvalues[1]-Ampvalues[0])
self.Amplitudes_2[compteur2]=(Ampvalues[3]-Ampvalues[0])
self.Amplitudes_3[compteur2]=(Ampvalues[5]-Ampvalues[0])
self.baseline=numpy.zeros(int(len(self.Amplitudes_1)+2))
self.Charges_1[compteur2]=sum(si[int(float(Main.Peak1_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak1_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-Ampvalues[0]*float(len(si[int(float(Main.Peak1_begin.text())):int(float(Main.Peak1_end.text()))]))/1000
self.Charges_2[compteur2]=sum(si[int(float(Main.Peak2_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak2_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-Ampvalues[0]*float(len(si[int(float(Main.Peak2_begin.text())):int(float(Main.Peak2_end.text()))]))/1000
self.Charges_3[compteur2]=sum(si[int(float(Main.Peak3_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak3_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)-Ampvalues[0]*float(len(si[int(float(Main.Peak3_begin.text())):int(float(Main.Peak3_end.text()))]))/1000
elif All_from_Zero == True:
self.Amplitudes_1[compteur2]=Ampvalues[1]
self.Amplitudes_2[compteur2]=Ampvalues[3]
self.Amplitudes_3[compteur2]=Ampvalues[5]
self.baseline=numpy.zeros(int(len(self.Amplitudes_1)+2))
self.Charges_1[compteur2]=sum(si[int(float(Main.Peak1_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak1_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)
self.Charges_2[compteur2]=sum(si[int(float(Main.Peak2_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak2_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)
self.Charges_3[compteur2]=sum(si[int(float(Main.Peak3_begin.text())*Navigate.Points_by_ms):int(float(Main.Peak3_end.text())*Navigate.Points_by_ms)])/(Navigate.Points_by_ms*1000)
compteur2+=1
if Rendering==True:
#TODO: only show the 2nd round
name='popupWidget'+str(n)
setattr(self,name,QtGui.QWidget())
popup=eval('self.'+name)
FullPopupList.append(popup)
                popup.setMinimumSize(600,600) #sets the minimum widget size (width, height)
vbox = QtGui.QVBoxLayout()
hbox = QtGui.QHBoxLayout()
                #1 : create the tabs: numerical values
self.ValueTab = QtGui.QTabWidget(popup)
self.ValueTab.setMaximumSize(400,1024)
self.Amplitude_table = SpreadSheet(parent=self.ValueTab,Source=[self.Amplitudes_1,self.Amplitudes_2,self.Amplitudes_3],Labels=["Amp1","Amp2","Amp3"])
self.Charge_table = SpreadSheet(parent=self.ValueTab,Source=[self.Charges_1,self.Charges_2,self.Charges_3],Labels=["Char1","Char2","Char3"])
vbox.addWidget(self.ValueTab)
hbox.addLayout(vbox)
vbox = QtGui.QVBoxLayout()
self.ValueTab.addTab(self.Amplitude_table,"Amplitudes")
self.ValueTab.addTab(self.Charge_table,"Charges")
                #2 : create the tabs: graphs
self.Graphtab = QtGui.QTabWidget(popup)
self.Wid=MyMplWidget()#self.Amplitude_graph)
self.Wid2=MyMplWidget()#self.Charge_graph)
self.Graphtab.addTab(self.Wid,"Amplitudes")
self.Graphtab.addTab(self.Wid2,"Charges")
vbox.addWidget(self.Graphtab)
hbox.addStrut(50)
hbox.addLayout(vbox)
popup.setLayout(hbox)
self.Wid.canvas.axes.plot(self.baseline,'k--',)
A1, = self.Wid.canvas.axes.plot(self.Amplitudes_1,'bo-',alpha=0.7)
A2, = self.Wid.canvas.axes.plot(self.Amplitudes_2,'ro-',alpha=0.7)
A3, = self.Wid.canvas.axes.plot(self.Amplitudes_3,'go-',alpha=0.7)
l=self.Wid.canvas.axes.legend([A1, A2, A3], ["Amplitude 1", "Amplitude 2", "Amplitude 3"], loc='best',fancybox=True)
l.get_frame().set_alpha(0.5)
self.Wid.canvas.axes.set_xlabel("Sweep #")
self.Wid.canvas.axes.set_ylabel("Amplitude (pA)")
self.Wid2.canvas.axes.plot(self.baseline,'k--',)
C1, = self.Wid2.canvas.axes.plot(self.Charges_1,'bo-',alpha=0.7)
C2, = self.Wid2.canvas.axes.plot(self.Charges_2,'ro-',alpha=0.7)
C3, = self.Wid2.canvas.axes.plot(self.Charges_3,'go-',alpha=0.7)
l=self.Wid2.canvas.axes.legend([C1, C2, C3], ["Charge 1", "Charge 2", "Charge 3"], loc='best',fancybox=True)
l.get_frame().set_alpha(0.5)
self.Wid2.canvas.axes.set_xlabel("Sweep #")
self.Wid2.canvas.axes.set_ylabel("Charge (pC)")
Infos.Add_Array(Arrays=[ "Analysis.Amplitudes_1",
"Analysis.Amplitudes_2",
"Analysis.Amplitudes_3",
"Analysis.Charges_1",
"Analysis.Charges_2",
"Analysis.Charges_3"])
if leaktemporaryremoved == True and All_from_Zero == False:
Main.Remove_Leak_Button.setCheckState(2)
for i in FullPopupList:
i.show()
return self.Amplitudes_1,self.Amplitudes_2,self.Amplitudes_3,self.Charges_1,self.Charges_2,self.Charges_3
def Set_User_Parameters(self,name):
#Index is the position corresponding to the wanted name
index=Main.User_Defined_Measurement_Parameters.findText(name)
if index != -1 :
Main.User_Defined_Measurement_Parameters.setCurrentIndex(index)
self.Load_User_Defined_Parameters(index,True)
else:
msgBox = QtGui.QMessageBox()
msgBox.setText(
"""
<b> %s doesn't exist in Main.User_Defined_Measurement_Parameters
""" % (name))
msgBox.exec_()
def Get_User_Parameters(self):
return str(Main.User_Defined_Measurement_Parameters.currentText())
def Set_User_Defined_Measurement_Parameters_to_Zero(self):
for i in range(12):
Main.param_inf[i+10]=float(0.0)
for i in range(6):
Main.param_inf[i+22]=float(1.0)
Main.Create_Measure_Variables()
for a in Main.listofmeasurecoord:
exec('Main.'+str(a)+'.setText("0.0")')
for a in ["Baseline1_size","Peak1_size","Baseline2_size","Peak2_size","Baseline3_size","Peak3_size"]:
exec('Main.'+a+'.setText("1.0")')
for a in ["Baseline1_meth","Peak1_meth","Baseline2_meth","Peak2_meth","Baseline3_meth","Peak3_meth"]:
exec('index=Main.'+a+'.findText("Min")')
exec('Main.'+a+'.setCurrentIndex(index)')
self.Load_User_Defined_Parameters(0)
def Add_User_Defined_Measurement_Parameters(self):
Wid=MyMplWidget()
savename, ok = QtGui.QInputDialog.getText(Wid,'Input Dialog',
'Please enter parameters name')
savename=str(savename)
if ok:
a=int(Main.User_Defined_Measurement_Parameters.count())
Main.User_Defined_Measurement_Parameters.insertItem(a,savename)
Main.User_Defined_Measurement_Parameters.setCurrentIndex(a)
templist=[savename]
for a in Main.listofmeasurecoord:
exec('temp = Main.'+str(a)+'.text()')
templist.append(str(temp))
for a in ["Baseline1_size","Peak1_size","Baseline2_size","Peak2_size","Baseline3_size","Peak3_size"]:
exec('temp = Main.'+str(a)+'.text()')
templist.append(str(temp))
for a in ["Baseline1_meth","Peak1_meth","Baseline2_meth","Peak2_meth","Baseline3_meth","Peak3_meth"]:
exec('temp =Main.'+a+'.currentText()')
templist.append(str(temp))
Main.Analysis_Preferences.append(templist)
self.Load_User_Defined_Parameters(0)
def Remove_User_Defined_Measurement_Parameters(self):
a=int(Main.User_Defined_Measurement_Parameters.count())
Main.Analysis_Preferences.pop(int(Main.User_Defined_Measurement_Parameters.currentIndex()))
Main.User_Defined_Measurement_Parameters.removeItem(Main.User_Defined_Measurement_Parameters.currentIndex())
Main.User_Defined_Measurement_Parameters.setCurrentIndex(a-2)
self.Load_User_Defined_Parameters(0)
def Load_User_Defined_Parameters(self,index,External=False):
#name of the loaded list: Main.Analysis_Preferences[int(Main.sender().currentIndex())][0]
if External == False:
for i in range(23):
Main.param_inf[i+10]=Main.Analysis_Preferences[int(Main.User_Defined_Measurement_Parameters.currentIndex())][i+1]
if i < 18:
Main.param_inf[i+10]=float(Main.param_inf[i+10])
Main.Create_Measure_Variables()
compteur=0
for a in Main.listofmeasurecoord:
setnew = 'Main.'+str(a)+'.setText("'+str(Main.param_inf[compteur+10])+'")'
exec(setnew)
compteur+=1
compteur=0
for a in ["Baseline1_size","Peak1_size","Baseline2_size","Peak2_size","Baseline3_size","Peak3_size"]:
setnew = 'Main.'+str(a)+'.setText("'+str(Main.param_inf[compteur+22])+'")'
exec(setnew)
compteur+=1
compteur=0
for a in ["Baseline1_meth","Peak1_meth","Baseline2_meth","Peak2_meth","Baseline3_meth","Peak3_meth"]:
exec('index=Main.'+a+'.findText(str(Main.param_inf[compteur+28]))')
#print 'internal',index,str(Main.param_inf[compteur+28])
exec('Main.'+a+'.setCurrentIndex(index)')
compteur+=1
elif External == True:
for i in range(23):
Main.param_inf[i+10]=Main.Analysis_Preferences[int(Main.User_Defined_Measurement_Parameters.currentIndex())][i+1]
#Main.param_inf[i+10]=Main.Analysis_Preferences[int(Main.User_Defined_Measurement_Parameters.count()-1)][i+1]
if i < 18:
Main.param_inf[i+10]=float(Main.param_inf[i+10])
Main.Create_Measure_Variables()
compteur=0
for a in Main.listofmeasurecoord:
setnew = 'Main.'+str(a)+'.setText("'+str(Main.param_inf[compteur+10])+'")'
exec(setnew)
compteur+=1
compteur=0
for a in ["Baseline1_size","Peak1_size","Baseline2_size","Peak2_size","Baseline3_size","Peak3_size"]:
setnew = 'Main.'+str(a)+'.setText("'+str(Main.param_inf[compteur+22])+'")'
exec(setnew)
compteur+=1
compteur=0
for a in ["Baseline1_meth","Peak1_meth","Baseline2_meth","Peak2_meth","Baseline3_meth","Peak3_meth"]:
exec('index=Main.'+a+'.findText(str(Main.param_inf[compteur+28]))')
#print 'external',index,str(Main.param_inf[compteur+28])
exec('Main.'+a+'.setCurrentIndex(index)')
compteur+=1
self.Check_Measuring_Parameters_Validity()
self.Save_User_Defined_Parameters()
def Save_User_Defined_Parameters(self):
print "-----------> user parameters valid and saved"
parameters = open(Main.Analysis_Preferences_Path,'w')
saving =''
for i in Main.Analysis_Preferences:
saving=saving+str(i)+"\n"
parameters.write(saving)
parameters.close()
def Check_Measuring_Parameters_Validity(self):
"""
This function check that the point n+1 in measure paramteres is AFTER the point n.
"""
previous_value=0 #c'est la position du point précédent dans le systeme de mesur, +1 point
#le premier point doit être positif ou nul
if Main.measurecoord["Baseline1_begin"] < 0:
Main.measurecoord["Baseline1_begin"]=0
Main.Baseline1_begin.setText("0")
for a in Main.listofmeasurecoord:
name = 'Main.measurecoord["'+str(a)+'"]' #c'est le nom de la variable Main.measurecoord["champs"]. eval(name) est sa valeur en ms
cond="float(Main."+str(a)+".text())*Navigate.Points_by_ms" #c'est la valeur du pnt d'interet en points
namevalue=eval(cond) #la coordonnée demandée est convertie en points
if namevalue <= previous_value: #Le point n+1 doit etre >au point n, sinon on corrige
print "cursor position correction!"
namevalue=previous_value+1 #on corrige la valeur
previous_value = namevalue #on redefinit previous value pour la boucle suivante
convert=round(100.*namevalue/Navigate.Points_by_ms)/100. #arrondi à 10µs près la valeur du point, pour la lisibilité
setnew = 'Main.'+str(a)+'.setText("'+str(convert)+'")'
setnew2='Main.measurecoord["'+str(a)+'"]=round(100*namevalue/Navigate.Points_by_ms)/100'
exec(setnew)
exec(setnew2)
else:
setnew2='Main.measurecoord["'+str(a)+'"]=round(100*namevalue/Navigate.Points_by_ms)/100'
exec(setnew2)
previous_value = namevalue #on redefinit previous value pour la boucle suivante
#same loop, + the "size" parameters
liste=["Baseline1_begin","Baseline1_end","Peak1_begin","Peak1_end","Baseline2_begin","Baseline2_end","Peak2_begin","Peak2_end","Baseline3_begin","Baseline3_end","Peak3_begin","Peak3_end"]
for i in range(len(liste)):
#print Main.measurecoord[liste[i]]
Main.param_inf[i+10]=Main.measurecoord[liste[i]]
def Measure_On_Off_Activated(self):
"""
Affiche les mesures sans changer de sweep
"""
try:
Navigate.Load_This_Trace(Requete.Analogsignal_ids[Requete.Current_Sweep_Number])
Main.MainFigure.canvas.Update_Figure()
except AttributeError: # if no Analogsignal_ids usually. for example if you haven't loaded anything yet
pass
def Measure_On_Off(self,channel=0):
"""
Cette Fonction permet l'affichage des curseurs et de la charge sur la trace
"""
if Main.Display_Measures_Button.checkState() == 2:
if Main.Filtered_Display.checkState() == 0:
si = Navigate.si[channel]
elif Main.Filtered_Display.checkState() == 2:
si = Navigate.Filtered_Signal[channel]
self.Check_Measuring_Parameters_Validity()
if Main.Display_Charge_Button.checkState() == 2:
self.Charge_trace = numpy.array(si)
self.Charge_trace[0:int(float(Main.Peak1_begin.text())*float(Navigate.Points_by_ms))]=numpy.NaN
self.Charge_trace[int(float(Main.Peak1_end.text())*float(Navigate.Points_by_ms)):len(self.Charge_trace)]=numpy.NaN
Ampvalues = range(6)
self.Measurement_Interval = range(6)
self.left = range(6)
listofmeth=["Baseline1_meth","Peak1_meth",
"Baseline2_meth","Peak2_meth",
"Baseline3_meth","Peak3_meth"]
compteur=0
for loc in Main.listofcoord:
leftpnt,rightpnt = self.Measure_Local_Extremum(si,loc,listofmeth[compteur])
avalue = numpy.mean(si[leftpnt:rightpnt])
Ampvalues[compteur]=avalue
self.Measurement_Interval[compteur]=rightpnt-leftpnt
self.left[compteur]=leftpnt
compteur+=1
if Main.Measure_From_Zero_Button.checkState() == 2:
self.Amplitude_1=Ampvalues[1]
self.Amplitude_2=Ampvalues[3]
self.Amplitude_3=Ampvalues[5]
elif Main.Measure_From_Baseline1_Button.checkState() == 2:
self.Amplitude_1=(Ampvalues[1]-Ampvalues[0])
self.Amplitude_2=(Ampvalues[3]-Ampvalues[0])
self.Amplitude_3=(Ampvalues[5]-Ampvalues[0])
else:
self.Amplitude_1=(Ampvalues[1]-Ampvalues[0])
self.Amplitude_2=(Ampvalues[3]-Ampvalues[2])
self.Amplitude_3=(Ampvalues[5]-Ampvalues[4])
            #The amplitude lists
#print self.Amplitude_1
#print self.Amplitude_2
#print self.Amplitude_3
#print self.Measurement_Interval
Info_Message="Amp1 = "+str(self.Amplitude_1)+" Amp2 = "+str(self.Amplitude_2)+" Amp3 = "+str(self.Amplitude_3)
Main.status_text.setText(Info_Message)
self.Base1 = numpy.ones(self.Measurement_Interval[0])*Ampvalues[0]
self.Base1_coord = numpy.array(range(len(self.Base1)))+self.left[0]
self.Peak1 = numpy.ones(self.Measurement_Interval[1])*Ampvalues[1]
self.Peak1_coord = numpy.array(range(len(self.Peak1)))+self.left[1]
self.Base2 = numpy.ones(self.Measurement_Interval[2])*Ampvalues[2]
self.Base2_coord = numpy.array(range(len(self.Base2)))+self.left[2]
self.Peak2 = numpy.ones(self.Measurement_Interval[3])*Ampvalues[3]
self.Peak2_coord = numpy.array(range(len(self.Peak2)))+self.left[3]
self.Base3 = numpy.ones(self.Measurement_Interval[4])*Ampvalues[4]
self.Base3_coord = numpy.array(range(len(self.Base3)))+self.left[4]
self.Peak3 = numpy.ones(self.Measurement_Interval[5])*Ampvalues[5]
self.Peak3_coord = numpy.array(range(len(self.Peak3)))+self.left[5]
def Raster_Plot(self,Bar_time=0.2,Bar_Width=0.1,Length=None,Rendering=True,Source=None):
"""
This function display a Raster plot of all the spike times using "Source" list of spiketrain ids.
Source must be a list. If None, Source is Requete.Spiketrain_ids
The function returns the figure
Length is the sweep length is s.
"""
NumberofChannels=len(Requete.Spiketrain_ids[0])
if Source == None:
Source = Requete.Spiketrain_ids
if QtCore.QObject().sender() == Main.Rasterplot:
Bar_time=float(Main.Raster_Start.text())
Bar_Width=float(Main.Raster_Duration.text())
Length=Requete.Shortest_Sweep_Length
self.Wid = MyMplWidget(title = 'Raster Plot',subplots=[NumberofChannels,1,1])#, width=6, height=4)
concatenatedEvents=[]
if Main.SQLTabWidget.currentIndex() == 0 or Main.SQLTabWidget.currentIndex() == 1:
sptr=SpikeTrain.load(Source[0][0],session=Requete.Global_Session)
try:
h=[0,sptr.t_stop-sptr.t_start,-1,len(Source)+1]
except AttributeError:
msgBox = QtGui.QMessageBox()
msgBox.setText(
"""
<b>Raster Plot Error</b>
<p>No Spiketrains in the selection
""")
msgBox.exec_()
return
h=[0,0,-1,len(Source)]
self.Wid.canvas.axes.axis(h)
counter=0
if Source is Requete.Spiketrain_ids:
for n in range(Requete.NumberofChannels):
if n>0:
self.Wid.canvas.axes = self.Wid.canvas.fig.add_subplot(NumberofChannels,1,n+1)
for i in range(len(Source)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Source)-1)
Main.progress.setValue(i)
if i >= int(Main.From.text()) and i <= int(Main.To.text()) and Requete.tag["Selection"][i][n] == 1:
sptr=SpikeTrain.load(Source[i][n],session=Requete.Global_Session)
try:
for j in range(len(sptr._spike_times)):
sptr._spike_times[j]-=sptr.t_start
#print sptr.t_start, sptr._spike_times
y=i*numpy.ones(len(sptr._spike_times))
self.Wid.canvas.axes.plot(sptr._spike_times,y, 'k|')
concatenatedEvents.extend(sptr._spike_times)
except ValueError:
print "ID ",Source[i]," passed"
counter+=1
if Rendering == True:
self.Wid.canvas.axes.set_xlabel("Time")
self.Wid.canvas.axes.set_ylabel("Sweep Number")
self.Wid.canvas.axes.invert_yaxis()
self.Wid.canvas.axes.axvspan(Bar_time,Bar_time+Bar_Width,facecolor='b', alpha=0.3)
self.Wid.canvas.axes.set_xbound(0.,Length)
self.Wid.canvas.axes.set_ybound(-1.,len(Source)+2.)
self.Wid.canvas.axes.hist(concatenatedEvents, bins=100, range=(0.,Length),histtype="stepfilled",alpha=0.6, normed=True)
else:
for i in range(len(Source)):
sptr=SpikeTrain.load(Source[i],session=Requete.Global_Session)
for j in range(len(sptr._spike_times)):
sptr._spike_times[j]-=sptr.t_start
y=i*numpy.ones(len(sptr._spike_times))
self.Wid.canvas.axes.plot(sptr._spike_times,y, 'k|')
concatenatedEvents.extend(sptr._spike_times)
counter+=1
elif Main.SQLTabWidget.currentIndex() == 2:
h=[0,0,-1,len(Source)]
self.Wid.canvas.axes.axis(h)
counter=0
for n in range(Requete.NumberofChannels):
if n>0:
self.Wid.canvas.axes = self.Wid.canvas.fig.add_subplot(Requete.NumberofChannels,1,n+1)
for i in range(len(Source)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Source)-1)
Main.progress.setValue(i)
if i >= int(Main.From.text()) and i <= int(Main.To.text()) and Requete.tag["Selection"][i][n] == 1:
try:
sptr=Requete.SpikeTrainfromLocal[str(i)+'_'+str(n)]
#for j in range(len(sptr)):
y=i*numpy.ones(len(sptr))
self.Wid.canvas.axes.plot(sptr,y, 'k|')
concatenatedEvents.extend(sptr)
except (ValueError,KeyError):
print "ID ",Source[i]," passed"
counter+=1
if Rendering == True:
self.Wid.show()
Info_Message="It's a superposition of "+str(counter)+" sweeps"
Main.status_text.setText(Info_Message)
return self.Wid
def ShowEventsOptions(self):
Main.OptionPlugin(self,Type='Locals',funcname='Analysis.Display_Events')
#self.Display_Events(**opt)
def Display_Events(self,leftsweep=0.005,rightsweep=0.005,Source=None,Baseline=None,Range=None,Rendering=True,Raster=True,DetectionChannel=0,DisplayChannel=0,syncThr=0.05,Sync=None,StoredST=None):
"""
This function is able to display at the same time -a raster plot and
-a superposition of all detected events
if Source is None (or Requete.Spiketrain_ids) all the tagging/Intervall system of SynaptiQs is used
leftsweep/rightsweep is the intervall used for display around _spiketime
Baseline is the time BEFORE the event used for offset substraction. if None, the whole AnalogSignal_id corresponding signal is used
Range is the range in second where the events are selected
DetectionChannel indicates the SpikeSorting channel used
DisplayChannel indicates the channel use for displaying traces
to be solved : sometimes, some events are missed. their position is printed.
"""
from OpenElectrophy import AnalogSignal,SpikeTrain
from matplotlib import numpy
if Source== None:
Source=Requete.Spiketrain_ids
if Rendering == True:
self.Widget=QtGui.QWidget()
if Raster == True:
vbox=QtGui.QVBoxLayout()
Raster=self.Raster_Plot(Source=Source,Rendering=False,Bar_time=0.2,Bar_Width=0.2)
self.Wid = MyMplWidget(subplots=None)
self.Wid.canvas.axes = self.Wid.canvas.fig.add_subplot(111)
As=AnalogSignal.load(Requete.SpikeTrain_id_and_Corresponding_AnalogSignal_id_Dictionnary[Source[0][DetectionChannel]])
pnts_by_s=int(As.sampling_rate)
counter=0
L=int(leftsweep*pnts_by_s)
H=int(rightsweep*pnts_by_s)
average_trace=numpy.zeros(L+H)
croped_axe=numpy.arange(float(leftsweep*-1),float(rightsweep),float(1/pnts_by_s))
if Range == None:
Range = [0.,len(As.signal)/pnts_by_s]
else:
if Rendering == True:
Raster.canvas.axes.axvspan(Range[0],Range[1],facecolor='r', alpha=0.3)
for n in range(Requete.NumberofChannels):
for j,i in enumerate(Source):
                if (Source is Requete.Spiketrain_ids) and ((j < int(Main.From.text())) or (j > int(Main.To.text())) or (Requete.tag["Selection"][j][n] == 0)):
pass
else:
#Loading spiketrain
st=SpikeTrain.load(i[DetectionChannel])
As=AnalogSignal.load(Requete.SpikeTrain_id_and_Corresponding_AnalogSignal_id_Dictionnary[i[0]]+DisplayChannel)
pnts_by_s=As.sampling_rate
#Defining the ref spiketrain if necessary
if StoredST is not None:
refST=StoredST[j]
else:
refST=st._spike_times
#Removing baseline
if Baseline == None:
baseline=numpy.mean(As.signal)
else:
baseline=Baseline
try:
for k in st._spike_times: #k is a spike
if (k-st.t_start > Range[0]) and (k-st.t_start < Range[1]):
Cond=False
                                #we test if there is any spike in the reference array close to our spike of interest
test=[numpy.allclose(test,[k-st.t_start],atol=syncThr) for test in refST]
if Sync == True: #We keep it if there's one
Cond= any(numpy.array(test))
elif Sync == False: #We keep it if there's none
Cond= all(~numpy.array(test))
elif Sync == None: #We keep it anyway
Cond=True
if Cond:
lower=int((k-st.t_start)*pnts_by_s)-L
higher=int((k-st.t_start)*pnts_by_s)+H
event = As.signal[lower:higher]
event=event-numpy.mean(event[(leftsweep-baseline)*pnts_by_s:leftsweep*pnts_by_s])
try:
average_trace+=event
counter+=1
except ValueError:
print 'error in spiketrain id %s, at %s ms' % (i,(k-st.t_start)*pnts_by_s/1000)
#print len(croped_axe), len(event)
if len(list(croped_axe))>len(list(event)):
croped_axe=list(croped_axe)
#print len(croped_axe), len(event)
#croped_axe.pop()
if Rendering == True :
if len(list(croped_axe)) != len(list(event)):
print j,i, 'passed'
pass
else:
self.Wid.canvas.axes.plot(croped_axe,event,color='k',alpha=0.15)
except ValueError:
pass
average_trace/=counter
#HACK
Min=min([len(croped_axe),len(average_trace)])
croped_axe=croped_axe[:Min]
average_trace=average_trace[:Min]
if Rendering == True:
self.Wid.canvas.axes.plot(croped_axe,average_trace,color='red',alpha=1)
if Raster == True:
vbox.addWidget(Raster)
vbox.addWidget(self.Wid)
self.Widget.setLayout(vbox)
self.Widget.show()
return croped_axe,average_trace
def Load_Tags(self):
"""
This function allow you to load an alternative tag list directly from a file
"""
path = QtGui.QFileDialog()
path.setNameFilter("Tags Files (*.txt)")
path.setAcceptMode(QtGui.QFileDialog.AcceptOpen)
path.setFileMode(QtGui.QFileDialog.ExistingFiles)
if (path.exec_()) :
parameters = open(path.selectedFiles()[0])
a=parameters.readlines()
            for i in range(len(a)):
                line=a[i].replace('\n','')
                for n in range(len(line)):
                    Requete.tag["Selection"][i][n]=float(line[n])
parameters.close()
print "///////////////Tags Loaded from selected file"
def Manip_Check_up(self,window=10,Leakbegin=0,Leakend=None):
"""
This function allows a quick measurement of the signal, based on one point, on Tagged traces
Filtering is ignored
Leak is measured between Leakbegin (defaut is 0) and Leakend (defaut is sealtest time -1ms)
Sealtest is the minimal value (on 1 point) between time and time +window (defaut is 10ms)
"""
print 'To be carefully checked first'
return
self.Seal_test=[numpy.NaN]*len(Requete.Analogsignal_ids)
self.Leak=[numpy.NaN]*len(Requete.Analogsignal_ids)
self.Noise_Level=[numpy.NaN]*len(Requete.Analogsignal_ids)
self.Leak_Drop=[numpy.NaN]*len(Requete.Analogsignal_ids)
self.Manip_Diagnosis_Widget = MyMplWidget(subplots=None,sharex=True, title = 'Manip Diagnosis')
value, ok = QtGui.QInputDialog.getInt(Main.FilteringWidget, 'Input Dialog',
'Please Enter Seal Test time:')
value=int(value)
if Leakend==None:
Leakend=value-1
for n in range(Requete.NumberofChannels):
for i in range(len(Requete.Analogsignal_ids)):
Main.progress.setMinimum(0)
Main.progress.setMaximum(len(Requete.Analogsignal_ids)-1)
Main.progress.setValue(i)
if Requete.tag["Selection"][i][n] == 1:
sig = AnalogSignal().load(Requete.Analogsignal_ids[i],session=Requete.Global_Session)
si = sig.signal
self.Leak[i] = numpy.mean(si[float(Leakbegin)*Navigate.Points_by_ms:float(Leakend)*Navigate.Points_by_ms])
self.Seal_test[i] = min(si[float(value)*Navigate.Points_by_ms:float(value+window)*Navigate.Points_by_ms])-self.Leak[i]
self.Noise_Level[i] = numpy.std(si[float(Leakbegin)*Navigate.Points_by_ms:float(Leakend)*Navigate.Points_by_ms])
self.Leak_Drop[i] = numpy.mean(si[float(Leakbegin)*Navigate.Points_by_ms:float(Leakend)*Navigate.Points_by_ms])-numpy.mean(si[((Leakbegin)*Navigate.Points_by_ms)*-1:-1])
self.Seal_test=numpy.array(self.Seal_test)
self.Manip_Diagnosis_Widget.canvas.axes = self.Manip_Diagnosis_Widget.canvas.fig.add_subplot(511)
self.Manip_Diagnosis_Widget.canvas.axes.plot(self.Seal_test,'ro-')
self.Manip_Diagnosis_Widget.canvas.axes.set_ylabel("Seal Test (pA)")
self.Manip_Diagnosis_Widget.canvas.axes = self.Manip_Diagnosis_Widget.canvas.fig.add_subplot(512)
self.Manip_Diagnosis_Widget.canvas.axes.plot(-0.01/(self.Seal_test*0.000000000001),'go-')
self.Manip_Diagnosis_Widget.canvas.axes.set_ylabel("Serie Resistance (MOhm)")
self.Manip_Diagnosis_Widget.canvas.axes = self.Manip_Diagnosis_Widget.canvas.fig.add_subplot(513)
self.Manip_Diagnosis_Widget.canvas.axes.plot(self.Leak,'bo-')
self.Manip_Diagnosis_Widget.canvas.axes.set_ylabel("Leak (pA)")
self.Manip_Diagnosis_Widget.canvas.axes = self.Manip_Diagnosis_Widget.canvas.fig.add_subplot(514)
self.Manip_Diagnosis_Widget.canvas.axes.plot(self.Noise_Level,'ko-')
self.Manip_Diagnosis_Widget.canvas.axes.set_ylabel("Noise_Level")
self.Manip_Diagnosis_Widget.canvas.axes = self.Manip_Diagnosis_Widget.canvas.fig.add_subplot(515)
self.Manip_Diagnosis_Widget.canvas.axes.plot(self.Leak_Drop,'co-')
self.Manip_Diagnosis_Widget.canvas.axes.set_ylabel("Leak_Drop")
self.Manip_Diagnosis_Widget.show()
    def fft_passband_filter(sig,
                            f_low =0,f_high=1,
                            axis = 0,
                            ) :
        """
        Pass band filter using fft for a real 1D signal.
        sig : a numpy.array signal (note: the first argument is the signal itself, not self)
        f_low : low cut-off as a fraction of the Nyquist frequency (1 = sampling_rate/2)
        f_high : high cut-off as a fraction of the Nyquist frequency (1 = sampling_rate/2)
        """
        n = sig.shape[axis]
        N = int(2**(numpy.ceil(numpy.log(n)/numpy.log(2))))
        SIG = numpy.fft.fft(sig,n = N , axis = axis)
        n_low = int(numpy.floor((N-1)*f_low/2)+1)
        fract_low = 1-((N-1)*f_low/2-numpy.floor((N-1)*f_low/2))
        n_high = int(numpy.floor((N-1)*f_high/2)+1)
        fract_high = 1-((N-1)*f_high/2-numpy.floor((N-1)*f_high/2))
        s = [ slice(None) for i in range(sig.ndim) ]
        if f_low >0 :
            #zero the DC bin and everything below the low cut-off, scaling the edge bins
            s[axis] = 0
            SIG[s] = 0
            s[axis] = slice(1,n_low)
            SIG[ s ] = 0
            s[axis] = n_low
            SIG[s] *= fract_low
            s[axis] = -n_low
            SIG[s] *= fract_low
            if n_low !=1 :
                s[axis] = slice(-n_low+1, None)
                SIG[s] = 0
        if f_high <1 :
            #zero everything above the high cut-off, scaling the edge bins
            s[axis] = n_high
            SIG[s] *= fract_high
            s[axis] = slice(n_high+1,-n_high)
            SIG[ s ] = 0
            s[axis] = -n_high
            SIG[s] *= fract_high
        s[axis] = slice(0,n)
        return numpy.real(numpy.fft.ifft(SIG , axis=axis)[s])
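    # Hypothetical usage (values illustrative): keep 300-3000 Hz of a trace sampled at
    # 20 kHz by expressing the cut-offs as fractions of the 10 kHz Nyquist frequency:
    # filtered = fft_passband_filter(trace, f_low=300./10000, f_high=3000./10000)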
def Display_Infos(self):
a='None'
self.Tagged_Sweeps=0
for n in range(Requete.NumberofChannels):
self.Tagged_Sweeps=0
for i in range(len(Requete.Analogsignal_ids)):
if Requete.tag["Selection"][i][n]==1:
self.Tagged_Sweeps+=1
msgBox = QtGui.QMessageBox()
msgBox.setText(
"<b>SQL Request</b>"+
"<p>"+
str(Requete.query)+
"<p><b>General Infos</b>"+
"<p>Channel number is "+
str(n)+
"<p>Number of Loaded Blocks : "+
str(len(set(numpy.array(Requete.Block_ids).flatten())))+
"<p>Number of Loaded Sweeps : "+
str(len(Requete.Analogsignal_ids))+
"<p>Number of Tagged Sweeps : "+
str(self.Tagged_Sweeps)+
"<p>Type of experiment : "+
str(a)+
"<p>Sampling rate : "+
str(int(Navigate.Points_by_ms))+
" points by ms (" +
str(int(Navigate.Points_by_ms))+
"kHz) , or 1 point = "+
str(1./Navigate.Points_by_ms)+
" ms"+
"<p><b>Mapping Infos (saved in the tag field)</b>"
"<p>Have a nice analysis...")
msgBox.exec_()
def Concat_Scalogram(self):
List=[]
Events=self.SuperImposeSpikesOnScalogram
#if we want to superimpose spikes
if Events == True:
Events=[]
Navigate.Display_First_Trace()
else:
Events = None
for i,j in enumerate(Requete.Analogsignal_ids):
if (i >= int(Main.From.text())) and (i <= int(Main.To.text())) and (Requete.tag["Selection"][i][0] == 1):
if Events != None: #Spikes
Navigate.Display_Next_Trace()
Events.extend(Requete.Current_Spike_Times/1000.+(i*Navigate.signal_length_in_ms))
List.append(j)
if Main.Scalogram_Min.text() not in ['None','none','NaN','nan','']:
Min=float(Main.Scalogram_Min.text())
else:
Min=None
if Main.Scalogram_Max.text() not in ['None','none','NaN','nan','']:
Max=float(Main.Scalogram_Max.text())
else:
Max=None
Navigate.Concatenate(Source=List)
self.Scalogram(Source=Navigate.Concatenated,vmin=Min,vmax=Max,Events=Events)
def EmbeddedScalogram(self):
if Main.Scalogram_Min.text() not in ['None','none','NaN','nan','']:
Min=float(Main.Scalogram_Min.text())
else:
Min=None
if Main.Scalogram_Max.text() not in ['None','none','NaN','nan','']:
Max=float(Main.Scalogram_Max.text())
else:
Max=None
self.Scalogram(vmin=Min,vmax=Max)
def Average_Scalograms(self,**kargs):
'''
'''
orginal=self.Scalogram(Just_Data=True)
orginal.map[:,:]=numpy.nan
av=orginal.map[:,:,numpy.newaxis]
for i in range(len(Requete.Analogsignal_ids)-1):
v=self.Scalogram(Just_Data=True).map
av=numpy.append(av,v[:,:,numpy.newaxis],axis=2)
Navigate.Display_Next_Trace()
av=numpy.nanmean(av,axis=2)
pyplot.imshow(abs(av).transpose(),
interpolation='nearest',
extent=(orginal.t_start, orginal.t_stop, orginal.f_start-orginal.deltafreq/2., orginal.f_stop-orginal.deltafreq/2.),
origin ='lower' ,
aspect = 'normal',
**kargs)
pyplot.show()
def Scalogram(self,Source=None,Type='RAW',Sampling_Rate=None, Channel=0, Filtered=False,Events=None,Just_Data=False, **kargs):
'''
Uses a slightly modified OpenElectrophy plot_scalogram()
Source is by default the current sweep but any other analogsignal.id OR 1D array can be passed
Display arguments from imshow can be passed as **kargs (ex: vmin, vmax, cmap)
'''
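        # Hypothetical call (imshow keyword values illustrative):
        # Analysis.Scalogram(vmin=0, vmax=50, cmap='jet')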
from OpenElectrophy import AnalogSignal
if Source == None:
if Filtered == True or Main.Analyze_Filtered_Traces_Button.checkState () == 2:
Source=Navigate.Filtered_Signal
else:
Source=Navigate.si
if Sampling_Rate == None:
Sampling_Rate=Requete.BypassedSamplingRate
A=[]
for i in Source:
#TODO Type could be autodetected
if Type == 'RAW':
A.append(list(i))
elif Type == 'Id':
A.append(list(AnalogSignal.load(i[Channel]).signal))
A=numpy.array([numpy.array(xi) for xi in A])
print len(A), len(A[0])
for n,i in enumerate(A):
if type(i[0]) in [list,numpy.ndarray]:
Average=numpy.average(i,axis=0)
else:
Average=i
B=AnalogSignal()
B.signal=Average
B.sampling_rate=Sampling_Rate
sc=B.plot_scalogram(just_data=Just_Data,**kargs)
if Just_Data == True:
return sc
if Events != None:
pyplot.plot(Events,numpy.ones(len(Events))*20,'wo',alpha=0.5)
pyplot.show()
| [
"[email protected]"
] | |
7e37f7f29e3d4b42b9d6b76c5933c8de9d1a2bee | d3235aaa6194224d48b85a126218f7bf0c7c1a08 | /apps/platform_pages/migrations/0010_publicidad_page.py | 4ada86b4314bf8b6a3066d1a5ae2781160931a1e | [] | no_license | javierbuitrago/freenetsas | b95c158fa9bef8c5298ebc8896093469865ce24a | 9d7c2859aa74153beb9fe35c743e0f176d10bfbd | refs/heads/master | 2020-05-17T11:43:47.199164 | 2019-04-29T17:17:56 | 2019-04-29T17:17:56 | 183,691,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,213 | py | # Generated by Django 2.2 on 2019-04-28 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('platform_pages', '0009_auto_20190427_1855'),
]
operations = [
migrations.CreateModel(
name='Publicidad_page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('favicon', models.ImageField(upload_to='static/img/favicon/')),
('title_pestana', models.CharField(max_length=90)),
('logo', models.ImageField(upload_to='static/img/logo/')),
('width_logo', models.CharField(max_length=90)),
('background_color_page', models.CharField(max_length=90)),
('font_family_titles', models.CharField(max_length=90)),
('font_sizes_titles', models.CharField(max_length=90)),
('background_color_titles', models.CharField(max_length=90)),
('color_titles', models.CharField(max_length=90)),
('width_titles', models.CharField(max_length=90)),
('height_titles', models.CharField(max_length=90)),
('padding_titles', models.CharField(max_length=90)),
('text_aling_titles', models.CharField(max_length=90)),
('border_titles', models.CharField(max_length=90)),
('border_radius_titles', models.CharField(max_length=90)),
('font_family_paragrafos', models.CharField(max_length=90)),
('font_sizes_paragrafos', models.CharField(max_length=90)),
('background_color_paragrafos', models.CharField(max_length=90)),
('color_paragrafos', models.CharField(max_length=90)),
('width_paragrafos', models.CharField(max_length=90)),
('text_aling_paragrafos', models.CharField(max_length=90)),
('height_paragrafos', models.CharField(max_length=90)),
('border_paragrafos', models.CharField(max_length=90)),
('border_radius_paragrafos', models.CharField(max_length=90)),
('title1', models.CharField(max_length=90)),
('color_title1', models.CharField(max_length=90)),
('background_color_title1', models.CharField(max_length=90)),
('border_radius_title1', models.CharField(max_length=90)),
('horizontal_text_shadow', models.CharField(max_length=90)),
('vertical_text_shadow', models.CharField(max_length=90)),
('blur_text_shadow', models.CharField(max_length=90)),
('color_text_shadow', models.CharField(max_length=90)),
('image_title1', models.ImageField(upload_to='static/img/images_counter/')),
('background_color_image', models.ImageField(upload_to='static/img/images_counter/')),
('background_repeat', models.ImageField(upload_to='static/img/images_counter/')),
('description1', models.CharField(max_length=9000)),
('color_descripcion1', models.CharField(max_length=90)),
('seccion2_title', models.CharField(max_length=90)),
('image_seccion2', models.ImageField(upload_to='static/img/images_counter/')),
('description_seccion2', models.CharField(max_length=9000)),
('width_image_seccion2', models.CharField(max_length=90)),
('height_image_seccion2', models.CharField(max_length=90)),
('padding_image_seccion2', models.CharField(max_length=90)),
('border_image_seccion2', models.CharField(max_length=90)),
('border_radius_image_seccion2', models.CharField(max_length=90)),
('seccion3_title', models.CharField(max_length=90)),
('seccion3_description', models.CharField(max_length=9000)),
('seccion4_title', models.CharField(max_length=90)),
('seccion4_description', models.CharField(max_length=9000)),
('for_page', models.CharField(max_length=30)),
],
),
]
| [
"[email protected]"
] | |
f5c0d4910269e0b5b334417e7411d352f7e4a81a | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_25772.py | 4fa27e59e3a620c464325c0e5654dcbc4cf57d02 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,849 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((469.783, 482.982, 415.437), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((487.577, 527.58, 365.825), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((499.903, 576.376, 300.082), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((377.815, 508.699, 316.389), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((570.274, 695.137, 165.506), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((484.953, 504.393, 377.19), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((484.938, 503.254, 377.672), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((495.973, 487.073, 357.364), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((485.36, 474.435, 334.673), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((497.081, 469.25, 309.624), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((501.032, 443.921, 321.059), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((508.054, 422.995, 338.386), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((484.793, 502.973, 405.667), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((537.802, 344.185, 273.65), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((599.565, 512.515, 180.358), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((599.565, 512.515, 180.358), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((593.062, 507.418, 208.083), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((585.146, 502.51, 235.355), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((575.161, 501.184, 262.295), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((561.471, 507.101, 286.863), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((547.798, 515.154, 310.836), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((528.257, 518.346, 331.427), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((558.456, 374.856, 118.779), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((493.863, 660.653, 545.749), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((538.708, 560.441, 323.723), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((538.708, 560.441, 323.723), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((517.869, 548.09, 307.624), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((490.541, 545.421, 296.802), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((468.446, 564.477, 299.822), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((418.247, 502.883, 396.528), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((509.638, 637.011, 206.759), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((459.679, 522.218, 368.682), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((459.563, 522.226, 368.713), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((457.896, 515.122, 395.882), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((472.604, 495.717, 409.907), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((493.206, 479.006, 400.674), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((509.782, 460.178, 387.849), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((522.256, 457.162, 362.871), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((535.185, 433.983, 353.79), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((558.907, 511.078, 382.833), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((506.947, 357.066, 323.26), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((540.249, 553.087, 387.232), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((515.568, 554.444, 372.955), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((465.128, 558.364, 342.127), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((421.729, 560.635, 306.575), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((386.186, 555.047, 376.859), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((390.621, 566.054, 207.352), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((458.511, 491.945, 353.039), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((469.903, 511.376, 336.355), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((483.273, 535.004, 327.211), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((494.411, 561.921, 328.847), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((488.287, 590.427, 328.616), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((493.051, 618.438, 323.281), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((492.449, 554.483, 371.264), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((493.9, 686.392, 276.093), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
6d77da2bee031058527dadae919a1ddcad7b094b | e44abd0100eb0a2dab6b95cc4e1bacddcbf08b2e | /predict.py | ede815bc1424453f1a926c8594d3410167df5a6b | [] | no_license | xFANx/clf | 85fdf7a1d0cfd4486ff9fe189ef98ff65933f654 | e81264bf0da9c78257be94a162ade9c37182578a | refs/heads/main | 2022-12-25T07:20:00.637097 | 2020-10-09T01:27:22 | 2020-10-09T01:27:22 | 302,378,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,170 | py |
"""author
baiyu
"""
import argparse
import glob
import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
#from PIL import Image
import transforms
#from torchvision import transforms
from tensorboardX import SummaryWriter
from conf import settings
from utils import *
from lr_scheduler import WarmUpLR
from criterion import LSR
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-net', type=str, required=True, help='net type')
parser.add_argument('-w', type=int, default=2, help='number of workers for dataloader')
parser.add_argument('-b', type=int, default=256, help='batch size for dataloader')
parser.add_argument('-lr', type=float, default=0.04, help='initial learning rate')
parser.add_argument('-e', type=int, default=450, help='training epoches')
parser.add_argument('-warm', type=int, default=5, help='warm up phase')
parser.add_argument('-gpus', nargs='+', type=int, default=0, help='gpu device')
args = parser.parse_args()
#checkpoint directory
checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')
#tensorboard log directory
log_path = os.path.join(settings.LOG_DIR, args.net, settings.TIME_NOW)
if not os.path.exists(log_path):
os.makedirs(log_path)
writer = SummaryWriter(log_dir=log_path)
#get dataloader
train_transforms = transforms.Compose([
#transforms.ToPILImage(),
transforms.ToCVImage(),
transforms.RandomResizedCrop(settings.IMAGE_SIZE),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, saturation=0.4, hue=0.4),
#transforms.RandomErasing(),
#transforms.CutOut(56),
transforms.ToTensor(),
transforms.Normalize(settings.TRAIN_MEAN, settings.TRAIN_STD)
])
test_transforms = transforms.Compose([
transforms.ToCVImage(),
transforms.CenterCrop(settings.IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize(settings.TRAIN_MEAN, settings.TRAIN_STD)
])
train_dataloader = get_train_dataloader(
settings.DATA_PATH,
train_transforms,
args.b,
args.w
)
test_dataloader = get_test_dataloader(
settings.DATA_PATH,
test_transforms,
args.b,
args.w
)
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = get_network(args)
net = init_weights(net)
if isinstance(args.gpus, int):
args.gpus = [args.gpus]
net = nn.DataParallel(net, device_ids=args.gpus)
net = net.cuda()
#visualize the network
visualize_network(writer, net.module)
#cross_entropy = nn.CrossEntropyLoss()
lsr_loss = LSR()
#apply no weight decay on bias
params = split_weights(net)
optimizer = optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)
#set up warmup phase learning rate scheduler
iter_per_epoch = len(train_dataloader)
warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)
#set up training phase learning rate scheduler
train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES)
#train_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.e - args.warm)
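    # Sketch (not executed in this evaluation-only script): in a full training loop the
    # schedulers above would be stepped per-iteration during warmup, per-epoch afterwards:
    #   for epoch in range(1, args.e + 1):
    #       if epoch > args.warm:
    #           train_scheduler.step(epoch)
    #       for images, labels in train_dataloader:
    #           if epoch <= args.warm:
    #               warmup_scheduler.step()
    #           ...training step...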
best_acc = 0.0
    net.eval()
    total_loss = 0
    correct = 0
    #evaluation only: no gradients needed
    with torch.no_grad():
        for images, labels in test_dataloader:
            images = images.cuda()
            labels = labels.cuda()
            predicts = net(images)
            _, preds = predicts.max(1)
            correct += preds.eq(labels).sum().float()
            loss = lsr_loss(predicts, labels)
            total_loss += loss.item()
test_loss = total_loss / len(test_dataloader)
acc = correct / len(test_dataloader.dataset)
print('Test set: loss: {:.4f}, Accuracy: {:.4f}'.format(test_loss, acc))
print()
writer.close()
| [
"[email protected]"
] | |
fb2493c92162fedb2c572c93d6abda726ed0b606 | a67861f256bfcb43b3dd542be540e00a1eb22c22 | /get_avg_model_2.py | 903a36952e017a93864cbde73ea0d35163ea9dae | [] | no_license | kirca97/NBPiXML | afedc0d220ae4f3551fe08c25f09b725ecd0d5f5 | 88b3cde7a47e31b59df079a68ecb1bbcdcc70c12 | refs/heads/master | 2023-02-25T15:16:30.616885 | 2021-02-01T22:12:51 | 2021-02-01T22:12:51 | 331,459,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | from rejson import Client
import time
rj = Client(host='localhost', port=6379, decode_responses=True)
times = []
for i in range(19464):
print(i)
start_time = time.time()
rj.jsonget("post_" + str(i))
elapsed_time = (time.time() - start_time)
times.append(elapsed_time)
print("---Time elapsed---")
print(sum(times) / len(times))
| [
"[email protected]"
] | |
5d20df02c25095650ccda8686f9948304197c987 | 00a83ff0ed77bd53c1c9a22a9ed47598095226e3 | /raspi/motor-temperature-humidity/app.py | 6ca3560aba9f38f17bf5a009efcf6bc35b333290 | [] | no_license | htlvb/racing-car | e719be609780b6280f72fc89fb72ce8f21512838 | b2a7be390b763d8bc4aea3c2d746ca6089e5371a | refs/heads/master | 2023-01-08T06:19:52.514659 | 2020-11-08T08:38:13 | 2020-11-08T08:38:13 | 303,775,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | #!/usr/bin/env python3
from prometheus_client import start_http_server, Gauge
import signal,sys
import time
import Adafruit_DHT
temperature_metric = Gauge('motorTemperature', 'Motor temperature in °C')
humidity_metric = Gauge('motorHumidity', 'Motor humidity in %')
start_http_server(8000)
def handle_sigterm(*args):
sys.exit()
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 17)
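    # assumption: read_retry can return (None, None) if the sensor never responds;
    # a guard like `if humidity is None: continue` would avoid Gauge.set(None)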
temperature_metric.set(temperature)
humidity_metric.set(humidity)
time.sleep(3)
| [
"[email protected]"
] | |
8806bf55d1c0ee0fd5535b1dacff8b521d93944f | 8332d884a222a5308bb3fcddbf2ef385c19f1162 | /units/unit7-bigData.py | ae96eda7a8418a88a3217bacc4df98bd160e6b6e | [] | no_license | pankajcivil/stat243-fall-2015 | 2de561e32cc2733ce2a3ad5263bfe0dadd1ecc0e | ee3c3c2c523a96eefceddf8703d4938396730993 | refs/heads/master | 2020-05-04T23:34:47.070807 | 2017-08-22T18:02:35 | 2017-08-22T18:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,183 | py | #####################################################
# 6: Hadoop, MapReduce, and Spark
#####################################################
### 6.3 Spark
### 6.3.1 Getting set up
## @knitr spark-setup
export SPARK_VERSION=1.5.1
export CLUSTER_SIZE=12 # number of slave nodes
export mycluster=sparkvm-paciorek # need unique name relative to other users
# I unzipped the Spark tarball to /usr/lib/spark via sudo on BCE
cd /usr/lib/spark/ec2
# set Amazon secret keys (manually or in my case by querying them elsewhere)
#AWS_ACCESS_KEY_ID=blah
AWS_ACCESS_KEY_ID=$(grep -i "^AWS_ACCESS_KEY_ID" ~/stat243-fall-2015-credentials.boto | cut -d' ' -f3)
#AWS_SECRET_ACCESS_KEY=blahhhh
AWS_SECRET_ACCESS_KEY=$(grep -i "^AWS_SECRET_ACCESS_KEY" ~/stat243-fall-2015-credentials.boto | cut -d' ' -f3)
### DO NOT HARD CODE YOUR AMAZON SECRET KEY INFORMATION INTO ANY PUBLIC FILE, INCLUDING A GITHUB REPO !!!!! ###
# start cluster
./spark-ec2 -k [email protected]:stat243-fall-2015 -i ~/.ssh/stat243-fall-2015-ssh_key.pem \
--region=us-west-2 -s ${CLUSTER_SIZE} -v ${SPARK_VERSION} launch ${mycluster}
# login to cluster
# as root
./spark-ec2 -k [email protected]:stat243-fall-2015 -i ~/.ssh/stat243-fall-2015-ssh_key.pem --region=us-west-2 \
login ${mycluster}
# or you can ssh in directly if you know the URL
# ssh -i ~/.ssh/stat243-fall-2015-ssh_key.pem [email protected]
# you can check your nodes via the EC2 management console
# to logon to one of the slaves, look at /root/ephemeral-hdfs/conf/slaves
# and ssh to that address
ssh `head -n 1 /root/ephemeral-hdfs/conf/slaves`
# We can view system status through a web browser interface
# on master node of the EC2 cluster, do:
MASTER_IP=`cat /root/ephemeral-hdfs/conf/masters`
echo ${MASTER_IP}
# Point a browser on your own machine to the result of the next command
# you'll see info about the "Spark Master", i.e., the cluster overall
echo "http://${MASTER_IP}:8080/"
# Point a browser on your own machine to the result of the next command
# you'll see info about the "Spark Stages", i.e., the status of Spark tasks
echo "http://${MASTER_IP}:4040/"
# Point a browser on your own machine to the result of the next command
# you'll see info about the HDFS"
echo "http://${MASTER_IP}:50070/"
# when you are done and want to shutdown the cluster:
# IMPORTANT to avoid extra charges!!!
./spark-ec2 --region=us-west-2 --delete-groups destroy ${mycluster}
## @knitr spark-hdfs
export PATH=$PATH:/root/ephemeral-hdfs/bin/
hadoop fs -mkdir /data
hadoop fs -mkdir /data/airline
df -h
mkdir /mnt/airline
cd /mnt/airline
# for in-class demo:
scp [email protected]:/scratch/users/paciorek/243/AirlineData/198*bz2 .
# scp [email protected]:/scratch/users/paciorek/243/AirlineData/[12]*bz2 .
# for students:
# wget http://www.stat.berkeley.edu/share/paciorek/1987-2008.csvs.tgz
# tar -xvzf 1987-2008.csvs.tgz
# or individual files, e.g., data for 1987
# wget http://www.stat.berkeley.edu/share/paciorek/1987.csv.bz2
hadoop fs -copyFromLocal /mnt/airline/*bz2 /data/airline
# check files on the HDFS, e.g.:
hadoop fs -ls /data/airline
# get numpy installed
# there is a glitch in the EC2 setup that Spark provides -- numpy is not installed on the version of Python that Spark uses (Python 2.7). To install numpy on both the master and worker nodes, do the following as root on the master node.
yum install -y python27-pip python27-devel
pip-2.7 install 'numpy==1.9.2' # 1.10.1 has an issue with a warning in median()
/root/spark-ec2/copy-dir /usr/local/lib64/python2.7/site-packages/numpy
# pyspark is in /root/spark/bin
export PATH=${PATH}:/root/spark/bin
# start Spark's Python interface as interactive session
pyspark
## @knitr spark-data
from operator import add
import numpy as np
lines = sc.textFile('/data/airline')
# particularly for in-class demo - good to repartition the 3 files to more partitions
# lines = lines.repartition(96).cache()
numLines = lines.count()
# mapper
def stratify(line):
vals = line.split(',')
return(vals[16], 1)
result = lines.map(stratify).reduceByKey(add).collect()
# reducer is simply the addition function
# >>> result
#[(u'Origin', 22), (u'CIC', 7281), (u'LAN', 67897), (u'LNY', 289), (u'DAB', 86656), (u'APF', 4074), (u'ATL', 6100953), (u'BIL', 92503), (u'JAN', 190044), (u'GTR', 7520), (u'ISO', 5945), (u'SEA', 1984077), (u'PIT', 2072303), (u'ONT', 774679), (u'ROW', 1196), (u'PWM', 161602), (u'FAY', 44564), (u'SAN', 1546835), (u'ADK', 589), (u'ADQ', 10327), (u'IAD', 1336957), (u'ANI', 475), (u'CHO', 19324), (u'HRL', 116018), (u'ACV', 23782), (u'DAY', 380459), (u'ROA', 69361), (u'VIS', 1993), (u'PSC', 38408), (u'MDW', 1170344), (u'MRY', 67926), (u'MCO', 1967493), (u'EKO', 12808), (u'RNO', 510023), (u'TPA', 1321652), (u'OME', 21403), (u'DAL', 952216), (u'GJT', 34921), (u'ALB', 292764), (u'SJT', 16590), (u'CAK', 80821), (u'TUP', 1971), (u'MKG', 396), (u'DEN', 3319905), (u'MDT', 167293), (u'RKS', 954), (u'GSP', 200147), (u'LAW', 18019), (u'MCN', 7203), (u'PIA', 44780), (u'ROC', 368099), (u'BQK', 6934), (u'MSP', 2754997), (u'ACT', 21081), (u'SBA', 119959), (u'HPN', 125500), (u'RFD', 1560), (u'CCR', 4465), (u'BWI', 1717380), (u'SJU', 461019), (u'SAV', 185855), (u'HOU', 1205951), (u'BPT', 8452), (u'RDU', 103678 ....
# this counting by key could have been done
# more easily using countByKey()
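# e.g. (sketch): counts = lines.map(stratify).countByKey()
# countByKey() returns a dict-like {origin: count} collected to the driver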
vals = [x[1] for x in result]
sum(vals) == numLines # a bit of a check
# True
[x[1] for x in result if x[0] == "SFO"] # SFO result
# [2733910]
# if don't collect, can grab a few results
output = lines.map(stratify).reduceByKey(add)
output.take(5)
#[(u'Origin', 22), (u'CIC', 7281), (u'LAN', 67897), (u'LNY', 289), (u'DAB', 86656)]
# also, you can have interim results stored as objects
mapped = lines.map(stratify)
result = mapped.reduceByKey(add).collect()
lines.filter(lambda line: "SFO" in line.split(',')[16]).saveAsTextFile('/data/airline-SFO')
## make sure it's all in one chunk for easier manipulation on master
lines.filter(lambda line: "SFO" in line.split(',')[16]).repartition(1).saveAsTextFile('/data/airline-SFO2')
#lines.filter(lambda line: "SFO" in line.split(',')[16]).repartition(1).
#saveAsTextFile('/data/airline-SFO2')
## @knitr spark-nonstandard
def computeKeyValue(line):
vals = line.split(',')
# key is carrier-month-origin-destination
keyVals = '-'.join([vals[x] for x in [8,1,16,17]])
if vals[0] == 'Year':
return('0', [0,0,1,1])
cnt1 = 1
cnt2 = 1
# 14 and 15 are arrival and departure delays
if vals[14] == 'NA':
vals[14] = '0'
cnt1 = 0
if vals[15] == 'NA':
vals[15] = '0'
cnt2 = 0
return(keyVals, [int(vals[14]), int(vals[15]), cnt1, cnt2])
def medianFun(input):
if len(input) == 2: # input[0] should be key and input[1] set of values
if len(input[1]) > 0:
# iterate over set of values
# input[1][i][0] is arrival delay
# input[1][i][1] is departure delay
m1 = np.median([val[0] for val in input[1] if val[2] == 1])
m2 = np.median([val[1] for val in input[1] if val[3] == 1])
return((input[0], m1, m2)) # m1, m2))
else:
return((input[0], -999, -999))
else:
return((input[0], -9999, -9999))
output = lines.map(computeKeyValue).groupByKey()
medianResults = output.map(medianFun).collect()
medianResults[0:5]
# [(u'DL-8-PHL-LAX', 85.0, 108.0), (u'OO-12-IAH-CLL', -6.0, 0.0), (u'AA-4-LAS-JFK', 2.0, 0.0), (u'WN-8-SEA-GEG', 0.0, 0.0), (u'MQ-1-ORD-MDT', 3.0, 1.0)]
## @knitr spark-fit1
lines = sc.textFile('/data/airline')
def screen(vals):
vals = vals.split(',')
return(vals[0] != 'Year' and vals[14] != 'NA' and
vals[18] != 'NA' and vals[3] != 'NA' and
float(vals[14]) < 720 and float(vals[14]) > (-30) )
# 0 field is Year
# 14 field is ArrDelay
# 18 field is Distance
# 3 field is DayOfWeek
lines = lines.filter(screen).repartition(192).cache()
# 192 is a multiple of the total number of cores: 24 (12 nodes * 2 cores/node)
n = lines.count()
import numpy as np
from operator import add
P = 8
#######################
# calc xtx and xty
#######################
def crossprod(line):
vals = line.split(',')
y = float(vals[14])
dist = float(vals[18])
dayOfWeek = int(vals[3])
xVec = np.array([0.0] * P)
xVec[0] = 1.0
xVec[1] = float(dist)/1000
if dayOfWeek > 1:
xVec[dayOfWeek] = 1.0
xtx = np.outer(xVec, xVec)
xty = xVec * y
return(np.c_[xtx, xty])
xtxy = lines.map(crossprod).reduce(add)
# 11 minutes
# now just solve system of linear equations!!
#######################
# calc xtx and xty w/ mapPartitions
#######################
# dealing with x matrix via mapPartitions
def readPointBatch(iterator):
strs = list(iterator)
matrix = np.zeros((len(strs), P+1))
for i in xrange(len(strs)):
vals = strs[i].split(',')
dist = float(vals[18])
dayOfWeek = int(vals[3])
xVec = np.array([0.0] * (P+1))
xVec[8] = float(vals[14]) # y
xVec[0] = 1.0 # int
xVec[1] = float(dist) / 1000
if(dayOfWeek > 1):
xVec[dayOfWeek] = 1.0
matrix[i] = xVec
return([matrix.T.dot(matrix)])
xtxyBatched = lines.mapPartitions(readPointBatch).reduce(add)
# 160 seconds
mle = np.linalg.solve(xtxy[0:P,0:P], xtxy[0:P,P])
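# this solves the normal equations (X'X) beta = X'y, i.e. beta_hat = (X'X)^{-1} X'y,
# without explicitly forming the inverse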
## @knitr spark-fit2
def readPointPartition(iterator):
strs = list(iterator)
matrix = np.zeros((len(strs), P+1))
print(len(strs))
for i in xrange(len(strs)):
vals = strs[i].split(',')
dist = float(vals[18])
dayOfWeek = int(vals[3])
xVec = np.array([0.0] * (P+1))
xVec[8] = float(vals[14]) # y
xVec[0] = 1.0 # int
xVec[1] = float(dist) / 1000
if(dayOfWeek > 1):
xVec[dayOfWeek] = 1.0
matrix[i] = xVec
return([matrix])
batches = lines.mapPartitions(readPointPartition).cache()
# 3 min
def denomSumSqPartition(mat):
return((mat*mat).sum(axis=0))
# notice I do use global variables in here
# one may be able to avoid this by using
# nested functions, if one wanted to
def getNumPartition(mat):
beta[p] = 0
sumXb = mat[:, 0:P].dot(beta)
return(sum((mat[:,P] - sumXb)*mat[:,p]))
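# a closure-based alternative (sketch, not used below) that avoids the globals:
# def make_num_partition(beta_snapshot, p_idx):
#     def num_partition(mat):
#         b = beta_snapshot.copy(); b[p_idx] = 0
#         resid = mat[:, P] - mat[:, 0:P].dot(b)
#         return sum(resid * mat[:, p_idx])
#     return num_partition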
sumx2 = batches.map(denomSumSqPartition).reduce(add)
beta = np.array([0.0] * P)
p = 0
oldBeta = beta.copy() # otherwise a shallow (i.e., pointer) copy!
it = 0
tol = .001
maxIts = 10
crit = 1e16
while crit > tol and it <= maxIts:
#for it in range(1,6):
for p in xrange(P):
# get numerator as product of residual and X for coordinate
sumNum = batches.map(getNumPartition).reduce(add)
beta[p] = sumNum / sumx2[p]
print("Updated var " + str(p) + " in iteration ", str(it), ".")
crit = sum(abs(beta - oldBeta))
oldBeta = beta.copy()
print("-"*100)
print(beta)
print(crit)
print("-"*100)
it = it+1
# 7 s per iteration; ~9 minutes for 10 iterations
beta
#array([ 6.59246803, 0.76054724, -0.92357814, 0.16881708, 2.00073749,
# 2.66270618, -2.65116571, -0.36017589])
## @knitr spark-fit3
alpha = .4
def sumVals(mat):
return(sum(mat[:,P]))
beta = np.array([0.0] * P)
beta[0] = batches.map(sumVals).reduce(add) / n
oldBeta = beta.copy()
def getGradBatch(mat):
sumXb = mat[:, 0:P].dot(beta)
return( ((sumXb - mat[:,P])*((mat[:, 0:P]).T)).sum(1) )
def ssqObj(mat):
return ( (pow(mat[:,P] - mat[:, 0:P].dot(beta), 2)).sum() )
objValue = batches.map(ssqObj).reduce(add)
nIts = 100
storeVals = np.zeros((nIts, P+2))
tol = .001
maxIts = 100
crit = 1e16
it = 0  # reset the iteration counter carried over from the coordinate-descent section above
while crit > tol and it < maxIts:
gradVec = batches.map(getGradBatch).reduce(add)
beta = beta - alpha*gradVec / n
crit = sum(abs(beta - oldBeta))
objValue = batches.map(ssqObj).reduce(add)
oldBeta = beta.copy()
storeVals[it, 0] = pow(objValue/n,0.5)
storeVals[it, 1] = crit
storeVals[it, 2:(P+2)] = beta
print("-"*100)
print(it)
print(beta)
print(crit)
print(pow(objValue/n,0.5))
print("-"*100)
it = it + 1
# 15 min
#[ 6.57348292 0.75335604 -0.9251238 0.16222806 1.98565752 2.64468325
# -2.63650861 -0.36507276]
## @knitr pyspark-script
import sys
from pyspark import SparkContext
from numpy import random as rand
if __name__ == "__main__":
sc = SparkContext()
# use sys.argv to get arguments
# for example:
total_samples = int(sys.argv[1]) if len(sys.argv) > 1 else 1000000
num_slices = int(sys.argv[2]) if len(sys.argv) > 2 else 2
samples_per_slice = round(total_samples / num_slices)
def sample(p):
rand.seed(p)
x, y = rand.random(samples_per_slice), rand.random(samples_per_slice)
# x, y = rand.random(samples_per_slice),
# rand.random(samples_per_slice)
return sum(x*x + y*y < 1)
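    # each uniform point lands inside the quarter circle with probability pi/4,
    # so 4 * hits / total estimates pi (see the print below)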
count = sc.parallelize(xrange(0, num_slices), num_slices).map(sample).reduce(lambda a, b: a + b)
#count = sc.parallelize(xrange(0, num_slices), num_slices).
# map(sample).reduce(lambda a, b: a + b)
print "Pi is roughly %f" % (4.0 * count / (num_slices*samples_per_slice))
| [
"[email protected]"
] | |
d12abdc96ff2a4a3dcceac91fcc0efdf9b70ecfc | 12f4ed0f5c462fbe5429cf5d58d15d53d7b21fd3 | /modules/timescan_plot.py | d162ee9462095fcbe70b34e35be198f6d0b7ef65 | [
"MIT"
] | permissive | zamerman/FELion-Spectrum-Analyser | 1c6d421e8b80a97431349362f4e04aff6c2a13ed | 1e026e0646c88d1c537e9f4b73ea290a559642f1 | refs/heads/master | 2020-04-18T15:22:05.417904 | 2019-01-25T20:36:20 | 2019-01-25T20:36:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,019 | py | #!/usr/bin/python3
import matplotlib.pyplot as plt
import math
import os
import numpy as np
from scipy.optimize import curve_fit
def func(x, a, b, c):
#return a * np.exp(-b * x) + c
return a * np.log(b * x) + c
def timescanplot(filename, location, deg):
os.chdir(location)
f = open(filename)
no_of_mass = 0
start = False
raw_datas = []
iterations = []
time = []
for line in f:
if line.find("#mass")>=0:
no_of_mass += 1
tmp = line.split(":")[-1].strip()
iterations.append(int(tmp))
if not line[0] == "#" and not line == "\n":
if line.strip() == "ALL:":
start = True
if not start:
tmp = line.split()
time.append(tmp)
if start and not line.strip()=="ALL:":
raw_datas.append(line)
f.close()
time = [float(i) for i in list(zip(*(time)))[0]]
all_datas = [[float(j) for j in i.split()] for i in raw_datas]
data_run_cycle = int(len(all_datas)/sum(iterations))
datas = [[[] for j in range(iterations[i])] for i in range(no_of_mass)]
k = 0
for i in range(no_of_mass):
for j in range(iterations[i]):
d = data_run_cycle
datas[i][j] = all_datas[k:d+k]
k += d
mass_values = [i[0][0][0] for i in datas]
data_sets = [list(zip(*datas[i])) for i in range(no_of_mass)]
mass_mean_adder, mass_mean, m = [], [], []
    variance, variance_collector1, variance_collector2 = [], [], []
    for i in range(no_of_mass):
        for k in range(data_run_cycle):
            for j in range(iterations[i]):
                #mass counts
                m_tmp = data_sets[i][k][j][2]
                mass_mean_adder.append(m_tmp)
            #variance
            variance_adder = mass_mean_adder
            m.append(sum(mass_mean_adder)/len(mass_mean_adder))
            mass_mean_adder = []
            #variance
            for v in range(iterations[i]):
                if iterations[i]>1:
                    v_tmp = (variance_adder[v]-m[k])**2
                    variance_collector1.append(v_tmp)
            if iterations[i]>1:
                variance_collector2.append(sum(variance_collector1)/(len(variance_collector1)-1))
            else: variance_collector2.append(0)
            variance_collector1 = []
        #mass counts
        mass_mean.append(m)
        m = []
        #variance
        variance.append(variance_collector2)
        variance_collector2 = []
    #standard deviation
    standard_deviation = [[math.sqrt(i) for i in j] for j in variance]
#standard error
standard_error = [[i/math.sqrt(k) for i in j] for j, k in zip(standard_deviation, iterations)]
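    # standard error of the mean: SE = s / sqrt(n), with n = iterations per mass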
if len(time)<len(mass_mean[0]):
for i in range(no_of_mass):
del mass_mean[i][-1]
del standard_error[i][-1]
plt.figure(figsize=(10,5), dpi = 100)
for i in range(no_of_mass):
if iterations[i]>1:
x, y, err = time, mass_mean[i], standard_error[i]
lg = "{} : {}".format(mass_values[i], iterations[i])
plt.errorbar(x, y, yerr = err, fmt = ".", label = lg )
#Polyfit
z = np.polyfit(x, y, deg)
p = np.poly1d(z)
y_fit = [p(i) for i in x]
plt.plot(x, y_fit, "k-")
'''# log fit
popt, pcov = curve_fit(func, x, y)
#brutal force to avoid errors
x = np.array(x, dtype=float) #transform your data in a numpy array of floats
y = np.array(y, dtype=float) #so the curve_fit can work
yy = func(x, *popt)
plt.plot(x, yy, 'r-', label = "log fit")'''
plt.grid(True)
plt.xlabel("Time (ms)")
plt.ylabel("Ion Counts")
plt.legend()
plt.title(filename + ": Polyfit of Order: %i"%deg)
plt.savefig(filename+".png")
plt.show()
plt.close() | [
"[email protected]"
] | |
ccc2d167ac107d67d5dce9eccc319ea10ee961bc | 6c986942c41e3ce58e1f65e33965aeca76b28be2 | /build/config.gypi | eafd5f2954985787e1461375c621b2687e7007be | [
"MIT"
] | permissive | DeveloperOnCall/ccxt-rest | 1cad481dcd0911ee8a874ce6470c13ec431e2022 | 236fdeb13ba899efdae1c2c10c251d0140ed71df | refs/heads/master | 2020-04-21T11:33:50.128165 | 2019-02-18T09:37:36 | 2019-02-18T09:37:36 | 169,530,056 | 0 | 0 | MIT | 2019-02-18T09:37:37 | 2019-02-07T06:40:58 | JavaScript | UTF-8 | Python | false | false | 2,515 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": [],
"msbuild_toolset": "v141",
"msvs_windows_target_platform_version": "10.0.17763.0"
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt62l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "62",
"nasm_version": "2.13",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.64",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\bsworld\\.node-gyp\\10.15.1",
"standalone_static_library": 1,
"msbuild_path": "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\BuildTools\\MSBuild\\15.0\\Bin\\MSBuild.exe"
}
}
| [
"[email protected]"
] | |
54875a6f1d4757cff47825ae74a8ec4bd7b10b27 | 119c14bd2b00b5d2fdb7919e42758989a62fe0d6 | /tc/scripts/prepare_release_package.py | 00bff703110d531d81ed98d15b4a7c62f80def92 | [] | no_license | scorpionipx/PythonNewProjectUtils | f0006b156b4496a8a781a35977aeae97496b8bee | a75cac254d724ea130fa9ad3f5e09247eb535c6a | refs/heads/main | 2023-04-11T16:21:18.603901 | 2021-04-24T13:14:39 | 2021-04-24T13:14:39 | 361,159,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,055 | py | from os.path import (
dirname as directory_name,
join as join_paths,
)
# prepare Python environment on TeamCity build agent
# ======================================================================================================================
import sys
sys.path.append(directory_name(directory_name(__file__)))
# ======================================================================================================================
try:
# for local builds
from tc.scripts import utils
from tc.scripts import version_number_check
except ImportError:
# for TeamCity builds
from scripts import utils
from scripts import version_number_check
from shutil import (
rmtree as delete_directory,
)
from os import (
W_OK,
access,
chmod,
remove as delete_file,
)
from stat import S_IWUSR
ROOT_DIRECTORY = utils.ROOT_DIRECTORY
BUILD_CONFIGURATION = utils.BUILD_CONFIGURATION
def cleanup_package():
"""cleanup_package
Delete unnecessary content.
:return: process result
:rtype: bool
"""
print('Deleting unnecessary content...')
# specify relative path to the root (e.g.: CarMaker/src, CarMaker/include)
to_be_deleted_directories = [
'lab2daq_info/lab2daq_info_logs',
]
to_be_deleted_directories.sort() # sort the list (alphabetically) for an easier overview in the log
# do not specify files in this tuple if they are within to_be_deleted_directories
# specify relative path to the root (e.g.: CarMaker/doc/Docs_link.url)
to_be_deleted_files = [
]
to_be_deleted_files.sort() # sort the list (alphabetically) for an easier overview in the log
if to_be_deleted_directories:
print('Deleting directories:')
# enumerating directories
for to_be_deleted_directory in to_be_deleted_directories:
print('- {}'.format(to_be_deleted_directory))
total = len(to_be_deleted_directories)
for index, to_be_deleted_directory in enumerate(to_be_deleted_directories):
print('[{}/{}] Deleting directory: {}...'.format(index + 1, total, to_be_deleted_directory))
try:
delete_directory(to_be_deleted_directory)
except Exception as exception:
error = 'Failed to delete directory: {}! {}'.format(to_be_deleted_directory, exception)
print(error)
return False
print('Directories deleted!')
else:
print('No directory specified to be deleted! Step skipped!')
if to_be_deleted_files:
print('Deleting files:')
for to_be_deleted_file in to_be_deleted_files:
print('- {}'.format(to_be_deleted_file))
total = len(to_be_deleted_files)
for index, to_be_deleted_file in enumerate(to_be_deleted_files):
print('[{}/{}] Deleting file: {}...'.format(index + 1, total, to_be_deleted_file))
try:
delete_file(to_be_deleted_file)
except Exception as exception:
error = 'Failed to delete file: {}! {}'.format(to_be_deleted_file, exception)
print(error)
return False
print('Files deleted!')
else:
print('No file specified to be deleted! Step skipped!')
return True
def handle_delete_errors(func, path, _):
"""handle_delete_errors
Used for hidden or read-only files to by-pass access limitations.
:param func: caller
:param path: path that caused the error
:param _: to be displayed
:return: function call
"""
if not access(path, W_OK):
chmod(path, S_IWUSR)
func(path)
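# intended as shutil.rmtree's onerror callback, e.g. (sketch, not wired up here):
# delete_directory(some_path, onerror=handle_delete_errors)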
# main function
def prepare_release_package():
"""prepare_release_package
Prepare the artifacts to be published.
:return: process result
:rtype: bool
"""
if not cleanup_package():
return False
return True
if __name__ == '__main__':
if not prepare_release_package():
exit(utils.ERROR_CODE_FAILED_TO_PREPARE_RELEASE_PACKAGE)
exit(0)
| [
"[email protected]"
] | |
1cc45b363b57795b57d684c33403d76ce57ad105 | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/645.set-mismatch/645.set-mismatch_110729827.py | b0f89e7b1a8eafb7c1765f6f336372c5914e18a2 | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
dup = None
s = set()
for n in nums:
if n in s:
dup = n
break
s.add(n)
expect = len(nums) * (len(nums) + 1) / 2
actual = sum(nums)
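        # sum over 1..n is n(n+1)/2; the array sum is short the missing number
        # and over by the duplicate, so missing = expect - actual + dup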
return [dup, expect - actual + dup] | [
"[email protected]"
] | |
7edd261ecd3cac0778eaa30bd79a5765e255a77f | 71441983c59d35d102d82d5a604e17aaa123850d | /django/q_manager/urls.py | df66401fdcfe4c5528c331361c8d766a4a9f869d | [] | no_license | ITCSsDeveloper/Q-Manager | b1dd14f9949f386f7765fc75de778a805a7b6a81 | 9a56d7cbb09ebc1068ef62ad373271d5927205c3 | refs/heads/master | 2023-03-02T04:36:13.858634 | 2021-02-10T06:55:38 | 2021-02-10T06:55:38 | 334,401,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | from django.contrib import admin
from django.urls import path
from . import views
from . import api_web_controller as api_web
from . import api_log
# WebManageAPI
# ClientApi
urlpatterns = [
# ROOT RENDER VIEWS
path('',views.index),
path('task/create',views.create_view),
path('task/logs',views.log_view),
path('task/update',views.update_view),
path('task/delete',views.delete_view),
    path('task/monitor',views.monitor),
# WEB API
path('api/task/start', api_web.api_start),
path('api/task/stop', api_web.api_stop),
path('api/task/reset', api_web.api_reset),
path('api/task/show', api_web.api_show_all_task),
path('api/task/create', api_web.api_create_task),
path('api/task/delete', api_web.api_delete_task),
path('api/task/log/show', api_web.api_get_logs),
path('api/task/log/clear', api_web.api_clear_logs),
# JOB API
path('api/helper/get_task', api_log.api_get_task),
path('api/helper/insert_log', api_log.api_insert_log),
path('api/helper/update_status', api_log.api_update_status),
path('api/helper/update_pid', api_log.api_update_pid),
]
| [
"[email protected]"
] | |
3ef55c9c4a412644229dcfa5d230983a9af3e4f0 | e707164df1aa8edb5d276179538bd1eb1805f759 | /CODE/fedora_application/env/lib/python2.7/site-packages/tw2/core/compat.py | 6b5bccad18d59cde247a54813bc5cc5ef061f8a8 | [] | no_license | beckastar/cleaner_markov | af5816c14c94a8cb7924728179470e7db9ed2bc0 | a6de3fd87db77c0d80789cbce0ff409c222b4e67 | refs/heads/master | 2021-01-02T22:52:08.989862 | 2013-11-10T04:51:04 | 2013-11-10T04:51:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py | """ Collections of extra pieces to help smooth over community divergence. """
from widgets import Widget
import util  # assumed sibling module (tw2.core.util) providing abort(), used below
class TGStyleController(Widget):
""" A widget mixin that provides more advanced controller routing and
dispatching.
The need for this mainly sprung from a divergence of source trees
(unintentionally forking) between the developers of tw2 while it was still
alpha/beta. One team expected users to define controllers as a 'request'
classmethod on the widget and another team expected users to define a
Controller class as a child of the widget. Team A's pattern is now the
default in the main tree. This is a shim to support Team B's approach.
Use it like this:
>>> import tw2.core
>>> class MyWidget(tw2.core.TGStyleController, tw2.core.Widget):
... class Controller(object):
... @jsonify
... def some_method(self, req):
... return dict(foo="bar")
"""
@classmethod
def dispatch(cls, req, controller):
path = req.path_info.strip('/').split('/')[2:]
if len(path) == 0:
method_name = 'index'
else:
method_name = path[0]
# later we want to better handle .ext conditions, but hey
# this aint TG
if method_name.endswith('.json'):
method_name = method_name[:-5]
method = getattr(controller, method_name, None)
if not method:
method = getattr(controller, 'default', None)
return method
@classmethod
def request(cls, req):
"""
Override this method to define your own way of handling a widget
request.
The default does TG-style object dispatch.
"""
authn = cls.attrs.get('_check_authn')
authz = cls.attrs.get('_check_authz')
if authn and not authn(req):
return util.abort(req, 401)
controller = cls.attrs.get('controller', cls.Controller)
if controller is None:
return util.abort(req, 404)
method = cls.dispatch(req, controller)
if method:
if authz and not authz(req, method):
return util.abort(req, 403)
controller = cls.Controller()
return method(controller, req)
return util.abort(req, 404)
| [
"[email protected]"
] | |
6029e1d727d38a647c9641eb60993c66f6d6b945 | aef01bd9a269407e28229f9d796222d519087e0d | /rotation_modeling/views.py | 9035de85b194094189a3d2fd6f294c87dc4ca0d9 | [] | no_license | SerMcDonald/my-first-blog | 72f7fc242f937bfc5cd0970fc48a1c75301ca395 | f90cdb78876af42818d58a62afd26b14041feb2e | refs/heads/master | 2021-01-19T23:05:27.611379 | 2017-03-26T23:19:33 | 2017-03-26T23:19:39 | 83,783,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | from django.http import HttpResponse
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404
from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_protect
from .models import RotationModel
from django.utils import timezone
from .math_model import *
import json
# Create your views here.
def model_main(request):
if request.GET.get('orbit_hight'):
H = int(request.GET["orbit_hight"])
m1 = int(request.GET["mas1"])
m2 = int(request.GET["mas2"])
L = int(request.GET["cab_len"])
F = float(request.GET["thrust"])
α = int(request.GET["ang_a"])
        γ = int(request.GET["ang_g"])
rmm = RotationMathModel(H, m1, m2, L, F, α, γ)
y0 = np.array([0, 0, 0, 0, 0, 0, 0])
res, t = rmm.runge1(rmm.f, y0, 0, 3150)
w=res[:,1]
o=res[:,0]
return render(request, 'rotation_modeling/build_model.html', {'mas': zip(t, w), 'mas2': zip(o, w)})
return render(request, 'rotation_modeling/build_model.html', {'mas': zip([0, 0], [0, 0])})
def build_model(request):
if request.GET.get('orbit_hight'):
H = int(request.GET["orbit_hight"])
m1 = int(request.GET["mas1"])
m2 = int(request.GET["mas2"])
L = int(request.GET["cab_len"])
F = float(request.GET["thrust"])
α = int(request.GET["ang_a"])
γ = int(request.GET["ang_g"])
rmm =RotationMathModel(H,m1,m2,L,F,α,γ)
y0 = np.array([0, 0, 0, 0, 0, 0, 0])
res, t = rmm.runge2(rmm.f, y0, 0, 3150)
w=res[:,1]
o=res[:,0]
m1=[[x,y] for x,y in zip(t,w)]
m2=[[x,y] for x,y in zip(o,w)]
T = [ rmm.TT(op,wp,0,0,tp) for op,wp,tp in zip(o, w, t)]
m3 =[[x,y] for x,y in zip(t, T)]
        return JsonResponse({"mas1":m1, "mas2":m2, "mas3":m3})
    return JsonResponse({'mas': '1'})
def saved_models(request):
models = RotationModel.objects.all();
return render(request, "rotation_modeling/saved_models.html", {'models':models})
@csrf_protect
def save_model(request):
c = {}
c.update(csrf(request))
if request.is_ajax():
if request.method == 'POST':
data =json.loads(request.body)
RotationModel.objects.create(name=data['name'], time_avel_data=data['time_avel_data'],
ang_vel_data = data['ang_vel_data'], time_thrust_data = data['time_thrust_data'],
comment = data['comment'], orbit_hight=data['orbit_hight'], cab_len = data['cab_len'],
mas1=data['mas1'], mas2 = data['mas2'], thrust = data['thrust'], published_date = timezone.now())
return HttpResponse("OK")
def saved_model_detail(request, pk):
model = get_object_or_404(RotationModel, pk=pk)
return render(request, 'rotation_modeling/saved_model_detail.html', {'model': model}) | [
"[email protected]"
] | |
3c4c7fbe4db52ee08aecdc41c231b4ab72f76621 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/287/92439/submittedfiles/testes.py | 8e0bed67fc84c29fdd272a49a066e73613123d99 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | # -*- coding: utf-8 -*-
from minha_bib import*
import time
import os
import random
'''
board=[" "," "," "," "," "," "," "," "," "," "]
print(""+board[1]+"|"+board[2]+"|"+board[3]+"")
print(""+board[4]+"|"+board[5]+"|"+board[6]+"")
print(""+board[7]+"|"+board[8]+"|"+board[9]+"")
'''
'''
notas = []
for i in range(0,50,1):
    notas.append(float(input('enter grade %d: ' % (i+1))))
media = 0
for i in range(0,50,1):
media += notas[i]/5.0
print(notas)
print(media)
'''
'''
notas = []
for i in range(0,4,1):
    notas.append(float(input('enter grade %d: ' % (i+1))))
media = 0
for i in range(0,50,1):
media += notas[i]/4.0
print[1]
print(media)
'''
'''
n=[1,2,3,4,5,6]
print(n) # the vector n
print(sum(n))
print(len(n))
del n[2]
print(n)
print(6 in n)
n.append(3)
print(n)
'''
'''
fatorial(5)
'''
'''
quadro([' ','O',' ',' ',' ',' ',' ',' ',' ',' '])
n=int(input('what is your move?'))
if n==11:
quadro([' ','O',' ',' ','X',' ',' ',' ',' ',' '])
elif n==00:
quadro(['X','O',' ',' ',' ',' ',' ',' ',' ',' '])
'''
lista1=[1,2,3,4,5]
print(lista1[len(lista1)-1])
| [
"[email protected]"
] | |
48aa18c811e997b713e2a4733c34b1162b413ca0 | 1cde834a4f3052b5135deb5bc294988a5026ce34 | /RegistersGenerator/valuesmap/OTG_HS_DEVICE.py | 9cc4121d74736bcdae1700bec87bfea99f0dad03 | [] | no_license | KorolyovNikita/stmdemoproject | 6abe665740fe3edea5a408b1a341593b005ba33d | e9b6aa0329ef3aa12c35adac246057a2f4f4acbd | refs/heads/master | 2023-03-13T21:33:39.088190 | 2021-03-08T16:53:35 | 2021-03-08T16:53:35 | 345,728,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,724 | py | from tempvalue import*
def apply(OTG_HS_DEVICE):
# -------------------------------------------------------------------------
# OTG_HS_DCFG (OTG_HS device configuration register)
# Offset: 0 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DCFG']['PERSCHIVL'].values = None
OTG_HS_DEVICE['OTG_HS_DCFG']['PFIVL'].values = None
OTG_HS_DEVICE['OTG_HS_DCFG']['DAD'].values = None
OTG_HS_DEVICE['OTG_HS_DCFG']['NZLSOHSK'].values = None
OTG_HS_DEVICE['OTG_HS_DCFG']['DSPD'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DCTL (OTG_HS device control register)
# Offset: 4 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DCTL']['POPRGDNE'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['CGONAK'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['SGONAK'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['CGINAK'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['SGINAK'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['TCTL'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['GONSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['GINSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['SDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DCTL']['RWUSIG'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DSTS (OTG_HS device status register)
# Offset: 8 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DSTS']['FNSOF'].values = None
OTG_HS_DEVICE['OTG_HS_DSTS']['EERR'].values = None
OTG_HS_DEVICE['OTG_HS_DSTS']['ENUMSPD'].values = None
OTG_HS_DEVICE['OTG_HS_DSTS']['SUSPSTS'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPMSK (OTG_HS device IN endpoint common interrupt mask register)
# Offset: 16 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['BIM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['TXFURM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['INEPNEM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['INEPNMM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['ITTXFEMSK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['TOM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['EPDM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPMSK']['XFRCM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPMSK (OTG_HS device OUT endpoint common interrupt mask register)
# Offset: 20 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['BOIM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['OPEM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['OTEPDM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['STUPM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['EPDM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPMSK']['XFRCM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DAINT (OTG_HS device all endpoints interrupt register)
# Offset: 24 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DAINT']['OEPINT'].values = None
OTG_HS_DEVICE['OTG_HS_DAINT']['IEPINT'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DAINTMSK (OTG_HS all endpoints interrupt mask register)
# Offset: 28 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DAINTMSK']['OEPM'].values = None
OTG_HS_DEVICE['OTG_HS_DAINTMSK']['IEPM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DVBUSDIS (OTG_HS device VBUS discharge time register)
# Offset: 40 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DVBUSDIS']['VBUSDT'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DVBUSPULSE (OTG_HS device VBUS pulsing time register)
# Offset: 44 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DVBUSPULSE']['DVBUSP'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTHRCTL (OTG_HS Device threshold control register)
# Offset: 48 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTHRCTL']['ARPEN'].values = None
OTG_HS_DEVICE['OTG_HS_DTHRCTL']['RXTHRLEN'].values = None
OTG_HS_DEVICE['OTG_HS_DTHRCTL']['RXTHREN'].values = None
OTG_HS_DEVICE['OTG_HS_DTHRCTL']['TXTHRLEN'].values = None
OTG_HS_DEVICE['OTG_HS_DTHRCTL']['ISOTHREN'].values = None
OTG_HS_DEVICE['OTG_HS_DTHRCTL']['NONISOTHREN'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPEMPMSK (OTG_HS device IN endpoint FIFO empty interrupt mask register)
# Offset: 52 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPEMPMSK']['INEPTXFEM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DEACHINT (OTG_HS device each endpoint interrupt register)
# Offset: 56 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DEACHINT']['OEP1INT'].values = None
OTG_HS_DEVICE['OTG_HS_DEACHINT']['IEP1INT'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DEACHINTMSK (OTG_HS device each endpoint interrupt register mask)
# Offset: 60 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DEACHINTMSK']['OEP1INTM'].values = None
OTG_HS_DEVICE['OTG_HS_DEACHINTMSK']['IEP1INTM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPEACHMSK1 (OTG_HS device each in endpoint-1 interrupt register)
# Offset: 64 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['NAKM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['BIM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['TXFURM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['INEPNEM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['INEPNMM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['ITTXFEMSK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['TOM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['EPDM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPEACHMSK1']['XFRCM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPEACHMSK1 (OTG_HS device each OUT endpoint-1 interrupt register)
# Offset: 128 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['NYETM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['NAKM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['BERRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['BIM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['TXFURM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['INEPNEM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['INEPNMM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['ITTXFEMSK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['TOM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['EPDM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPEACHMSK1']['XFRCM'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL0 (OTG device endpoint-0 control register)
# Offset: 256 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL0']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT0 (OTG device endpoint-0 interrupt register)
# Offset: 264 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT0']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPTSIZ0 (OTG_HS device IN endpoint 0 transfer size register)
# Offset: 272 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ0']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ0']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPDMA1 (OTG_HS device endpoint-1 DMA address register)
# Offset: 276 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPDMA1']['DMAADDR'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTXFSTS0 (OTG_HS device IN endpoint transmit FIFO status register)
# Offset: 280 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTXFSTS0']['INEPTFSAV'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL1 (OTG device endpoint-1 control register)
# Offset: 288 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL1']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT1 (OTG device endpoint-1 interrupt register)
# Offset: 296 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT1']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPTSIZ1 (OTG_HS device endpoint transfer size register)
# Offset: 304 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ1']['MCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ1']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ1']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPDMA2 (OTG_HS device endpoint-2 DMA address register)
# Offset: 308 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPDMA2']['DMAADDR'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTXFSTS1 (OTG_HS device IN endpoint transmit FIFO status register)
# Offset: 312 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTXFSTS1']['INEPTFSAV'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL2 (OTG device endpoint-2 control register)
# Offset: 320 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL2']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT2 (OTG device endpoint-2 interrupt register)
# Offset: 328 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT2']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPTSIZ2 (OTG_HS device endpoint transfer size register)
# Offset: 336 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ2']['MCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ2']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ2']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPDMA3 (OTG_HS device endpoint-3 DMA address register)
# Offset: 340 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPDMA3']['DMAADDR'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTXFSTS2 (OTG_HS device IN endpoint transmit FIFO status register)
# Offset: 344 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTXFSTS2']['INEPTFSAV'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL3 (OTG device endpoint-3 control register)
# Offset: 352 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL3']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT3 (OTG device endpoint-3 interrupt register)
# Offset: 360 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT3']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPTSIZ3 (OTG_HS device endpoint transfer size register)
# Offset: 368 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ3']['MCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ3']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ3']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPDMA4 (OTG_HS device endpoint-4 DMA address register)
# Offset: 372 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPDMA4']['DMAADDR'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTXFSTS3 (OTG_HS device IN endpoint transmit FIFO status register)
# Offset: 376 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTXFSTS3']['INEPTFSAV'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL4 (OTG device endpoint-4 control register)
# Offset: 384 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL4']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT4 (OTG device endpoint-4 interrupt register)
# Offset: 392 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT4']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPTSIZ4 (OTG_HS device endpoint transfer size register)
# Offset: 400 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ4']['MCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ4']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ4']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPDMA5 (OTG_HS device endpoint-5 DMA address register)
# Offset: 404 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPDMA5']['DMAADDR'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTXFSTS4 (OTG_HS device IN endpoint transmit FIFO status register)
# Offset: 408 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTXFSTS4']['INEPTFSAV'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL5 (OTG device endpoint-5 control register)
# Offset: 416 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL5']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT5 (OTG device endpoint-5 interrupt register)
# Offset: 424 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT5']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPTSIZ5 (OTG_HS device endpoint transfer size register)
# Offset: 432 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ5']['MCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ5']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPTSIZ5']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DTXFSTS5 (OTG_HS device IN endpoint transmit FIFO status register)
# Offset: 440 Size: 32 Access: ReadMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DTXFSTS5']['INEPTFSAV'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL6 (OTG device endpoint-6 control register)
# Offset: 448 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL6']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT6 (OTG device endpoint-6 interrupt register)
# Offset: 456 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT6']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPCTL7 (OTG device endpoint-7 control register)
# Offset: 480 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['TXFNUM'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPCTL7']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DIEPINT7 (OTG device endpoint-7 interrupt register)
# Offset: 488 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['NAK'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['BERR'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['PKTDRPSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['BNA'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['TXFIFOUDRN'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['TXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['INEPNE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['ITTXFE'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['TOC'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DIEPINT7']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPCTL0 (OTG_HS device control OUT endpoint 0 control register)
# Offset: 768 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['SNPM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL0']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT0 (OTG_HS device endpoint-0 interrupt register)
# Offset: 776 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT0']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT0']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT0']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT0']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT0']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT0']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPTSIZ0 (OTG_HS device endpoint-0 transfer size register)
# Offset: 784 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ0']['STUPCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ0']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ0']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPCTL1 (OTG device endpoint-1 control register)
# Offset: 800 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['SNPM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL1']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT1 (OTG_HS device endpoint-1 interrupt register)
# Offset: 808 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT1']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT1']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT1']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT1']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT1']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT1']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPTSIZ1 (OTG_HS device endpoint-1 transfer size register)
# Offset: 816 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ1']['RXDPID_STUPCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ1']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ1']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPCTL2 (OTG device endpoint-2 control register)
# Offset: 832 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['SNPM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL2']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT2 (OTG_HS device endpoint-2 interrupt register)
# Offset: 840 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT2']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT2']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT2']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT2']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT2']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT2']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPTSIZ2 (OTG_HS device endpoint-2 transfer size register)
# Offset: 848 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ2']['RXDPID_STUPCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ2']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ2']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPCTL3 (OTG device endpoint-3 control register)
# Offset: 864 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['EPENA'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['EPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['SODDFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['SD0PID_SEVNFRM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['SNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['CNAK'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['Stall'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['SNPM'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['EPTYP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['NAKSTS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['EONUM_DPID'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['USBAEP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPCTL3']['MPSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT3 (OTG_HS device endpoint-3 interrupt register)
# Offset: 872 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT3']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT3']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT3']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT3']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT3']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT3']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPTSIZ3 (OTG_HS device endpoint-3 transfer size register)
# Offset: 880 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ3']['RXDPID_STUPCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ3']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ3']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT4 (OTG_HS device endpoint-4 interrupt register)
# Offset: 904 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT4']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT4']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT4']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT4']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT4']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT4']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPTSIZ4 (OTG_HS device endpoint-4 transfer size register)
# Offset: 912 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ4']['RXDPID_STUPCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ4']['PKTCNT'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPTSIZ4']['XFRSIZ'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT5 (OTG_HS device endpoint-5 interrupt register)
# Offset: 936 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT5']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT5']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT5']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT5']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT5']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT5']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT6 (OTG_HS device endpoint-6 interrupt register)
# Offset: 968 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT6']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT6']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT6']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT6']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT6']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT6']['XFRC'].values = None
# -------------------------------------------------------------------------
# OTG_HS_DOEPINT7 (OTG_HS device endpoint-7 interrupt register)
# Offset: 1000 Size: 32 Access: ReadWriteMode
# -------------------------------------------------------------------------
OTG_HS_DEVICE['OTG_HS_DOEPINT7']['NYET'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT7']['B2BSTUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT7']['OTEPDIS'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT7']['STUP'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT7']['EPDISD'].values = None
OTG_HS_DEVICE['OTG_HS_DOEPINT7']['XFRC'].values = None
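# -------------------------------------------------------------------------
# Example usage (editor's sketch, not part of the generated register map).
# It assumes only what is visible in this file: OTG_HS_DEVICE behaves like a
# dict of dicts mapping register names to bit-field objects, and each field
# carries a .values attribute that the assignments above reset to None. The
# helper name is hypothetical.
# -------------------------------------------------------------------------
def fields_with_cleared_values(peripheral):
    """Yield (register, field) name pairs whose enumerated values are None."""
    for reg_name, fields in peripheral.items():
        for field_name, field in fields.items():
            if field.values is None:
                yield (reg_name, field_name)

# e.g. ('OTG_HS_DOEPINT7', 'XFRC') in set(fields_with_cleared_values(OTG_HS_DEVICE))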
# -------------------------------------------------------------------------
# 2021-12-15-APWidth/OutputModels/outputModels_dict_177964669_1p1x.py
# (repo: analkumar2/Thesis-work)
# -------------------------------------------------------------------------
# exec(open('OutputModels/outputModels_dict_177964669_1p1x.py').read())
Models = {}
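# Editor's note (inferred from the entries themselves): each key below names
# a model in which exactly one Na_Chan kinetic parameter is perturbed (by
# +0.001 in these entries) relative to the base model. Each value holds
# 'Parameters' (Morphology, Passive, Channels with Gbar/Erev/Kinetics/
# KineticVars, Ca_Conc), an empty 'Scores' dict, and 'AP1_width', the width
# of the first action potential, apparently in seconds.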
Models['ModelNa_Chan_m_vhalf_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 333594147.16633767, 'Em': -0.026166709551186194}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.030600000000000002, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002310812240534199}
Models['ModelNa_Chan_m_slope_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 588929499.2427237, 'Em': -0.01441151749020704}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0078, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0022983219762647694}
Models['ModelNa_Chan_m_A'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 205728299.07669124, 'Em': -0.05317993915163798}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0355, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0022793110369287994}
Models['ModelNa_Chan_m_B'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 618726674.6098562, 'Em': -0.01631691026006032}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.021, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023539597401172063}
Models['ModelNa_Chan_m_C'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 390568573.23018324, 'Em': -0.01898744790926187}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0171, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002336037467592922}
Models['ModelNa_Chan_m_D'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 215617876.27046952, 'Em': -0.05159769155873739}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0557, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002328894944730431}
Models['ModelNa_Chan_m_E'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 417272566.08047783, 'Em': -0.023155036225332076}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0321, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002360219828696497}
Models['ModelNa_Chan_m_F'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 454315634.64521134, 'Em': -0.01164713822389808}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00164, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002290390613669091}
Models['ModelNa_Chan_h_vhalf_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 225442050.88816452, 'Em': -0.045945952676630576}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.065, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0024087379218782345}
Models['ModelNa_Chan_h_slope_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 608442150.231875, 'Em': -0.015306490275597944}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0043, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0022680342859373592}
Models['ModelNa_Chan_h_A'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 661102699.0394332, 'Em': -0.0027941803523634395}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04460699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002363523778938781}
Models['ModelNa_Chan_h_B'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 656330254.2086437, 'Em': -0.025884823075958838}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00533522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002442952422647693}
Models['ModelNa_Chan_h_C'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 562134323.5558146, 'Em': -0.014817547983848602}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.012975750000000001, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0025446994640316856}
Models['ModelNa_Chan_h_D'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 309211001.4834687, 'Em': -0.039102299350309226}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02717791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.00235038937905796}
Models['ModelNa_Chan_h_E'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 396588051.1572409, 'Em': -0.01945253249129592}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00953832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002364574317104795}
Models['ModelNa_Chan_h_F'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 927586595.1511579, 'Em': 0.03843993948728425}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.040003210000000004, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023532388022422523}
Models['ModelNa_Chan_s_vhalf_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 249901595.6992308, 'Em': -0.03986996325369243}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.032, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023475696124317658}
Models['ModelNa_Chan_s_slope_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 664782717.4076881, 'Em': 0.008914895637681433}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.005, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023158704371346595}
Models['ModelNa_Chan_s_A'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 468231363.34919786, 'Em': -0.01833711196792252}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1.001, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002388533157725581}
Models['ModelNa_Chan_s_B'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 497941552.9550698, 'Em': -0.03199960065976513}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.002, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023803125985624174}
Models['ModelNa_Chan_s_C'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 638804874.5560255, 'Em': 0.0062988293761354775}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.002, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023128367366660463}
Models['ModelNa_Chan_s_D'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 291957887.4480391, 'Em': -0.0410984396121779}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.501, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023737122210072403}
Models['ModelNa_Chan_s_E'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 1429908207.2948582, 'Em': 0.09067687219660206}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.002, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023368259575657913}
Models['ModelNa_Chan_s_F'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 966171430.350947, 'Em': 0.025411860880751876}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1.001}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023711831171979725}
Models['ModelK_DR_Chan_n_vhalf_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 212777030.29012504, 'Em': -0.05216987018297732}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013999999999999999, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023263927361094616}
Models['ModelK_DR_Chan_n_slope_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 268272544.73712504, 'Em': -0.03192686538474785}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0097666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002338408495858868}
Models['ModelK_DR_Chan_n_A'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 325760429.2558085, 'Em': -0.0449252177779402}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.013600000000000001, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023549766855095466}
Models['ModelK_DR_Chan_n_B'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 206083083.48368245, 'Em': -0.05111928740544698}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0183, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023016192888862808}
Models['ModelK_DR_Chan_n_C'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 481620118.3676282, 'Em': -0.034149115161917494}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0.001, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023399670740573697}
Models['ModelK_DR_Chan_n_D'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 249724048.58872327, 'Em': -0.045629924762764125}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0.001, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002367907994527574}
Models['ModelK_DR_Chan_n_E'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 277258160.1707651, 'Em': -0.04714790141935278}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0353, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023415819190106824}
Models['ModelK_DR_Chan_n_F'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 446879483.825087, 'Em': -0.01685361426463463}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.004059999999999999}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002335329309293366}
Models['ModelK_A_Chan_n_vhalf_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 461323283.0893757, 'Em': -0.026339321227533203}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.026000000000000002, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023738725748287948}
Models['ModelK_A_Chan_n_slope_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 231258695.83829924, 'Em': -0.052492448741198766}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.018000000000000002, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0021590474906918455}
Models['ModelK_A_Chan_n_A'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 390137824.4113088, 'Em': -0.017776881987497262}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00778, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0022941957404289948}
Models['ModelK_A_Chan_n_B'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 960913939.0088881, 'Em': 0.0403396867381537}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.057300000000000004, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023197540045500897}
Models['ModelK_A_Chan_n_C'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 774287534.0670455, 'Em': -0.008475288807122595}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0.001, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023987567124452713}
Models['ModelK_A_Chan_n_D'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 749026206.1026433, 'Em': 0.011429150049697107}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0.001, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023578448833534438}
Models['ModelK_A_Chan_n_E'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 772188502.9898665, 'Em': 0.0069155902377175645}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0275, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002405088584344295}
Models['ModelK_A_Chan_n_F'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 368054439.20111096, 'Em': -0.044820143166204654}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0115, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023605468251222117}
Models['ModelK_A_Chan_l_vhalf_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 271346517.0748666, 'Em': -0.04201333196927862}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.055, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023124989163025855}
Models['ModelK_A_Chan_l_slope_inf'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 951396195.109464, 'Em': 0.03417704655242522}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00777, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.002377881314465702}
Models['ModelK_A_Chan_l_min'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 480620633.39889145, 'Em': -0.026554330761160136}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.003, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023099240781716546}
Models['ModelK_A_Chan_l_m'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 617148018.0631377, 'Em': 0.0030803624342173696}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.261, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023277584399414852}
Models['ModelK_A_Chan_l_cm'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 221414342.96694714, 'Em': -0.03942441361743569}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.051000000000000004}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 3.3e-05}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023270744517451813}
Models['ModelK_M_Chan_factor'] = {'Parameters': {'notes': '', 'Morphology': {'sm_len': 6.73077545020806e-05, 'sm_diam': 6.73077545020806e-05}, 'Passive': {'Cm': 1.19e-10, 'Rm': 1092093004.0578883, 'Em': 0.04784235494974525}, 'Channels': {'Na_Chan': {'Gbar': 0.00018720713325928506, 'Erev': 0.06, 'Kinetics': '../../Compilations/Kinetics/Na_Chan_Custom4', 'KineticVars': {'m_vhalf_inf': -0.0316, 'm_slope_inf': 0.0068, 'm_A': -0.0365, 'm_B': 0.02, 'm_C': 0.0161, 'm_D': 0.0547, 'm_E': 0.0311, 'm_F': 0.00064, 'h_vhalf_inf': -0.066, 'h_slope_inf': -0.0053, 'h_A': -0.04560699, 'h_B': 0.00433522, 'h_C': 0.01197575, 'h_D': 0.02617791, 'h_E': 0.00853832, 'h_F': 0.03900321, 's_vhalf_inf': -0.033, 's_slope_inf': -0.006, 's_A': 1, 's_B': 0.001, 's_C': 0.001, 's_D': 0.5, 's_E': 0.001, 's_F': 1}}, 'K_DR_Chan': {'Gbar': 1.0502259538910637e-07, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_DR_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.013, 'n_slope_inf': 0.0087666, 'n_A': 0.0126, 'n_B': 0.0173, 'n_C': 0, 'n_D': 0, 'n_E': 0.0343, 'n_F': 0.00306}}, 'K_A_Chan': {'Gbar': 1.008422244061249e-06, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_A_Chan_Custom3', 'KineticVars': {'n_vhalf_inf': 0.025, 'n_slope_inf': 0.017, 'n_A': -0.00878, 'n_B': 0.0563, 'n_C': 0, 'n_D': 0, 'n_E': 0.0265, 'n_F': 0.0105, 'l_vhalf_inf': -0.056, 'l_slope_inf': -0.00877, 'l_min': 0.002, 'l_m': 0.26, 'l_cm': 0.05}}, 'K_M_Chan': {'Gbar': 8.032153557169671e-09, 'Erev': -0.09, 'Kinetics': '../../Compilations/Kinetics/K_M_Chan_Custom1', 'KineticVars': {'factor': 0.001033}}, 'h_Chan': {'Gbar': 5.3739087243907273e-11, 'Erev': -0.04, 'Kinetics': '../../Compilations/Kinetics/h_Chan_Custom1', 'KineticVars': {}}}, 'Ca_Conc': {'Ca_B': 75427936887.46373, 'Ca_tau': 0.038, 'Ca_base': 8e-05, 'Kinetics': '../../Compilations/Kinetics/Ca_Conc_(Common)'}}, 'Scores': {}, 'AP1_width': 0.0023515978410686422}
| ["[email protected]"] | |
5ce746b092109719ad6ead33f68b51e6e8877210 | 952d8c37764393aa180f53ba4d31bec6c0806fd6 | py-polars/tests/unit/datatypes/test_categorical.py | 2275b5211a2c91b32b959437f1f62b6996f861b0 | ["MIT"] | permissive | pola-rs/polars | dff713f82f0cc29a98bc3f0e3ee3ba1e0fb49ef3 | c50c1e69bd36f60a8864ea49fe40d0e17503f11c | refs/heads/main | 2023-08-28T00:13:27.043234 | 2023-08-27T18:34:52 | 2023-08-27T18:34:52 | 263,727,855 | 18,211 | 1,202 | MIT | 2023-09-14T18:52:43 | 2020-05-13T19:45:33 | Rust | UTF-8 | Python | false | false | 13,587 | py |
from __future__ import annotations
import io
from typing import Any
import pytest
import polars as pl
from polars import StringCache
from polars.exceptions import StringCacheMismatchError
from polars.testing import assert_frame_equal
@StringCache()
def test_categorical_outer_join() -> None:
df1 = pl.DataFrame(
[
pl.Series("key1", [42]),
pl.Series("key2", ["bar"], dtype=pl.Categorical),
pl.Series("val1", [1]),
]
).lazy()
df2 = pl.DataFrame(
[
pl.Series("key1", [42]),
pl.Series("key2", ["bar"], dtype=pl.Categorical),
pl.Series("val2", [2]),
]
).lazy()
expected = pl.DataFrame(
{"key1": [42], "key2": ["bar"], "val1": [1], "val2": [2]},
schema_overrides={"key2": pl.Categorical},
)
out = df1.join(df2, on=["key1", "key2"], how="outer").collect()
assert_frame_equal(out, expected)
dfa = pl.DataFrame(
[
pl.Series("key", ["foo", "bar"], dtype=pl.Categorical),
pl.Series("val1", [3, 1]),
]
)
dfb = pl.DataFrame(
[
pl.Series("key", ["bar", "baz"], dtype=pl.Categorical),
pl.Series("val2", [6, 8]),
]
)
df = dfa.join(dfb, on="key", how="outer")
# the cast is important to test the rev map
assert df["key"].cast(pl.Utf8).to_list() == ["bar", "baz", "foo"]
def test_read_csv_categorical() -> None:
f = io.BytesIO()
f.write(b"col1,col2,col3,col4,col5,col6\n'foo',2,3,4,5,6\n'bar',8,9,10,11,12")
f.seek(0)
df = pl.read_csv(f, has_header=True, dtypes={"col1": pl.Categorical})
assert df["col1"].dtype == pl.Categorical
def test_cat_to_dummies() -> None:
df = pl.DataFrame({"foo": [1, 2, 3, 4], "bar": ["a", "b", "a", "c"]})
df = df.with_columns(pl.col("bar").cast(pl.Categorical))
assert df.to_dummies().to_dict(False) == {
"foo_1": [1, 0, 0, 0],
"foo_2": [0, 1, 0, 0],
"foo_3": [0, 0, 1, 0],
"foo_4": [0, 0, 0, 1],
"bar_a": [1, 0, 1, 0],
"bar_b": [0, 1, 0, 0],
"bar_c": [0, 0, 0, 1],
}
def test_categorical_describe_3487() -> None:
    # test that describe() does not raise on categorical columns
df = pl.DataFrame({"cats": ["a", "b"]})
df = df.with_columns(pl.col("cats").cast(pl.Categorical))
df.describe()
@StringCache()
def test_categorical_is_in_list() -> None:
    # this requires type coercion to cast.
    # we should not cast within the function itself, as inside a group_by
    # context that would mean one cast per group, which is expensive
df = pl.DataFrame(
{"a": [1, 2, 3, 1, 2], "b": ["a", "b", "c", "d", "e"]}
).with_columns(pl.col("b").cast(pl.Categorical))
cat_list = ("a", "b", "c")
assert df.filter(pl.col("b").is_in(cat_list)).to_dict(False) == {
"a": [1, 2, 3],
"b": ["a", "b", "c"],
}
@StringCache()
def test_unset_sorted_on_append() -> None:
df1 = pl.DataFrame(
[
pl.Series("key", ["a", "b", "a", "b"], dtype=pl.Categorical),
pl.Series("val", [1, 2, 3, 4]),
]
).sort("key")
df2 = pl.DataFrame(
[
pl.Series("key", ["a", "b", "a", "b"], dtype=pl.Categorical),
pl.Series("val", [5, 6, 7, 8]),
]
).sort("key")
df = pl.concat([df1, df2], rechunk=False)
assert df.group_by("key").count()["count"].to_list() == [4, 4]
def test_categorical_error_on_local_cmp() -> None:
df_cat = pl.DataFrame(
[
pl.Series("a_cat", ["c", "a", "b", "c", "b"], dtype=pl.Categorical),
pl.Series("b_cat", ["F", "G", "E", "G", "G"], dtype=pl.Categorical),
]
)
with pytest.raises(
pl.ComputeError,
match=(
"cannot compare categoricals originating from different sources; consider"
" setting a global string cache"
),
):
df_cat.filter(pl.col("a_cat") == pl.col("b_cat"))
def test_cast_null_to_categorical() -> None:
assert pl.DataFrame().with_columns(
[pl.lit(None).cast(pl.Categorical).alias("nullable_enum")]
).dtypes == [pl.Categorical]
def test_shift_and_fill() -> None:
df = pl.DataFrame({"a": ["a", "b"]}).with_columns(
[pl.col("a").cast(pl.Categorical)]
)
s = df.with_columns(pl.col("a").shift_and_fill("c", periods=1))["a"]
assert s.dtype == pl.Categorical
assert s.to_list() == ["c", "a"]
@StringCache()
def test_merge_lit_under_global_cache_4491() -> None:
df = pl.DataFrame(
[
pl.Series("label", ["foo", "bar"], dtype=pl.Categorical),
pl.Series("value", [3, 9]),
]
)
assert df.with_columns(
pl.when(pl.col("value") > 5)
.then(pl.col("label"))
.otherwise(pl.lit(None, pl.Categorical))
).to_dict(False) == {"label": [None, "bar"], "value": [3, 9]}
def test_nested_cache_composition() -> None:
    # very artificial example/test, but it validates the behaviour of nested
    # StringCache scopes, which should compose cleanly with each other when
    # building more complex pipelines.
assert pl.using_string_cache() is False
# function representing a composable stage of a pipeline; it implements
# an inner scope for the case where it is called by itself, but when
# called as part of a larger series of ops it should not invalidate
# the string cache (eg: the outermost scope should be respected).
def create_lazy(data: dict) -> pl.LazyFrame: # type: ignore[type-arg]
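        # (note: the "data" argument is not actually used below; the frame is
        # hardcoded, so both calls in the outer scope build identical frames)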
with pl.StringCache():
df = pl.DataFrame({"a": ["foo", "bar", "ham"], "b": [1, 2, 3]})
lf = df.with_columns(pl.col("a").cast(pl.Categorical)).lazy()
# confirm that scope-exit does NOT invalidate the
# cache yet, as an outer context is still active
assert pl.using_string_cache() is True
return lf
# this outer scope should be respected
with pl.StringCache():
lf1 = create_lazy({"a": ["foo", "bar", "ham"], "b": [1, 2, 3]})
lf2 = create_lazy({"a": ["spam", "foo", "eggs"], "c": [3, 2, 2]})
res = lf1.join(lf2, on="a", how="inner").collect().rows()
assert sorted(res) == [("bar", 2, 2), ("foo", 1, 1), ("ham", 3, 3)]
# no other scope active; NOW we expect the cache to have been invalidated
assert pl.using_string_cache() is False
def test_categorical_max_null_5437() -> None:
assert (
pl.DataFrame({"strings": ["c", "b", "a", "c"], "values": [0, 1, 2, 3]})
.with_columns(pl.col("strings").cast(pl.Categorical).alias("cats"))
.select(pl.all().max())
).to_dict(False) == {"strings": ["c"], "values": [3], "cats": [None]}
def test_categorical_in_struct_nulls() -> None:
s = pl.Series(
"job", ["doctor", "waiter", None, None, None, "doctor"], pl.Categorical
)
df = pl.DataFrame([s])
s = (df.select(pl.col("job").value_counts(sort=True)))["job"]
assert s[0] == {"job": None, "counts": 3}
assert s[1] == {"job": "doctor", "counts": 2}
assert s[2] == {"job": "waiter", "counts": 1}
def test_cast_inner_categorical() -> None:
dtype = pl.List(pl.Categorical)
out = pl.Series("foo", [["a"], ["a", "b"]]).cast(dtype)
assert out.dtype == dtype
assert out.to_list() == [["a"], ["a", "b"]]
with pytest.raises(
pl.ComputeError, match=r"casting to categorical not allowed in `list.eval`"
):
pl.Series("foo", [["a", "b"], ["a", "b"]]).list.eval(
pl.element().cast(pl.Categorical)
)
@pytest.mark.slow()
def test_stringcache() -> None:
N = 1_500
with pl.StringCache():
# create a large enough column that the categorical map is reallocated
df = pl.DataFrame({"cats": pl.arange(0, N, eager=True)}).select(
[pl.col("cats").cast(pl.Utf8).cast(pl.Categorical)]
)
assert df.filter(pl.col("cats").is_in(["1", "2"])).to_dict(False) == {
"cats": ["1", "2"]
}
@StringCache()
def test_categorical_sort_order(monkeypatch: Any) -> None:
# create the categorical ordering first
pl.Series(["foo", "bar", "baz"], dtype=pl.Categorical)
df = pl.DataFrame(
{
"n": [0, 0, 0],
# use same categories in different order
"x": pl.Series(["baz", "bar", "foo"], dtype=pl.Categorical),
}
)
assert df.sort(["n", "x"])["x"].to_list() == ["foo", "bar", "baz"]
assert df.with_columns(pl.col("x").cat.set_ordering("lexical")).sort(["n", "x"])[
"x"
].to_list() == ["bar", "baz", "foo"]
monkeypatch.setenv("POLARS_ROW_FMT_SORT", "1")
assert df.sort(["n", "x"])["x"].to_list() == ["foo", "bar", "baz"]
assert df.with_columns(pl.col("x").cat.set_ordering("lexical")).sort(["n", "x"])[
"x"
].to_list() == ["bar", "baz", "foo"]
def test_err_on_categorical_asof_join_by_arg() -> None:
df1 = pl.DataFrame(
[
pl.Series("cat", ["a", "foo", "bar", "foo", "bar"], dtype=pl.Categorical),
pl.Series("time", [-10, 0, 10, 20, 30], dtype=pl.Int32),
]
)
df2 = pl.DataFrame(
[
pl.Series(
"cat",
["bar", "bar", "bar", "bar", "foo", "foo", "foo", "foo"],
dtype=pl.Categorical,
),
pl.Series("time", [-5, 5, 15, 25] * 2, dtype=pl.Int32),
pl.Series("x", [1, 2, 3, 4] * 2, dtype=pl.Int32),
]
)
with pytest.raises(
StringCacheMismatchError,
match="cannot compare categoricals coming from different sources",
):
df1.join_asof(df2, on=pl.col("time").set_sorted(), by="cat")
def test_categorical_list_get_item() -> None:
out = pl.Series([["a"]]).cast(pl.List(pl.Categorical)).item()
assert isinstance(out, pl.Series)
assert out.dtype == pl.Categorical
def test_nested_categorical_aggregation_7848() -> None:
# a double categorical aggregation
assert pl.DataFrame(
{
"group": [1, 1, 2, 2, 2, 3, 3],
"letter": ["a", "b", "c", "d", "e", "f", "g"],
}
).with_columns([pl.col("letter").cast(pl.Categorical)]).group_by(
maintain_order=True, by=["group"]
).all().with_columns(
[pl.col("letter").list.lengths().alias("c_group")]
).group_by(
by=["c_group"], maintain_order=True
).agg(
pl.col("letter")
).to_dict(
False
) == {
"c_group": [2, 3],
"letter": [[["a", "b"], ["f", "g"]], [["c", "d", "e"]]],
}
def test_nested_categorical_cast() -> None:
values = [["x"], ["y"], ["x"]]
dtype = pl.List(pl.Categorical)
s = pl.Series(values).cast(dtype)
assert s.dtype == dtype
assert s.to_list() == values
def test_struct_categorical_nesting() -> None:
# this triggers a lot of materialization
df = pl.DataFrame(
{"cats": ["Value1", "Value2", "Value1"]},
schema_overrides={"cats": pl.Categorical},
)
s = df.select(pl.struct(pl.col("cats")))["cats"].implode()
assert s.dtype == pl.List(pl.Struct([pl.Field("cats", pl.Categorical)]))
# triggers recursive conversion
assert s.to_list() == [[{"cats": "Value1"}, {"cats": "Value2"}, {"cats": "Value1"}]]
# triggers different recursive conversion
assert len(s.to_arrow()) == 1
def test_categorical_fill_null_existing_category() -> None:
# ensure physical types align
assert pl.DataFrame(
{"col": ["a", None, "a"]}, schema={"col": pl.Categorical}
).fill_null("a").with_columns(pl.col("col").to_physical().alias("code")).to_dict(
False
) == {
"col": ["a", "a", "a"],
"code": [0, 0, 0],
}
@StringCache()
def test_categorical_fill_null_stringcache() -> None:
df = pl.LazyFrame(
{"index": [1, 2, 3], "cat": ["a", "b", None]},
schema={"index": pl.Int64(), "cat": pl.Categorical()},
)
a = df.select(pl.col("cat").fill_null("hi")).collect()
assert a.to_dict(False) == {"cat": ["a", "b", "hi"]}
assert a.dtypes == [pl.Categorical]
def test_fast_unique_flag_from_arrow() -> None:
df = pl.DataFrame(
{
"colB": ["1", "2", "3", "4", "5", "5", "5", "5"],
}
).with_columns([pl.col("colB").cast(pl.Categorical)])
filtered = df.to_arrow().filter([True, False, True, True, False, True, True, True])
assert pl.from_arrow(filtered).select(pl.col("colB").n_unique()).item() == 4 # type: ignore[union-attr]
def test_construct_with_null() -> None:
# Example from https://github.com/pola-rs/polars/issues/7188
df = pl.from_dicts([{"A": None}, {"A": "foo"}], schema={"A": pl.Categorical})
assert df.to_series().to_list() == [None, "foo"]
s = pl.Series([{"struct_A": None}], dtype=pl.Struct({"struct_A": pl.Categorical}))
assert s.to_list() == [{"struct_A": None}]
def test_categorical_concat_string_cached() -> None:
with pl.StringCache():
df1 = pl.DataFrame({"x": ["A"]}).with_columns(pl.col("x").cast(pl.Categorical))
df2 = pl.DataFrame({"x": ["B"]}).with_columns(pl.col("x").cast(pl.Categorical))
out = pl.concat([df1, df2])
assert out.dtypes == [pl.Categorical]
assert out["x"].to_list() == ["A", "B"]
def test_list_builder_different_categorical_rev_maps() -> None:
with pl.StringCache():
# built with different values, so different rev-map
s1 = pl.Series(["a", "b"], dtype=pl.Categorical)
s2 = pl.Series(["c", "d"], dtype=pl.Categorical)
assert pl.DataFrame({"c": [s1, s2]}).to_dict(False) == {
"c": [["a", "b"], ["c", "d"]]
}
| [
"[email protected]"
] | |
d864a4db68b90c60949426e4a34b6eed612fd8d5 | da0418dc9e322ecc77a2296bf3ebfa55b73aef03 | /gm1200.py | 44b8758cf2227a72a874f9b563477babf82ebbd1 | [] | no_license | GasdaSoftware/XTL-SB9600-Playground | 507b14db9976669d64367624468e6fc4a7a954f0 | 5df2fd5e7a3418056b5c3e92a35f7a989b53af9b | refs/heads/main | 2023-07-17T17:36:42.735093 | 2021-07-26T02:13:17 | 2021-07-26T02:13:17 | 400,980,028 | 0 | 0 | null | 2021-08-29T07:34:35 | 2021-08-29T07:34:35 | null | UTF-8 | Python | false | false | 5,651 | py | #!/bin/env python3
# GM1200 Controller class
# Copyright (C) 2014 Paul Banks (http://paulbanks.org)
#
# This file is part of GM1200Controller
#
# GM1200Controller is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GM1200Controller is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GM1200Controller. If not, see <http://www.gnu.org/licenses/>.
#
from time import sleep
from binascii import hexlify, unhexlify
import sb9600
# Addressable modules
MODULE_BCAST = 0
MODULE_RADIO = 1
MODULE_FRONTPANEL = 5
# Lamp mappings
lamps_map = {
"L1": 0x0D, "L2RED": 0x0B, "L2GREEN": 0x0C, "L3": 0x01, "L4": 0x02, "L5":
0x04, "L5B": 0x05, "L6": 0x10, "L7": 0x07, "L8": 0x11, "L9": 0x12, "L10":
0x13, "L11": 0x0E, "L12": 0x0F, "L13": 0x14, "L14": 0x15, "L15": 0x16,
"L16": 0x17, "L17": 0x18, "L18": 0x19,
}
# Lamp attributes
LAMP_OFF = 0
LAMP_ON = 1
LAMP_FLASH = 2
# Illumination addresses
ILLUM_MIC = 1 #TODO: A guess - I didn't have a mic attached to verify it!
ILLUM_DISPLAY = 2
ILLUM_BUTTONS = 3
# Control values
BUTTON_DOWN = 1
BUTTON_UP = 0
class GM1200:
"""GM1200 Controller"""
def __init__(self, bus):
self.bus = bus
def CSQ(self):
"""Enter CSQ mode"""
self.bus.sb9600_send(MODULE_RADIO, 0x02, 0, 0x40)
def Reset(self):
self.bus.sb9600_send(MODULE_BCAST, 0x00, 0x01, 0x08)
def SBEP(self, module):
"""Enter SBEP mode"""
self.bus.sb9600_send(MODULE_BCAST, 0x12, module, 0x06)
self.bus.sbep_enter()
def Display(self, text, offset=0):
"""Send text to display"""
if len(text) > 14:
raise ValueError("Text too long!")
# Build display message
msg = bytes((0x80, 0x00, len(text), 0x00, offset))
msg += bytes(text, "ASCII")
msg += b"\x00" * len(text) # Character attributes
# Send it
self.SBEP(MODULE_FRONTPANEL)
self.bus.sbep_send(0x01, msg)
self.bus.sbep_leave()
def Lamp(self, lamp, function):
"""Switch on/off/flash lamp"""
# If lamp is not an integer, use it as key to look up lampID in map
if not isinstance(lamp, int):
lamp = lamps_map[lamp]
self.SBEP(MODULE_FRONTPANEL)
self.bus.sbep_send(0x21, bytes((0x01, lamp, function)))
self.bus.sbep_leave()
def Illumination(self, illum, level):
"""Change level of illumination"""
self.bus.sb9600_send(MODULE_FRONTPANEL, illum, level, 0x58)
def Control(self, controlid, value):
"""Indicate a control use"""
self.bus.sb9600_send(MODULE_FRONTPANEL, controlid, value & 0xFF, 0x57)
def ReadEEPROM(self, module, startaddr, endaddr, callback=None):
"""Read EEPROM data. Note: you'll need to reset the radio after this!"""
self.CSQ()
        self.bus.wait_for_quiet()
# Select device? (TODO: What is this?)
# You'll need to reset the radio after this command
self.bus.sb9600_send(MODULE_BCAST, module, 0x01, 0x08)
# Must wait some time before entering SBEP mode
sleep(0.5)
self.SBEP(MODULE_RADIO)
# Read the data
eedata = b''
chunklen = 0x40
for addr in range(startaddr, endaddr, chunklen):
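            # request frame: [chunk_len, addr_hi, addr_mid, addr_lo]; the EEPROM
            # address goes out as a 24-bit big-endian value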
msg = bytes(
(chunklen, (addr >> 16) & 0xFF, (addr >> 8) & 0xFF, addr & 0xFF))
self.bus.sbep_send(0x11, msg)
op, data = self.bus.sbep_recv()
if op==0x80: # Reply is EEPROM data
addr_rx = data[0]<<16 | data[1]<<8 | data[2]
if addr_rx != addr:
raise RuntimeError("Unexpected address in reply addr=0x%x" % addr_rx )
if len(data[3:]) != chunklen:
raise RuntimeError("Unexpected data length!")
eedata += data[3:]
else:
raise RuntimeError("Unexpected reply op=%d" % op)
# Notify of progress
if callback:
callback(addr)
# Done with SBEP mode
self.bus.sbep_leave()
return eedata
def Audio(self, enable):
enable = 1 if enable else 0
self.bus.sb9600_send(MODULE_RADIO, 0x00, enable, 0x1D)
def SetRXFrequency(self, frequency):
"""Set the receiver frequency"""
ch = int( (frequency*1E6 / 6250) - 60000 )
self.bus.sb9600_send(0x03, (ch>>8) & 0xFF, ch & 0xFF, 0x3F)
def SetTXFrequency(self, frequency):
"""Set the transmitter frequency"""
ch = int( (frequency*1E6 / 6250) - 60000 )
self.bus.sb9600_send(0x02, (ch>>8) & 0xFF, ch & 0xFF, 0x3F)
if __name__=="__main__":
print("GM1200 controller tester")
bus = sb9600.Serial("/dev/ttyUSB0")
gm1200 = GM1200(bus)
#eedata = gm1200.ReadEEPROM(1, 0, 0x800)
#f = open("EEDUMP.bin", "wb")
#f.write(eedata)
#f.close()
#gm1200.Reset()
#bus.wait_for_quiet()
gm1200.CSQ()
bus.wait_for_quiet()
gm1200.SetRXFrequency(433.5) # MHz
gm1200.SetTXFrequency(433.5) # MHz
gm1200.Audio(1)
gm1200.Lamp("L2RED", LAMP_FLASH)
gm1200.Lamp("L8", LAMP_ON)
gm1200.Illumination(ILLUM_DISPLAY, 0xd4)
gm1200.Illumination(ILLUM_BUTTONS, 0xd4)
msg = "HELLO WORLD "
try:
lamps = [[0x14,0x19],[0x15,0x18],[0x16,0x17]]
pos = 0
while True:
gm1200.Display(msg[pos:]+msg[0:pos])
pos+=1
pos%=14
for lg in lamps:
for l in lg:
gm1200.Lamp(l, LAMP_ON)
sleep(0.1)
for l in lg:
gm1200.Lamp(l, LAMP_OFF)
except KeyboardInterrupt:
gm1200.Reset()
| [
"[email protected]"
] | |
ff890750a3349b804af5b3b5fb297b2bbb6fc1f3 | 4117a3cdd06e43a054bc3a8856dad3d450174c37 | /interview/clockdeco_demo.py | ee7f84ba4a8f905dee5f0bac16ac8375e2190fa4 | [] | no_license | Caratpine/study | 15382964172cb520f6e9e572130157558ff1e4ec | 1ff4fbf7006a30a85741a665a3d7532f2cdaa8de | refs/heads/master | 2021-01-12T15:42:05.726049 | 2020-02-25T15:50:56 | 2020-02-25T15:50:56 | 71,852,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import time
def clock(func):
def clocked(*args):
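        # time the wrapped call, then report elapsed seconds, function name,
        # the repr of the arguments and the returned result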
t0 = time.perf_counter()
result = func(*args)
elapsed = time.perf_counter() - t0
name = func.__name__
arg_str = ', '.join(repr(arg) for arg in args)
print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))
return result
return clocked
@clock
def snooze(seconds):
time.sleep(seconds)
if __name__ == '__main__':
snooze(.123)
| [
"[email protected]"
] | |
bf645370ee54badf8a7ad04b4ae9c0a6e5e3fe26 | 2bd0c77329f6b3f9077434662760a83c34d249e4 | /utils/str2bf.py | f907a5b02c3bbe20dc0246fe142583e7151356bd | [
"EFL-2.0"
] | permissive | vifino/Hecta | 5eacf9062d34f0fedee55f1c79bc22cd394871e9 | 943a471d6aae6c8c39989b74abdaa476d4c814e1 | refs/heads/master | 2020-12-25T19:15:08.837124 | 2014-11-27T14:21:36 | 2014-11-27T14:21:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,423 | py | #!/usr/bin/env python
# coding: utf-8
"""
String to Brainfuck.
Converts a string to a brainfuck code that prints that string.
Author: j0hn <[email protected]>
"""
import sys
def char2bf(char):
"""Convert a char to brainfuck code that prints that char."""
result_code = ""
ascii_value = ord(char)
factor = ascii_value / 10
remaining = ascii_value % 10
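    # Generated pattern: seed a counter cell with ten '+', loop "[>+...+<-]" to
    # add `factor` to the next cell ten times (10 * factor), top it up with
    # `remaining` '+' so the cell holds the ASCII value, print with '.', then
    # clear with '[-]'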
result_code += "%s\n" % ("+" * 10)
result_code += "[\n"
result_code += " >\n"
result_code += " %s\n" % ("+" * factor)
result_code += " <\n"
result_code += " -\n"
result_code += "]\n"
result_code += ">\n"
result_code += "%s\n" % ("+" * remaining)
result_code += ".\n"
result_code += "[-]\n"
return result_code
def str2bf(string):
"""Convert a string to brainfuck code that prints that string."""
result = ""
for char in string:
result += char2bf(char)
return result
def print_help():
"""Print the help message."""
message = "python %s: missing arguments\n\n" % sys.argv[0]
message += "Usage: %s [OPTIONS] STRING\n" % sys.argv[0]
message += "Options:\n"
message += " -h, --help displays this help message.\n"
message += " -s, --small prints the code in one liner.\n"
message += " -n, --newline adds a new line character "
message += "at the end of the string.\n"
sys.stderr.write(message)
def main():
"""Reads the arguments from stdin and outputs the code."""
if len(sys.argv) < 2:
print_help()
sys.exit(0)
add_new_line = False
small_output = False
if "-n" in sys.argv or "--newline" in sys.argv:
add_new_line = True
try:
sys.argv.remove("-n")
except ValueError:
sys.argv.remove("--newline")
if "-s" in sys.argv or "--small" in sys.argv:
small_output = True
try:
sys.argv.remove("-s")
except ValueError:
sys.argv.remove("--small")
if "-h" in sys.argv or "--help" in sys.argv:
print_help()
sys.exit(0)
input_string = " ".join(sys.argv[1:])
result = str2bf(input_string + ("\n" * add_new_line))
if small_output:
result = result.replace(" ", "").replace("\n", "")
print result
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
1bffc70142a56a22d6885a27136660c15c3504c5 | 0e892156bb8f7b79462f25c23cd88baba3db8f30 | /users/migrations/0001_initial.py | ca0435088bcdaaba9dc21dc268af7b812e799699 | [] | no_license | codingmedved/tickets | a0ffa90f055dc8642e80f9c31541d3ab115f035f | 79d4f76701ce46f2233fd839a0d7425913c1524e | refs/heads/master | 2021-01-01T20:03:03.393306 | 2017-08-05T19:52:58 | 2017-08-05T19:52:58 | 98,752,002 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-07-29 18:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=128)),
('address', models.CharField(max_length=128)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
b0240fe840fdc4de767c4ce3b443698e464e19bd | f25974debb523ce8bd8c098e8fbfc68ba939236b | /cloud/ottscaleout/service/scripts/mgmtexamine.py | 3ca68ba81d67281e064afce5d3b363bcf291cec1 | [
"UPL-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | gonmers/oracle-timesten-samples | 24c6a984ecd06a60cf16c889e44bf828873f5c1a | 97a7a523a78f79edaad0ca4b49f1f9a4c2bcf03f | refs/heads/master | 2023-06-09T17:30:19.709949 | 2021-06-30T15:15:25 | 2021-06-30T15:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | #!/usr/bin/env python
# Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl
import sys
import subprocess
import fileinput
import string
teststr='ttgridadmin mgmtexamine'
def mgmtExamine(ttenv_path):
try:
#cmd=ttenv_path + ' ttgridadmin mgmtexamine | sed -r -e \'/Recommended commands:/{n;$p}\''
cmd=ttenv_path + " ttgridadmin mgmtexamine | awk 'x==1 {print} /Recommended commands:/ {x=1}'"
z=subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print ('mgmtexamine:\n{}\n{}'.format(cmd, z))
for line in z.splitlines():
# work around a bug where instance name isn't separated from bin/ttenv
y = line.replace('bin/ttenv','/bin/ttenv')
if line.lower().find(teststr) != -1:
mgmtExamine(ttenv_path)
continue
print(y)
x=subprocess.check_output(y, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as details:
        print('Error running recommendation from mgmtexamine: rc={0}: {1}'.format(
            details.returncode,
            str(details.output).replace('\n', '')))
if __name__ == '__main__':
if len(sys.argv) <= 1:
        print('{}: error: expected path of management instance'.format(sys.argv[0]))
sys.exit()
ttenvpath=sys.argv[1]
mgmtExamine(ttenvpath)
| [
"[email protected]"
] | |
11901354e2460ca0645149d407a655521cfe13f5 | 55dea1575e079a3f99fdc3d9eb69daf5eeaa14eb | /Exception Handling/exceptions.py | 7f14ea761d91e3fcb1c6b5622b49a3b6ef13b62e | [] | no_license | parekh0711/PPL_Assignments | b7bf86dbf98fb17b8a98ccc33c32513747bb8754 | 1b5c27459f41835eb17e6d6aee4fdf11a52a1fe0 | refs/heads/master | 2022-07-06T04:11:34.371996 | 2020-05-21T01:13:55 | 2020-05-21T01:13:55 | 265,709,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | #This code is to demonstrate how to handle various exceptions in python
try:
# print(1/0)
#
# import blahblahblah
#
# file_object = open("doesnotexist.txt", "r")
#
#
# keyboard_interrupt=input()
#
# dict={}
# print(dict['error'])
#
# print(a)
#
    # sys.exit(0)
    pass  # keeps the try block syntactically valid; uncomment a line above to test a handler
except KeyError:
print("Sorry, That dictionary does not have this key.")
except ZeroDivisionError:
print("Let us stay in the domain of Real Numbers")
except ImportError:
print("Please install before importing")
except IndentationError:
print("Have a look at your indentation again")
except SystemExit:
print("Your sys.exit() is working perfectly")
except OSError:
print("Could not open/read file")
except EOFError:
print("You have reached EOF but still tried reading")
except KeyboardInterrupt:
print("It's rude to interrupt people like that")
except AttributeError:
print("You passed the wrong attribute")
# except:
# print("You did something wrong which even I didn't expect")
finally:
print("I am executed no matter what")
| [
"[email protected]"
] | |
c51caf312d2bd595e50500d71a7ea3a418d60266 | 192312463c840398012a96f3178971e2f9faf5cc | /users/models/__init__.py | 309d3c8ac9b6ff46838f7ec5ec601d498c24476c | [] | no_license | SogunEdu/LabSys | 66135088403d59b74f05caa1e5f023b960812c45 | 03d209117d6afeca628c50e57b2f1875fbc8788d | refs/heads/master | 2020-04-03T10:58:40.333913 | 2018-11-06T01:35:20 | 2018-11-06T01:35:20 | 155,208,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | # -*- coding:utf-8 -*-
from users.models.user import *
from users.models.student import *
__author__ = "姜显辉" | [
"JohnnyStephen"
] | JohnnyStephen |
d194f41f1dd41ea0d559d059811fbc56a7d749cd | 4701f84941a2dd39448b0d25e467ec567440cef5 | /dirdiff.py | a9d090d317ba2a8af1683c8a637c1999355af8a8 | [] | no_license | lakshmankumar12/dirdiffer | 86f3e368f930408d204cc15e6db8128a74d377d2 | 24baba5a123e2408cf3c801b9b61cb64d700bf4c | refs/heads/master | 2021-01-10T08:03:25.728139 | 2016-04-23T00:40:19 | 2016-04-23T00:40:19 | 53,748,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,783 | py | #!/usr/bin/python
from __future__ import print_function
import sys
import os
import argparse
import hashlib
'''
Let's build a collection which is a dictionary keyed by sha1.
Each value is in turn a list of size 2, one slot per directory (index 0 for
dir1, index 1 for dir2); each slot holds the pathnames in that directory
whose contents hash to this sha1.
collection = {
sha1: [ ['/dir1/path1','/dir1/path2' ] , ['/dir2/path1/', '/dir2/path2/' ] ],
...
}
'''
def usage():
print("%s <dir1> <dir2>"%sys.argv[0])
print(" ")
print(" dir1 and dir2 will be diffed and it will report file-contents that are")
print(" presently only in dir1 and dir2")
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-b","--both", help="Print files common in both", action="store_true")
parser.add_argument("dir1", help="dir1")
parser.add_argument("dir2", help="dir2")
args = parser.parse_args()
return args
def sha1offile(fileName):
BLOCKSIZE = 65536
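    # hash in 64 KiB chunks so large files are never loaded fully into memory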
hasher = hashlib.sha1()
with open(fileName, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
def walk_dir_and_build_sha(dirname, collection, dir_index):
count = 0
for root,_,files in os.walk(dirname):
for f in files:
fname = os.path.join(root,f)
if os.path.islink(fname):
continue
if os.path.isfile(fname):
count += 1
if count % 100 == 0:
print ("Processed %d files in %s"%(count,dirname))
sha1 = sha1offile(fname)
if sha1 not in collection:
collection[sha1] = [ [], [] ]
collection[sha1][dir_index].append(fname)
return count
def compare_and_report(collection):
onlyOneDirShas = [ [], [] ]
both = []
for sha in collection:
found_both = 1
for i in range(2):
if not collection[sha][i]:
found_both = 0
onlyOneDirShas[1-i].append(sha)
if found_both:
both.append(sha)
return (onlyOneDirShas, both)
if __name__ == '__main__':
args = parseArgs()
collection = {}
count1 = walk_dir_and_build_sha(args.dir1, collection, 0)
count2 = walk_dir_and_build_sha(args.dir2, collection, 1)
onlyFiles, both = compare_and_report(collection)
print("We found %d files in %s and %d files in %s"%(count1,args.dir1,count2,args.dir2))
names=[args.dir1,args.dir2]
for i in range(2):
print("Only in %s .. count: %d"%(names[i],len(onlyFiles[i])))
for sha in onlyFiles[i]:
for j in collection[sha][i]:
print("%s"%j)
print("")
print("")
print ("Available in both .. count:%d"%(len(both)))
if args.both:
for sha in both:
for i in range(2):
for j in collection[sha][i]:
print ("%s"%j)
print("")
| [
"[email protected]"
] | |
09517ac4fc5638839dc3c85f4160316408b68897 | f5736261f00ad5f7c52d36733fa022244eb9178f | /Leetcode/232_Implement Queue using Stacks_06170223.py | f4c91ad3735161b32866d22df88ec8d0b70bf7be | [] | no_license | yuu0223/Python-DSA | 286b6a0b50cb7a78808e6ea7e2862b2e7ba5ff46 | aee92d7dfc6a7a12aaee3c4e9f1aa25cc5f29caa | refs/heads/master | 2022-07-23T23:24:31.597870 | 2022-07-06T09:27:19 | 2022-07-06T09:27:19 | 209,705,930 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | class MyQueue:
def __init__(self):
self.queue=[]
def push(self, x: int) -> None:
self.queue.append(x)
def pop(self) -> int:
return self.queue.pop(0) #pop(要移除的第n項)
def peek(self) -> int:
return self.queue[0]
def empty(self) -> bool:
return self.queue == []
#reference:https://www.runoob.com/python/python-lists.html
| [
"[email protected]"
] | |
a521614fd2ccbe29e7691d143ff6154280647512 | 78ddb8f3a35e56af4f3e59fba9ea583b6bbaab25 | /baidumaps/client.py | 411868aed491be26bcfa098f8935af5e495cabef | [
"MIT"
] | permissive | Feronial/baidu-maps-services-python | 6969defb9ae85ef77e4c00f4226000b912408a25 | 139aa274fbaab5320e25e04d231eb7f4f5ce7d85 | refs/heads/master | 2020-04-09T07:03:22.888873 | 2018-12-03T07:35:58 | 2018-12-03T07:35:58 | 160,138,586 | 0 | 0 | MIT | 2018-12-03T05:44:07 | 2018-12-03T05:44:06 | null | UTF-8 | Python | false | false | 3,502 | py | # -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright © 2015 Eli Song
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urllib
import requests
import re
import baidumaps
from baidumaps import apis
from baidumaps import exceptions
from baidumaps import parse
class Client(object):
def __init__(self, ak=None, domain='http://api.map.baidu.com',
output='json'):
if not ak:
raise ValueError("Must provide API when creating client. Refer to\
the link: http://lbsyun.baidu.com/apiconsole/key")
if ak and not re.search(r'^[a-zA-Z0-9]+$', ak):
raise ValueError('Invalid ak(API key)!')
self.ak = ak
self.domain = domain
self.output = output
def get(self, params):
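        # build the REST URL, issue the request, check Baidu's status field,
        # then either return the raw JSON or dispatch to the per-service parser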
request_url = self.generate_url(params)
response = requests.get(request_url).json()
status = response['status']
server_name = params['server_name']
subserver_name = params['subserver_name']
if status != 0:
raise exceptions.StatusError(server_name, subserver_name, status)
elif 'raw' in params and params['raw']:
result = response
else:
result = self.parse(server_name, subserver_name, response)
return result
def generate_url(self, params):
base_url = '/'.join([self.domain,
params['server_name'],
params['version'],
params['subserver_name']]
) + '?'
base_url = re.sub(r'//ip', '/ip', base_url) # for ip_locate()
temp = params.copy() # avoid altering argument 'params'
{temp.pop(key) for key in ['server_name', 'version', 'subserver_name']}
temp.update({'ak': self.ak, 'output': self.output})
addi_url = urllib.urlencode(temp)
return base_url + addi_url
Client.place_search = apis.place_search
Client.place_detail = apis.place_detail
Client.place_eventsearch = apis.place_eventsearch
Client.place_eventdetail = apis.place_eventdetail
Client.place_suggest = apis.place_suggest
Client.geocode = apis.geocode
Client.direct = apis.direct
Client.ip_locate = apis.ip_locate
Client.route_matrix = apis.route_matrix
Client.geoconv = apis.geoconv
Client.parse = parse.parse
# if __name__ == "__main__":
# bdmaps = Client(ak='<Your Baidu Auth Key>')
# result = bdmaps.geoconv('114.21892734521,29.575429778924')
# print result
| [
"[email protected]"
] | |
16f1b36580d611b273b5329b19c6b1903f167e9c | d0cc51ddee650ae7bd4d5d6b671053124770b1bd | /products/models.py | 008a3cf1bc5fa93ca6084d237322150eb4a4624b | [] | no_license | Prakash190797/Product-Hunt | 77e87bf672244850e16cacdd9da409463c87aab4 | 42066500284ec1e734c084a7853087d178e724f9 | refs/heads/master | 2020-04-24T23:00:22.751070 | 2019-03-06T10:56:56 | 2019-03-06T10:56:56 | 172,329,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# product class
# title - char
# pub_date - datetime
# body - text
# url - text
# image - image
# icon - image
# votes_total - integer
# hunter - foreignkey
class Product(models.Model):
title = models.CharField(max_length = 255)
pub_date = models.DateTimeField()
body = models.TextField()
url = models.TextField()
image = models.ImageField(upload_to = 'images/')
icon = models.ImageField(upload_to = 'images/')
votes_total = models.IntegerField(default=1)
likes = models.ManyToManyField(User, related_name='likes', blank=True)
hunter = models.ForeignKey(User, on_delete= models.CASCADE)
def __str__(self):
return self.title
def summary(self):
return self.body[:100]
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
| [
"[email protected]"
] | |
5e8169569d6c46110750670bc0605cf308e67cfc | 01045526af01f01640aa77f945b7ade008627312 | /crafting/task1/banner/views.py | 4bd10b5aabed5775d921e6376a5faf6f82be0528 | [] | no_license | akshaygithub12/shop | 265666cd3a933fd8be6a5af094ac214528fd6610 | 838afe504e29b0b82636dd571ddcfcaafc6dc763 | refs/heads/main | 2023-06-08T21:02:02.538677 | 2021-06-22T13:10:35 | 2021-06-22T13:10:35 | 364,943,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from django.shortcuts import render
from .models import Menu1
# Create your views here.
def index(request):
menu1 = Menu1.objects.all()
return render(request,"index.html",{'Menu1':menu1})
| [
"[email protected]"
] | |
2bd04d2a019ef56b3a01068257d343fce7f72a58 | 5d771f3d5f447a89629d26455d8c342ef539800e | /mezger_farms/farm_core/serializers.py | e603432d54d572369c0a464baee5996a9363d077 | [
"MIT"
] | permissive | mezgerj/mezger_farms | b785f9e25d80823486f0e751681991fecd220a17 | 6abeee6e64a293fa87e8cbc54c1f8946be83faa2 | refs/heads/master | 2020-03-18T20:37:06.309841 | 2018-09-29T03:43:02 | 2018-09-29T03:43:02 | 135,227,784 | 0 | 0 | MIT | 2018-05-29T03:35:18 | 2018-05-29T01:51:32 | Python | UTF-8 | Python | false | false | 272 | py | from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from farm_core.models import Farm
class FarmSerializer(ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Farm
fields = '__all__'
| [
"[email protected]"
] | |
d79da2adb156594c1a927a2ab49758fa4f33c500 | 2d2e8abcd55c6600f95ae14cd788786190c0a685 | /src/summary_range.py | cd6b4be82021245c01fe343aeff623425d87b1e1 | [] | no_license | fifa007/Leetcode | 93ef8d2779b9752af91d8e3460b58cc17d1a379f | b7b5d15e6a3c9ab11916550f0ed40ed6a9a2901e | refs/heads/master | 2021-01-21T04:48:00.361309 | 2016-06-15T23:49:07 | 2016-06-15T23:49:07 | 48,308,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/env python2.7
'''
Given a sorted integer array without duplicates, return the summary of its ranges.
For example, given [0,1,2,4,5,7], return ["0->2","4->5","7"].
'''
class Solution(object):
def summary_range(self, nums):
if nums is None or len(nums) == 0:
return []
n = len(nums)
i = 0
j = 1
ret = []
while j < n:
while j < n and nums[j] == nums[j-1] + 1:
j += 1
ret.append(self.build_range(nums, i, j))
i = j
j += 1
        if i < n:
            # when the final run reached the end of nums it was already emitted
            # inside the loop; appending again would index one past the array
            ret.append(self.build_range(nums, i, j))
return ret
def build_range(self, nums, i, j):
if i == j-1:
return str(nums[i])
else:
return str(nums[i]) + '->' + str(nums[j-1]) | [
"[email protected]"
] | |
eea507a28e29f6da43211052a1361b2e542998c4 | f0d3ef10061147fb3bd04774a8b4eac9e4d9b671 | /feedly/tests/storage/cassandraCQL.py | 1347b91db0fd3154491b6bd0855e11aa08d52566 | [
"BSD-3-Clause"
] | permissive | jblomo/Feedly | 9929077be3364d827aa03c4506ade29b819141cb | 3e4999cc794231841e3b4909f0a73beabfcca046 | refs/heads/master | 2021-01-20T21:19:21.017683 | 2013-09-06T12:33:48 | 2013-09-06T12:33:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from feedly import settings
from feedly.storage.cassandraCQL.timeline_storage import CassandraTimelineStorage
from feedly.tests.storage.base import TestBaseTimelineStorageClass
import pytest
@pytest.mark.usefixtures("cassandra_cql_reset")
class TestCassandraTimelineStorage(TestBaseTimelineStorageClass):
storage_cls = CassandraTimelineStorage
storage_options = {
'hosts': settings.FEEDLY_CASSANDRA_HOSTS,
'column_family_name': 'example'
}
| [
"[email protected]"
] | |
7256182f506cc8ca1cce1567a708444a23a7318e | 3696d4addcf3004fe34efb48352656973f99b3d5 | /shortest_distance_to_a_character.py | da75bc20e6b0979dff3ff56709fddb21ceea993c | [
"MIT"
] | permissive | ntongha1/Leetcode_Problems | e882552abe5db5ff091b995bbd6468391c4730ef | a262708dab69000cfb513bd720ee58e05ca2db64 | refs/heads/master | 2023-04-09T00:13:31.215522 | 2021-04-18T15:55:18 | 2021-04-18T15:55:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | """
Given a string s and a character c that occurs in s, return an array of integers answer
where answer.length == s.length and answer[i] is the distance from index i to the closest occurrence of character c in s.
The distance between two indices i and j is abs(i - j), where abs is the absolute value function.
Example 1:
Input: s = "loveleetcode", c = "e"
Output: [3,2,1,0,1,0,0,1,2,2,1,0]
Explanation: The character 'e' appears at indices 3, 5, 6, and 11 (0-indexed).
The closest occurrence of 'e' for index 0 is at index 3, so the distance is abs(0 - 3) = 3.
The closest occurrence of 'e' for index 1 is at index 3, so the distance is abs(1 - 3) = 2.
For index 4, there is a tie between the 'e' at index 3 and the 'e' at index 5, but the distance is still the same: abs(4 - 3) == abs(4 - 5) = 1.
The closest occurrence of 'e' for index 8 is at index 6, so the distance is abs(8 - 6) = 2.
Example 2:
Input: s = "aaab", c = "b"
Output: [3,2,1,0]
Constraints:
1 <= s.length <= 104
s[i] and c are lowercase English letters.
It is guaranteed that c occurs at least once in s.
"""
def shortestToChar(s, c):
c_indices = []
distances = []
for i in range(len(s)):
if c == s[i]:
c_indices.append(i)
for i in range(len(s)):
distances.append(min([abs(c_indices[j] - i) for j in range(len(c_indices))]))
return distances
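# The min() scan above is O(n * k), with k the number of occurrences of c.
# A two-pass sweep is O(n) -- an alternative sketch, not the original solution:
def shortestToChar_two_pass(s, c):
    n = len(s)
    res = [0] * n
    prev = -n  # index of the last c seen; starts "far away" on the left
    for i in range(n):  # left-to-right: distance to the nearest c at or before i
        if s[i] == c:
            prev = i
        res[i] = i - prev
    prev = 2 * n  # "far away" on the right
    for i in range(n - 1, -1, -1):  # right-to-left: nearest c at or after i
        if s[i] == c:
            prev = i
        res[i] = min(res[i], prev - i)
    return res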
s, c = "loveleetcode", "e"
# s, c = "aaab", "b"
result = shortestToChar(s, c)
print(result) | [
"[email protected]"
] |