blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e01adaee84f82114314f03c60f964781241c70ad | 6d9a29bd76cdfcda472eccccc16a5d2f4478ac25 | /jinhuiz2-hw6.py | 8848729034b97043df180e276352930b9f7698d1 | []
| no_license | Jinhuiz2/IE598_F18_HW6 | 0c16af3d60f4fa41cdac73999286df5a7b96cb6d | a44e21aa600085d1747feef4bb38e82381ee4275 | refs/heads/master | 2020-03-31T07:03:57.205667 | 2018-10-08T02:21:44 | 2018-10-08T02:21:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,626 | py | from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
import numpy as np
from sklearn.model_selection import cross_val_score
# From the Iris dataset: repeatedly split 90% train / 10% test with varying
# random states and record in-sample / out-of-sample accuracy of a depth-6
# decision tree for each split.
iris = datasets.load_iris()
X, y = iris.data, iris.target
in_sample = []   # per-split training-set accuracies
out_sample = []  # per-split test-set accuracies
for k in range(1, 11):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,
                                                        random_state=k)
    # NOTE(review): scaler is fitted but never applied to the data -- confirm intent.
    scaler = preprocessing.StandardScaler().fit(X_train)
    dt = DecisionTreeClassifier(max_depth=6, criterion='gini', random_state=1)
    dt.fit(X_train, y_train)
    y_pred_out = dt.predict(X_test)
    y_pred_in = dt.predict(X_train)
    out_sample_score = accuracy_score(y_test, y_pred_out)
    in_sample_score = accuracy_score(y_train, y_pred_in)
    in_sample.append(in_sample_score)
    out_sample.append(out_sample_score)
    print('Random State: %d, in_sample: %.3f, out_sample: %.3f' % (k, in_sample_score,
                                                                   out_sample_score))
# BUG FIX: the original computed np.mean/np.std of the *last* scalar score
# (std was always 0); aggregate over the collected score lists instead.
# Also print with 3 decimals -- '%.f' rounded every statistic to an integer.
in_sample_mean = np.mean(in_sample)
in_sample_std = np.std(in_sample)
out_sample_mean = np.mean(out_sample)
out_sample_std = np.std(out_sample)
print('In sample mean: %.3f' % in_sample_mean)
print('In sample standard deviation: %.3f' % in_sample_std)
print('Out sample mean: %.3f' % out_sample_mean)
print('Out sample standard deviation: %.3f' % out_sample_std)
print('\n')
# Second experiment: one fixed 90/10 split, scored with 10-fold
# cross-validation on the training set.
in_sample = []
out_sample = []
# NOTE(review): random_state=k reuses the leftover loop variable from the
# previous experiment (k == 10 at this point) -- confirm this was intended.
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.1,
                                                    random_state=k)
# NOTE(review): scaler is fitted but never applied -- confirm intent.
scaler = preprocessing.StandardScaler().fit(X_train)
dt = DecisionTreeClassifier(max_depth = 6, criterion = 'gini', random_state = 1)
dt.fit(X_train, y_train)
y_pred_out = dt.predict(X_test)
y_pred_in = dt.predict(X_train)
# cross_val_score returns an array with one accuracy per fold.
in_sample_score = cross_val_score(dt, X_train, y_train, cv=10)
out_sample_score = accuracy_score(y_test, y_pred_out)
in_sample.append(in_sample_score)
out_sample.append(out_sample_score)
print('In sample CV score for every fold:')
for i in in_sample_score:
    print(i)
print('Mean of sample CV score: ', np.mean(in_sample_score))
print('Standard deviation of sample CV score: ', np.std(in_sample_score))
print('\n')
print("My name is {Jinhui Zhang}")
print("My NetID is: {jinhuiz2}")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
| [
"[email protected]"
]
| |
9ae559ed24c07ed4a08b4ae782bccb3637f700c7 | 66b9e0f5bb64869014062c561179486aa85b7738 | /shapeCheckmark.py | c7f01c189b22ad2cb8483a54370f4bab67d522aa | []
| no_license | jainsimran/creating-shapes-python | 8fc247f465ebfe3fd98c661afeb83bb1b0e99c70 | a3fffc09593628b8d0fb8ce38e390025c2bd2c3f | refs/heads/master | 2023-02-13T04:58:46.940388 | 2021-01-06T21:19:26 | 2021-01-06T21:19:26 | 327,115,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import turtle
# Draw a simple checkmark with turtle graphics on a light-green window.
window = turtle.Screen()
window.bgcolor('lightgreen')
leo = turtle.Turtle()      # pen used for drawing
leo.pencolor('blue')
leo.right(45)              # short downward stroke of the check
leo.forward(70)
leo.left(90)               # turn up for the long stroke
leo.forward(150)
window.exitonclick()       # keep the window open until it is clicked
"[email protected]"
]
| |
b327a3211d631a84607b97fdcdf51a868070f337 | 34ff00df4e369bb9d4843a8f7bbc74449e95103c | /snake/snake_script.py | 2696e2f6f543122d9e6866660e4afb2cf94450e1 | []
| no_license | Coritiba019/Jogos-Pygame | f3ad6d1a7cb95eb3c4569c0411f7476905225f8e | 33d4d63ef25bfcb0d826d87c08d1a4e5f1045349 | refs/heads/main | 2023-04-02T07:55:27.702739 | 2021-04-09T02:26:09 | 2021-04-09T02:26:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,699 | py | import pygame
import random
from pygame.locals import *
from snake.snake import Snake
from snake.apple import Apple
"""
Created by Igor Lovatto Resende
N USP 10439099
"""
class SnakeGame:
    """Pygame snake variant: the snake may only eat apples labelled with a
    prime number; eating a non-prime apple resets the snake."""

    # Font size used for the apple number labels.
    FONT_SIZE = 18
    # An apple labelled with one of these is the "edible" (prime) target.
    PRIME_NUMBERS = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97]
    BLOCK_SIZE = 20 # Size of blocks

    def __init__(self, screen_size):
        # screen_size arrives in pixels; store the board size in blocks.
        self.screen_size = screen_size // self.BLOCK_SIZE # The width and height of the screen in number of blocks

    def mul(self, t, n):
        # Scale a 2-tuple by n. NOTE(review): appears unused by this class.
        return (t[0] * n, t[1] * n)

    def on_grid_random(self):
        """
        This function calculate a random position for a object on the screen
        :returns:
            tuple: Random position
        """
        x = random.randint(0, self.screen_size - 2)
        y = random.randint(0, self.screen_size - 2)
        return (x, y)

    def collision(self, c1, c2):
        # True when two grid coordinates are identical.
        return (c1[0] == c2[0]) and (c1[1] == c2[1])

    def prime_apple_randomizer(self):
        """
        This function choose a random prime number from the self.PRIME_NUMBERS list
        :return:
            int: Random prime number
        """
        number = random.choice(self.PRIME_NUMBERS)
        return int(number)

    def normal_apple_randomizer(self):
        """
        This function chosse a not-prime random number between 0 and 99
        :return:
            int:
        """
        # Re-draw until the number is not prime.
        number = random.randint(0 ,99)
        while number in self.PRIME_NUMBERS:
            number = random.randint(0, 99)
        return number

    def main(self, screen):
        # Show the (Portuguese) tutorial, build the snake and both apples,
        # then run the game loop until quit, escape or a wall collision.
        screen.fill((0, 0, 0))
        tutorial1 = """Geraldo é uma cobrinha sapeca e que vive faminta. Sua comida favorita é a fruta maça."""
        tutorial2 = """Porém Geraldo é bem especifico, ele só come maçãs que são de numero primo no pé."""
        tutorial3 = """INSTRUÇÔES"""
        tutorial4 = """Para isso ajude Geraldo a se alimentar capturando apenas as maçãs com numeros primos"""
        tutorial5 = """E utilizando as setas do teclado para se mover"""
        ##### TUTORIAL ########################
        # Render the five tutorial lines at fixed vertical offsets, then
        # give the player nine seconds to read them.
        score_font = pygame.font.Font('freesansbold.ttf', 13)
        score_screen = score_font.render(f'{tutorial1}', True, (255, 255, 255))
        score_rect = score_screen.get_rect()
        score_rect.midtop = (600 / 2, 10)
        screen.blit(score_screen, score_rect)
        score_font = pygame.font.Font('freesansbold.ttf', 13)
        score_screen = score_font.render(f'{tutorial2}', True, (255, 255, 255))
        score_rect = score_screen.get_rect()
        score_rect.midtop = (600 / 2, 50)
        screen.blit(score_screen, score_rect)
        score_font = pygame.font.Font('freesansbold.ttf', 13)
        score_screen = score_font.render(f'{tutorial3}', True, (255, 255, 255))
        score_rect = score_screen.get_rect()
        score_rect.midtop = (600 / 2, 100)
        screen.blit(score_screen, score_rect)
        score_font = pygame.font.Font('freesansbold.ttf', 13)
        score_screen = score_font.render(f'{tutorial4}', True, (255, 255, 255))
        score_rect = score_screen.get_rect()
        score_rect.midtop = (600 / 2, 150)
        screen.blit(score_screen, score_rect)
        score_font = pygame.font.Font('freesansbold.ttf', 13)
        score_screen = score_font.render(f'{tutorial5}', True, (255, 255, 255))
        score_rect = score_screen.get_rect()
        score_rect.midtop = (600 / 2, 200)
        screen.blit(score_screen, score_rect)
        pygame.display.update()
        pygame.time.wait(9000)
        # Game objects: white snake plus one prime and one non-prime apple.
        snake_skin = pygame.Surface((self.BLOCK_SIZE, self.BLOCK_SIZE))
        snake_skin.fill((255, 255, 255))
        self.snake = Snake(snake_skin, self.screen_size)
        prime_apple_sprite = pygame.Surface((self.BLOCK_SIZE, self.BLOCK_SIZE))
        prime_apple_sprite.fill((255, 0, 0))
        prime_apple = Apple(
            prime_apple_sprite, # sprite
            self.on_grid_random(), # pos
            self.prime_apple_randomizer(), # num
            pygame.font.SysFont("arial", self.FONT_SIZE) # font
        )
        normal_apple_sprite = pygame.Surface((self.BLOCK_SIZE, self.BLOCK_SIZE))
        normal_apple_sprite.fill((255, 0, 0))
        normal_apple = Apple(
            normal_apple_sprite, # sprite
            self.on_grid_random(), # pos
            self.normal_apple_randomizer(), # num
            pygame.font.SysFont("arial", self.FONT_SIZE) # font
        )
        clock = pygame.time.Clock()
        while True:
            """
            This is the main looping of the game, resposible for update the screen,snake and apples
            """
            # Frame rate doubles while the snake's "fast" flag is set.
            clock.tick(10 if self.snake.fast else 5)
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    exit()
                if event.type == KEYDOWN:
                    self.snake.listen(event)
                    if event.key == K_ESCAPE:
                        return
            # Prime apple eaten: grow, bump the score, respawn both apples.
            if self.snake.collision(prime_apple.pos):
                prime_apple.change(self.on_grid_random(), self.prime_apple_randomizer())
                normal_apple.change(self.on_grid_random(), self.normal_apple_randomizer())
                self.snake.grow()
                self.snake.counter = self.snake.counter+ 1
            # Non-prime apple eaten: reset the snake, respawn both apples.
            if self.snake.collision(normal_apple.pos):
                self.snake.snake_reset()
                prime_apple.change(self.on_grid_random(), self.prime_apple_randomizer())
                normal_apple.change(self.on_grid_random(), self.normal_apple_randomizer())
            if self.snake.boundry_collision():# Check the collision with boudaries
                game_over = True
                self.game_over_screen(screen)
                return
            # Advance the snake and redraw the whole frame.
            self.snake.update()
            screen.fill((0, 0, 0))
            prime_apple.drawn(screen, 20)
            normal_apple.drawn(screen, 20)
            self.snake.drawn(screen, self.BLOCK_SIZE)
            pygame.display.update()

    def game_over_screen(self, screen):
        """
        This is the Game over menu looping. Responsible for the game-over screen and score
        """
        while True:
            game_over_font = pygame.font.Font('freesansbold.ttf', 75)
            game_over_screen = game_over_font.render(f'Game Over', True, (255, 255, 255))
            game_over_rect = game_over_screen.get_rect()
            game_over_rect.midtop = (600 / 2, 10)
            screen.blit(game_over_screen, game_over_rect)
            score_font = pygame.font.Font('freesansbold.ttf', 30)
            score_screen = score_font.render(f'Pontuação final: {self.snake.counter}', True, (255, 255, 255))
            score_rect = score_screen.get_rect()
            score_rect.midtop = (600 / 2, 100)
            screen.blit(score_screen, score_rect)
            pygame.display.update()
            pygame.time.wait(500)
            # Wait for Enter (restart) or Escape (back to caller).
            while True:
                for event in pygame.event.get():
                    if event.type == QUIT:
                        pygame.quit()
                        exit()
                    elif event.type == KEYDOWN:
                        if event.key == K_RETURN or event.key == K_KP_ENTER:
                            self.main(screen)
                        elif event.key == K_ESCAPE:
                            return
if __name__ == '__main__':
    # Stand-alone entry point: create a 600x600 window (20 blocks of 30 px)
    # and run the game until the player exits.
    pygame.init()
    screen_size = 20   # board width/height in blocks
    block_size = 30    # pixel size of one block
    screen = pygame.display.set_mode((screen_size * block_size, screen_size * block_size))
    pygame.display.set_caption('Snake')
    game = SnakeGame(screen_size * block_size)
    game.main(screen)
    pygame.quit()
    exit()
| [
"[email protected]"
]
| |
e56792cbb398fa012c64d2825824f02755479a8c | dfe134b793e8a3503273b9a07318ec6ce92d7972 | /app/api/api_v1/api.py | 57beba35253a87dbd1bfdb266897629a60f33962 | []
| no_license | QuocDung0209/marketing-online-python-backend | a6fa9fea43c22dfa0b85d8b5a290f9581a9b2fd9 | 8297f9700be65ec61d47a58d7be573d99cb17018 | refs/heads/master | 2023-05-15T01:53:38.774801 | 2021-05-31T11:40:44 | 2021-05-31T11:40:44 | 372,190,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from fastapi import APIRouter
from .auth.endpoints import router as auth
# Aggregate router for API v1; sub-routers for each feature are mounted here.
api_router = APIRouter()
# Authentication endpoints live under the /auth prefix.
api_router.include_router(auth, prefix="/auth", tags=["auth"])
| [
"[email protected]"
]
| |
f686496abd0771fc7feec57294280598a826942c | 17dfbab7d4f28a53f62c6e23ddb62349088f67af | /A2/src/Set.py | d28348577b47f5c4e8edf44cc26bb7a9ee50feab | []
| no_license | dhruv423/Software-Dev-Coursework | 0bbd1a06f4c797fa3f72dd73fe6169731ec6833d | d75f3283058458bad7d5e26c2919fd94865bd100 | refs/heads/master | 2021-05-27T05:28:18.473384 | 2020-04-08T23:25:18 | 2020-04-08T23:25:18 | 254,152,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | ## @file Set.py
# @author Dhruv Bhavsar
# @brief Class for Set building and applying methods to the Set
# @date Feb 2, 2020
from Equality import *
## @brief Class that represents a Set
class Set(Equality):

    ## @brief Constructor that initializes the object with set
    #  @details Takes in a sequence and converts it into a set and
    #  holds it in the state variable
    #  @param sequence - Takes in a sequence (list)
    def __init__(self, sequence):
        self.__set = set(sequence)

    ## @brief Add an element to the set
    #  @details Uses the add function of the set object; duplicates are ignored
    #  @param element - Element to add
    def add(self, element):
        self.__set.add(element)

    ## @brief Remove a specified element from the set
    #  @details Uses the remove function from the set object
    #  @param element - Specified element
    #  @throws ValueError if element not in set
    def rm(self, element):
        try:
            self.__set.remove(element)
        except KeyError:
            raise ValueError("Element is not in the set")

    ## @brief Checks if the element is in the set
    #  @param element - Element to check
    #  @return True if in the set else False
    def member(self, element):
        # Idiomatic membership test instead of if/else returning literals.
        return element in self.__set

    ## @brief Returns the size of the set
    #  @details Using len function
    #  @return the size of the set as an integer
    def size(self):
        return len(self.__set)

    ## @brief Checks if the two sets are the same
    #  @details Equal when sizes match and every element of this set is a
    #  member of the other; all() short-circuits on the first mismatch
    #  @param other_set - Set to check with
    #  @return True if the same else False
    def equals(self, other_set):
        if self.size() != other_set.size():
            return False
        return all(other_set.member(elem) for elem in self.__set)

    ## @brief Convert the set into a list for it to be iterable
    #  @return List of the set's elements
    def to_seq(self):
        return list(self.__set)
| [
"[email protected]"
]
| |
39ac82b5900d8bff567825bc91b455a0be5074b1 | 4a7804ee05485c345b4e3c39a0c96ed4012542ac | /system/base/less/actions.py | fd506df396c789af3c5b6f73302212caf721a16d | []
| no_license | Erick-Pardus/Pardus | 1fef143c117c62a40e3779c3d09f5fd49b5a6f5c | 2693e89d53304a216a8822978e13f646dce9b1d3 | refs/heads/master | 2020-12-31T02:49:33.189799 | 2013-03-17T06:29:33 | 2013-03-17T06:29:33 | 17,247,989 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Regenerate the autotools build system, then configure with defaults.
    autotools.autoreconf("-vfi")
    autotools.configure()

def build():
    # Plain `make` with no extra targets or flags.
    autotools.make()

def install():
    # Stage `make install` into the PiSi package directory via DESTDIR.
    autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
    #pisitools.dobin("less")
    #pisitools.dobin("lessecho")
    #pisitools.dobin("lesskey")
    #pisitools.newman("lesskey.nro", "lesskey.1")
    #pisitools.newman("less.nro", "less.1")
    pisitools.dodoc("NEWS", "README", "COPYING")
"[email protected]"
]
| |
e5341f61f6ab186bc40c0e83409dd594fc0a69cc | e551a7f5e37753bd12b6e4d37fa4326b0e18fd5a | /CursoemVideo/2020/world_3/ex102.py | 75f014556008dd5d08710dffa60bce5213742bf6 | []
def factorial(number: int, show: bool = False) -> int:
    """
    Calculates the factorial of a number.
    :param number: The number that will be calculated.
    :param show: If the calculation will be displayed.
    :return: int factorial of the number.
    """
    result = 1
    value = number
    while value > 0:
        result *= value
        if show:
            # Prints e.g. "5 x 4 x 3 x 2 x 1" with no trailing separator.
            print(value, end=' x ' if value > 1 else '')
        value -= 1
    return result
# Demonstration: plain call, call with the calculation displayed, and a
# right-aligned result; help() prints the docstring defined above.
print(factorial(5))
print(' =', factorial(5, True))
result: int = factorial(10, True)
print()
print(f'{result:>38}')
help(factorial)
| [
"[email protected]"
]
| |
c8e26e30e21138ec04c30db6579b6bd98a620898 | 55de20ff6a7b3e07cffae42d2d9b24178f65daf3 | /dockerhub_show_tags.py | 8c0568a7f12d0b05a91faf4c381b10a296ff8bb3 | []
| no_license | srjayep/pytools | 35f803f1adcc1e93f489475ee12c72ec10161649 | c96b752c7d8679e7dde1657914fa56bd9b4f2cfd | refs/heads/master | 2020-05-29T08:51:34.003012 | 2016-10-05T15:09:05 | 2016-10-05T15:09:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,117 | py | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-05-10 11:26:49 +0100 (Tue, 10 May 2016)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Tool to show Docker tags for one or more DockerHub repos
Written for convenience as Docker CLI doesn't currently support this:
See https://github.com/docker/docker/issues/17238
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import json
import logging
import os
import sys
import traceback
import urllib
try:
import requests
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, die, prog, isJson, jsonpp
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.3'
class DockerHubTags(CLI):
    """CLI tool that lists the tags of one or more DockerHub repos."""

    def __init__(self):
        # Python 2.x
        super(DockerHubTags, self).__init__()
        # Python 3.x
        # super().__init__()
        self._CLI__parser.usage = '{0} [options] repo1 repo2 ...'.format(prog)
        self.quiet = False          # overwritten from --quiet in run()
        self.timeout_default = 30   # seconds, consumed by the CLI framework

    def add_options(self):
        # Register tool-specific switches on the inherited option parser.
        self.add_opt('-q', '--quiet', action='store_true', default=False,
                     help='Output only the tags, one per line (useful for shell tricks)')

    def run(self):
        # Entry point invoked by the CLI framework after argument parsing.
        if not self.args:
            self.usage('no repos given as args')
        self.quiet = self.get_opt('quiet')
        if not self.quiet:
            print('\nDockerHub\n')
        for arg in self.args:
            self.print_tags(arg)

    def print_tags(self, repo):
        # Pretty-print one repo's tags (raw one-per-line in quiet mode).
        if not self.quiet:
            print('repo: {0}'.format(repo))
            print('tags: ', end='')
            sys.stdout.flush()
        indent = ' '
        if self.quiet:
            indent = ''
        print('\n{0}'.format(indent).join(self.get_tags(repo)))
        if not self.quiet:
            print()

    @staticmethod
    def get_tags(repo):
        """Query the DockerHub v2 registry API and return the sorted tag
        names for `repo`, with 'latest' moved to the front when present."""
        # Official images (no namespace in the arg) live under 'library'.
        namespace = 'library'
        if '/' in repo:
            (namespace, repo) = repo.split('/', 2)
        # NOTE(review): urllib.quote_plus exists only on Python 2; under
        # Python 3 this needs urllib.parse.quote_plus -- confirm target version.
        url = 'https://registry.hub.docker.com/v2/repositories/{0}/{1}/tags/'\
              .format(urllib.quote_plus(namespace), urllib.quote_plus(repo))
        log.debug('GET %s' % url)
        try:
            verify = True
            # workaround for Travis CI and older pythons - we're not exchanging secret data so this is ok
            #if os.getenv('TRAVIS'):
            #    verify = False
            req = requests.get(url, verify=verify)
        except requests.exceptions.RequestException as _:
            die(_)
        log.debug("response: %s %s", req.status_code, req.reason)
        log.debug("content:\n%s\n%s\n%s", '='*80, req.content.strip(), '='*80)
        if req.status_code != 200:
            die("%s %s" % (req.status_code, req.reason))
        if not isJson(req.content):
            die('invalid non-JSON response from DockerHub!')
        if log.isEnabledFor(logging.DEBUG):
            print(jsonpp(req.content))
            print('='*80)
        tag_list = []
        try:
            j = json.loads(req.content)
            tag_list = [_['name'] for _ in j['results']]
        except KeyError as _:
            die('failed to parse output from DockerHub (format may have changed?): {0}'.format(_))
        tag_list.sort()
        # put latest to the top of the list
        try:
            tag_list.insert(0, tag_list.pop(tag_list.index('latest')))
        except ValueError:
            pass
        return tag_list
if __name__ == '__main__':
    # Delegate to the CLI framework's main() (parses args, then calls run()).
    DockerHubTags().main()
| [
"[email protected]"
]
| |
ba6464a34af3507fb206e03804836f85e2f1cec5 | 505275cedcbaf8e7a044979c1ba3da02a5428533 | /트리순회.py | ceebf764f8168118e28cd3c0c49449a415ddcac4 | []
def preorder(root, tree_map=None):
    """Print the pre-order traversal (root, left, right) of a binary tree.

    :param root: label of the current node; '.' marks a missing child.
    :param tree_map: optional {node: [left, right]} mapping. Defaults to the
        module-level ``tree`` for backward compatibility with existing calls.
    """
    if tree_map is None:
        tree_map = tree
    if root != ".":
        print(root, end='')
        preorder(tree_map[root][0], tree_map)
        preorder(tree_map[root][1], tree_map)
def inorder(root, tree_map=None):
    """Print the in-order traversal (left, root, right) of a binary tree.

    :param root: label of the current node; '.' marks a missing child.
    :param tree_map: optional {node: [left, right]} mapping. Defaults to the
        module-level ``tree`` for backward compatibility with existing calls.
    """
    if tree_map is None:
        tree_map = tree
    if root != ".":
        inorder(tree_map[root][0], tree_map)
        print(root, end='')
        inorder(tree_map[root][1], tree_map)
def postorder(root, tree_map=None):
    """Print the post-order traversal (left, right, root) of a binary tree.

    :param root: label of the current node; '.' marks a missing child.
    :param tree_map: optional {node: [left, right]} mapping. Defaults to the
        module-level ``tree`` for backward compatibility with existing calls.
    """
    if tree_map is None:
        tree_map = tree
    if root != ".":
        postorder(tree_map[root][0], tree_map)
        postorder(tree_map[root][1], tree_map)
        print(root, end='')
# Build the tree from stdin: first line is the node count, then one line per
# node in the form "<root> <left> <right>", where '.' means no child.
tree={}
for i in range(int(input())):
    root, left, right=input().split()
    tree[root]=[left,right]
# Print the three traversals of the tree rooted at 'A', one per line.
preorder('A')
print()
inorder('A')
print()
postorder('A')
| [
"[email protected]"
]
| |
3e4c700160e79c2275b1ee502223708bd8fcd9af | bdc0afc39a7ae530bc66f2d907aaa9d0624cf892 | /tester3.py | 5ec4855b3dac70b54ba58ba2b44f5055e77d23ad | []
| no_license | SaifJamal/Test | fc1297c6e5cdbe0d8959916a45f8332621a94fd9 | f602da5223c0cf07eb5bd319f373d297d7d0f1c2 | refs/heads/master | 2021-09-06T15:22:58.383111 | 2018-02-08T01:34:29 | 2018-02-08T01:34:29 | 120,032,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | # This is the meaning of life
# Demo script body: prints a single greeting line.
print('Meaning of life!!!')
| [
"[email protected]"
]
| |
779e7b1fc2bfe837f10a8070b3600f71ae8cdf3a | ece7ba486d29d4bc3e87c2046db2c31140e2d86a | /suitcase/mongo_normalized/tests/tests.py | 75f4046965a77899a78b88195844aeadf0dfc188 | []
| no_license | ke-zhang-rd/suitcase-mongo | 31b97bb13b9e6089248f888a6c33824b835de141 | c938bae589ab2fba301814c846c5d5339eb90fb8 | refs/heads/master | 2020-05-31T10:29:15.458932 | 2019-10-18T17:33:03 | 2019-10-18T17:33:03 | 190,241,607 | 0 | 0 | null | 2019-06-04T16:38:12 | 2019-06-04T16:38:11 | null | UTF-8 | Python | false | false | 451 | py | # Tests should generate (and then clean up) any files they need for testing. No
# binary files should be included in the repository.
from suitcase.mongo_normalized import Serializer
def test_export(db_factory, example_data):
    # Round-trip smoke test: feed every generated document into a Serializer
    # backed by fresh metadatastore and asset-registry databases (fixtures).
    documents = example_data()
    metadatastore_db = db_factory()
    asset_registry_db = db_factory()
    serializer = Serializer(metadatastore_db, asset_registry_db)
    for item in documents:
        # item is unpacked into the serializer call -- presumably a
        # (name, doc) pair; verify against the example_data fixture.
        serializer(*item)
| [
"[email protected]"
]
| |
557e31ccc682fe60039353e6fdbdaf4edcd480da | e7c685a9ddc21186678c665eab4f8803c5711766 | /emil/Nauka1 - podstawy-kolekcje-listy/Basic1/Zad12 - Calendar.py | 569e21b4f578f373a849b9e4104627f7e4250fb0 | []
| no_license | emillo8905/nauka-2 | 543a7249eeb4c3913d3595d59c5b628d186bd061 | b3a14e254bdf589dee23aef8418a373da7e5d362 | refs/heads/master | 2023-07-05T01:53:33.352764 | 2021-08-29T10:16:03 | 2021-08-29T10:16:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import calendar
# Ask for a year and a month (prompts are in Polish), then print that
# month's text calendar.
y = int(input("Podaj rok dla kalendarza: "))
m = int(input("podaj miesiac dla kalendarza: "))
print(calendar.month(y,m))
"[email protected]"
]
| |
475d3709a36d6d7c776027c6f5b21474c5c96e8b | 87b6cae5f0bc49f86735619cda0e676486d3f143 | /tic-toc.py | 14e99adb7b24b660e1f46d8891c695a7ca3cd60b | []
| no_license | nmkolp/Python-scripts | a8dac713fd39c2a19e43aba3a7a4d609661fc64b | a69197f97707853ae68ac74ec0136a3082192ad7 | refs/heads/master | 2020-05-18T02:00:53.038116 | 2019-09-12T20:39:00 | 2019-09-12T20:39:00 | 184,104,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,178 | py | import copy
def check_win(board):
    """Return True when any row, column or diagonal holds three equal,
    non-zero marks. The board is indexed board[x][y]."""
    triples = []
    for i in range(3):
        triples.append(board[i])                               # column x == i
        triples.append([board[0][i], board[1][i], board[2][i]])  # row y == i
    triples.append([board[0][0], board[1][1], board[2][2]])    # main diagonal
    triples.append([board[0][2], board[1][1], board[2][0]])    # anti-diagonal
    return any(a == b == c != 0 for a, b, c in triples)
def check_no_moves_left(board):
    """Return True when the board has no empty (0) cells left."""
    return all(cell != 0 for column in board for cell in column)
def get_coords(i):
    """Map a numpad-style digit 1-9 to [x, y] board coordinates
    (1 = bottom-left, 9 = top-right). Returns False for out-of-range i."""
    if not 1 <= i <= 9:
        return False
    index = i - 1
    return [index % 3, 2 - index // 3]
def print_board(board):
    """Render the board to stdout: '_' for empty, 'x' for 1, 'o' for any
    other mark; cells space-separated, followed by a trailing blank line."""
    symbols = {0: "_", 1: "x"}
    for row in range(3):
        rendered = [symbols.get(board[col][row], "o") for col in range(3)]
        print(" ".join(rendered))
    print("")
def eval_game(board, player):
    """Exhaustive minimax for tic-tac-toe.

    Returns [0] when the board is full, otherwise [outcome, x, y] where
    outcome is `player` (forced win), 0 (draw reachable) or -player
    (loss unavoidable), and (x, y) is the move chosen for `player`.
    """
    if check_no_moves_left(board):
        return [0]
    for x in range(3):
        for y in range(3):
            if board[x][y] == 0:
                # Try the move on a deep copy so the caller's board is untouched.
                nb = copy.deepcopy(board)
                nb[x][y] = player
                if check_win(nb):
                    return [player, x, y]  # immediate win -- stop searching
                eval_result = eval_game(nb, -player)
                if eval_result[0] == player:
                    return [player, x, y]  # opponent's best reply still loses
                if eval_result[0] == 0:
                    ret_val = [0, x, y]    # remember a drawing move
                elif 'ret_val' not in vars():
                    # Only fall back to a losing move if nothing better was
                    # recorded yet ('ret_val' in vars() checks first assignment).
                    ret_val = [-player, x, y]
    return ret_val
def player_move(board, player):
    """Prompt until the user names an empty cell with a digit 1-9, then
    place `player`'s mark there (mutates `board` in place).

    Bug fix: the original indexed into get_coords()'s return value without
    checking it, so typing a number above 9 crashed with TypeError
    (get_coords returns False for out-of-range input).
    """
    while True:
        inp = input("Enter: ")
        if inp.isdigit() and int(inp) != 0:
            coords = get_coords(int(inp))
            if not coords:
                continue  # out-of-range digit (e.g. 10+): re-prompt
            x = coords[0]
            y = coords[1]
            if board[x][y] == 0:
                board[x][y] = player
                break
def ai_move(board, player):
    """Play the move minimax recommends for `player` (mutates `board`)."""
    best = eval_game(board, player)
    board[best[1]][best[2]] = player
# Outer loop: one iteration per game; exits when the player declines a rematch.
play_game = True
while play_game:
    # Fresh 3x3 board; player 1 ('x') always owns the first move slot.
    board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    player = 1
    ai_turn = False
    # Ask who moves first (Y = human, N = AI).
    while True:
        first = input("Play first? (Y/N): ")
        if first == "y" or first == "Y":
            break
        elif first == "n" or first == "N":
            ai_turn = True
            break
    print_board(board)
    # Alternate AI and human moves until a win, loss or draw.
    while True:
        if ai_turn:
            ai_move(board, player)
        else:
            player_move(board, player)
        print_board(board)
        if check_win(board):
            # The mover who just completed a line wins.
            if ai_turn:
                print("You lost")
            else:
                print("Congratulations")
            break
        if check_no_moves_left(board):
            print("Draw")
            break
        ai_turn = not ai_turn
        player = -player
    print("")
    # Offer a rematch; N ends the outer loop.
    while True:
        first = input("Play again? (Y/N): ")
        if first == "y" or first == "Y":
            break
        elif first == "n" or first == "N":
            play_game = False
            break
"[email protected]"
]
| |
eda2d7a7d548e568bc5fb77caddeb16bfc3b87a0 | 861c248aab85784542fab84eeccedda6c90682d9 | /msgtracker/apps/collector.py | 57d9013ce26b082eb333ef71a105496cc2632ede | [
"MIT"
]
| permissive | daleysoftware/msg-tracker | c91cd67e7466c04574c2ed5256a2a0f931dd8647 | 16edb9d555795d0eec625dd954e14f914cbbbe2b | refs/heads/master | 2022-05-16T02:58:44.083469 | 2017-03-01T23:43:24 | 2017-03-01T23:43:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | import sched
import time
import sys
import logging
import msgtracker
import datetime
import signal
scheduler = sched.scheduler(time.time, time.sleep)
def _collect_and_log_forever(slack_client):
    """
    Collect data from slack API and log in redis. Backend handles logging format. Run forever.
    """
    wait_minutes = msgtracker.constants.QUERY_INTERVAL_MINUTES
    try:
        logging.info("Collect and log sequence queued.")
        sample_time = datetime.datetime.now()
        logging.debug("Sample time for this collection round: %s" % sample_time.strftime('%s'))
        # Record every currently-active user against this sample time.
        for user in slack_client.get_active_users():
            msgtracker.backend.log_active(user, sample_time)
    except IOError as e:
        # Transient API/backend IO failure: retry after one minute instead
        # of waiting the full interval.
        wait_minutes = 1
        logging.error("IO error during collection round, retry soon. Error: %s" % e)
    # And enter on the scheduler to keep things rolling.
    logging.info("Wait %s minutes." % wait_minutes)
    scheduler.enter(wait_minutes * 60, 1, _collect_and_log_forever, argument=(slack_client,))


def signal_handler(signum, frame):
    # SIGINT handler: break out of the blocking scheduler loop cleanly.
    print() # Cosmetics.
    logging.error("Received signal. Abort.")
    sys.exit(1)


def main(slack_client):
    """
    Main program. Kick off scheduler and run forever.
    """
    signal.signal(signal.SIGINT, signal_handler)
    scheduler.enter(0, 1, _collect_and_log_forever, argument=(slack_client,))
    # Blocks forever: each run re-schedules itself (see above).
    scheduler.run()


if __name__ == '__main__':
    msgtracker.helper.logging.init()
    logging.info("Starting collector service.")
    main(msgtracker.endpoints.slack.Slack())
| [
"[email protected]"
]
| |
7eb9891d94332915a61055017b11d4b686f01514 | 9a0a49b2e3f37f0b1a940085fdb08a5b19e680ab | /app/models/user.py | 84faf52f2b4459867b73ba844ed6ad4aea131c15 | []
| no_license | FVATE/FVATE-web | 8d4e3cefa33575be409e48da10dc8b71967912f1 | fb3d781e3bdbf87530c1839e854125e753cc45d5 | refs/heads/develop | 2023-07-19T23:57:46.455167 | 2019-11-03T14:18:57 | 2019-11-03T14:18:57 | 47,968,752 | 0 | 0 | null | 2022-03-26T01:14:25 | 2015-12-14T10:45:02 | JavaScript | UTF-8 | Python | false | false | 812 | py | """
app.models.user
~~~~~~~~~~~~~~~
:copyright: (c) 2016 by gregorynicholas.
"""
from __future__ import unicode_literals
from app.models import db
__all__ = ['User']
class User(db.Document):
    """
    User model.
    """
    # External identifier for the user.
    key = db.StringField()
    # Enable/disable flag; defaults to active.
    active = db.BooleanField(default=True)
    # Role names used by authorization checks.
    roles = db.ListField(db.StringField())

    @classmethod
    def find_by_email(cls, email):
        # NOTE(review): stub -- ignores `email` and fills the new instance
        # with hard-coded values via update(); replace before production use.
        result = cls()
        result.update({
            'key': 'abc123', #: TODO
            'active': True,
            'name': 'testing',
            'email': '[email protected]'})
        return result

    @classmethod
    def get_by_id(cls, id):
        # NOTE(review): stub like find_by_email but persists via save(**...)
        # and ignores `id`; confirm intended behavior.
        result = cls()
        result.save(**{
            'key': 'abc123', #: TODO
            'active': True,
            'name': 'testing',
            'email': '[email protected]'})
        return result

    def is_in_role(self, role):
        # Stub: every user is treated as having every role.
        return True
| [
"[email protected]"
]
| |
35ec94a0efb364b71563a9825181042bbbee0d61 | a9191be8d25279708f3f11d21c8ad993870f2a63 | /ex33.py | 5263a1cb286c757c7210e2bf6410bb0008563bc9 | []
| no_license | moog2009/Python | d64075d64fcc2ac3c40afe731ed9d1661ab21e13 | 73501ef9f2f3e07a0e6b428d5818e9cd7581984a | refs/heads/master | 2020-05-22T13:38:27.716616 | 2014-03-03T16:20:45 | 2014-03-03T16:20:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # -*- coding: utf-8 -*-
# Python 2 script: build a list of the numbers 0-5, narrating each
# iteration of the while loop, then print the collected numbers.
i=0
numbers=[]
while i<6:
    print "At the top i is %d" %i
    numbers.append(i)
    i=i+1
    print "Numbers now:",numbers
    print "At the bottom i is %d" %i
# Loop done: dump the accumulated list one number per line.
print "The numbers:"
for num in numbers:
    print num
"[email protected]"
]
| |
273826ab8f0d8e91f5220fbba64bd89e4e05afaf | 4c3b642ab4e9522171e595130e6b45e187dd9959 | /api/student.py | b88a24440eca2c9e405af01b584d59b14481373a | []
| no_license | black-dragon74/AIT-RESTFul-API | 5c799db291313f6c84a923b8e7f0397a6880e938 | 1f72c49cb5c4ad636fb84adb5aa0ec3c7812b0ed | refs/heads/master | 2021-05-18T00:24:54.578651 | 2020-03-29T13:42:38 | 2020-03-29T13:42:38 | 251,022,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,099 | py | """
Code is poetry
"""
from flask_restful import (
Resource,
reqparse,
)
import requests
from utils.functions import *
from utils.constants import *
from bs4 import BeautifulSoup
# Default home route, also serves as a 404 route
class Home(Resource):
    def get(self):
        # Static JSON payload returned for the root path and unknown routes.
        return {
            "msg": "Welcome to AIT ERP REST API",
            "error": "No/Invalid route requested. Please refer to the documentation."
        }
# Get student's attendance
class Attendance(Resource):
def get(self):
# Init the request parses
parser = reqparse.RequestParser()
parser.add_argument('sessionid', help="SessionID is missing", required=True)
# Get the args
args = parser.parse_args()
sessionID = args["sessionid"]
if not sessionID:
return throwError("Session id not found!")
# Else, we move forward and get the required data
with requests.session() as attendanceSession:
erp_cookies = {
"ASP.NET_SessionId": sessionID
}
headers = {
"User-Agent": API_USER_AGENT,
"Connection": "keep-alive",
"DNT": "1",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cache-Control": "no-cache",
"Sec-Fetch-Dest": "empty",
"X-Requested-With": "XMLHttpRequest",
"X-MicrosoftAjax": "Delta=true",
"Accept": "*/*",
"Origin": "https://erp.aitpune.edu.in",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "cors",
"Referer": "https://erp.aitpune.edu.in/Secured/StudentSection/StudentProfileComplete.aspx",
"Accept-Language": "en-US,en;q=0.9"
}
aitget = attendanceSession.get(DASH_URL, headers=headers, cookies=erp_cookies).content
if not aitget:
return throwError("Unable to fetch details from the server")
aitsoup = BeautifulSoup(aitget, 'html.parser')
# Create the payload
formdata = {
"__EVENTTARGET": "ctl00$ContentPlaceHolderBody$lnkRefreshAttendance",
"__EVENTARGUMENT": "",
"__VIEWSTATE": getValueFromInput(aitsoup, ID_FOR_VIEWSTATE),
"__VIEWSTATEGENERATOR": "9BC554CB",
"__EVENTVALIDATION": getValueFromInput(aitsoup, ID_FOR_EVALIDATION),
"ctl00$ContentPlaceHolderBody$txtFromDateAttendance": getValueFromInput(aitsoup, ID_FOR_ATTEN_FROM),
"ctl00$ContentPlaceHolderBody$txtToDateAttendance": getValueFromInput(aitsoup, ID_FOR_ATTEN_TO),
"ctl00$ContentPlaceHolderBody$hdClassID": getValueFromInput(aitsoup, ID_FOR_CLASS_ID),
"ctl00$ContentPlaceHolderBody$hdStudentEntrollID": getValueFromInput(aitsoup, ID_FOR_ENROLL_ID),
"__ASYNCPOST": "true"
}
# Post and get the attendance
attendancePost = attendanceSession.post(DASH_URL, headers=headers, cookies=erp_cookies, data=formdata).content
if not attendancePost:
return throwError("Unable to get attendance from the server")
attendanceSoup = BeautifulSoup(attendancePost, 'html.parser')
attendance = parseHTMLTable(attendanceSoup, ID_FOR_ATTEN_TABLE)
attendanceSession.close()
# Now is the time to create a dict to return
masterDict = {
"percent": "0",
"attendance": []
}
for row in attendance:
subjectDict = dict()
index = 0
for item in row:
key = str(ATTEN_TABLE_STRUCT[index])
subjectDict[key] = item
index += 1
masterDict["attendance"].append(subjectDict)
masterDict["percent"] = attendanceSoup.find('span', selectid(ID_FOR_TOTAL_ATTEN_PERCENT)).text
return masterDict
| [
"[email protected]"
]
| |
f4b3889382cdd5b77a494efbc572bb3692fd6ba3 | 08484bb0dc0a4e55efd9a10b42ddb943c4964536 | /concept_of_head_tail.py | a7b137bc5e77cf2a79e7614f4b977e44a2e3f584 | []
| no_license | akjain96/PythonBasics | ebc1bbafe323129575a1e8d774eda5a3b5f495cb | c4a8b2db44f72f0f455a860aeef1ea05a8afeac6 | refs/heads/main | 2023-05-17T17:44:08.660633 | 2021-06-08T06:48:10 | 2021-06-08T06:48:10 | 374,908,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | var = [1,2,3,4,5,6,7,8,9]
head, *tail = var
print(head)
print(tail)
*head, tail = var
print(head)
print(tail) | [
"[email protected]"
]
| |
a225fe7ee8be173055a27f059f7cf1d154e96644 | c7c772d5900c1352d6ae1d392bddc5e061060d62 | /post/migrations/0001_initial.py | a132844072ab55c988b82303d22055d650b640ef | []
| no_license | gabrielhdez41/blog-test | b11fead309fe80d7247dca598dae23c3c20fd61d | e39cec4bd8361d5137ce88e94650dcc0d84d8181 | refs/heads/master | 2022-12-28T09:49:10.828691 | 2020-10-07T01:37:19 | 2020-10-07T01:37:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | # Generated by Django 3.0.8 on 2020-08-07 22:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(upload_to='')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('overview', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('comment_count', models.IntegerField(default=0)),
('thumbnail', models.ImageField(upload_to='')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.Author')),
('categories', models.ManyToManyField(to='post.Category')),
],
),
]
| [
"[email protected]"
]
| |
54e45b5adf5e30e8719f78c545a9e56a6627a681 | cf84ca819873ec61fcc3411316681b0900de75e8 | /UriOnlineJudge/uri1001.py | f80cb6b915d1a3bc490b7dc643d3471b3b055942 | []
| no_license | gabriellll00/hello-world | 8d563e78be14b006d2064bbd298514eacb1afdb7 | fc258b99f43c70bfd0811db0176a534d026eb83e | refs/heads/main | 2023-07-21T20:08:19.009397 | 2021-09-04T20:37:25 | 2021-09-04T20:37:25 | 388,102,882 | 0 | 0 | null | 2021-07-21T12:21:41 | 2021-07-21T12:02:57 | null | UTF-8 | Python | false | false | 62 | py | a = int(input())
b = int(input())
x = a + b
print(f'X = {x}')
| [
"[email protected]"
]
| |
8d00421a892e2caa7e04f1e4fde7bed021bdc058 | 93b12e31b85ae36345b47d29301e999480d13515 | /blog/blog/tests.py | a6618c02aa7ba4e59711ef801e7a438a30e0e6de | []
| no_license | bo858585/MyBlog | d3b61e7e4bd424a945789c2644909a5ab3314142 | 7a136b5ce043d584aa1327c59e39e8b534cbdf68 | refs/heads/master | 2016-08-03T00:27:51.631197 | 2013-08-30T08:17:17 | 2013-08-30T08:17:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | # coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import Post
from .views import PostCreate
import json
class PostTest(TestCase):
def setUp(self):
"""
Creates two posts in blog and gets two urls:
url of the posts list and url of the post creating
"""
Post.objects.get_or_create(title=u"A title 1", text=u"A text 1")
Post.objects.get_or_create(title=u"A title 2", text=u"A text 2")
self.posts_list_url = reverse("blog_posts")
self.post_to_blog = reverse("post_to_blog")
def test_list(self):
"""
Gets posts list and checks that posts have initial title and text
"""
response = self.client.get(self.posts_list_url)
self.assertEquals(response.status_code, 200)
self.assertContains(response, u"A title 1")
self.assertContains(response, u"A title 2")
self.assertContains(response, u"A text 1")
self.assertContains(response, u"A text 2")
def test_create(self):
"""
Creates post and checks that it exists after creating
"""
post = {"title": "A title 3", "text": "A text 3"}
response = self.client.post(self.post_to_blog, post, follow=True)
self.assertEquals(response.status_code, 200)
self.assertContains(response, u"A title 3")
self.assertContains(response, u"A text 3")
| [
"[email protected]"
]
| |
f58da75f082920984c3ece191ce425e489425a4d | 76cb1f74513ae6fc071888cb130e07f8b49c38c7 | /editor/grid_functions.py | 87a3570f662dc1f66666021d6a66fbeb084bd9cf | []
| no_license | allenmrazek/super-mario | 0bb2719f5dcf8638a8dca6dd7fd9a2fc8e8d0972 | 0ca16f5516825aac1139a534fae4da3c5358777a | refs/heads/master | 2023-09-01T23:53:52.246187 | 2021-09-24T01:23:31 | 2021-09-24T01:23:31 | 394,555,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | import pygame
import pygame.gfxdraw
import config
from util import pixel_coords_to_tile_coords, tile_coords_to_pixel_coords, make_vector
def draw_grid(screen, line_color, grid_size, view_rect):
w, h = grid_size
ox, oy = (view_rect.left % grid_size[0], view_rect.top % grid_size[1]) if view_rect is not None else (0, 0)
# horizontal lines
for y_coord in range(h - oy, config.screen_rect.height, h):
pygame.gfxdraw.line(screen, 0, y_coord, config.screen_rect.width, y_coord, line_color)
# vertical lines
for x_coord in range(w - ox, config.screen_rect.width, w):
pygame.gfxdraw.line(screen, x_coord, 0, x_coord, config.screen_rect.height, line_color)
def draw_selection_square(screen, level_map, color, view_rect):
tile_coords = pixel_coords_to_tile_coords(pygame.mouse.get_pos() + make_vector(view_rect.x, view_rect.y),
level_map.tileset)
if level_map.is_in_bounds(tile_coords):
r = pygame.Rect(
*tile_coords_to_pixel_coords(tile_coords, level_map.tileset),
level_map.tileset.tile_width, level_map.tileset.tile_height)
r.topleft -= make_vector(*view_rect.topleft)
pygame.gfxdraw.rectangle(screen, r, color)
| [
"[email protected]"
]
| |
cd3f87f5e2e89356e2ca0b12e50553e9a48169a8 | 8d11a3316e2c8eac08ae11afb6c697393b557414 | /SpaceShip/ShipModule.py | 0508239565c56725c1614ad7c395daa6e12aabd7 | []
| no_license | dahou83/SpaceShip | 24548b7ac368448fe24ae118940cd289a455ef96 | d8c3df1eb36f9cfac7aeb7078afe7ef02c59cd85 | refs/heads/master | 2022-11-11T14:51:06.884969 | 2020-07-05T03:38:56 | 2020-07-05T03:38:56 | 277,225,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | import pygame
class Ship :
'''def __init__(self , s):
self.screen = s .screen
self.ship = pygame.image.load('spaceship1.png')
self.move_horiz = 305
self.move_vert = 150'''
def __init__(self , ai_game):
self.screen = ai_game.screen
self.screen_rect = ai_game.screen.get_rect()
self.image = pygame.image.load('C:\\Users\\abdoa\\projectgame\\SpaceShip\\spaceShip1.png')
self.rect = self.image.get_rect()
self.rect.midbottom = self.screen_rect.midbottom
self.movingr_right = False
self.moving_left = False
self.moving_up = False
self.moving_down = False
def update (self):
if self.movingr_right :
self.rect.x += 1
if self.moving_left :
self.rect.x -= 1
if self.moving_up :
self.rect.y -=1
if self.moving_down :
self.rect.y +=1
def blitme(self):
self.screen.blit(self.image , self.rect)
#
'''def move (self):
self.screen.blit(self.ship,(self.move_horiz , self.move_vert))'''
| [
"[email protected]"
]
| |
19520d1850d303ee6d8e756f5a39b0610c647846 | c6a3b8362673ef847d8bd2445205f5a361b473fb | /HW-4/sigm.py | e88037ff7dfbfaf2f59aac7bbda0b9a8fe9582f2 | []
| no_license | nursultanbolel/Introduction-To-Deep-Learning-Homeworks | 1b231be8f4001f7093986b1c7016e4eef35a8ab1 | c50514b8a1eda735445e0880ba1ed03aa1612b76 | refs/heads/master | 2020-12-15T22:24:27.143947 | 2020-01-22T11:28:29 | 2020-01-22T11:28:29 | 235,272,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 12 16:48:28 2019
@Name-Surname: NUR SULTAN BOLEL
@Homework: Homework-4
"""
import numpy as np
def sigm(X):
h,w=X.shape
O=np.zeros((h,w))
for i in range(h):
for j in range(w):
X[i,j]=(-1)*X[i,j]
O[i,j]= 1 / (1 + np.exp( X[i,j]))
return O
| [
"[email protected]"
]
| |
1809a83312bcf86b533baa9b5a500fa91fcb2a47 | dcadd6029501cbd457616ee4b0db31f2d9c88006 | /fullyconvolutional/run_training.py | dab748cbe4fb056f8ac835c185b50355bb633a2d | []
| no_license | tpostadjian/pytorch | 59769f587fdb78ce24aea48bc348eb4b3324f4e3 | 93ec3c5989d5ab1066739772273d64dba716c295 | refs/heads/master | 2018-10-07T02:31:28.816419 | 2018-06-13T08:36:07 | 2018-06-13T08:36:07 | 113,600,046 | 0 | 0 | null | 2018-08-01T09:36:10 | 2017-12-08T17:27:22 | Python | UTF-8 | Python | false | false | 2,926 | py | from fullyconvolutional.training.train.fcn_dataset_loader import SPOT_dataset
from fullyconvolutional.training.train.fcn_trainer import Trainer
from fullyconvolutional.training.train.fcn_tester import Tester
from fullyconvolutional.models.fcn_net import fcn
from glob import glob as glob
import random
import torch
import torch.utils.data as data
import torch.nn as nn
WINDOW_SIZE = (128, 128)
STRIDE = 64
TRAIN_RATIO = 0.9
N_EPOCHS = 150
CLASSES = ['Buildings', 'Vegetation', 'Water', 'Crop', 'Roads']
# CLASSES = ['Unknown', 'Buildings', 'Vegetation', 'Water', 'Crop', 'Roads']
# weights = [0, 1, 0.4, 0.7, 0.1, 0.8]
# CLASSES_WEIGHT = torch.FloatTensor(weights).cuda()
# CLASSES_WEIGHT = torch.ones(len(CLASSES)).cuda()
CLASSES_WEIGHT = torch.ones(6).cuda()
data_dir = '../../../Data/finistere/img_rescaled/tile_{}.tif'
label_dir = '../../../Data/finistere/label/tile_{}.tif'
# label_dir = 'training/training_set_generation/training_set/label/label_{}.tif'
# data_dir = 'training/training_set_generation/training_set/data/data_{}.tif'
all_files = glob(label_dir.replace('{}', '*'))
all_ids = [f.split('tile_')[-1].split('.')[0] for f in all_files]
train_ids = random.sample(all_ids, int(TRAIN_RATIO * len(all_ids)))
test_ids = list(set(all_ids) - set(train_ids))
print(len(train_ids))
net = fcn(4, 6)
print(net)
train_dataset = SPOT_dataset(train_ids, data_dir, label_dir, WINDOW_SIZE, cache=True)
train_loader = data.DataLoader(train_dataset, batch_size=64, shuffle=True)
tr = Trainer(net, nn.CrossEntropyLoss(weight=CLASSES_WEIGHT, ignore_index=0), train_loader, mode='cuda')
# trainer = Trainer(net, nn.NLLLoss2d(weight=CLASSES_WEIGHT, ignore_index=0), train_loader)
te = Tester(net, nn.CrossEntropyLoss(weight=CLASSES_WEIGHT, ignore_index=0), test_ids, data_dir, label_dir, WINDOW_SIZE, STRIDE)
def train(epochs):
OUT_DIR = '.'
best_acc = 0
# Some training perfomances
LOSS_TRAIN_FILE = OUT_DIR + '/train_losses.txt'
LOSS_TEST_FILE = OUT_DIR + '/test_losses.txt'
ACC_TEST_FILE = OUT_DIR + '/test_acc.txt'
print('Initial best accuracy: {:.2f}'.format(best_acc))
with open(LOSS_TRAIN_FILE, 'w') as f_trainloss, \
open(LOSS_TEST_FILE, 'w') as f_testloss, \
open(ACC_TEST_FILE, 'w') as f_testacc:
for e in range(epochs):
# Training
print('\n----------------------------')
print('Epoch: {}'.format(e))
tr.runEpoch()
f_trainloss.write('{:.2f}\n'.format(tr.avg_loss))
print('\nTraining loss: {:.2f}'.format(tr.avg_loss))
if e % 5 == 0:
te.test(5)
torch.save(net.state_dict(), './net')
f_trainloss.close()
def save_state(state, is_best, out_dir):
torch.save(state, out_dir + '/model_state.pth')
if is_best:
torch.save(state, out_dir + '/model_best.pth')
torch.save(net.state_dict(), './net')
train(N_EPOCHS)
| [
"[email protected]"
]
| |
a8a885675b9305d63335322a7f85ca78e17ab456 | 4b2c1372c54dc069d08393397d8324c8dd1fcaf5 | /project2/urls.py | c9b6c7b9b7c392064b32f2ca43564b3bc7159aa9 | []
| no_license | gurashish1singh/project2 | 3af1a74392a70732d824f060597ba263eff5baf6 | 7931d50491b45471ff7276b7276d589ad18bacab | refs/heads/master | 2020-06-01T03:00:42.284709 | 2019-06-06T15:46:23 | 2019-06-06T15:46:23 | 190,607,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | """project2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('myapp.urls')),
]
| [
"[email protected]"
]
| |
ae5054662c31c4460f0dca8585dcffb3f9c504a2 | 954ac3c7e9c4b3b49a212dd1f848ee6024c1a7a8 | /test_logica.py | ff588f8575b7a3c15c3f5980e6435431b4e99288 | []
| no_license | denilsonsa/pythonlogica | bfb50264266ecf18e5212fdbcd64023d8638bdbc | 0ebe4ea3e6d99cda522fc59e8c29408d63dd5216 | refs/heads/master | 2022-11-09T02:45:34.365248 | 2020-07-01T07:32:01 | 2020-07-01T07:35:48 | 275,635,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,570 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4 sw=4 et foldmethod=indent foldlevel=1
import unittest
import string
import sys
from logica import *
ascii_uppercase = string.ascii_uppercase if sys.version_info.major >= 3 else string.uppercase
# Este arquivo executa dois tipos de teste:
# * testes unitários, usando o módulo 'unittest'
# * testes de documentação, usando o módulo 'doctest'
class TestarExpressoes(unittest.TestCase):
def setUp(self):
# Ugly... Writing to globals()...
# But it is damn handy! :)
criar_simbolos_no_namespace(ascii_uppercase, globals())
def tearDown(self):
for i in ascii_uppercase:
del globals()[i]
#################################################################
# Testes de operadores
def test_simbolos_iguais(self):
self.assertEqual(
ExpressaoSimbolo("x"),
ExpressaoSimbolo("x")
)
def test_simbolos_diferentes(self):
self.assertNotEqual(
ExpressaoSimbolo("x"),
ExpressaoSimbolo("y")
)
def test_operador_parentese(self):
e = Expressao(A)
self.assertEqual(
len(e.children),
1
)
self.assertEqual(
e.children[0],
A
)
def test_operador_not(self):
self.assertEqual(
~ A ,
ExpressaoNot(A)
)
def test_operador_not_duplicado(self):
self.assertEqual(
~ ~ A ,
ExpressaoNot(ExpressaoNot(A))
)
def test_operador_and(self):
self.assertEqual(
A & B ,
ExpressaoAnd(A, B)
)
def test_operador_or(self):
self.assertEqual(
A | B ,
ExpressaoOr(A, B)
)
def test_operador_implica(self):
self.assertEqual(
A > B ,
ExpressaoOr(ExpressaoNot(A), B)
)
def test_precedencia_and_not(self):
self.assertEqual(
~A & ~B ,
ExpressaoAnd(ExpressaoNot(A), ExpressaoNot(B))
)
def test_precedencia_or_not(self):
self.assertEqual(
~A | ~B ,
ExpressaoOr(ExpressaoNot(A), ExpressaoNot(B))
)
def test_precedencia_implica_not(self):
self.assertEqual(
~A > ~B ,
ExpressaoOr(ExpressaoNot(ExpressaoNot(A)), ExpressaoNot(B))
)
#################################################################
# Testes de comparações
def test_comparar_ignorando_ordem(self):
expressoes = (
(
A,
A,
True, # ==
True, # .comparar_ignorando_ordem()
),
(
A,
B,
False,
False,
),
(
A,
~ A,
False,
False,
),
(
A & B,
A & B,
True,
True,
),
(
A | B,
A | B,
True,
True,
),
(
A & B,
B & A,
False,
True,
),
(
A | B,
B | A,
False,
True,
),
(
A & (C | B),
A & (B | C),
False,
True,
),
(
(C | B) & A,
A & (B | C),
False,
True,
),
(
(A & B) | (C & D),
(A & B) | (C & D),
True,
True,
),
(
(B & A) | (D & C),
(A & B) | (C & D),
False,
True,
),
(
(B & A) | (D & C),
(C & D) | (A & B),
False,
True,
),
(
A > B,
~ A | B,
True,
True,
),
(
A > B,
B | ~ A,
False,
True,
),
(
A & B & C & D,
A & B & C & D,
True,
True,
),
(
A & B & C & D,
A & D & B & C,
False,
True,
),
(
A & B & C & D,
D & C & B & A,
False,
True,
),
(
A | B | C | D,
A | B | C | D,
True,
True,
),
(
A | B | C | D,
A | D | B | C,
False,
True,
),
(
A | B | C | D,
D | C | B | A,
False,
True,
),
)
for e, f, equal, comparacao in expressoes:
e.remover_associativas()
f.remover_associativas()
e.generate_sort_keys()
f.generate_sort_keys()
self.assertEqual(e == f, equal, "%s == %s ==> %s" % (str(e), str(f), not equal))
self.assertEqual(e.comparar_ignorando_ordem(f), comparacao, "%s .comparar_ignorando_ordem( %s ) ==> return '%s' == '%s'" % (str(e), str(f), e.sort_key, f.sort_key))
#################################################################
# Testes de manipulações
#
# Muitos dos testes abaixos exigem que a expressão seja
# encapsulada num parêntese, usando Expressao(). Isto é
# necessário porque a maioria das manipulações consegue
# operar apenas nos filhos.
def test_remover_dupla_negacao(self):
expressoes = (
(
~ A,
~ A
),
(
~ ~ A,
A
),
(
~ ~ ~ A,
~ A
),
(
~ ~ ~ ~ A,
A
),
(
~ ~ ~ ~ ~ A,
~ A
),
(
~ ~ ~ ~ ~ ~ A,
A
),
)
for antes, depois in expressoes:
e = Expressao(antes)
r = Expressao(depois)
e.remover_duplas_negacoes()
self.assertEqual(e, r)
def test_remover_dupla_negacao_e_associativa_nao_1(self):
e = Expressao(A & ~ ~ (B & C))
r = Expressao(ExpressaoAnd(A, ExpressaoAnd(B, C)))
e.remover_duplas_negacoes(auto_remover_associativas=False)
self.assertEqual(e, r)
def test_remover_dupla_negacao_e_associativa_sim_1(self):
e = Expressao(A & ~ ~ (B & C))
r = Expressao(ExpressaoAnd(A, B, C))
e.remover_duplas_negacoes(auto_remover_associativas=True)
self.assertEqual(e, r)
def test_remover_dupla_negacao_e_associativa_nao_2(self):
e = Expressao(A | ~ ~ (B | C))
r = Expressao(ExpressaoOr(A, ExpressaoOr(B, C)))
e.remover_duplas_negacoes(auto_remover_associativas=False)
self.assertEqual(e, r)
def test_remover_dupla_negacao_e_associativa_sim_2(self):
e = Expressao(A | ~ ~ (B | C))
r = Expressao(ExpressaoOr(A, B, C))
e.remover_duplas_negacoes(auto_remover_associativas=True)
self.assertEqual(e, r)
def test_demorgan_and(self):
e = Expressao(~(A & B))
r = Expressao(ExpressaoOr(ExpressaoNot(A), ExpressaoNot(B)))
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_or(self):
e = Expressao(~ (A | B))
r = Expressao(ExpressaoAnd(ExpressaoNot(A), ExpressaoNot(B)))
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_and_remover_dupla_negacao(self):
e = Expressao(~ (~A & ~B))
r = Expressao(ExpressaoOr(A, B))
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_or_remover_dupla_negacao(self):
e = Expressao(~ (~A | ~B))
r = Expressao(ExpressaoAnd(A, B))
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_remover_dupla_negacao_1(self):
e = Expressao(~(~ ~ A & ~ ~ (~B & C)))
r = Expressao(~ A | (B | ~ C))
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_remover_dupla_negacao_2(self):
e = Expressao(~(~ ~ ~ A & ~ ~ (~B & C)))
r = Expressao(A | (B | ~ C))
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_remover_dupla_negacao_3(self):
"""Ao executar .interiorizar_negacao(), a remoção automática da dupla negação deve acontecer de forma não recursiva."""
e = Expressao(~(~ A & ~ ~ B & ~ ~ ~ C) & ~ ~ D)
r = Expressao((A | ~ B | C) & ~ ~ D)
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_demorgan_remover_dupla_negacao_4(self):
"""Ao executar .interiorizar_negacao(), a remoção automática da dupla negação deve acontecer de forma não recursiva."""
e = Expressao(~(~ A & ~ (B | ~ ~ ~ C)) )
r = Expressao(A | (B | ~ ~ ~ C) )
e.interiorizar_negacao()
self.assertEqual(e, r)
def test_remover_associativas_and(self):
e = A & B & C & D
r = ExpressaoAnd(A, B, C, D)
e.remover_associativas()
self.assertEqual(e, r)
def test_remover_associativas_or(self):
e = A | B | C | D
r = ExpressaoOr(A, B, C, D)
e.remover_associativas()
self.assertEqual(e, r)
def test_interiorizar_or_1(self):
e = Expressao(A | (B & C))
r = Expressao( (A | B) & (A | C) )
e.interiorizar_or()
self.assertEqual(e, r)
def test_interiorizar_or_2(self):
e = Expressao(A | (X & Y & Z))
r = Expressao( (A | X) & (A | Y) & (A | Z) )
e.remover_associativas()
r.remover_associativas()
e.interiorizar_or()
self.assertEqual(e, r)
def test_interiorizar_or_3(self):
e = Expressao(A | B | (X & Y & Z) | C)
r = Expressao( (A | B | X | C) & (A | B | Y | C) & (A | B | Z | C) )
e.remover_associativas()
r.remover_associativas()
e.interiorizar_or()
self.assertEqual(e, r)
def test_interiorizar_or_4(self):
e = Expressao( (A & B) | (C & D) )
r1 = Expressao( ((A | C) & (A | D)) & ((B | C) & (B | D)) )
r2 = Expressao( ((A | C) & (A | D)) & ((B | C) & (B | D)) )
r2.remover_associativas()
e.interiorizar_or()
self.assertTrue(e == r1 or e == r2)
def test_interiorizar_or_5(self):
e = Expressao( (A & (X | Y)) | (C & (J | K)) )
r1 = Expressao(((A | C) & (A | (J | K))) & (((X | Y) | C) & ((X | Y) | (J | K))))
r2 = Expressao(((A | C) & (A | (J | K))) & (((X | Y) | C) & ((X | Y) | (J | K))))
r2.remover_associativas()
e.interiorizar_or()
self.assertTrue(e == r1 or e == r2)
def test_interiorizar_or_6(self):
e = Expressao( (A & B & C) | (D & E & F) )
r = Expressao( (A | D) & (A | E) & (A | F) & (B | D) & (B | E) & (B | F) & (C | D) & (C | E) & (C | F) )
r.remover_associativas()
e.interiorizar_or()
e.remover_associativas()
self.assertEqual(e, r)
def test_interiorizar_or_7(self):
e = Expressao( A | B | (X & (J | K) & Y) | C )
r1 = Expressao( ExpressaoAnd( ExpressaoOr(A, B, X, C), ExpressaoOr(A, B, (J | K), C), ExpressaoOr(A, B, Y, C) ) )
r2 = Expressao( ExpressaoAnd( ExpressaoOr(A, B, X, C), ExpressaoOr(A, B, (J | K), C), ExpressaoOr(A, B, Y, C) ) )
e.remover_associativas()
r2.remover_associativas()
e.interiorizar_or()
self.assertTrue(e == r1 or e == r2)
def test_transformar_em_forma_normal_conjuntiva(self):
expressoes = (
(
A ,
A
),
(
~ A ,
~ A
),
(
~ ~ A ,
A
),
(
~ ~ A | B | C ,
A | B | C
),
(
~ ~ (A | B | C) ,
A | B | C
),
(
A & B & C ,
A & B & C
),
(
~ (A | B | C) ,
~A & ~B & ~C
),
(
~ (A & B & C) ,
~A | ~B | ~C
),
(
A & ~ ~ (B & C) ,
A & B & C
),
(
A | ~ ~ (B | C) ,
A | B | C
),
(
~(~ ~ A & ~ ~ (~B & C)) ,
~ A | B | ~ C
),
(
~(~ ~ ~ A & ~ ~ (~B & C)) ,
A | B | ~ C
),
(
~(~ A & ~ ~ B & ~ ~ ~ C) & ~ ~ D ,
(A | ~ B | C) & D
),
(
~(~ A & ~ (B | ~ ~ ~ C)) ,
A | B | ~ C
),
(
(A | B) & (C | D) ,
(A | B) & (C | D)
),
(
A | (X & Y & Z) ,
(A | X) & (A | Y) & (A | Z)
),
(
A | B | (X & Y & Z) | C ,
(A | B | X | C) & (A | B | Y | C) & (A | B | Z | C)
),
(
(A & B) | (C & D) ,
(A | C) & (A | D) & (B | C) & (B | D)
),
(
(A & (X | Y)) | (C & (J | K)) ,
(A | C) & (A | J | K) & (X | Y | C) & (X | Y | J | K)
),
(
(A & B & C) | (D & E & F) ,
(A | D) & (A | E) & (A | F) & (B | D) & (B | E) & (B | F) & (C | D) & (C | E) & (C | F)
),
(
A | B | (X & (J | K) & Y) | C ,
(A | B | X | C) & (A | B | J | K | C) & (A | B | Y | C)
),
( # Fórmula do XOR
(A | B) & ~(A & B) ,
(A | B) & (~A | ~B)
),
)
for antes, depois in expressoes:
e = Expressao(antes)
r = Expressao(depois)
r.remover_associativas()
e.transformar_em_forma_normal_conjuntiva()
self.assertEqual(e, r)
def test_remover_operacoes_vazias(self):
expressoes = (
Expressao(ExpressaoAnd()),
Expressao(ExpressaoOr()),
Expressao(ExpressaoNot(ExpressaoAnd())),
Expressao(ExpressaoNot(ExpressaoOr())),
Expressao(ExpressaoNot(ExpressaoNot(ExpressaoAnd()))),
Expressao(ExpressaoNot(ExpressaoNot(ExpressaoOr()))),
Expressao(ExpressaoNot(ExpressaoNot(ExpressaoNot(ExpressaoAnd())))),
Expressao(ExpressaoNot(ExpressaoNot(ExpressaoNot(ExpressaoOr())))),
Expressao(ExpressaoNot(Expressao(ExpressaoNot(ExpressaoAnd())))),
Expressao(ExpressaoNot(Expressao(ExpressaoNot(ExpressaoOr())))),
Expressao(Expressao(Expressao(Expressao(ExpressaoAnd())))),
Expressao(Expressao(Expressao(Expressao(ExpressaoOr())))),
Expressao(ExpressaoAnd(ExpressaoAnd(),ExpressaoAnd())),
Expressao(ExpressaoOr(ExpressaoAnd(),ExpressaoAnd())),
Expressao(ExpressaoAnd(ExpressaoOr(),ExpressaoOr())),
Expressao(ExpressaoOr(ExpressaoOr(),ExpressaoOr())),
Expressao(ExpressaoAnd(ExpressaoAnd(),ExpressaoOr())),
Expressao(ExpressaoOr(ExpressaoAnd(),ExpressaoOr())),
Expressao(ExpressaoAnd(ExpressaoOr(),ExpressaoAnd())),
Expressao(ExpressaoOr(ExpressaoOr(),ExpressaoAnd())),
Expressao(ExpressaoAnd(ExpressaoAnd(Expressao(ExpressaoOr())),ExpressaoAnd(ExpressaoNot(ExpressaoOr())))),
)
for antes in expressoes:
e = antes
r = Expressao(A)
r.children = []
e.remover_operacoes_vazias()
self.assertEqual(e, r)
# TODO:
# * Testar transformar_em_forma_normal_conjuntiva() com expressões com
# operador implica.
# * Criar funcao "remover tautologias" e "remover contradicoes"
# * Criar algum tipo de integração entre as classes Formula e Expressao.
# * Possível expressão para usar em testes:
# Manipular XOR: (~A & B) | (A & ~B) <==> (A | B) & ~(A & B)
#################################################################
# Testes de lista de símbolos
def test_listar_simbolos_1(self):
e = A & B & C
self.assertEqual(
set(['A', 'B', 'C']),
e.simbolos()
)
def test_listar_simbolos_2(self):
e = (A & B & C) | (~A & ~B & ~C) | (B & D)
self.assertEqual(
set(['A', 'B', 'C', 'D']),
e.simbolos()
)
#################################################################
# Testes de eval()
def test_eval_simbolo(self):
e = A
for valor in (Verdadeiro, Falso):
d = {"A": valor}
self.assertEqual(e.eval(d), valor)
def test_eval_parentese_1(self):
e = Expressao(A)
for valor in (Verdadeiro, Falso):
d = {"A": valor}
self.assertEqual(e.eval(d), valor)
def test_eval_parentese_2(self):
e = Expressao(Expressao(A))
for valor in (Verdadeiro, Falso):
d = {"A": valor}
self.assertEqual(e.eval(d), valor)
def test_eval_not_1(self):
e = ~A
for valor in (Verdadeiro, Falso):
d = {"A": valor}
self.assertEqual(e.eval(d), ~ valor)
def test_eval_not_2(self):
e = ~A
for valor, negacao in (
(Verdadeiro, Falso),
(Falso, Verdadeiro),
):
d = {"A": valor}
self.assertEqual(e.eval(d), negacao)
def test_eval_parentese_not_parentese(self):
e = Expressao(ExpressaoNot(Expressao(ExpressaoSimbolo("A"))))
for valor, negacao in (
(Verdadeiro, Falso),
(Falso, Verdadeiro),
):
d = {"A": valor}
self.assertEqual(e.eval(d), negacao)
def test_eval_and(self):
e = A & B
for valorA, valorB, resultado in (
(Verdadeiro, Verdadeiro, Verdadeiro),
(Verdadeiro, Falso , Falso ),
(Falso , Verdadeiro, Falso ),
(Falso , Falso , Falso ),
):
d = {"A": valorA, "B": valorB}
self.assertEqual(e.eval(d), resultado)
def test_eval_or(self):
e = A | B
for valorA, valorB, resultado in (
(Verdadeiro, Verdadeiro, Verdadeiro),
(Verdadeiro, Falso , Verdadeiro),
(Falso , Verdadeiro, Verdadeiro),
(Falso , Falso , Falso ),
):
d = {"A": valorA, "B": valorB}
self.assertEqual(e.eval(d), resultado)
def test_eval_implica(self):
e = A > B
for valorA, valorB, resultado in (
(Verdadeiro, Verdadeiro, Verdadeiro),
(Verdadeiro, Falso , Falso ),
(Falso , Verdadeiro, Verdadeiro),
(Falso , Falso , Verdadeiro),
):
d = {"A": valorA, "B": valorB}
self.assertEqual(e.eval(d), resultado)
class TestarExpressoesTrueFalse(unittest.TestCase):
"""Esta classe contém apenas testes não críticos"""
def setUp(self):
# Ugly... Writing to globals()...
# But it is damn handy! :)
criar_simbolos_no_namespace(ascii_uppercase, globals())
def tearDown(self):
for i in ascii_uppercase:
del globals()[i]
def test_eval_simbolo(self):
e = A
for valor in (True, False):
d = {"A": valor}
self.assertEqual(e.eval(d), valor)
def test_eval_parentese_1(self):
e = Expressao(A)
for valor in (True, False):
d = {"A": valor}
self.assertEqual(e.eval(d), valor)
def test_eval_parentese_2(self):
e = Expressao(Expressao(A))
for valor in (True, False):
d = {"A": valor}
self.assertEqual(e.eval(d), valor)
def test_eval_not(self):
e = ~A
for valor, negacao in (
(True, False),
(False, True),
):
d = {"A": valor}
self.assertEqual(e.eval(d), negacao)
def test_eval_parentese_not_parentese(self):
e = Expressao(ExpressaoNot(Expressao(ExpressaoSimbolo("A"))))
for valor, negacao in (
(True, False),
(False, True),
):
d = {"A": valor}
self.assertEqual(e.eval(d), negacao)
d = {"A": valorA, "B": valorB}
self.assertEqual(e.eval(d), resultado)
class _TerseTextTestResult(unittest._TextTestResult):
def printErrorList(self, flavour, errors):
for test, err in errors:
#self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
#self.stream.writeln(self.separator2)
#self.stream.writeln("%s" % err)
class TerseTextTestRunner(unittest.TextTestRunner):
def _makeResult(self):
return _TerseTextTestResult(self.stream, self.descriptions, self.verbosity)
if __name__ == '__main__':
#unittest.main()
testar_bool = False
if testar_bool:
sys.stderr.write("Running non-critical tests:\n")
non_critical_suite = unittest.TestLoader().loadTestsFromTestCase(TestarExpressoesTrueFalse)
TerseTextTestRunner(verbosity=1).run(non_critical_suite)
#unittest.TextTestRunner(verbosity=1).run(non_critical_suite)
sys.stderr.write("\n")
sys.stderr.write("Running CRITICAL tests:\n")
suite = unittest.TestLoader().loadTestsFromTestCase(TestarExpressoes)
unittest.TextTestRunner(verbosity=1).run(suite)
# Also running doctest:
import doctest
import logica
doctest.testmod(logica)
| [
"[email protected]"
]
| |
1cfadceb2486f1f384a0c83db90308ba1f2c3811 | aba3c6a5d7c4146ac54084c8d50c9a696f9c8b0a | /starting_out_with_python_code/Source Code/Chapter 12/factorial.py | 3b8c0a520e11b3ec379aedb6dc1107701a6f5d31 | []
| no_license | amonik/pythonnet | 53297c918c07a0d80fc8872939a60c3d19aa276c | 7d3eb37fbead4c344db6bdc357f6e17d11a189c2 | refs/heads/master | 2023-08-31T08:50:03.233930 | 2023-08-21T04:16:11 | 2023-08-21T04:16:11 | 24,205,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # This program uses recursion to calculate
# the factorial of a number.
def main():
    """Prompt for a nonnegative integer and display its factorial."""
    number = int(input('Enter a nonnegative integer: '))
    print('The factorial of', number, 'is', factorial(number))
def factorial(num):
    """Return num! computed recursively.

    Args:
        num: a nonnegative integer.

    Returns:
        The factorial of num (1 when num is 0).

    Raises:
        ValueError: if num is negative. (Previously a negative argument
            recursed without bound until RecursionError.)
    """
    if num < 0:
        raise ValueError('factorial() is undefined for negative numbers')
    if num == 0:
        return 1
    return num * factorial(num - 1)
# Call the main function only when run as a script, so importing this
# module (e.g. from tests) does not trigger the interactive prompt.
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
9edca9a33a5b48adfc520b2af32140732c8097cf | b0411270ba3d7b59793524885f23f4ee9840797c | /starterbot/bin/wsdump.py | 48d7bed7fb5664b0def49dcba14bfc6726ca2e6d | []
| no_license | articuno12/megathon | 7cfffe808c05881379363956d37934240c881daa | 504730da3586fd702a51404de4661b516381d9f4 | refs/heads/master | 2021-07-13T20:21:56.044143 | 2017-10-08T14:52:58 | 2017-10-08T14:52:58 | 106,181,954 | 0 | 0 | null | 2017-10-08T14:18:32 | 2017-10-08T14:18:32 | null | UTF-8 | Python | false | false | 5,936 | py | #!/home/fundamentaleq/megathon/starterbot/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
# readline is optional: when available it gives line editing/history to the
# interactive prompt; absence is harmless.
try:
    import readline
except ImportError:
    pass
def get_encoding():
    """Return stdin's encoding lowercased, defaulting to "utf-8" when unset."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action for -v/--verbose: accepts an integer level or counts
    repeated 'v' characters ("-vv" -> 2 v's -> level 3)."""

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            # "-vvv" style: one level per 'v', plus one for the flag itself.
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build and parse the command-line interface for the WebSocket dump tool.

    Returns the argparse.Namespace with: url, proxy, verbose (via VAction),
    nocert, raw, subprotocols, origin, eof_wait, text, timings, headers.
    """
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    # Input helper portable across Python 2/3 (via six); the returned line is
    # normalized to utf-8 encoded bytes.
    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # On non-utf-8 terminals, transcode byte input to utf-8; on Python 3
        # the line is text and is simply encoded.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    # Interactive console: received messages are shown in blue above a
    # persistent "> " prompt, using ANSI escape sequences.
    def write(self, data):
        # Erase the current prompt line before printing the incoming message.
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        # Blue text for the received message, then reset to default color.
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()
    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Raw-output console: each message on its own line, prompt-less reads."""

    def write(self, data):
        stream = sys.stdout
        stream.write(data)
        stream.write("\n")
        stream.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL, echo received frames, forward typed input."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Disable certificate validation entirely (insecure; opt-in via -n).
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")
    def recv():
        # Receive one frame; answers pings and acknowledges close frames.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data
    def recv_ws():
        # Background loop: print every received frame until a close arrives.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break
    # Daemon thread so the process can exit while the receiver is blocked.
    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Foreground loop: read user input and send it until Ctrl+C / EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # Optionally linger so late server replies are still printed.
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # NOTE(review): broad catch reduces any failure to its message and
        # exits 0 -- no traceback is shown.
        print(e)
| [
"[email protected]"
]
| |
c3b00d8278236a95e20ebb3af7fd9f50c93b373f | 8be4473603fb3319f2cab2aa335a2887fced78eb | /src/juego/accion_jugador.py | 5925da1b1ae9a3871ebea57e644d1797f4cedd08 | []
| no_license | ericbrandwein/tapada | e3d7e3a0120c98d90b1fc3c92c791e7e3184341d | 7bcfc76ef51d70ca89b15523e96528c79c4598da | refs/heads/master | 2020-04-23T01:23:13.730971 | 2019-03-14T19:29:36 | 2019-03-14T19:29:36 | 170,810,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from enum import Enum
class Fuente(Enum):
    # Source of a played card: the hand, the face-down pile, or a discard pile.
    MANO = 0
    TAPADA = 1
    PILON = 2
class Destino(Enum):
    # Destination of a played card: a discard pile, a run ("escalera"),
    # or the opponent's face-down pile.
    PILON = 0
    ESCALERA = 1
    TAPADA_CONTRARIA = 2
class AccionJugador:
    # One player action: move a card from `fuente` to `destino`; the index
    # fields disambiguate when there is more than one pile of that kind.
    def __init__(self, fuente, destino, indice_fuente=0, indice_destino=0):
        """
        If the destination is ESCALERA and indice_destino is < 0, a new run
        is created.
        """
        self.fuente = fuente
        self.destino = destino
        self.indice_fuente = indice_fuente
        self.indice_destino = indice_destino
| [
"[email protected]"
]
| |
161b1e11dcd515c9e213774de876e7e35c7198e9 | 74e75430e4ca2bf422017c7035580ae973c2c42e | /test/functional/wallet_bumpfee.py | 1907c874cc0841502d473005c080449aad105cf8 | [
"MIT"
]
| permissive | j00v/Lightcoin | 9e55bad2d3e38f4c3781f62f915828cde0e51bc9 | a8555320bebbf95545bc8c2841f1fadc38f5bd53 | refs/heads/main | 2023-05-09T07:13:28.031313 | 2021-06-08T22:11:45 | 2021-06-08T22:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,553 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from test_framework.blocktools import send_to_witness
from test_framework.test_framework import LightcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
# Passphrase used to encrypt node 1's wallet for the locked-wallet test.
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(LightcoinTestFramework):
    # Driver: two connected nodes (node 0 = peer/miner with walletrbf=0,
    # node 1 = RBF wallet with walletrbf=1); funds the RBF node, then runs
    # each test_* scenario in order.
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [["-prematurewitness", "-walletprematurewitness", "-deprecatedrpc=addwitnessaddress", "-walletrbf={}".format(i)]
                           for i in range(self.num_nodes)]
    def run_test(self):
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
        self.start_node(1)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        connect_nodes(self.nodes[0], 1)
        self.sync_all()
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
        self.log.info("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_bumpfee_metadata(rbf_node, dest_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    """Happy path: bumpfee replaces an RBF tx, evicts the original on both
    nodes, and records the replaces/replaced_by links in the wallet."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbftx = rbf_node.gettransaction(rbfid)
    sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert_equal(bumped_tx["errors"], [])
    assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    """bumpfee works on a replaceable tx that spends a segwit output."""
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
    rbf_node.addwitnessaddress(segwit_out["address"])
    segwitid = send_to_witness(
        use_p2wsh=False,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.0005"),
         rbf_node.getrawchangeaddress(): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransaction(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    """bumpfee must be rejected for a transaction that did not opt into RBF."""
    # cannot replace a non RBF transaction (from node which did not enable RBF)
    not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
    assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    """bumpfee must be rejected when the tx spends inputs from another wallet."""
    # cannot bump fee unless the tx has only inputs that we own.
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    signedtx = rbf_node.signrawtransaction(rawtx)
    signedtx = peer_node.signrawtransaction(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
                            rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    """bumpfee must be rejected when a wallet descendant spends the tx."""
    # cannot bump fee if the transaction has a descendant
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = spend_one_input(rbf_node, rbf_node_address)
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransaction(tx)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    """A fee that would shrink the change below the minimum is rejected;
    the boundary fee (50000) succeeds, one satoshi more fails."""
    # cannot bump fee with a too-small output
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.bumpfee(rbfid, {"totalFee": 50000})
    rbfid = spend_one_input(rbf_node, dest_address)
    assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
    """When the bump leaves only dust in the change output, the change is
    dropped and its value absorbed into the fee."""
    # check that if output is reduced to dust, it will be converted to fee
    # the bumped tx sets fee=49,900, but it converts to 50,000
    rbfid = spend_one_input(rbf_node, dest_address)
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  #change output is eliminated
def test_settxfee(rbf_node, dest_address):
    """bumpfee honors the wallet feerate configured via settxfee."""
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    rbfid = spend_one_input(rbf_node, dest_address)
    requested_feerate = Decimal("0.00025000")
    rbf_node.settxfee(requested_feerate)
    bumped_tx = rbf_node.bumpfee(rbfid)
    actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
    # Assert that the difference between the requested feerate and the actual
    # feerate of the bumped transaction is small.
    assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
def test_rebumping(rbf_node, dest_address):
    """An already-bumped tx cannot be bumped again, but its replacement can."""
    # check that re-bumping the original tx fails, but bumping the bumper succeeds
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
    assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
    rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    """A bump created with replaceable=False cannot itself be bumped."""
    # check that re-bumping a non-replaceable bump tx fails
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
                            {"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    """Outputs of both sides of a replacement pair are unspendable while
    unconfirmed; the original's output becomes spendable once confirmed."""
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = spend_one_input(rbf_node, rbf_node_address)
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then call abandon to make sure the wallet doesn't attempt to resubmit the
    # bump tx, then invalidate the block so the rbf tx will be put back in the
    # mempool. this makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    rbf_node.abandontransaction(bumpid)
    rbf_node.invalidateblock(block.hash)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
    """The bumped transaction inherits the original's comment/to metadata."""
    rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
    """bumpfee on a locked (encrypted) wallet fails with a passphrase error."""
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.walletlock()
    assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
                            rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
    """Spend one 0.001 UTXO into an opt-in-RBF tx (0.0005 to dest, 0.00049
    change, 0.00001 fee) and return its txid."""
    tx_input = dict(
        sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
    rawtx = node.createrawtransaction(
        [tx_input], {dest_address: Decimal("0.00050000"),
                     node.getrawchangeaddress(): Decimal("0.00049000")})
    signedtx = node.signrawtransaction(rawtx)
    txid = node.sendrawtransaction(signedtx["hex"])
    return txid
def submit_block_with_tx(node, tx):
    """Mine a block on the current tip containing the given raw tx (hex) and
    submit it; returns the solved block object."""
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
    block.vtx.append(ctx)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    blocktools.add_witness_commitment(block)
    block.solve()
    node.submitblock(bytes_to_hex_str(block.serialize(True)))
    return block
if __name__ == "__main__":
    # Entry point: run the functional test via the framework harness.
    BumpFeeTest().main()
| [
"[email protected]"
]
| |
0dfec56cd4d62e01e54c239df360ace22c6352bb | 28fad446fac04f7f0b3c828fad1c3c2f808c2c62 | /weak_deepards/models/base/resnet.py | e5c6e322bca3db1a49f01459e71dd2734269d403 | []
| no_license | hahnicity/weak_deepards | b1f82db3b598a146360ef0691ae94c1dd52a7b79 | e4a51901865587281cbc687737fbf7b645747087 | refs/heads/master | 2023-02-26T12:29:56.172484 | 2021-01-30T11:16:34 | 2021-01-30T11:16:34 | 242,019,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,570 | py | import torch.nn as nn
import math
def conv2x2(in_planes, out_planes, stride=1):
    """3x3 1-D convolution with padding 1 (the name is historical; the
    actual kernel_size is 3, not 2). Bias-free, for use before BatchNorm."""
    return nn.Conv1d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
class BasicBlock(nn.Module):
    # Standard two-convolution residual block (1-D): conv-bn-relu-conv-bn,
    # plus the identity (or downsampled) shortcut, then a final ReLU.
    expansion = 1  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv2x2(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv2x2(planes, planes)
        self.bn2 = nn.BatchNorm1d(planes)
        # Optional module that matches the shortcut's shape to the main path.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    # Three-convolution bottleneck residual block (1-D): 1x1 reduce,
    # 3x1 process, 1x1 expand (x4), plus the shortcut, then a final ReLU.
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm1d(planes)
        self.conv3 = nn.Conv1d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm1d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Optional module that matches the shortcut's shape to the main path.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """1-D ResNet backbone (PRM-style) for time-series input.

    Adaptation of the PRM ResNet: all convolutions are 1-D and the final
    layers emit a per-position 2-class map instead of a pooled logit.

    The stem (conv stride 2 + max-pool stride 2) divides the sequence length
    by 4; layers 2-4 each divide it by 2 again, so the output length is the
    input length / 32 (for the default strides).

    Args:
        block: residual block class (e.g. BasicBlock or Bottleneck); must
            expose an integer ``expansion`` attribute.
        layers: list of four ints, number of blocks in each stage.
        initial_planes: channel width of the stem / first stage.
        initial_kernel_size: kernel size of the stem convolution.
        initial_stride: stride of the stem convolution.
    """
    def __init__(self, block, layers, initial_planes=64, initial_kernel_size=7, initial_stride=2):
        self.inplanes = initial_planes
        self.expansion = block.expansion
        super(ResNet, self).__init__()
        # Stem: single conv over the raw 1-channel signal.
        self.conv1 = nn.Conv1d(1,
                               self.inplanes,
                               kernel_size=initial_kernel_size,
                               stride=initial_stride,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        # This also divides the input by 2
        self.first_pool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        # This layer keeps the same data shape
        self.layer1 = self._make_layer(block, initial_planes, layers[0])
        # Each of these layers divides the input seq size by 2.
        self.layer2 = self._make_layer(block, initial_planes * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, initial_planes * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, initial_planes * 8, layers[3], stride=2)
        # Kaiming-style init for convs, unit-gain for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                n = m.kernel_size[0] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.n_out_filters = self.inplanes * block.expansion
        self.features = nn.Sequential(
            self.conv1,
            self.bn1,
            self.relu,
            self.first_pool,
            self.layer1,
            self.layer2,
            self.layer3,
            self.layer4,
        )
        # NOTE(review): indexing layer4[1] assumes layers[3] >= 2.
        n_features = self.layer4[1].conv1.in_channels
        # there is a classifier here because it basically works like grad cam does.
        # it makes a classification on a specific downscaled location on the timeseries,
        # and then that classification is fed back thru the PRM module to make an aggregate
        # classification on the image/time series.
        self.classifier = nn.Conv1d(n_features, 2, kernel_size=1, bias=True)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of `blocks` blocks; the first block may
        downsample (stride/channel change) via a 1x1 conv shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv1d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return a (batch, 2, seq_len/32) per-position class-response map."""
        # Removed leftover debug residue (`import IPython; IPython.embed()`)
        # that dropped into an interactive shell on every forward pass.
        x = self.features(x)
        x = self.classifier(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (NOTE(review): this flag is currently ignored -- no weights are
            loaded).
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    model.network_name = 'resnet18'
    return model
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (NOTE(review): this flag is currently ignored -- no weights are
            loaded).
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    model.network_name = 'resnet34'
    return model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
            (NOTE(review): this flag is currently ignored -- no weights are
            loaded).
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    model.network_name = 'resnet50'
    return model
| [
"[email protected]"
]
| |
e4f7bb62b715cc2aa5f7fcf50a0dc14727350e14 | aacd105bee07aa613ae24d568e1bec0c04a70094 | /app/api/auth.py | 277f5e0ad80104df0f1599e366d6c1ef5b8bc478 | [
"MIT"
]
| permissive | Tert0/MT-Trainer | 2155c156ce780604d241ed6beeac65fd7e7b5ceb | d1895b2d22ed88f2abca0d35b65bea868fa436d8 | refs/heads/master | 2023-04-12T01:05:57.199064 | 2021-05-02T17:09:40 | 2021-05-02T17:10:02 | 349,793,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | from fastapi import APIRouter, HTTPException, Response, Depends
from app.models import User
from app.database import db
from app.auth import pwd_context, create_access_token, bearer_scheme, get_user
from datetime import timedelta
from redis import Redis
import os
import jwt
# JWT signing configuration, taken from the environment.
# NOTE(review): both are None if the env vars are unset; jwt.decode below
# would then fail at request time -- consider failing fast at startup.
SECRET_KEY = os.getenv("JWT_SECRET_KEY")
ALGORITHM = os.getenv("JWT_ALGORITHM")
router = APIRouter(tags=["auth"])
# NOTE(review): REDIS_PORT comes back as a string when set via env; confirm
# the Redis client accepts that.
redis = Redis(host=os.getenv("REDIS_HOST", "localhost"), port=os.getenv("REDIS_PORT", 6379))
@router.post("/token")
async def token_route(username: str, password: str):
    """Verify credentials and issue an access token plus a 24h refresh token.

    The refresh token is remembered in the Redis list 'refresh_tokens' so it
    can be validated by /refresh.
    NOTE(review): username/password arrive as query parameters here (they may
    end up in server logs); a form/JSON body would be safer -- confirm intent.
    """
    user = db.query(User).filter(User.username == username).first()
    if not user:
        raise HTTPException(status_code=401, detail='Incorrect username or password')
    if not pwd_context.verify(password, user.password):
        raise HTTPException(status_code=401, detail='Incorrect username or password')
    access_token_data = {"user": {"id": user.id, "username": user.username, "admin": user.admin}}
    access_token = create_access_token(access_token_data)
    refresh_token = create_access_token({'userid': user.id}, timedelta(minutes=60*24))
    redis.lpush('refresh_tokens', refresh_token)
    return {"access_token": access_token, "token_type": "bearer", "refresh_token": refresh_token}
@router.post("/register")
async def register_user(username: str, password: str):
    """Create a new user with a hashed password; 409 if the name is taken."""
    if db.query(User).filter(User.username == username).first():
        raise HTTPException(status_code=409, detail='Username already exists')
    user = User(username, pwd_context.hash(password))
    db.add(user)
    db.commit()
    return Response('Created User.', 201)
@router.post('/refresh')
async def refresh_route(refresh_token: str):
    """Exchange a known, unexpired refresh token for a new access token.

    The token must be present in the Redis 'refresh_tokens' list and must
    decode/verify against SECRET_KEY.
    NOTE(review): tokens are never removed from the Redis list, so revocation
    relies solely on JWT expiry and the list grows unboundedly -- confirm.
    """
    refresh_tokens = redis.lrange('refresh_tokens', 0, redis.llen('refresh_tokens'))
    if refresh_token.encode('utf-8') not in refresh_tokens:
        print('NOT IN CACHE')
        raise HTTPException(status_code=401, detail='Invalid Refresh Token')
    try:
        refresh_data = jwt.decode(refresh_token, SECRET_KEY, algorithms=[ALGORITHM])
    except jwt.exceptions.InvalidTokenError as e:
        if isinstance(e, jwt.exceptions.ExpiredSignatureError):
            raise HTTPException(status_code=401, detail='Refresh Token is expired')
        else:
            raise HTTPException(status_code=401, detail='Refresh Token is invalid')
    user = db.query(User).filter(User.id == refresh_data['userid']).first()
    access_token_data = {"user": {"id": user.id, "username": user.username, "admin": user.admin}}
    access_token = create_access_token(access_token_data)
    return {"access_token": access_token, "token_type": "bearer"}
@router.get('/authenticated')
async def test_authenication(_=Depends(get_user)):
    # Probe endpoint: reaching the body at all means get_user accepted the
    # caller's credentials, so just answer "true".
    return "true"
| [
"[email protected]"
]
| |
3a6b715cdd8aef01e1c7c299a0bc0119b452646f | 6431ca9c0e429878c544a9c8f65611af2054e342 | /backend/migrations/0006_auto_20161026_1348.py | 367ac1a737c379344187742a5bc77ba49e5089f5 | []
| no_license | ZhangYiJiang/jublia-agenda | 8306f6e434e6558234010e1b2d2704905de583fe | dab19b3569018020e69050ecc173a69d9b26d9ec | refs/heads/master | 2021-04-30T22:52:23.468620 | 2016-12-29T09:55:12 | 2016-12-29T09:55:12 | 69,541,616 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-26 13:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes Agenda.duration a plain IntegerField, backfilling
    # existing rows with 3 (preserve_default=False: 3 is a one-off backfill,
    # not a lasting model default).
    dependencies = [
        ('backend', '0005_auto_20161024_1045'),
    ]
    operations = [
        migrations.AlterField(
            model_name='agenda',
            name='duration',
            field=models.IntegerField(default=3),
            preserve_default=False,
        ),
    ]
| [
"[email protected]"
]
| |
977aa0a76af026f61c509844bb37c9a7e0e2603a | eb7c15f59f0863b457b272849930dce4ef92e58c | /znc/run | 09e03a3c11edded93c1dd153409c21e45d5db281 | []
| no_license | dozymoe/runit-init-scripts | 40492bc11b9a7f5f974088e7b5e870d97f54354a | ddb8915c6f2da8a5c2acdb5e09f33bc6c027ccdb | refs/heads/master | 2021-01-22T07:06:29.736090 | 2014-06-19T01:32:08 | 2014-06-19T01:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | #!/usr/bin/env python
import logging
import os
import sys
from time import sleep
# import external library
sys.path.append('/var/service')
from runit_helper import (
    MAXIMUM_CRASHES_DELAY,
    check_crash_quota,
    check_dependencies,
    get_logger,
    run,
)
# runit "run" script for the znc IRC bouncer service.
# NOTE(review): `os` is imported but unused here.
service_name = 'znc'
log = get_logger(service_name, logging.INFO)
# Block until/verify the services znc depends on are up.
check_dependencies(service_name, log)
# If the service crashed too often recently, back off before letting runit
# respawn us (exit 0 ends this supervision cycle).
if check_crash_quota(service_name):
    sleep(MAXIMUM_CRASHES_DELAY)
    exit(0)
log.info('starting..')
# Exec znc in the foreground so runit supervises the real process.
run('/usr/bin/znc', ['--foreground'])
| [
"[email protected]"
]
| ||
3b4075e9e8f1c8e8e2fd9272c916f22110f4e4f3 | 6cf2b60805184697fdac6693167845c5c8a65572 | /App_play.py | b7ec476f80f8388fe2b765c1a40208183c28250e | []
| no_license | eniche-akim/ChessAI | 760492036e6a13ae28f83a26d9b48599aec79148 | a2ebc7dac87de1312a41969386589ed2d09a671f | refs/heads/master | 2023-07-13T09:40:20.115471 | 2021-08-29T15:19:48 | 2021-08-29T15:19:48 | 401,072,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from flask import Flask, render_template
import chess
from play_chess import Valuator
from state import State
# Flask app plus a single shared evaluator instance (loaded once at startup).
app = Flask(__name__)
evaluation = Valuator()
@app.route('/')
def index():
    # Serve the chessboard UI.
    return render_template("index.html")
@app.route('/move/<path:fen>/')
def get_move(fen):
    """Given a board position in FEN, return "<best-move> <score>" as text."""
    #print(depth)
    print("Calculating...")
    board = chess.Board(fen)
    state = State(board)
    value , move = evaluation.get_best_move(state,evaluation)
    #print('{} {: .0f}'.format(move,value))
    return '{} {: .0f}'.format(move,value)
@app.route('/test/<string:tester>')
def test_get(tester):
    # Echo endpoint used to sanity-check routing/parameter handling.
    return tester
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive debugger; do not use
    # in production.
    app.run(debug=True)
| [
"[email protected]"
]
| |
bb3dfd9df63b6070250b5a1f41319060fe55f689 | 3569e041632d827c93ff87bd164f19142d5772aa | /Cloud_Vision/text-test.py | eb851c7fbb4f9b4004112209c613bdc961e020e4 | []
| no_license | JoonasMattila95/GoPiGo3_PanttiBotti | af6533ded9b16e4285944aced557fd10063c9d45 | 8ca67cb324d27ba1f3354567d09bf948150a3ac9 | refs/heads/master | 2020-04-03T17:11:58.759199 | 2018-12-05T07:34:14 | 2018-12-05T07:34:14 | 155,435,668 | 0 | 0 | null | 2018-12-17T10:44:39 | 2018-10-30T18:22:09 | PHP | UTF-8 | Python | false | false | 1,742 | py |
def pantti(input_text):
    """Map one OCR-detected token to its bottle-deposit value.

    The original read and wrote an unbound local named ``sum`` (which also
    shadowed the builtin), raising UnboundLocalError on the first deposit
    token; it now returns the value so the caller can accumulate it.

    Args:
        input_text: a single text token from the Vision API.

    Returns:
        float: 0.15 or 0.20 for a recognized deposit amount, else 0.0.
        Recognized tokens (amounts and the "Pantti"/"Pant" labels) are
        echoed to stdout, as before.
    """
    deposit_values = {"0,15": 0.15, "0,20": 0.20}
    if input_text in deposit_values:
        print(input_text)
        return deposit_values[input_text]
    if input_text in ("Pantti", "Pant"):
        print(input_text)
    return 0.0
def detect_text(path):
    """Detect text in the image at *path* with Google Cloud Vision and
    feed each detected string to ``pantti`` for deposit-label matching.

    Parameters
    ----------
    path : str
        Filesystem path of the image to annotate.
    """
    client = vision.ImageAnnotatorClient()
    # The ``with`` statement closes the file; the original trailing
    # ``image_file.close`` (attribute access, never called) was a no-op
    # and has been removed.
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    print('Texts:')
    for text in texts:
        # ``deposit`` instead of ``sum`` to avoid shadowing the builtin;
        # the value is currently unused beyond pantti's own printing.
        deposit = pantti(text.description)
        #vertices = (['({},{})'.format(vertex.x, vertex.y)
        #            for vertex in text.bounding_poly.vertices])
        # print('bounds: {}'.format(','.join(vertices)))
def camera_thread():
    # Continuously capture frames from the Pi camera and run Cloud Vision
    # text detection on each one.  Runs forever; started as a daemonless
    # thread at module import time.
    # NOTE(review): there is no delay between iterations — presumably the
    # capture itself throttles the loop; confirm CPU/network load.
    while 1:
        camera.capture('/etc/python2.7/pythonjutut/cam.jpg')
        # Instantiates a client
        client = vision.ImageAnnotatorClient()
        # The name of the image file to annotate
        # NOTE(review): ``os.path.dirname('__file__')`` uses the string
        # literal '__file__' (not the variable), so it yields '' and the
        # file is read as ./cam.jpg relative to the CWD, while the capture
        # above writes to /etc/python2.7/pythonjutut/cam.jpg — this only
        # works if the script runs from that directory; confirm.
        file_name = os.path.join(
            os.path.dirname('__file__'),
            'cam.jpg')
        # Loads the image into memory
        with io.open(file_name, 'rb') as image_file:
            content = image_file.read()
        image = types.Image(content=content)
        detect_text(file_name)
import threading
import io
import os
from picamera import PiCamera
from google.cloud import vision
from google.cloud.vision import types
camera = PiCamera()
camera.resolution = (1600, 1200)
t = threading.Thread(target=camera_thread)
t.start()
| [
"[email protected]"
]
| |
6e29d18f72a1d40a4e52fe2a92279c2aee522198 | fcee0731ce523e65399abaf93586465fdd31f1ba | /PGML_V1/config/copy_yaml_files.py | b41e4ae4dc3639cdf1b9d2ca18ffe6610487f4d5 | []
| no_license | surajp92/2D_Turbulence | 2bf6c1d7373f0d6d04006284837777dc8f18ac99 | 06443024e7366cd84ab4b313f03bc90a22a2c68f | refs/heads/master | 2021-12-15T05:39:59.837009 | 2021-12-11T17:36:18 | 2021-12-11T17:36:18 | 194,935,492 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 13:30:01 2021
@author: suraj
"""
import yaml

# Grid resolution and Reynolds number identifying the base input file.
# (``re`` here is the Reynolds number, not the stdlib regex module.)
nx = 1024
re = 8000

# Load the template configuration once.  The ``with`` statement closes
# the file, so the original redundant ``file.close()`` (which also
# shadowed the ``file`` builtin) is gone.
base_name = f'input_{nx}_{re}.yaml'
with open(base_name) as infile:
    input_data = yaml.load(infile, Loader=yaml.FullLoader)

# Emit ten copies of the configuration, each with a different RNG seed
# (10, 20, ..., 100), named input_<nx>_<re>_001.yaml ... _010.yaml.
for i in range(1, 11):
    input_data['seedn'] = i * 10
    out_name = f'input_{nx}_{re}_{i:03d}.yaml'
    with open(out_name, 'w') as outfile:
        yaml.dump(input_data, outfile, default_flow_style=False)
"[email protected]"
]
| |
b24260d2fbde15d982901d41caeb8488dd6afe56 | dd079ce185597bf8fcb30103fd74d554f98373cd | /jupyter_notebooks/select_data/preprocessing_bq_data_import.py | 4bef60a39d1409a0a872826a048cd9d01315868f | []
| no_license | YoByron/UNICEF_CATS | 3bc93ae175fb75295c46e20d5ce941c72616a52c | 3d664e2a75675c0cf80f0d4bb1c93fe029d82868 | refs/heads/main | 2023-06-03T21:06:05.491795 | 2021-06-21T19:34:52 | 2021-06-21T19:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | """ This Script is designed to verify the connection between PyCharm Py dash_dashboard and Google BigQuery """
# set the OS environment to BigQuery
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "../../../Desktop/UNICEF/unicef-gdelt-bq_conn.json"
# connect to big query client
from google.cloud import bigquery
client = bigquery.Client()
# define a test query
QUERY = ('SELECT * FROM `unicef-gdelt.february2021.events-mentions` LIMIT 1000000')
# initiate query request
query_job = client.query(QUERY)
# get result
query_result = query_job.result()
# save output of result to csv
df = query_result.to_dataframe().to_csv('data/bq_data_feb2021.csv.csv', encoding='utf-8')
#print(df)
# Confirmed: verified output | [
"[email protected]"
]
| |
2ec3eebdb91c2cb26fa25dcf2f58788b5a9e3955 | c1a6f1fec51863ca3c9d0eca7172fa548658e450 | /bin/lib/python3.8/site-packages/ccxt/async_support/__init__.py | f0e78a6f23051c35376770ead420c8badf592050 | []
| no_license | marcellinamichie291/py | c38f68f10e54b5544d5580f1043100175e1bad50 | b8a8d5b3a4aaf23ba021e2bf16b2ed6409b16825 | refs/heads/main | 2023-06-19T22:51:56.202809 | 2021-07-22T03:19:48 | 2021-07-22T03:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,909 | py | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.51.40'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.async_support.aax import aax # noqa: F401
from ccxt.async_support.aofex import aofex # noqa: F401
from ccxt.async_support.ascendex import ascendex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binancecoinm import binancecoinm # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.binanceusdm import binanceusdm # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitbns import bitbns # noqa: F401
from ccxt.async_support.bitcoincom import bitcoincom # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bitget import bitget # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitpanda import bitpanda # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitvavo import bitvavo # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bybit import bybit # noqa: F401
from ccxt.async_support.bytetrade import bytetrade # noqa: F401
from ccxt.async_support.cdax import cdax # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.currencycom import currencycom # noqa: F401
from ccxt.async_support.delta import delta # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.equos import equos # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.gopax import gopax # noqa: F401
from ccxt.async_support.hbtc import hbtc # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hollaex import hollaex # noqa: F401
from ccxt.async_support.huobijp import huobijp # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.ndax import ndax # noqa: F401
from ccxt.async_support.novadax import novadax # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoin import okcoin # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.okex5 import okex5 # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.phemex import phemex # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.probit import probit # noqa: F401
from ccxt.async_support.qtrade import qtrade # noqa: F401
from ccxt.async_support.rightbtc import rightbtc # noqa: F401
from ccxt.async_support.ripio import ripio # noqa: F401
from ccxt.async_support.southxchange import southxchange # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vcc import vcc # noqa: F401
from ccxt.async_support.wavesexchange import wavesexchange # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xena import xena # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
exchanges = [
'aax',
'aofex',
'ascendex',
'bequant',
'bibox',
'bigone',
'binance',
'binancecoinm',
'binanceus',
'binanceusdm',
'bit2c',
'bitbank',
'bitbay',
'bitbns',
'bitcoincom',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bitget',
'bithumb',
'bitmart',
'bitmex',
'bitpanda',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitvavo',
'bitz',
'bl3p',
'braziliex',
'btcalpha',
'btcbox',
'btcmarkets',
'btctradeua',
'btcturk',
'buda',
'bw',
'bybit',
'bytetrade',
'cdax',
'cex',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinfalcon',
'coinfloor',
'coinmarketcap',
'coinmate',
'coinone',
'coinspot',
'crex24',
'currencycom',
'delta',
'deribit',
'digifinex',
'equos',
'exmo',
'exx',
'flowbtc',
'ftx',
'gateio',
'gemini',
'gopax',
'hbtc',
'hitbtc',
'hollaex',
'huobijp',
'huobipro',
'idex',
'independentreserve',
'indodax',
'itbit',
'kraken',
'kucoin',
'kuna',
'latoken',
'lbank',
'liquid',
'luno',
'lykke',
'mercado',
'mixcoins',
'ndax',
'novadax',
'oceanex',
'okcoin',
'okex',
'okex5',
'paymium',
'phemex',
'poloniex',
'probit',
'qtrade',
'rightbtc',
'ripio',
'southxchange',
'stex',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vcc',
'wavesexchange',
'whitebit',
'xena',
'yobit',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
| [
"[email protected]"
]
| |
cf4bdcc6d6b65f2338db2ae85b995b624c1c46a6 | b14aeb73518d00af9b4425b73fd4c82e8e36cbee | /Sorting Algo/912SortanArray.py | 1dc0f586b560a5ed8b88fe091f9b022f0ce6b0c4 | []
| no_license | jli124/leetcodeprac | 86020e22668be0b63b4a062d897c03f304336b81 | 8d23bc6c0084d405c5e9b22fb713d48835e17f9e | refs/heads/master | 2021-01-05T02:09:03.526693 | 2020-07-07T05:33:53 | 2020-07-07T05:33:53 | 240,839,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,639 | py | #912. Sort an array
#-------------------------------------------------------------------------------
# Approach
#-------------------------------------------------------------------------------
"""
use merge sort
"""
#-------------------------------------------------------------------------------
# Implementation
#-------------------------------------------------------------------------------
class Solution(object):
    """LeetCode 912 — sort an array with a classic top-down merge sort."""

    def sortArray(self, A):
        """Sort the list A in ascending order, in place, and return it.

        :type A: List[int]
        :rtype: List[int]
        """
        temp = [0 for _ in range(len(A))]  # scratch buffer reused by every merge
        self.merge_sort(0, len(A) - 1, A, temp)
        return A

    def merge_sort(self, start, end, A, temp):
        """Recursively sort A[start..end] (inclusive) using temp as scratch."""
        if start >= end:
            return
        # Floor division: the original ``/`` was Python-2-only and would
        # produce a float index (TypeError on list subscript) under Python 3.
        mid = (start + end) // 2
        self.merge_sort(start, mid, A, temp)
        self.merge_sort(mid + 1, end, A, temp)
        self.merge(start, mid, end, A, temp)

    def merge(self, start, mid, end, A, temp):
        """Merge the sorted halves A[start..mid] and A[mid+1..end] back into A."""
        left, right = start, mid + 1
        index = start
        while left <= mid and right <= end:
            if A[left] < A[right]:
                temp[index] = A[left]
                left += 1
            else:
                temp[index] = A[right]
                right += 1
            index += 1
        # Drain whichever half still has elements.
        while left <= mid:
            temp[index] = A[left]
            left += 1
            index += 1
        while right <= end:
            temp[index] = A[right]
            right += 1
            index += 1
        # Copy the merged run back into A.
        for index in range(start, end + 1):
            A[index] = temp[index]
"[email protected]"
]
| |
593b77c458fd3098153bf81b83f5082c7d467aa2 | 968555a63995eef023a61586de85ab43145a71ad | /Clase04/busqueda_en_listas.py | c3ad5b3c9d38e0938fe171b85c9816e568e78674 | []
| no_license | sanchez-17/ejercicios-python-UNSAM | eba5160c9561572d8989177d6de68a421c6d292b | 055169d636746b7ae737e3bfaff45cba067c4405 | refs/heads/main | 2023-06-10T22:41:10.817147 | 2021-07-04T00:56:36 | 2021-07-04T00:56:36 | 382,734,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 12 02:00:14 2021
@author: Gaston
"""
'''
Ejercicio 4.3
'''
def buscar_u_elemento(lista, e):
    """Return the index of the last occurrence of ``e`` in ``lista``,
    or -1 if ``e`` is not present.
    """
    # Walk the indices from the end; the first hit is the last occurrence.
    for idx in range(len(lista) - 1, -1, -1):
        if lista[idx] == e:
            return idx
    return -1
res = buscar_u_elemento([1,2,3,2,3,4],5)
print(res)
def buscar_n_elemento(lista, e):
    """Return how many times ``e`` appears in ``lista``.

    Idiom fix: delegates to the built-in ``list.count`` instead of a
    manual counting loop (same result, C-speed, clearer intent).
    """
    return lista.count(e)
#%%
res = buscar_n_elemento([1,2,3,2,3,4],1)
print(res)
#%%
'''
Ejercicio 4.4
'''
def maximo(lista):
    """Return the maximum of a list of positive numbers.

    Mirrors the original contract exactly: the running maximum starts at
    0, so an empty list (or one with only non-positive values) yields 0.
    """
    return max((valor for valor in lista if valor > 0), default=0)
res = maximo([1,2,3,2,-1])
print(res)
| [
"[email protected]"
]
| |
f157860d5bc5dbe0ee3079c2fb9d2bb350ce8099 | 532fc2a40cf66d91a5eb036f351d29fc99f26e5c | /6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/$$slapp$$/ModelSwarm.py | 3003879e43b487f0baa93ab61d2f88e7f9371537 | [
"CC0-1.0"
]
| permissive | jefferychenPKU/SLAPP | 40cdd6ec2cbcf7e85e4f07129a1f35e342af479d | 7c57436d3ef76c05695e1e72f6e5040ac5f41b73 | refs/heads/v.1.11 | 2020-12-24T21:54:51.222962 | 2016-01-31T18:54:01 | 2016-01-31T18:54:01 | 50,984,545 | 0 | 1 | null | 2016-02-03T08:11:44 | 2016-02-03T08:11:44 | null | UTF-8 | Python | false | false | 14,429 | py | #ModelSwarm.py
import Tools
from Agent import *
from WorldState import *
from ActionGroup import *
import random
import os
from mActions import *
from turtle import *
# in this module, a few of the try/except structures are not cotrolled
# for debug
# these try/except constucts, indeed, are not intended to control user
# errors, but a regular flow of inputs
# other try/execpt structures are instead controlled for debug
task="0 0".split()
class ModelSwarm:
    """SLAPP model swarm: owns the world state, the agent population and
    the per-cycle schedule of actions.

    NOTE(review): this is Python 2 code (print statements, ``execfile``,
    statement ``exec``); documented as-is, no behavior changed.
    Indentation reconstructed from a whitespace-flattened dump.
    """
    def __init__(self, nAgents, worldXSize, worldYSize, project0):
        # Read the project's agent-type and operating-set files and
        # initialize empty agent/world containers.
        global task, project
        project=project0
        # the environment
        task="0 0".split() #in case of repeated execution without restarting the shell
        self.ff="" #in case of repeated execution without restarting the shell
        self.nAgents = nAgents
        self.agentList = []
        self.worldStateList=[]
        self.worldXSize= worldXSize
        self.worldYSize= worldYSize
        # types of the agents
        agTypeFile=open(project+"/agTypeFile.txt","r")
        self.types=agTypeFile.read().split()
        agTypeFile.close()
        #print self.types
        # operating sets of the agents
        try:
            agOperatingSetFile=open(project+"/agOperatingSets.txt","r")
            self.operatingSets=agOperatingSetFile.read().split()
        except:
            print 'Warning: operating sets not found.'
            agOperatingSetFile = False
            self.operatingSets=[]
        if agOperatingSetFile: agOperatingSetFile.close()
        #print self.operatingSets
        # Convert the project's .xls sheets into the .txt files read above/below.
        dictExe={}
        dictExe["project"]=project
        execfile("./$$slapp$$/convert_xls_txt.py",dictExe)

    # objects
    def buildObjects(self):
        # Create the world state, the internal ("bland") agents placed at
        # random coordinates, then the project-specific external agents
        # read from <agType>.txt files, and finally the operating sets.
        for i in range(1):
            aWorldState = WorldState(i)
            self.worldStateList.append(aWorldState)
        # World coordinates are centered on the origin.
        leftX =int(-self.worldXSize/2)
        rightX=int(self.worldXSize-1 -self.worldXSize/2)
        bottomY =int(-self.worldYSize/2)
        topY=int(self.worldYSize-1 -self.worldYSize/2)
        # internal agents
        for i in range(self.nAgents):
            anAgent = Agent(i, self.worldStateList[0],
                            random.randint(leftX,rightX),
                            random.randint(bottomY,topY), leftX,rightX,
                            bottomY,topY,agType="bland")
            self.agentList.append(anAgent)
        print
        # external agents, RELATED TO THE SPECIFIC project
        files=os.listdir(project)
        for agType in self.types:
            if not agType+".txt" in files: print "No", agType,\
               "agents: lacking the specific file", agType+".txt"
        for opSet in self.operatingSets:
            if not opSet+".txt" in files: print "No", opSet,\
               "agents: lacking the specific file", opSet+".txt"
        print
        for agType in self.types:
            if agType+".txt" in files:
                f=open(project+"/"+agType+".txt","r")
                for line in f:
                    if line.split() != []:
                        num=int(line.split()[0])
                        print "creating "+agType+": agent #", num
                        #print line.split()
                        # specialized creation function for each model
                        # form mActions.py in the model folder
                        createTheAgent(self,line,num,leftX,rightX,bottomY,topY,agType)
                        #explictly pass self, here we use a function
                f.close()
        for opSet in self.operatingSets:
            if opSet+".txt" in files:
                f=open(project+"/"+opSet+".txt","r")
                for line in f:
                    if line.split() != []:
                        num=int(line.split()[0])
                        for anAgent in self.agentList:
                            if anAgent.number == num:
                                anAgent.setAnOperatingSet(opSet)
                                print "including agent #", num, \
                                      "into the operating set", opSet
                f.close()
        if self.operatingSets != []:
            for anAgent in self.agentList:
                anAgent.setContainers()
        for anAgent in self.agentList:
            anAgent.setAgentList(self.agentList)
        print

    # actions
    def buildActions(self):
        # Read the ordered list of step names from modelActions.txt and
        # wire each known step name to its ActionGroup callback.
        modelActions=open(project+"/modelActions.txt")
        mList=modelActions.read().split()
        modelActions.close()
        self.actionList = mList
        #print self.actionList
        # look at basic case schedule, where "move" represents an example of mandatory
        # action (in our case generating also a dynamic "jump" action) and
        # "read_script" represents an external source of actions
        # without reading an external schedule (anyway, in the case
        # above, if you do not put a schedule.txt or a schedule.xls file in
        # program folder), the "read_script" step simply has no effect)
        # basic actionGroup
        self.actionGroup0 = ActionGroup ("reset")
        self.actionGroup0.do = do0 # do is a variable linking a method
        self.actionGroup1 = ActionGroup ("move")
        self.actionGroup1.do = do1 # do is a variable linking a method
        # to create other actionGroup ..,
        #self.actionGroup2 = ActionGroup (self.actionList[?])
        #self.actionGroup2.do = do2 # do is a variable linking a method
        # etc.
        # this actionGroup is the schedule, which is generalized
        # so it is not moved in the actions.py specific to the project
        # the task number is huge (100), considering it to be the last one
        # the name is identified as the last one, with -1
        self.actionGroup100 = ActionGroup ("read_script") # the last
        def do100(address, cycle):
            # Consume schedule lines (via read_s) until a cycle marker
            # "# <n>" beyond the current cycle or the "0 0" sentinel is
            # met; each line selects a set of agents and a method to apply.
            global task
            actionDictionary[self.actionGroup100.getName()]=self.actionGroup100
            while True:
                if task[0]=="#":
                    if int(task[1]) > cycle: break
                task=read_s(self.ff)
                #print "***", task
                if task[0]=="#":
                    if int(task[1]) > cycle: break
                if task[0]=="0": break
                #if task[0] is all or an agent type
                if check(task[0],self.types):
                    # keep safe the original list
                    localList=[]
                    for ag in address.agentList:
                        if task[0]=="all": localList.append(ag)
                        elif task[0]==ag.getAgentType(): localList.append(ag)
                    # never in the same order (please comment if you want to keep
                    # always the same sequence
                    random.shuffle(localList)
                    # apply method only to a part of the list, or, which is the
                    # same, with the given probability to each element of the list
                    self.share=0
                    try: self.share = float(task[1]) # does task[1] contains
                                                     # a int or float number?
                    except: pass
                    if self.share > 0:
                        tmpList=localList[:]
                        del localList[:]
                        for i in range(len(tmpList)):
                            if random.random() <= self.share:
                                localList.append(tmpList[i])
                    if self.share < 0: # in case, an abs. number of agent *(-1)
                        tmpList=localList[:]
                        del localList[:]
                        for i in range(int(-self.share)):
                            random.shuffle(tmpList)
                            if tmpList != []:
                                localList.append(tmpList.pop(0))
                    # apply
                    if len(localList)>0:
                        self.applyFromSchedule(localList,task)
                #if task[0] is an opSet
                if task[0] in address.operatingSets:
                    # keep safe the original list
                    localList=[]
                    for ag in address.agentList:
                        if task[0] in ag.getOperatingSetList():
                            localList.append(ag)
                    if localList==[]:
                        print "Warning, no agents in operating set", task[0]
                    # never in the same order (please comment if you want to keep
                    # always the same sequence
                    random.shuffle(localList)
                    # apply method only to a share of the list
                    self.share=0
                    try: self.share = float(task[1]) # does task[1] contains
                                                     # an int or float number?
                    except: pass
                    if self.share > 0:
                        tmpList=localList[:]
                        del localList[:]
                        for i in range(len(tmpList)):
                            if random.random() <= self.share:
                                localList.append(tmpList[i])
                    if self.share < 0: # in case, an abs. number of agent *(-1)
                        tmpList=localList[:]
                        #print "*********************", tmpList
                        del localList[:]
                        for i in range(int(-self.share)):
                            random.shuffle(tmpList)
                            if tmpList != []:
                                localList.append(tmpList.pop(0))
                    #print "*********************", localList
                    # apply
                    if len(localList)>0:
                        self.applyFromSchedule(localList,task)
                if task[0]=='WorldState':
                    self.share=0
                    localList=address.worldStateList[:]
                    self.applyFromSchedule(localList,task)
        self.actionGroup100.do = do100 # do is a variable linking a method

    # run a step
    def step(self,cycle):
        # Execute one simulation cycle: walk the action list and dispatch
        # each sub-step to its ActionGroup (or to otherSubSteps from mActions).
        global task
        step=self.actionList[:]
        while len(step)>0:
            subStep=extractASubStep(step)
            #print "*****************", subStep
            found=False
            if subStep == "reset":
                found=True
                self.actionGroup0.do(self)
            if subStep == "move":
                found=True
                self.actionGroup1.do(self)
                # self here is the model env.
                # not added automatically
                # being do a variable
            # external schedule, in pos. -1
            if subStep == "read_script":
                found=True
                # Lazily open the schedule on first use; missing file is fine.
                if self.ff=="":
                    try: self.ff=open(project+"/schedule.txt","r")
                    except: pass
                self.actionGroup100.do(self,cycle)
                # self here is the model env.
                # not added automatically
                # being do a variable
            # other steps
            if not found:
                found=otherSubSteps(subStep, self)
            if not found: print "Warning: step %s not found in Model" % subStep

    # from external schedule (script)
    def applyFromSchedule(self,localList,task):
        # Apply the method named in a schedule line to the selected agents
        # (or to the world state), building the call via string exec.
        if task[0]=='WorldState':
            if task[2]=='setGeneralMovingProb':
                prob = 1
                try: prob = float(task[1]) # does task[1] contains
                                           # a number?
                except: pass
                d={}
                d['generalMovingProb']=prob
                try: exec "askEachAgentInCollection(localList,"+task[0]+"."+task[2]+", **d)"
                except: pass
            else:
                try: exec "self.worldStateList[0]."+task[2]+"()"
                except: pass
        #if task[0] is 'all' or a type of agent
        if check(task[0],self.types):
            # With a share/count argument the method name is in task[2],
            # otherwise in task[1].  In debug mode exec runs unguarded so
            # errors surface instead of being swallowed.
            if self.share!=0:
                if common.debug: exec "askEachAgentInCollection(localList,Agent"+"."+task[2]+")"
                else:
                    try: exec "askEachAgentInCollection(localList,Agent"+"."+task[2]+")"
                    except:
                        print "Warning, method", task[2],"does not exist in class Agent"
            else:
                if common.debug: exec "askEachAgentInCollection(localList,Agent"+"."+task[1]+")"
                else:
                    try: exec "askEachAgentInCollection(localList,Agent"+"."+task[1]+")"
                    except:
                        print "Warning, method", task[1],"does not exist in class Agent"
        #if task[0] is an opSet
        if task[0] in self.operatingSets:
            if self.share!=0:
                if common.debug: exec "askEachAgentInCollection(localList,Agent"+"."+task[2]+")"
                else:
                    try: exec "askEachAgentInCollection(localList,Agent"+"."+task[2]+")"
                    except:
                        print "Warning, method", task[2],"does not exist in class Agent"
            else:
                if common.debug: exec "askEachAgentInCollection(localList,Agent"+"."+task[1]+")"
                else:
                    try: exec "askEachAgentInCollection(localList,Agent"+"."+task[1]+")"
                    except:
                        print "Warning, method", task[1],"does not exist in class Agent"

    # agent list
    def getAgentList(self):
        # Accessor for the full agent population.
        return self.agentList

    # file address
    def getFile(self):
        # Accessor for the (possibly still unopened, "") schedule file handle.
        return self.ff
# tools, read_s
def read_s(f):
    """Read one schedule line from the file-like object ``f`` and return
    it split into tokens.

    Any failure path (``f`` still the '' placeholder, end of file, or a
    read error) yields the sentinel tokens ``['0', '0']``.
    """
    line = '0 0'
    if f != "":
        try:
            raw = f.readline()
            if raw != '':
                line = raw
        except Exception:
            pass
    return line.split()
# check if it is an agent
def check(s, aList):
    """Return True when ``s`` selects agents: it starts with 'all', with
    the default 'bland' type, or with any agent-type name in ``aList``.
    """
    prefixes = ["all", "bland"] + list(aList)
    return any(s.startswith(prefix) for prefix in prefixes)
| [
"[email protected]"
]
| |
cc4a708de2750423d0a29590b9866e9cb81f1acd | 28809dbb9d3a1901af0d2a6ea71eff0cee4c545d | /project_scripts/hrra/rnaseq_chap_altogether_plot.py | 0f204e3133aef9e78ac4d252eebf1f8561d5bb58 | []
| no_license | afilipch/afp | 6088f3375741c8a837006e9d4b3e716dfffc0b2a | b941a3eeb4d8447b3628c79a528c60cb540537e3 | refs/heads/master | 2021-06-28T00:57:37.869912 | 2020-10-14T16:50:20 | 2020-10-14T16:50:20 | 155,242,147 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,654 | py | #! /home/a_filipchyk/soft/home/a_filipchyk/anaconda3/bin/python
'''Draws a plot for a figure 3'''
import argparse
import os
import sys
from collections import defaultdict, namedtuple
import math
import numpy as np;
import pandas as pd;
from matplotlib import pyplot as plt;
from matplotlib.patches import Rectangle, Arrow
from pybedtools import BedTool, Interval
parser = argparse.ArgumentParser(description='Draws a plot for a figure 3');
parser.add_argument('--regions', nargs = '?', required=True, type = str, help = "Path to the genomic regions of interest, bed format")
parser.add_argument('--chap', nargs = 3, required=True, type = str, help = "Path to the chapseq coverage for time points 0,1,3");
parser.add_argument('--wt', nargs = 3, required=True, type = str, help = "Path to the WT rnaseq coverage for time points 0,1,3");
parser.add_argument('--ko', nargs = 3, required=True, type = str, help = "Path to the KO rnaseq coverage for time points 0,1,3");
parser.add_argument('--annotation', nargs = '?', required=True, type = str, help = "Path to the genomic annotation, plots will be annotated with the provided genomic features");
parser.add_argument('--outdir', nargs = '?', required=True, type = str, help = "Path to the output directory");
parser.add_argument('--format', nargs = '?', default='png', type = str, help = "Plot format, png by default");
parser.add_argument('--custom', nargs = '?', default=False, const=True, type = bool, help = "If set the annotation genes are supposed to be already processed, if not they are supposed to be in NCBI gff3 format");
args = parser.parse_args();
def get_cov(path):
    """Load a 3-column tab-separated coverage file (chr, position,
    coverage) and return the coverage column as a numpy array."""
    frame = pd.read_csv(path, sep="\t", names=["chr", "position", "coverage"])
    return frame["coverage"].values
chap_cov = [get_cov(x) for x in args.chap]
wt_cov = [get_cov(x) for x in args.wt]
ko_cov = [get_cov(x) for x in args.ko]
regions = BedTool(args.regions)
annotation = BedTool(args.annotation)
if(not args.custom):
annotation = [x for x in annotation if x[2] in ['gene', 'pseudogene']]
#s = 200000
#e = s + 1600
#regions = BedTool([Interval('chr1', s, e, 'test', '0', '+')])
rawannotated = regions.intersect(annotation, wo = True)
region2annotation = defaultdict(list);
for el in rawannotated:
an = max(el.start, int(el[9])), min(el.end, int(el[10])), dict( [x.strip().split('=') for x in el[14].split(";")])['Name'], el[12]
region2annotation[el.name].append(an)
#wt = [x[s:e] for x in wt_cov]
#ko = [x[s:e] for x in ko_cov]
#chap = [x[s:e] for x in ko_cov]
#locan = region2annotation[regions[0].name]
def draw_annotation(ax, locan, color, start, end):
    """Draw gene annotations on *ax*: one rectangle per feature plus a
    strand-direction arrow.

    ``locan`` entries are (start, end, name, strand) tuples; the c-th
    feature occupies the horizontal band [c, c+1].  The arrow length is
    capped at 1/20 of the plotted region so short regions stay readable.
    """
    for c, an in enumerate(locan):
        rect = Rectangle( (an[0], 0.25+c), an[1] - an[0], 0.5, facecolor = color, edgecolor = color)
        l = min((end-start)/20, an[1]-an[0]);
        if(an[3] == '+'):
            # forward strand: arrow points right, anchored at the feature end
            arrow = Arrow(an[1]-l, 0.5+c, l, 0, width=.75, facecolor = 'black', edgecolor = 'black')
        else:
            # reverse strand: arrow points left, anchored at the feature start
            arrow = Arrow(an[0]+l, 0.5+c, -l, 0, width=.75, facecolor = 'black', edgecolor = 'black')
        ax.add_patch(rect)
        ax.add_patch(arrow)
def setticks(start, end):
    """Choose evenly spaced tick positions for the interval [start, end].

    Picks the smallest scale from a fixed set that yields between 4 and 8
    ticks, snaps the first tick up to a multiple of that scale, and
    returns ``(locations, labels)``.

    Bug fix: on failure the original returned a single empty list, which
    crashed callers that unpack two values
    (``xticklocs, xticklabels = setticks(...)``).  It now consistently
    returns ``([], [])``.
    """
    span = float(end - start)
    scales = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000]
    for candidate in scales:
        if 3 < span / candidate < 9:
            scale = candidate
            break
    else:
        # No scale produces a readable number of ticks.
        return [], []
    s = start // scale * scale
    if s < start:
        s += scale
    e = end // scale * scale
    if e == end:
        e += scale
    locs = list(range(s, e + scale, scale))
    labels = [str(x) for x in locs]
    return locs, labels
def doublebar(ax, d1, d2, prange, ylim, start, end):
    """Draw a mirrored bar plot on ``ax``: ``d1`` above the baseline in light
    blue, ``d2`` below it (negated) in dark blue, with minimalist axes."""
    ax.bar(prange, d1, 1, color='lightblue')
    ax.bar(prange, -d2, 1, color='darkblue')
    # Keep only the left spine; x ticks are drawn on a separate shared axis.
    for side in ('top', 'right', 'bottom'):
        ax.spines[side].set_visible(False)
    ax.set_xticks([])
    ax.set_ylim(*ylim)
    ax.set_xlim(start, end)
    ax.tick_params(axis='both', labelsize='xx-large')
def singlebar(ax, d, prange, ylim, start, end):
    """Draw a single orange coverage track on ``ax`` with minimalist axes."""
    ax.bar(prange, d, 1, color=(242/256, 97/256, 68/256))
    # Keep only the left spine; x ticks are drawn on a separate shared axis.
    for side in ('top', 'right', 'bottom'):
        ax.spines[side].set_visible(False)
    ax.set_xticks([])
    ax.set_ylim(*ylim)
    ax.set_xlim(start, end)
    ax.tick_params(axis='both', labelsize='xx-large')
def wholeplot(chap, rseq1, rseq2, locan, start, end, output):
    """Render one figure for a genomic region and save it to ``output``.

    Layout (top to bottom): one annotation axis listing the genes in
    ``locan``, then one single-bar track per ChAP-seq sample in ``chap``,
    then one mirrored track per RNA-seq sample pair (``rseq1`` above,
    ``rseq2`` below).  All coverage tracks in a group share the same
    y-limits so samples are visually comparable.
    The output format comes from the module-level ``args.format``.
    """
    prange = range(start, end);
    size_rseq = len(rseq1)
    size_chap = len(chap)
    size = size_chap + size_rseq
    size_an = len(locan)
    # One subplot row per coverage track; height scales with track count.
    fig, axes = plt.subplots(nrows=size, ncols = 1, sharex=False, sharey=False, figsize = (16, 5*(size+1)), frameon=False)
    #plt.suptitle("Top %s peak with z-score %s" % (ordinal(fignum), zscore), fontsize = 'xx-large')
    # Reserve vertical space at the top for the annotation axis (0.02 per gene lane).
    plt.tight_layout(rect=[0.05, 0.02, 0.95, 0.92 - 0.02*size_an], h_pad = 2)
    ### ChAP-seq tracks: shared 0..max y-range across all samples.
    ylim = 0, max([max(x) for x in chap])
    for ax, d in zip(axes, chap):
        singlebar(ax, d, prange, ylim, start, end);
    ### RNA-seq tracks: symmetric y-range around zero, shared across both sample sets.
    ylim = max([max(x) for x in rseq2] + [max(x) for x in rseq1])
    ylim = -ylim, ylim
    for ax, d1, d2 in zip(axes[size_chap:], rseq1, rseq2):
        doublebar(ax, d1, d2, prange, ylim, start, end);
    # Align the annotation axis horizontally with the coverage tracks.
    box = axes[0].get_position()
    x0 = box.xmin
    x1 = box.xmax-x0
    ylen = box.ymax - box.ymin
    anax = fig.add_axes([x0, 0.94 - 0.02*size_an, x1, 0.02*size_an])
    anax.set_xlim(start, end)
    anax.spines['bottom'].set_visible(False)
    anax.spines['right'].set_visible(False)
    anax.spines['left'].set_visible(False)
    anax.tick_params(axis='both', labelsize='xx-large')
    anax.set_ylim(0, size_an)
    # Base-position ticks live on the annotation axis only, drawn on top.
    xticklocs, xticklabels = setticks(start, end)
    anax.set_xticks(xticklocs)
    anax.set_xticklabels(xticklabels)
    anax.set_yticks([0.5 + x for x in range(size_an)])
    anax.set_yticklabels([x[2] for x in locan], fontsize='xx-large')
    anax.xaxis.tick_top()
    anax.set_xlabel('base position', fontsize='xx-large')
    anax.xaxis.set_label_position('top')
    draw_annotation(anax, locan, '0.75', start, end);#max([max(x[0]) for x in signal_noise_local]) )
    anax.set_xlim(start, end)
    plt.savefig(output, format = args.format)
for region in regions:
start, end = region.start, region.end
print(start, end)
wt = [x[start:end] for x in wt_cov]
ko = [x[start:end] for x in ko_cov]
chap = [x[start:end] for x in chap_cov]
locan = region2annotation[region.name]
wholeplot(chap, wt, ko, locan, start, end, os.path.join(args.outdir, "%s.%s" % (region.name, args.format)));
| [
"[email protected]"
]
| |
4b64a051e30b954139e58857c0e08c141725d3be | 8f1d6f17d3bdad867518b7b0a164adfe6aeeed95 | /recognition/vpl/backbones/iresnet.py | c6d3b9c240c24687d432197f976ee01fbf423216 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
]
| permissive | xwyangjshb/insightface | 2c7f030a5d1f5a24b18967bd0d775ee33933d37f | ae233babaf7614ef4ef28dac0171205835d78d64 | refs/heads/master | 2022-09-29T07:49:22.944700 | 2022-09-22T11:36:12 | 2022-09-22T11:36:12 | 221,020,460 | 1 | 0 | MIT | 2019-11-11T16:16:56 | 2019-11-11T16:16:55 | null | UTF-8 | Python | false | false | 7,149 | py | import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution whose padding equals its dilation
    (keeps spatial size at stride 1)."""
    conv_kwargs = dict(kernel_size=3,
                       stride=stride,
                       padding=dilation,
                       groups=groups,
                       bias=False,
                       dilation=dilation)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution."""
    pointwise = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False)
    return pointwise
class IBasicBlock(nn.Module):
    """Improved-ResNet basic block: BN -> conv3x3 -> BN -> PReLU -> conv3x3 -> BN
    with an additive identity (or downsampled) shortcut.  Unlike the classic
    ResNet block there is no activation after the residual addition.
    """
    # Output channels = planes * expansion (basic block does not widen).
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1):
        super(IBasicBlock, self).__init__()
        # Grouped/wide/dilated variants are only meaningful for bottleneck
        # blocks, so they are rejected here.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Pre-activation style: normalise the input before the first conv.
        self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
        self.conv1 = conv3x3(inplanes, planes)
        self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
        self.prelu = nn.PReLU(planes)
        # Spatial downsampling (if any) happens in the second conv.
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
        # Optional projection applied to the shortcut when shape changes.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.bn1(x)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.prelu(out)
        out = self.conv2(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Match the shortcut's shape to the main path before adding.
            identity = self.downsample(x)
        out += identity
        return out
class IResNet(nn.Module):
    """Improved ResNet backbone for face embeddings (ArcFace-style).

    Differences from torchvision's ResNet: 3x3 stride-1 stem (built for
    112x112 face crops), PReLU activations, every stage downsamples
    (stride=2 including layer1), and the head is BN -> dropout -> FC ->
    BN1d producing a ``num_features``-dimensional embedding.
    Supports mixed-precision via ``fp16`` (autocast in forward).
    """
    # 112x112 input halved by 4 stages -> 7x7 feature map feeding the FC head.
    fc_scale = 7 * 7
    def __init__(self,
                 block, layers, dropout=0, num_features=512, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
        super(IResNet, self).__init__()
        self.fp16 = fp16
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 3x3 stride-1 conv (no 7x7/stride-2 + maxpool as in ImageNet ResNet).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
        self.prelu = nn.PReLU(self.inplanes)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
        self.dropout = nn.Dropout(p=dropout, inplace=True)
        self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
        # Final feature BN with a frozen, constant scale (standard ArcFace head).
        self.features = nn.BatchNorm1d(num_features, eps=1e-05)
        nn.init.constant_(self.features.weight, 1.0)
        self.features.weight.requires_grad = False
        # Weight init: small normal for convs, unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Start each residual branch at zero so blocks begin as identity.
            for m in self.modules():
                if isinstance(m, IBasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack ``blocks`` residual blocks; only the first may downsample
        (or dilate instead, when ``dilate`` is set)."""
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when shape changes between input and output.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups,
                  self.base_width, previous_dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Run the conv trunk under autocast when fp16 is enabled.
        with torch.cuda.amp.autocast(self.fp16):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.prelu(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.bn2(x)
            x = torch.flatten(x, 1)
            x = self.dropout(x)
        # The FC head runs in fp32; cast back explicitly when autocast was on.
        x = self.fc(x.float() if self.fp16 else x)
        x = self.features(x)
        return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build an IResNet variant.  Pretrained weights are not shipped with
    this file, so ``pretrained=True`` raises (same behaviour as before);
    ``arch`` and ``progress`` are accepted for API symmetry only."""
    net = IResNet(block, layers, **kwargs)
    if pretrained:
        raise ValueError()
    return net
def iresnet18(pretrained=False, progress=True, **kwargs):
    # 18-layer variant: [2, 2, 2, 2] basic blocks per stage.
    return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
                    progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
    # 34-layer variant: [3, 4, 6, 3] basic blocks per stage.
    return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
                    progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
    # 50-layer variant (still basic blocks here, unlike torchvision's bottleneck r50).
    return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
                    progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
    # 100-layer variant, the common ArcFace backbone depth.
    return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
                    progress, **kwargs)
def iresnet200(pretrained=False, progress=True, **kwargs):
    # 200-layer variant for large-scale training.
    return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
                    progress, **kwargs)
| [
"[email protected]"
]
| |
5711678503f71af49eab52cf70f07cb27cd48a3c | 3bd2d2d66c30241b63f0fd64a7cae8584e593428 | /blog/admin.py | b80fb51b5e9260c1cb25e555e0fe69e53f554946 | []
| no_license | startcode01/my-first-blog | dfe9f5fe05204e30e7eb73c42f9ea5ec17c7aa8c | 0d7d1574837b7c100a822fb63435062adf49e488 | refs/heads/master | 2020-04-16T08:06:03.323443 | 2019-01-12T15:20:47 | 2019-01-12T15:20:47 | 165,405,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.contrib import admin
from .models import Post
admin.site.register(Post)
# Register your models here.
| [
"[email protected]"
]
| |
998c4faf4a8619b9f980f1c65b082673dc189e49 | 1233af220e9fb80b80c6abdbe7583c473f8e25f8 | /CodeWars/002_241120_RomanNumerals.py | 7aef8cfcb8648242e198012b9bf367a846900585 | []
| no_license | olegbrz/coding_every_day | c3eca7150c3e0f542e7284da7551f9975922d88a | f9ff12588c494ff8b79176e22f889864b3f4c120 | refs/heads/master | 2023-03-13T18:52:52.844473 | 2021-03-19T00:24:16 | 2021-03-19T00:24:16 | 315,709,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | """TODO: create a RomanNumerals helper object
| Symbol | Value |
|----------------|
| I | 1 |
| V | 5 |
| X | 10 |
| L | 50 |
| C | 100 |
| D | 500 |
| M | 1000 |
"""
ld = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
dec = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
rom = ('M', 'CM', 'D', 'CD', 'C', 'XC',
'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
class RomanNumerals:
    """Convert between Roman numerals and decimal integers.

    Improvements over the original: the lookup tables are now class
    constants instead of module-level globals (``ld``/``dec``/``rom``),
    making the class self-contained, and ``to_roman`` uses integer
    ``divmod`` instead of float division (exact for arbitrarily large
    values).  The public interface is unchanged.
    """

    # Value of each single Roman symbol (used when parsing).
    _SYMBOL_VALUES = {
        'I': 1, 'V': 5, 'X': 10, 'L': 50,
        'C': 100, 'D': 500, 'M': 1000,
    }
    # Parallel decimal/Roman tables, largest first (used when formatting);
    # the two-letter entries encode the subtractive forms (CM, IV, ...).
    _DECIMALS = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    _ROMANS = ('M', 'CM', 'D', 'CD', 'C', 'XC',
               'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')

    @staticmethod
    def from_roman(roman):
        """Parse a Roman-numeral string; returns 0 for the empty string."""
        values = RomanNumerals._SYMBOL_VALUES
        total = 0
        for i, ch in enumerate(roman):
            val = values[ch]
            # A symbol smaller than its successor is subtractive (e.g. the I in IX).
            if i + 1 < len(roman) and val < values[roman[i + 1]]:
                total -= val
            else:
                total += val
        return total

    @staticmethod
    def to_roman(decimal):
        """Format a non-negative integer as a Roman-numeral string."""
        parts = []
        for value, symbol in zip(RomanNumerals._DECIMALS, RomanNumerals._ROMANS):
            count, decimal = divmod(decimal, value)
            parts.append(symbol * count)
        return ''.join(parts)
return ''.join(roman)
RomanNumerals.from_roman('IX')
| [
"[email protected]"
]
| |
dc76be437385dbad81055255bec2af7a8df87938 | b9005dd010b3d073f95d88df94813dec7580dd90 | /mongodb.py | 5b773bd956b67f4554ed01c892619339edc7bdd3 | []
| no_license | HORSESUNICE/webscraping | 33829e9a33089bec398284471414d7c03d6cce96 | f2e22273e9fb9a3fb5b4016f9591b49025fefc6c | refs/heads/master | 2021-01-18T22:39:49.144652 | 2018-03-01T01:56:29 | 2018-03-01T01:56:29 | 87,064,281 | 0 | 1 | null | 2018-03-01T01:56:30 | 2017-04-03T10:54:02 | Python | UTF-8 | Python | false | false | 2,120 | py | import requests
from bs4 import BeautifulSoup
import pymongo
client = pymongo.MongoClient('localhost',27017)
xiaozhu = client['xiaozhu']
bnb_info = xiaozhu['bnb_info']
# ====================================================== <<<< 单页行为 >>>> =============================================
url = 'http://bj.xiaozhu.com/search-duanzufang-p20-0/'
wb_data = requests.get(url)
soup = BeautifulSoup(wb_data.text,'lxml')
titles = soup.select('span.result_title')
prices = soup.select('span.result_price > i')
for title, price in zip(titles,prices):
data = {
'title':title.get_text(),
'price':int(price.get_text())
}
bnb_info.insert_one(data)
print('Done')
# ====================================================== <<<< 设计函数 >>>> =============================================
def get_page_within(pages):
    """Scrape listing titles/prices from the first ``pages`` result pages of
    bj.xiaozhu.com and insert one document per listing into the module-level
    ``bnb_info`` Mongo collection.

    Each inserted document is ``{'title': str, 'price': int}``.
    """
    for page_num in range(1,pages+1):
        # Result pages are 1-based and encoded in the URL path.
        wb_data = requests.get('http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(page_num))
        soup = BeautifulSoup(wb_data.text,'lxml')
        titles = soup.select('span.result_title')
        prices = soup.select('span.result_price > i')
        # zip pairs each title with its price; assumes the two selectors
        # return elements in matching order -- TODO confirm on the live page.
        for title, price in zip(titles,prices):
            data = {
                'title':title.get_text(),
                'price':int(price.get_text())
            }
            bnb_info.insert_one(data)
    print('Done')
# get_page_within(3) 获取前三页面得数据
# 从数据库中进行筛选
# for i in bnb_info.find():
# if i['price'] >= 500:
# print(i)
# client = pymongo.MongoClient('localhost',27017)
# walden = client['walden']
# sheet_tab = walden['sheet_tab']
# path = '/Users/Hou/Desktop/walden.txt'
# with open(path,'r') as f:
# lines = f.readlines()
# for index,line in enumerate(lines):
# data = {
# 'index':index,
# 'line' :line,
# 'words':len(line.split())
# }
# sheet_tab.insert_one(data)
# $lt/$lte/$gt/$gte/$ne,依次等价于</<=/>/>=/!=。(l表示less g表示greater e表示equal n表示not )
# for item in sheet_tab.find({'words':{'$lt':5}}):
# print(item)
| [
"[email protected]"
]
| |
b65b8f7c48e21d63843b88ce2832a2d666bf33d7 | 32f1d0e9c2fbce7f4682b9f79cae5f3df0480de0 | /brevets/flask_brevets.py | ff59123f5a991747db42de10588f90ef1a270ae0 | [
"Artistic-2.0"
]
| permissive | UO-CIS-322/proj4-brevets | b0546b3e47db78c74b4c35b52c5527c811eb8ad0 | a1600206886d324eaa3975f561ae6c7fff601b82 | refs/heads/master | 2021-01-21T21:32:21.088892 | 2017-10-13T21:29:38 | 2017-10-13T21:29:38 | 43,849,637 | 0 | 75 | null | 2017-10-22T04:51:19 | 2015-10-07T23:01:01 | Python | UTF-8 | Python | false | false | 1,854 | py | """
Replacement for RUSA ACP brevet time calculator
(see https://rusa.org/octime_acp.html)
"""
import flask
from flask import request
import arrow # Replacement for datetime, based on moment.js
import acp_times # Brevet time calculations
import config
import logging
###
# Globals
###
app = flask.Flask(__name__)
CONFIG = config.configuration()
app.secret_key = CONFIG.SECRET_KEY
###
# Pages
###
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Main page entry")
return flask.render_template('calc.html')
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page, remembering where to link back to."""
    app.logger.debug("Page not found")
    flask.session['linkback'] = flask.url_for("index")
    return flask.render_template('404.html'), 404
###############
#
# AJAX request handlers
# These return JSON, rather than rendering pages.
#
###############
@app.route("/_calc_times")
def _calc_times():
"""
Calculates open/close times from miles, using rules
described at https://rusa.org/octime_alg.html.
Expects one URL-encoded argument, the number of miles.
"""
app.logger.debug("Got a JSON request")
km = request.args.get('km', 999, type=float)
app.logger.debug("km={}".format(km))
app.logger.debug("request.args: {}".format(request.args))
# FIXME: These probably aren't the right open and close times
# and brevets may be longer than 200km
open_time = acp_times.open_time(km, 200, arrow.now().isoformat)
close_time = acp_times.close_time(km, 200, arrow.now().isoformat)
result = {"open": open_time, "close": close_time}
return flask.jsonify(result=result)
#############
app.debug = CONFIG.DEBUG
if app.debug:
app.logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
print("Opening for global access on port {}".format(CONFIG.PORT))
app.run(port=CONFIG.PORT, host="0.0.0.0")
| [
"[email protected]"
]
| |
387575509aa4d79b183e9aab89214994f4aa8615 | 31c22696e8fffd9016e2f11a6ac7aa104a17c5f7 | /bitcoin.py | f047394ab01f60c44f5eed79167304643d843784 | []
| no_license | stiipast/bitcoin-analyzer | a33f14a523d14a02855a6ada185bf50103c63775 | f1bec29da55d725ee0424145230348fe1cb669b3 | refs/heads/master | 2020-04-10T13:05:09.769392 | 2018-03-07T18:26:19 | 2018-03-07T18:26:19 | 124,274,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,321 | py | #!/usr/bin/python
import urllib, json, time, sys
import mysql.connector
def obtindre_block(block_index):
    """Download block ``block_index`` from blockchain.info, insert one row per
    transaction plus one block-summary row into MySQL, and return the hash of
    the previous block (so the caller can walk the chain backwards).

    Python 2 code: uses ``urllib.urlopen`` and value units are satoshis.
    """
    # Keep per-transaction values in lists so they can later be used for
    # statistics (not used for that yet).
    in_tx=[]
    out_tx=[]
    fee=[]
    temps=[]
    conndb = mysql.connector.connect(user='bitcoin', database='bitcoin') # open the DB connection
    cursor = conndb.cursor() # cursor used to insert rows into the DB
    data = json.loads(urllib.urlopen("http://blockchain.info/rawblock/" + block_index).read()) # download the block
    # Convert the block timestamps to a human-readable format
    block_date = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(data['time'])))
    block_received_time = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(data['received_time'])))
    for t in range(len(data["tx"])): # walk the block; t indexes each transaction
        in_tx_temp = 0 # running total of the input values of transaction t
        out_tx_temp = 0 # running total of the output values of transaction t
        fee_temp = 0
        temps_temp = 0
        i=0 # index over the inputs
        j=0 # index over the outputs
        for i in range(len(data['tx'][t]['inputs'])):
            if(t!=0):
                # t == 0 is the coinbase tx, which has no real inputs to sum.
                in_tx_temp=in_tx_temp + data['tx'][t]['inputs'][i]['prev_out']['value'] # add each input's value
        in_tx.append(in_tx_temp)
        for j in range(len(data['tx'][t]['out'])):
            out_tx_temp = out_tx_temp + data['tx'][t]['out'][j]['value'] # sum the outputs
        out_tx.append(out_tx_temp)
        # fee = (in_tx - out_tx) / 100000000.0 # inputs minus outputs = fee; division converts satoshis to BTC
        if(t==0):
            # Coinbase: record the total output value in the fee column.
            fee_temp = out_tx_temp
        else:
            fee_temp = in_tx_temp - out_tx_temp
        fee.append(fee_temp)
        temps_temp = data['time'] - data['tx'][t]['time']
        temps.append(temps_temp) # seconds the tx took to confirm (block time - tx time)
        # print "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s" %(data['block_index'], data['height'], data['hash'], t, in_tx[t], out_tx[t], fee[t], temps[t])
        tx_date = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(data['tx'][t]['time'])))
        # Build and insert the per-transaction row
        add_tx = ("INSERT INTO transaccions "
                  "(block_index, block_date, altura, hash, tx_hash, tx_index, relayed_by, n_inputs, input, n_outputs, output, tx_date, fee, temps) "
                  "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        data_tx = (data['block_index'], block_date, data['height'], data['hash'], data['tx'][t]['hash'], t, data['tx'][t]['relayed_by'], len(data['tx'][t]['inputs']), in_tx[t], len(data['tx'][t]['out']), out_tx[t], tx_date, fee[t], temps[t])
        cursor.execute(add_tx, data_tx)
    # Once every tx of the block has been processed, insert the block summary
    # row, commit, and close the cursor and the connection.
    add_block = ("INSERT INTO blocks "
                 "(block_index, block_date, block_received_time, height, hash, bits, n_tx, fee, size, main_chain, relayed_by) "
                 "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    data_block = (data['block_index'], block_date, block_received_time, data['height'], data['hash'], data['bits'], data['n_tx'], data['fee'], data['size'], data['main_chain'], data['relayed_by'])
    cursor.execute(add_block, data_block)
    conndb.commit()
    cursor.close()
    conndb.close()
    return data['prev_block'] # hash of the block preceding the current one
# Cos principal del programa
if (len(sys.argv)) < 2:
latest_block = json.loads(urllib.urlopen("http://blockchain.info/latestblock").read())
block_index=str(latest_block["block_index"]) # Obtenim el index del ultim bloc generat
else:
if (len(sys.argv[1])) != 64:
print "El hash es incorrecte"
exit()
else:
block_index = sys.argv[1]
print "Block_index \t Altura \t Hash \t Tx_Index \t input \t output \t fee \t temps"
z = 0
if
while z < 100: #obtenim els 100 primers blocks de la cadena
block_index = obtindre_block(block_index)
z += 1 | [
"[email protected]"
]
| |
859f53a675da269d458e7153e908f2527223ac15 | bf534da18426b49dbee0a0b1870f5f3a85922855 | /ex023.py | 81005cc6e332246276e30c09bd341672794200b7 | []
| no_license | kcpedrosa/Python-exercises | 0d20a72e7e68d9fc9714e3aabf4850fdbeb7d1f8 | ae35dfad869ceb3aac186fce5161cef8a77a7579 | refs/heads/master | 2021-05-20T08:46:29.318242 | 2020-04-01T15:44:36 | 2020-04-01T15:44:36 | 252,205,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | num = int(input('Digite um numero qualquer: '))
u = num // 1 % 10
d = num // 10 % 10
c = num // 100 % 10
m = num // 1000 % 10
print('Analisando o numero {}'.format(num))
print('A unidade vale {}'.format(u))
print('A dezena vale {}'.format(d))
print('A centena vale {}'.format(c))
print('A milhar vale {}'.format(m)) | [
"[email protected]"
]
| |
951e199ec32cab031e99f50017e8b9a574d1eb53 | 874227c96fa1f09160103299a84c34a6bd388cab | /Algorithms/Chap04/hello.txt | 691dda3b6a4e1ef4a64fec7a74fd5204e7dc9b24 | []
| no_license | seyoungnam/python_basics | 4be59f3c92d81c5a505d764ce10e3f7c66c30fe0 | 4ff6c7068f6de49e3f47311c42e1efc47c089443 | refs/heads/master | 2022-11-07T17:02:34.027796 | 2020-06-17T14:42:04 | 2020-06-17T14:42:04 | 257,255,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | txt | hello = "hello"
def world():
    """Return the string "world"."""
    greeting_part = "world"
    return greeting_part
if __name__ == "__main__":
print("{0} 직접 실행됨".format(__name__))
else:
print("{0} 임포트됨".format(__name__)) | [
"[email protected]"
]
| |
8fea38c27fcc878bfe3485c0023f45a5d12743af | fa7db1cca780471a0300c4a27213323513ad27d5 | /chapter6/batch_norm_gradient_check.py | 0d23d1482d9e8ceacc88f36dc9f5e6e7913c269a | []
| no_license | 772594536wang/Deep-Learning-From-Scratch | e3a9f0aaf48104d33f30c903be113887d68370b5 | 2cff7999196062fd98372aece42fab2eedc47299 | refs/heads/master | 2020-05-07T11:58:21.735124 | 2018-08-13T09:55:31 | 2018-08-13T09:55:31 | 180,483,924 | 2 | 0 | null | 2019-04-10T02:15:21 | 2019-04-10T02:15:21 | null | UTF-8 | Python | false | false | 1,255 | py | # -*- coding: utf-8 -*-
# @Time : 2018-08-10 22:58
# @Author : Jayce Wong
# @ProjectName : Deep_Learning_From_Scratch
# @FileName : batch_norm_gradient_check.py
# @Blog : http://blog.51cto.com/jayce1111
# @Github : https://github.com/SysuJayce
import numpy as np
from common.mnist import load_mnist
from common.multi_layer_net_extend import MultiLayerNetExtend
def main():
    """Sanity-check backpropagation in a batch-norm network by comparing its
    gradients against numerical (finite-difference) gradients on one MNIST
    sample; prints the mean absolute difference per parameter."""
    # Load the data (one-hot labels)
    (train_x, train_label), _ = load_mnist(one_hot_label=True)
    # Build the network (784 inputs, two hidden layers of 100, 10 outputs,
    # with batch normalization enabled)
    network = MultiLayerNetExtend(input_size=784,
                                  hidden_size_list=[100, 100],
                                  output_size=10,
                                  use_batchnorm=True)
    # Use only a single training sample for the check (numerical gradients are slow)
    batch_x = train_x[: 1]
    batch_label = train_label[: 1]
    # Compute gradients via backpropagation and via the numerical method
    grad_backprop = network.gradient(batch_x, batch_label)
    grad_numerical = network.numerical_gradient(batch_x, batch_label)
    # Compare the two results: the difference should be near zero if
    # backpropagation is implemented correctly
    for key in grad_numerical.keys():
        diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
        print(key + ":" + str(diff))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
ddc32b1926560d046349ee35ff5707643abd8afe | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /140_gui/pyqt_pyside/_exercises/_templates/temp/Mastering GUI Programming with Python/Chapter 3 Handling Events with Signals and Slots/signal_slots_demo.py | f79d2febefd50d50434b21a86eb7d099cee6be09 | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,488 | py | # ______ ___
# ____ ? ______ ?W.. __ qtw
# ____ ? ______ ?C.. __ qtc
#
#
# c_ MainWindow ?.?W..
#
# ___ -
# s_. -
# sL.. ?.?VBL..
#
# # connecting a signal to a slot
# quitbutton _ ?.?PB.. Quit
# ?.c__.c.. cl..
# la__ .aW.. ?
#
# # connecting a signal with data to a slot that receives data
# entry1 _ ?.?LE..
# entry2 _ ?.?LE..
# la__ .aW.. ?
# la__ .aW.. ?
# _1.tC...c.. _2.sT.
#
# # connecting a signal to a python callable
# _2.tC...c.. pr..
#
# # Connecting a signal to another signal
# _1.eF__.c.. l___ print editing finished
# _2.rP__.c.. _1.eF__
#
# # This call will fail, because the signals have different argument types
# #self.entry1.textChanged.connect(self.quitbutton.clicked)
#
# # This won't work, because of signal doesn't send enough args
# badbutton _ ?.?PB.. Bad
# la__ .aW.. ?
# ?.c__.c.. n_a..
#
# # This will work, even though the signal sends extra args
# goodbutton _ ?.?PB.. Good
# la__ .aW.. ?
# ?.c__.c.. n_a..
#
#
# s..
#
# ___ needs_args arg1, arg2, arg3
# p..
#
# ___ no_args
# print('I need no arguments')
#
# __ ______ __ ______
# app _ ?.?A.. ___.a..
# # it's required to save a reference to MainWindow.
# # if it goes out of scope, it will be destroyed.
# mw _ ?
# ___.e.. ?.e..
| [
"[email protected]"
]
| |
5705fd2fedee9caeaeaa41e9e65f89a975c95792 | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/convolutional.py | 1688b79891c2bcd3cce1b6bb7355c216736014a3 | [
"Apache-2.0"
]
| permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 182 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/convolutional.py | [
"[email protected]"
]
| |
388a6eb4b8b486a5c9c706692097b3b4c38187c7 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/pandas/_config/display.py | 57b7af184346cd2f68442d22a2bd7a489047ecad | [
"MIT"
]
| permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f6ba130797f4f1ce2395562efa48f788ebd3a352e26f7c79209f476a3d300866
size 1756
| [
"[email protected]"
]
| |
994c0795da16cdc04ade8acbce51229b95fa4e8e | 5527d3854ad0840fb4a0a9893447535cd5e6ad0f | /python/ThirteenTeV/QstarToQW_M_1200_TuneCUETP8M1_13TeV_pythia8_cfi.py | 58d9d33c62bab8fd0ee915374feb779697103556 | []
| no_license | danbarto/genproductionsSummer16 | ecf2309c1627b4db3e4a1b8785ca612d9a59426f | 655ef31aa5f05d0117aeef82d107f07a1fd5d822 | refs/heads/master | 2020-03-26T23:12:37.115369 | 2018-08-21T14:23:30 | 2018-08-21T14:23:30 | 145,520,233 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(65.84),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExcitedFermion:dg2dStar = on',
'ExcitedFermion:ug2uStar = on',
'ExcitedFermion:Lambda = 1200',
'4000001:m0 = 1200',
'4000001:onMode = off',
'4000001:onIfMatch = 2 24',
'4000002:m0 = 1200',
'4000002:onMode = off',
'4000002:onIfMatch = 1 24',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
]
| |
1e71df78ffaddd0cc93f895ade53620ca92ad1fe | 3e89632dfc1f7f0b992002869cd5416033357cd3 | /model.py | 3b7e276f3748734112952754be4cf24316b0dacc | []
| no_license | freebugssss/DEAP | c97ca917910e87d5453357e224c72f3e41dec235 | e4802d6195691dd13c2a2ced89d3e056713bab61 | refs/heads/master | 2023-04-24T09:53:38.992657 | 2021-05-23T08:01:49 | 2021-05-23T08:01:49 | 369,991,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | # Create first network with Keras
from keras.models import Sequential
from keras.layers import Dense
import keras
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import pyplot
df=pd.read_csv('./dataset/dataset.csv',header=0, index_col=0)
dataset=df.values
X=dataset[:,:-1]
Y=dataset[:,-1]
Y=keras.utils.to_categorical(Y,4)
X, test_X, Y, test_y = train_test_split(X, Y, test_size=0.2, stratify=Y)
input_shape_X=X.shape[-1]
input_shape_Y=Y.shape[-1]
print('X:',X.shape,"Y:",Y.shape)
# create model
model = Sequential()
model.add(Dense(10240, input_shape=(input_shape_X,),activation='relu',kernel_initializer="normal"))
model.add(Dense(5120, kernel_initializer='normal', activation='relu'))
model.add(Dense(input_shape_Y, kernel_initializer='normal', activation='sigmoid'))
# Compile model
#adam=keras.optimizers.Adam(lr=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-8)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
history=model.fit(X, Y, epochs=150, batch_size=30, verbose=2,shuffle=True,validation_data=(test_X, test_y))
# evaluate the model
#scores = model.evaluate(X_test, Y_test)
#print("AAA%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# calculate predictions
predictions = model.predict(X)
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
| [
"[email protected]"
]
| |
e2070525c866d5c13ea17979887ad320706aefe9 | b3e7a3d1e5d50af82b60e6d7b8afa4a077a040ad | /main2.py | 5f83b2d48ceebcd188e66f2ed0f7efb1c605281a | []
| no_license | Namenaro/cheini | d317fb0a6396bf038629490231a175c62e2e6011 | 3b14b58030d1f910265da8c1b859742149df4f6f | refs/heads/master | 2021-05-10T12:30:26.042569 | 2018-02-18T20:03:49 | 2018-02-18T20:03:49 | 118,442,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,941 | py | # -*- coding: utf-8 -*
import itertools
import one_experiment_report
import utils
import simple_nets
from math import floor, ceil
import matplotlib.pyplot as plt
import numpy as np
import os
import _pickle as pickle
from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
from keras import optimizers
import time
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm
from keras import losses
# варьируем один (или несколько) гиперпараметр - проводим таким образом серию экспериментов,
# результаты серии сводим в единый отчет: таблица из 2 столбцов (что вариьровали) и (за чем следили)
#one_experiment_report.main()
class Serial:
    """A series of autoencoder experiments: holds lists of candidate values
    for each hyperparameter, expands them into a full Cartesian grid, runs
    one Experiment per combination, and collects everything into a single
    PDF report plus a list of summary dicts."""
    def __init__(self, dataset, dataset_name='default'):
        # Each attribute is a LIST of candidate values for one hyperparameter;
        # the grid is the Cartesian product over all of them.
        self.batch_size = [3]
        self.code_len = [2]
        self.wb_koef_reg = [0.]
        self.num_epochs = [2200]
        self.drop_in_decoder = [0.0]
        self.drop_in_encoder = [0.0]
        self.activation = ['linear']
        # NOTE(review): unlike the other attributes, dataset is NOT wrapped in
        # a list; itertools.product in _get_all_cominations will iterate over
        # it elementwise, which only works if a dataset is itself iterable the
        # intended way -- confirm against callers.
        self.dataset = dataset
        self.dataset_name = [dataset_name]
    def _get_all_cominations(self):
        """
        :return: list of dicts -- every possible combination of the
        hyperparameter values stored on this object.
        """
        def enumdict(listed):
            # Map position -> element, e.g. {0: 'code_len', 1: 'activation'}.
            myDict = {}
            for i, x in enumerate(listed):
                myDict[i] = x
            return myDict
        hypermapars_arrays = self.__dict__
        names = hypermapars_arrays.keys()
        enumerated_names = enumdict(names)  # e.g. {0: 'code_len', 1: 'activation', 2: 'num_epochs'}
        n_hyperparams = len(enumerated_names.keys())
        # a[k] is the candidate-value list of the k-th hyperparameter.
        a = [None] * n_hyperparams
        for k in enumerated_names.keys():
            name = enumerated_names[k]
            a[k] = hypermapars_arrays[name]
        all_combinations = list(itertools.product(*a))
        # Re-attach names to each combination tuple.
        all_dicts = []
        for combination in all_combinations:
            d = {}
            for i in enumerated_names.keys():
                name = enumerated_names[i]
                d[name] = combination[i]
            all_dicts.append(d)
        return all_dicts
    def make_experiments(self, folder_name=None):
        """Run one Experiment per hyperparameter combination inside a results
        folder (asked from the user when folder_name is None), build the
        combined PDF report, and return the list of per-experiment summaries."""
        all_dicts = self._get_all_cominations()
        print("NUM EXPERIMENTS EXPECTED: " + str(len(all_dicts)))
        outer_story = []
        summaries = []
        experiment_id = 0
        if folder_name is None:
            folder_name = utils.ask_user_for_name()  # choose a name for the series
        if folder_name is None:
            exit()
        utils.setup_folder_for_results(folder_name)
        folder_full_path = os.getcwd()
        for params in all_dicts:
            utils.setup_folder_for_results(str(experiment_id))  # per-experiment subfolder
            e = Experiment(params)
            summary = e.run_it(outer_story=outer_story, name_of_experiment="experiment_" + str(experiment_id))
            summary['experiment_name'] = experiment_id
            # Merge hyperparameters and results into one report line.
            all_report_line = {**params, **summary}
            summaries.append(all_report_line)
            experiment_id += 1
            os.chdir(folder_full_path)  # back to the series folder
        doc = SimpleDocTemplate("seria_report.pdf", pagesize=letter,
                                rightMargin=72, leftMargin=72,
                                topMargin=72, bottomMargin=18)
        doc.build(outer_story)
        return summaries
from keras.regularizers import Regularizer
from keras import backend as K
class ActivityRegularizer(Regularizer):
    """No-op activity regularizer.

    The l1/l2 penalty terms are deliberately disabled (kept below as
    comments), so this regularizer always contributes 0 to the loss.
    l1 and l2 are still stored so the config round-trips through Keras
    serialization via get_config().
    """
    def __init__(self, l1=0., l2=0.):
        self.l1 = l1
        self.l2 = l2
    def __call__(self, x):
        # Penalty intentionally disabled; previously-considered terms:
        # loss = self.l1 * K.sum(K.mean(K.abs(x), axis=0))
        # loss += self.l2 * K.sum(K.mean(K.square(x), axis=0))
        # The original also indexed x[0..2] into unused locals, which could
        # raise IndexError for short inputs; that dead code is removed.
        return 0
    def get_config(self):
        return {"name": self.__class__.__name__,
                "l1": self.l1,
                "l2": self.l2}
class Experiment:
    """One autoencoder training run for a single hyperparameter combination."""
    def __init__(self, dictionary):
        # Expose every hyperparameter entry as an instance attribute
        # (e.g. self.code_len, self.batch_size, self.dataset, ...).
        for k, v in dictionary.items():
            setattr(self, k, v)
    def run_it(self, outer_story, name_of_experiment):
        """Train the autoencoder, append its report pages to *outer_story*.

        :param outer_story: mutable list of reportlab flowables shared by the series
        :param name_of_experiment: label used in the report
        :return: summary dict produced by the report
        """
        print("RUN: " + str(self.__dict__))
        # pull the dataset out of its file
        foveas01 = utils.get_dataset(self.dataset)
        a_regulariser = ActivityRegularizer(l1=0., l2=0.)
        # build and train the model
        en, de, ae = simple_nets.create_ae_YANA(encoding_dim=self.code_len,
                                                input_data_shape=foveas01[0].shape,
                                                activity_regulariser=a_regulariser,
                                                koef_reg=self.wb_koef_reg,
                                                activation_on_code=self.activation,
                                                drop_in_decoder=self.drop_in_decoder,
                                                drop_in_encoder=self.drop_in_encoder)
        sgd = optimizers.SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
        ae.compile(optimizer=sgd, loss=losses.mean_squared_error)
        early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')
        # NOTE: training and validation use the same data (autoencoder
        # reconstruction of the training set).
        history = ae.fit(foveas01, foveas01,
                         epochs=self.num_epochs,
                         #batch_size=ceil(len(foveas01) / 2),
                         batch_size=self.batch_size,
                         shuffle=False,
                         validation_data=(foveas01, foveas01),
                         callbacks=[early_stopping])
        # generate a report from the training results on this dataset
        report = one_experiment_report.ReportOnPath(ae=ae, en=en, de=de,
                                                    dataset=foveas01,
                                                    history_obj=history,
                                                    name_of_experiment=self.dataset + "__" + name_of_experiment
                                                    )
        report.create_summary()
        summary, exp_outer_story = report.end()
        outer_story += exp_outer_story
        utils.save_all(encoder=en, decoder=de, autoencoder=ae)
        return summary
def make_seria_on_dataset(dataset, name_of_seria=None):
    """Run a Serial of experiments on *dataset* inside the SERIES results folder.

    Side effects: changes into the series folder, writes summaries_dicts.pkl
    and settings.txt there, then returns to the original working directory.
    """
    old_dir = os.getcwd()
    try:
        utils.setup_folder_for_results("SERIES")
        s = Serial(dataset)
        summaries = s.make_experiments(folder_name=name_of_seria)
        # Fix: the original passed an unclosed file handle to pickle.dump;
        # the context manager guarantees the pickle is flushed and closed.
        with open("summaries_dicts.pkl", "wb") as pkl_file:
            pickle.dump(summaries, pkl_file)
        print("summaries is saved into: " + os.getcwd())
        with open("settings.txt", "w") as text_file:
            text_file.write(str(s.__dict__))
    finally:
        # Always restore the caller's working directory, even on failure.
        os.chdir(old_dir)
def get_dataset(a_dir):
    """Return the 'foveas.pkl' path for every sub-directory of *a_dir*."""
    paths = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            paths.append(os.path.join(a_dir, entry, 'foveas.pkl'))
    return paths
def learn_models_on_dataset(folder_with_dataset, name_for_experiment):
    """Discover dataset files under *folder_with_dataset* and run a series on them."""
    make_seria_on_dataset(get_dataset(folder_with_dataset), name_for_experiment)
if __name__ == "__main__":
    # Entry point: train on the hard-coded 7x7 "partial" dataset folder.
    directory = 'C:\\Users\\neuro\\PycharmProjects\\cheini\\partial\\7x7'
    learn_models_on_dataset(folder_with_dataset=directory,
                            name_for_experiment='7x7 last ITOG')
    #directory1 = 'C:\\Users\\neuro\\PycharmProjects\\cheini\\partial\\7x7'
    # dataset1 = get_dataset(directory1)
    #make_seria_on_dataset(dataset1, "ITOG 7x7 partial_")
| [
"[email protected]"
]
| |
1fadd9b1dcb925ea1e7e6b4598b9551cb9704bb1 | 0285ec106e21d5fab9b16a10da9023bcd82f930d | /src/pylhe.py | a2bece618e6c535d3b1e46b1b8f6b73f7dbeda68 | [
"MIT"
]
| permissive | christopher-w-murphy/Class-Imbalance-in-WW-Polarization | 099918a4709e5607da36ac3b6b65b7fcb907ea49 | 30fcbb21775e1a67d3b0739147657fd5f6ceb498 | refs/heads/master | 2020-04-13T01:01:59.308918 | 2019-11-23T22:25:09 | 2019-11-23T22:25:09 | 162,861,893 | 0 | 2 | MIT | 2019-11-23T22:18:03 | 2018-12-23T04:23:20 | Jupyter Notebook | UTF-8 | Python | false | false | 1,801 | py | # https://github.com/lukasheinrich/pylhe
from xml.etree import ElementTree
class LHEEvent(object):
    """One LHE <event>: its header info plus the list of particle records."""

    def __init__(self, eventinfo, particles):
        self.eventinfo = eventinfo
        self.particles = particles
        # Give every particle a back-reference to its parent event.
        for particle in particles:
            particle.event = self
class LHEEventInfo(object):
    """Per-event header fields of an LHE <event> block."""

    fieldnames = ['nparticles', 'pid', 'weight', 'scale', 'aqed', 'aqcd']

    def __init__(self, **kwargs):
        # Require exactly the declared fields - nothing more, nothing less.
        if set(kwargs) != set(self.fieldnames):
            raise RuntimeError
        for name, value in kwargs.items():
            setattr(self, name, value)

    @classmethod
    def fromstring(cls, string):
        """Parse a whitespace-separated event header line into floats."""
        values = map(float, string.split())
        return cls(**dict(zip(cls.fieldnames, values)))
class LHEParticle(object):
    """A single particle record from an LHE <event> block."""

    fieldnames = ['id', 'status', 'mother1', 'mother2', 'color1', 'color2', 'px', 'py', 'pz', 'e', 'm', 'lifetime', 'spin']

    def __init__(self, **kwargs):
        # Require exactly the declared fields - nothing more, nothing less.
        if set(kwargs) != set(self.fieldnames):
            raise RuntimeError
        for name, value in kwargs.items():
            setattr(self, name, value)

    @classmethod
    def fromstring(cls, string):
        """Parse a whitespace-separated particle line into floats."""
        values = map(float, string.split())
        return cls(**dict(zip(cls.fieldnames, values)))
def read_lhe(thefile):
    """Yield LHEEvent objects parsed from a Les Houches Event (LHE) file.

    *thefile* may be a path or file-like object accepted by ElementTree.
    Parsing stops (with a printed warning) on malformed XML.
    """
    try:
        for event, element in ElementTree.iterparse(thefile, events=['end']):
            if element.tag == 'event':
                # First line of the <event> text is the event header;
                # each remaining line is one particle record.
                data = element.text.split('\n')[1:-1]
                eventdata, particles = data[0], data[1:]
                eventinfo = LHEEventInfo.fromstring(eventdata)
                particle_objs = []
                for p in particles:
                    particle_objs += [LHEParticle.fromstring(p)]
                yield LHEEvent(eventinfo, particle_objs)
    except ElementTree.ParseError:
        print("WARNING. Parse Error.")
        return
| [
"[email protected]"
]
| |
4592366353bb1a72dfd875e0dfdbd622612baa2b | ef84f06e845d5c42aae2faee84c263f9eb42d92d | /keen/web/views/api/user.py | 46bd2b750294c76a1ca60d1ba6b84a5b3139654b | []
| no_license | beforebeta/keensmb | 0921473df4e92e366695cc03c9fdef96a3be4075 | 5408a42a16c83558229f62c88eec011231a0a797 | refs/heads/master | 2016-09-16T00:37:08.907191 | 2014-03-24T15:31:11 | 2014-03-24T15:31:11 | 14,530,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | import logging
from uuid import uuid1

from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from django.db import DatabaseError
from django.views.decorators.csrf import ensure_csrf_cookie

from rest_framework.decorators import api_view
from rest_framework.response import Response

from keen.core.models import ClientUser
from keen.web.models import TrialRequest
from keen.web.forms import TrialRequestForm
from keen.web.serializers import ClientSerializer
from keen.tasks import send_email, mailchimp_subscribe
from tracking.models import Visitor
logger = logging.getLogger(__name__)
@ensure_csrf_cookie
@api_view(['POST'])
def login_view(request):
    """Authenticate a user from 'email'/'password' POST data.

    On success logs the user in, remembers their client slug on the
    session and returns a success message; otherwise returns an error
    message.  Fix: the original placed the success response after the
    else-branch, which made the error response unreachable.
    """
    try:
        email = request.DATA['email']
        password = request.DATA['password']
    except KeyError:
        logger.warn('Request is missing email and/or password parameters: %r' % request.DATA)
        return HttpResponseBadRequest('Missing authentication information')
    user = authenticate(username=email, password=password)
    logger.debug('Authenticate %r' % locals())
    if user:
        login(request, user)
        try:
            # remember which client this user belongs to (if any)
            request.session['client_slug'] = ClientUser.objects.get(
                user=user).client.slug
        except ClientUser.DoesNotExist:
            request.session['client_slug'] = None
        request.session.save()
        return Response({'success': 'Thank you for signing-in!'})
    # invalid credentials
    # NOTE: 'pasword' typo kept in the message for API compatibility.
    request.session.save()
    return Response({'error': 'Invalid e-mail/pasword combination'})
@ensure_csrf_cookie
@api_view(['GET'])
def logout_view(request):
    """Log the user out, drop their client slug, and redirect to the home page."""
    request.session.pop('client_slug', None)
    logout(request)
    return HttpResponseRedirect(reverse('home'))
@ensure_csrf_cookie
@api_view(['POST'])
def request_free_trial(request):
    """Handle a free-trial sign-up: persist it, subscribe to MailChimp, email sales.

    Persistence failures are logged but deliberately hidden from the
    requester (we still subscribe/notify and reply with a friendly
    message).  Fix: DatabaseError was previously caught without being
    imported, which raised NameError and masked the real failure;
    it is now imported from django.db at module level.
    """
    form = TrialRequestForm(request.DATA)
    if form.is_valid():
        trial_request = TrialRequest(**form.cleaned_data)
        trial_request.source = request.session.get('landing_page')
        if 'visitor' in request.session:
            try:
                trial_request.visitor = Visitor.objects.get(
                    pk=request.session['visitor'])
            except Visitor.DoesNotExist:
                logger.error('Visitor does not exist')
        try:
            trial_request.save()
        except DatabaseError:
            logger.exception('Failed to save free trial request')
            # FIXME: should we return an error?
            # for now lets pretend all went well
        # Use a unique throwaway address when the requester left email blank,
        # so the MailChimp subscription still succeeds.
        email = trial_request.email or 'ignore+{0}@keensmb.com'.format(uuid1().hex)
        mailchimp_subscribe.delay(
            'aba1a09617',
            email,
            {
                'EMAIL': email,
                'NAME': trial_request.name or '',
                'BIZNAME': trial_request.business or '',
                'NUMBER': trial_request.phone or '',
                'REFERRAL': trial_request.question or '',
                'QUESTIONS': trial_request.comments or '',
            },
            double_optin=False,
            update_existing=True,
            send_welcome=False,
        )
        send_email.delay(
            'Free Trial Request',
            '''
            Name: {0.name}
            Business name: {0.business}
            Phone number: {0.phone}
            Email: {0.email}
            Referral: {0.question}
            Questions: {0.comments}
            '''.format(trial_request),
            ['[email protected]'],
        )
        result = {
            'success': 'We will be in touch shortly',
        }
    else:
        result = {
            'errors': form.errors,
        }
    return Response(result)
| [
"[email protected]"
]
| |
7248ab453e1a86b06b69a7d02263f0431915da01 | ac01dec84e77323a9c67439f92bf3a9f1a496e61 | /django_app/motif/apps.py | 22b8a8dfd1b0066ff4cb659b0007eb98dbb7d374 | []
| no_license | Monaegi/Julia-WordyGallery | d8c970e8bd25d7cad69254a876a216fecf97e367 | 4031afe1b5d45865a61f4ff4136a8314258a917a | refs/heads/master | 2021-01-23T16:18:09.876372 | 2017-10-13T08:08:25 | 2017-10-13T08:08:25 | 102,736,537 | 1 | 0 | null | 2017-10-13T08:08:26 | 2017-09-07T12:51:47 | Python | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class MotifConfig(AppConfig):
    """Django application configuration for the 'motif' app."""
    name = 'motif'
| [
"[email protected]"
]
| |
1c129ddb3ef50cd3b69ac689a889b667c11ec1d7 | b04b22018fd377bd899c19161272d72bbfa828f2 | /snippets/code/betmain2.py | 86de8edc6de634ce538b11bc32e2951c1e957462 | []
| no_license | jpmit/betman | f4f05f52db82bbf89de15531576623ee32e5ad00 | 058d69d7365b771eaaa8f77dead173a1262cccf0 | refs/heads/master | 2021-01-21T09:53:52.091499 | 2014-05-24T13:20:23 | 2014-05-24T13:20:23 | 83,349,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,112 | py | # betmain2.py
# James Mithen
# [email protected]
"""Main betting program."""
import time, datetime
import betman
from betman import const, database, order
from betman.api.bf import bfapi
from betman.api.bdaq import bdaqapi
from betman.all import betlog
import betutil
import multi
# in practicemode, we won't place any bets (dry-run safety switch used
# throughout BetMain before calling the exchange APIs)
PRACTICEMODE = False
# if this is set to false, we won't update any order info
UPDATEORDERINFO = True
class BetMain(object):
    """Main trading driver: refreshes prices, runs strategies, places orders."""
    def __init__(self, deltat):
        """deltat is time between price refreshes in seconds."""
        self.clock = betman.all.clock.Clock(deltat)
        self.dbman = database.DBMaster()
        self.load_strategies()
        # market ids for all strategies (for updating prices)
        self.marketids = self.stratgroup.get_marketids()
        # we store selection objects as a dictionary of dictionaries.
        # This contains the selection objects (e.g. a particular
        # horse), and the selection objects contain the current price,
        # hence the name.
        self.prices = {const.BDAQID: {}, const.BFID: {}}
        # orders for both exchanges
        self.orders = {const.BDAQID: {}, const.BFID: {}}
        # call the API functions to refresh prices etc.
        self.on_startup()
    def load_strategies(self):
        """Load strategies."""
        # load from pickle file
        try:
            sgroup = betutil.unpickle_stratgroup()
        except IOError:
            betlog.betlog.warn(('Could not read pickle file: attempting'
                                ' to read strategies from database'))
            # save strategies from DB to pickle file
            betutil.save_strategies()
            # load from the pickle file
            sgroup = betutil.unpickle_stratgroup()
        betlog.betlog.debug("Loaded {0} strategies".format(len(sgroup)))
        self.stratgroup = sgroup
    def update_market_prices(self):
        """
        Get new prices. Here we use python's multiprocessing module
        to make the requests to BDAQ and BF (approximately)
        simultaneously. Note that we do not write selection
        information to the database here.
        """
        betlog.betlog.debug('Updating prices for {0} strategies'\
                            .format(len(self.stratgroup)))
        # market ids for all strategies (for updating prices)
        self.marketids = self.stratgroup.get_marketids()
        betlog.betlog.debug('BDAQ market ids total: {0}'.\
                            format(len(self.marketids[const.BDAQID])))
        betlog.betlog.debug('BF market ids total: {0}'.\
                            format(len(self.marketids[const.BFID])))
        # this is multithreaded so that we send requests to BDAQ and
        # BF at roughly the same time.
        self.prices, emids = multi.update_prices(self.marketids)
        # remove any strategies from the strategy list that depend on
        # any of the BDAQ or BF markets in emids.
        for myid in [const.BDAQID, const.BFID]:
            if emids[myid]:
                self.stratgroup.remove_marketids(myid, emids[myid])
                # refresh the dict of market ids that our strategy
                # uses.
                self.marketids = self.stratgroup.get_marketids()
        # if we deleted any strategies, re-save the pickle so we only
        # load valid strategies at startup
        if emids[const.BDAQID] or emids[const.BFID]:
            betutil.pickle_stratgroup(self.stratgroup)
    def unmatched_orders(self, exid):
        """Return list of unmatched orders for exchange exid."""
        unmatched = []
        for o in self.orders[exid].values():
            if o.status == order.UNMATCHED:
                unmatched.append(o)
        return unmatched
    def update_order_information(self):
        """
        Get information on all current orders. Note that there are
        differences between BDAQ and BF here due to the APIs. For
        BDAQ, we will only get information back if orders have
        changed. For BF, we will get information back for all
        unmatched orders. The main repurcussions of this is that
        historical orders are available for every timepoint for BF,
        but only at timepoints for which a given order has changed for
        BDAQ (this applies to the table 'historders'.
        """
        if (not PRACTICEMODE) and (UPDATEORDERINFO):
            # get list of unmatched orders on BDAQ
            bdaqunmatched = self.unmatched_orders(const.BDAQID)
            # only want to call BDAQ API if we have unmatched bets
            if bdaqunmatched:
                # this should automatically keep track of a 'sequence
                # number', so that we are updating information about all
                # orders.
                bdaqors = bdaqapi.ListOrdersChangedSince()
                self.orders[const.BDAQID].update(bdaqors)
            # get list of unmatched orders on BF
            bfunmatched = self.unmatched_orders(const.BFID)
            if bfunmatched:
                # we pass this function the list of order objects;
                bfors = bfapi.GetBetStatus(bfunmatched)
                # update order dictionary
                self.orders[const.BFID].update(bfors)
    def make_orders(self, odict = None):
        """Make outstanding orders for all strategies."""
        # get dictionary of outstanding orders for all strategies.
        # Keys are const.BDAQID and const.BFID
        if odict is None:
            odict = self.stratgroup.get_orders_to_place()
        saveorders = {const.BDAQID: {}, const.BFID: {}}
        if PRACTICEMODE:
            # we don't make any real money bets in practice mode
            return
        if (odict[const.BDAQID]) or (odict[const.BFID]):
            betlog.betlog.debug('making orders: {0}'.format(odict))
            # call multithreaded make orders so that we make order
            # requests for BDAQ and BF simultaneously.
            saveorders = multi.make_orders(odict)
            # update the dictionary of orders that we have placed
            # since starting the application.
            self.orders[const.BDAQID].update(saveorders[const.BDAQID])
            self.orders[const.BFID].update(saveorders[const.BFID])
            # save the full order information to the DB
            self.save_orders(saveorders)
            # save the information on matching orders to the DB. Note
            # we are assuming here that if the number of orders on
            # each exchange are the same, then orders are made of
            # matching orders.
            if (len(odict[const.BDAQID]) == len(odict[const.BFID])):
                self.save_match_orders(odict, saveorders)
    def save_match_orders(self, odict, saveorders):
        """Save matching order ids to database table matchorders."""
        # since we got odict from each strategy in turn, they
        # are already in matching order; we just need to add
        # the order refs that were returned by the BDAQ and BF
        # API.
        matchorders = zip(odict[const.BDAQID], odict[const.BFID])
        for (o1, o2) in matchorders:
            # we need to get the order id for o1 and o2 from
            # saveorders dictionary
            for o in saveorders[const.BDAQID].values():
                if o1.sid == o.sid and o1.mid == o.mid:
                    o1.oref = o.oref
            for o in saveorders[const.BFID].values():
                if o2.sid == o.sid and o2.mid == o.mid:
                    o2.oref = o.oref
        # write to DB
        self.dbman.WriteOrderMatches(matchorders,
                                     datetime.datetime.now())
    def save_orders(self, sorders):
        """Flatten the per-exchange order dicts and write them all to the DB."""
        ords = [o.values() for o in sorders.values()]
        allords = [item for subl in ords for item in subl]
        # time we are writing is going to be a bit off
        self.dbman.WriteOrders(allords, datetime.datetime.now())
    def on_startup(self):
        """Bootstrap order state, log in to BF and take an initial price snapshot."""
        # put all this stuff in __init__ eventually??
        # bootstrap BDAQ order information (we don't need to do this
        # for BF).
        ords = bdaqapi.ListBootstrapOrders()
        while ords:
            ords = bdaqapi.ListBootstrapOrders()
        # need to login to BF api (we don't need to do this for BF).
        bfapi.Login()
        # update market prices
        self.update_market_prices()
    def save_prices(self):
        """
        Save prices of all selections in self.prices dictionary to DB.
        """
        # could be a quicker way of getting flat selection list in
        # api/bf/bfnonapimethod.py
        allsels = []
        for exmid in self.prices:
            for mid in self.prices[exmid]:
                for sel in self.prices[exmid][mid]:
                    allsels.append(self.prices[exmid][mid][sel])
        # note that the time we are writing is going to be a bit off
        self.dbman.WriteSelections(allsels, datetime.datetime.now())
    def main_loop(self):
        """Run the refresh/think/bet cycle forever, one tick per deltat seconds."""
        # first tick initializes clock
        self.clock.tick()
        # second tick waits for the allocated time
        self.clock.tick()
        while True:
            # print intro stuff
            print time.asctime()
            print '-'*32
            # update fill status of orders etc
            self.update_order_information()
            # call BF and BDAQ API functions to get prices for all
            # relevant markets
            self.update_market_prices()
            # update the strategies: based on the most recent prices,
            # do the thinking, and create/cancel orders.
            self.stratgroup.update(self.prices)
            # make any outstanding orders and save order info.
            self.make_orders()
            # before next loop, save the updated prices.
            self.save_prices()
            self.clock.tick()
if __name__=='__main__':
    # Refresh prices every 3 seconds; main_loop never returns.
    bm = BetMain(3)
    bm.main_loop()
| [
"[email protected]"
]
| |
b35ccc9994ce54f39ce1781c925b783dfcee3c12 | e20e027fc4bc03bdcda6c73a77e07eab7ce9d4e9 | /Numpy Assignment 1.py | 38490e0ef83da9c619c925fad6a64132fef4e599 | []
| no_license | aparna31ar/Numpy-Assignment-1 | 47eae1bbe741e3e2cbfb439aa5c761b552eb85fe | 2d674587a85470a841d41d0335120902fbdcd566 | refs/heads/main | 2023-07-17T14:13:29.917923 | 2021-08-30T19:21:40 | 2021-08-30T19:21:40 | 401,460,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!/usr/bin/env python
# coding: utf-8
"""NumPy practice assignment: random sampling, reshaping, dot products,
set operations and sorting.

Converted from a Jupyter notebook.  Fixes: bare cell expressions (which
print nothing when run as a script) are wrapped in print(), and Q2 now
actually computes the requested mean value."""

import numpy as np

# ### Q1. 25 random numbers sampled from a standard normal distribution.
a = np.random.normal(0, 1, 25)
print("25 random numbers from a standard normal distribution:")
print(a)

# ### Q2. Random vector of size 30 and its mean value.
np.random.seed(8)
a = np.random.rand(30)
print("Random vector of size 30:")
print(a)
# Fix: the notebook asked for the mean but never computed it.
print("Mean value:", a.mean())

# ### Q3. Numbers 1..100 reshaped into a 10x10 matrix.
a = np.arange(1, 101)
print(a.reshape((10, 10)))

# ### Q4. 10x10 array with random values; its minimum and maximum.
np.random.seed(8)
a = np.random.randint(100, size=(10, 10))
print("The array of 10 x 10 matrix is:", "\n", a)
print("The minimum value is:", np.min(a))
print("The maximum value is:", np.max(a))

# ### Q5. Dot product of two arrays.
f = np.array([1, 2])
g = np.array([4, 5])
print(f)
print(g)
print(np.dot(f, g))

# ### Q6. Concatenate the arrays along axis=0.
x = np.array([[1, 2],
              [3, 4]])
y = np.array([[5, 6]])
print(np.concatenate((x, y), axis=0))

# ### Q7. Common items between two NumPy arrays.
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
print(np.intersect1d(a, b))

# ### Q8. Sort the numpy array.
arr = np.array([10, 5, 8, 4, 7, 2, 3, 1])
print(np.sort(arr))
| [
"[email protected]"
]
| |
a2b3241503da8be5bc7d9e319094cdac96c4489c | 34f55a8fda280e7dc1fe2f5f12cae4656edae708 | /python/direct_mail_result.py | cce468706cb05e44a4a2da010c12787fa41100a6 | []
| no_license | accuzip-opensource/direct-mail | 58a28c20892471d7c407674f6d78777519074f1e | c32e25b5bcf07647502ef24c0cf041bd071ceac6 | refs/heads/master | 2021-01-12T07:51:43.320289 | 2017-02-28T14:00:24 | 2017-02-28T14:00:24 | 77,045,264 | 1 | 1 | null | 2017-02-01T13:50:44 | 2016-12-21T11:18:39 | PHP | UTF-8 | Python | false | false | 220 | py | # http POST
import requests
import sys
# Download the processed mailing-list preview for the job GUID given as the
# first command-line argument, saving it as prev.csv in the working directory.
# NOTE(review): this issues a GET, not a POST - confirm which verb the
# service actually expects.
r = requests.get( "http://cloud2.iaccutrace.com/ws_360_webapps/v2_0/download.jsp?guid="+sys.argv[1]+"&ftype=prev.csv" )
with open("prev.csv", "wb") as code:
    code.write(r.content)
"[email protected]"
]
| |
eae5c7e381d3463e30033e379a629f9d06f562cb | 1de66a8d0efb90cf7b8d461729f6f287d6b66346 | /MySocketExp/old_west_final/Game/Game/ClientControl.py | 073040f1795c4640469b236f198aa43cfd08f789 | []
| no_license | KhorAMus/course-project | ede14f3e67358267b92cc8fc3e47e1aff2ff4b55 | a3a99385639a410cf1b8a5da978f8ef6ba07c741 | refs/heads/master | 2021-01-20T22:23:47.843404 | 2016-06-22T15:20:58 | 2016-06-22T15:20:58 | 61,729,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | class ClientControl:
key_codes = {'w': '87',
'a': '65',
's': '83',
'd': '68'}
    def __init__(self, shooter, client):
        """Bind the controlled shooter entity to the controlling client."""
        self.keys = {}  # currently-held keys; not used elsewhere in this class
        self.shooter = shooter
        self.client = client
#libGDX Box2D
def press(self, key):
print("user " + self.client.name+" press "+ str(key))
if key == 87:
self.shooter.position.y -= 1
if key == 83:
self.shooter.position.y += 1
if key == 65:
self.shooter.position.x -= 1
if key == 68:
self.shooter.position.x += 1
print("x : " + str(self.shooter.position.x))
print("y : " + str(self.shooter.position.y))
| [
"[email protected]"
]
| |
5d595b4de93f1e9d12f91a988cbcee8784d4310a | da20785e29f159a59747e0b86729a52ffbf0f8d7 | /app/perimeter/perimeter_webapp/perimeter_dashboard/views/reports/html.py | f193bdce4cf87e68b665f2b1884c7e529e9c6181 | []
| no_license | neet-lord/perimeter | eab622671dcf6e504b8c30baf0f494e3564bc192 | 47e21f009991b776b3e2a75845365dfda7e72c05 | refs/heads/master | 2022-12-29T09:42:03.924282 | 2020-10-19T14:46:36 | 2020-10-19T14:46:36 | 181,716,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from django.shortcuts import loader
from django.http import HttpResponse
from perimeter.perimeter_core.models.networks import Network
from perimeter.perimeter_core.models.clusters import Cluster
from perimeter.perimeter_core.models.nodes import Node
def index(request):
    """Render the reports dashboard listing every network, cluster and node."""
    template = loader.get_template('perimeter_dashboard/reports/index.dtl.html')
    networks = Network.objects.all()
    clusters = Cluster.objects.all()
    nodes = Node.objects.all()
    context = {
        'networks': networks,
        'clusters': clusters,
        'nodes': nodes,
    }
    return HttpResponse(
        template.render(context, request)
    )
"[email protected]"
]
| |
369b9da392cd388ba088dbbcc071c86251091562 | e11d75549243eefa3332c0ee37be645f45dba337 | /Homework/homework_10_kirchoffs_rules/non-ideal_battery.py | 1e76c93fecb20d77abe215e31be488a35993c66e | []
| no_license | esemeniuc/phys121 | 142c3668228689204e45c0698432afe5e3a25a38 | be70590e1c32bc8c6038797bfc2bb0cc7e20c12b | refs/heads/master | 2021-09-09T11:42:05.932932 | 2018-03-15T19:51:50 | 2018-03-15T19:51:50 | 50,221,953 | 37 | 14 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | from sympy import *
# Circuit constants: EMF and measured terminal voltage (volts), resistors (ohms).
V = 12.0
v_b = 11.8
r_1 = r_3 = 43.0
r_4 = r_5 = 84.0
r_2 = 150.0
# Symbolic unknowns: branch currents and internal resistance.
i_1, i_2, i_3, i_4, i_5, r = symbols('i_1 i_2 i_3 i_4 i_5 r')
# 1) What is i_1, the current that flows through the resistor r_1?
# r_1 in series with (r_2 parallel to the series chain r_3 + r_4 + r_5).
r_equiv = r_1 + 1/(1/r_2 + 1/(r_3 + r_4 + r_5))
print( solve( V - (V * r)/(r_equiv) - v_b, r))
# Yup still haven't figured out how to grab values from solve, first time using python gime a break...
# NOTE(review): the solved value below is copied by hand from the output above.
r = 2.17788550323176
i_1 = V/(r_equiv + r)
print( 1000 * i_1)
# 2) What is r, the internal resistance of the battery?
# A fresh symbol is needed because the name r was rebound to a float above.
r_fuck_it_wont_accept_the_other_r = symbols('r_fuck_it_wont_accept_the_other_r')
print( solve( V - i_1 * r_fuck_it_wont_accept_the_other_r - v_b, r_fuck_it_wont_accept_the_other_r))
# 3) What is i_3, the current through resistor r_3?
# Kirchhoff: junction rule plus the loop through r_2 vs r_3/r_4/r_5,
# and the series constraints i_3 = i_4 = i_5.
print( solve( (i_3 + i_2 - i_1,
               r_2 * i_2 - r_3 * i_3 - r_4 * i_4 - r_5 * i_5,
               i_3 - i_4,
               i_3 - i_5,
               ), i_2, i_3, i_4, i_5))
# 4) Power dissipated in r_2 (value of i_2 copied by hand from step 3).
i_2 = 0.0527948572971549
print(i_2 * i_2 * r_2)
# 5) Voltage across r_2.
print( i_2 * r_2)
| [
"[email protected]"
]
| |
1357ba73d00221123a4df957e5fb2229a2b6f843 | 33c9398eb53301cc3e3a71e29f610e8ab0fc8c55 | /colorann.py | af7da25012968e3b99ae9f4fbe8414174e4dcffd | []
| no_license | denzel-bogues/python-color-project | 2cc45acf358812c798a607f8de73c7e0e4067113 | c510512b4af73f544fb9859a483791f0deef649c | refs/heads/master | 2020-06-26T22:05:04.605705 | 2019-08-13T23:53:17 | 2019-08-13T23:53:17 | 199,769,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | import sys
import pandas as p
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import AppWindow
AppWindow.call_ui()
# Known-colours table: name, hex code and R/G/B components per row.
data_read = p.read_csv("colors.csv", delimiter = ',', names=['Color names', 'Hex', 'R', 'G', 'B',])
# data_red = data_read[['R', 'G', 'B']]
R = input('Enter red value ')
G = input('Enter greem value ')
B = input('Enter blue value ')
userdata = [R, G, B]
user_df = p.DataFrame(userdata)
# Transpose the 3x1 column into a single 1x3 row matching the CSV layout.
in_read = p.DataFrame.transpose(p.DataFrame(user_df))
in_read.columns = ['R', 'G', 'B']
# input() returns strings; cast to int so the merge keys match the CSV dtypes.
in_read['R'] = in_read['R'].astype(int)
in_read['G'] = in_read['G'].astype(int)
in_read['B'] = in_read['B'].astype(int)
# Inner join keeps only colours whose RGB triple equals the user's input.
desired_df = p.merge(data_read, in_read, on=['R', 'G', 'B'], how='inner')
print(desired_df['Color names'])
"""
print(in_read)
print(is_string_dtype(in_read['G']))
print(is_numeric_dtype(in_read['G']))
print(p.merge(data_read, in_read, on=['R', 'G', 'B'], how='inner'))
"""
| [
"[email protected]"
]
| |
47b129f352e4fa6c43d2569a27328004ab5b8e7f | 9a6ff88fb3bf3f69ade803105ee9f401ef57b11f | /Lab. 9/Лаб.9 Завд. 2.py | 0003202d2b180beaab4707e490b9b8ca49bf1ebe | []
| no_license | IvanyukAndrew/Labs | 4647ce455742ed12a96bb132c48350e96ce636ee | 4c358ebb7072444229f161579f30d6080e7ba0b0 | refs/heads/main | 2023-02-05T10:48:41.189068 | 2021-01-03T09:18:41 | 2021-01-03T09:18:41 | 320,326,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | array_of_CookDict = []
# All recipes entered so far; each entry is a dict with the keys listed
# in _SEARCH_FIELDS below.  (Declared here so the search is self-contained.)
array_of_CookDict = []

# Menu choice -> dictionary key to compare against.
_SEARCH_FIELDS = {1: "Name_of_dish",
                  2: "Number_of_components",
                  3: "List_of_components",
                  4: "Time_for_cook"}


def serch(choose, criterial):
    """Print every stored dish whose field selected by *choose* equals *criterial*.

    choose: 1 = dish name, 2 = number of components, 3 = component list,
    4 = cooking time.  Unknown values of *choose* print nothing, matching
    the original behaviour.  Fix: collapses four copy-pasted branches
    into a single field-mapped loop.
    """
    field = _SEARCH_FIELDS.get(choose)
    if field is None:
        return
    for dish in array_of_CookDict:
        if dish[field] == criterial:
            print(dish)
while True:
    print("\n")
    # Menu (Ukrainian): 1 = print all dishes, 2 = enter a dish, 3 = quit.
    print("1. Вивести всю інформацію\n"
          "2. Вести дані про страву\n"
          "3. Кінець\n")
    choose = int(input("Напишітть цифру:"))
    if choose == 1:
        # Dump every stored dish dictionary.
        for i in range(len(array_of_CookDict)):
            print(array_of_CookDict[i])
    if choose == 2:
        # Collect one dish record from the user and store it.
        Name_of_dish = input("Name of dish: ")
        Number_of_components = int(input("Number of components: "))
        List_of_components = input("List of components: ")
        Time_for_cook = int(input("Time for cook: "))
        CookDict = {"Name_of_dish": Name_of_dish, "Number_of_components": Number_of_components,
                    "List_of_components": List_of_components, "Time_for_cook": Time_for_cook}
        array_of_CookDict.append(CookDict)
    elif choose == 3:
        break
    else:
        # Any other number: ask again (message is Ukrainian for "enter a valid number").
        print("Ведіть коректне число\n")
"[email protected]"
]
| |
b99727124520efc1555a5d51225f48be9156a9ec | d8e4dece3a4c35c30ec6a90f6dc7bcf4ff43b4b4 | /searcher/server/query.py | 7e74d75d9f400b0199527b5f3e37b231f9a95987 | []
| no_license | wikty/MiniSearchEngine | c17160a9b65f462fa0690723aa860c6092dea97e | 63f7ef576f48b780fb8cf7fd3f6d955bc0037efd | refs/heads/master | 2021-01-13T02:44:39.591042 | 2018-09-13T06:40:47 | 2018-09-13T06:40:47 | 77,355,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | from searcher.indexer.pipelines import Pipeline
from .ranker import rank
from .extractor import extract
def process(db, query):
    """Preprocess the query into terms, look up, rank and extract the documents."""
    terms, _ = Pipeline.preprocess(query)
    doc_info = db.get_doc_info(terms)
    ranked_docs = rank(db, doc_info, terms)
    return extract(ranked_docs)
"[email protected]"
]
| |
6c14d18a91a50a9c37ac43de179ee729a11704fa | 546f4c1e4c3e8298d82328667ded4d46eacab3b5 | /openwx/client.py | b2c6a2dcf169e41baec8c3de96934d7232654955 | []
| no_license | suchennuo/book-example | 3a2f9759a899d425546be925b4794da6d27c8dad | 044ad0110c85fdd024e4650dd3ae66cb22c66dcb | refs/heads/master | 2022-12-14T10:16:44.885594 | 2019-11-08T08:52:11 | 2019-11-08T08:52:11 | 81,738,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,726 | py | import time
import requests
from requests.compat import json as _json
from openwx.utils import to_text
"""
requests
"""
class ClientException(Exception):
    """Raised when the WeChat API responds with a non-zero error code."""
    pass


def check_error(json):
    """Return *json* unchanged, raising ClientException on a non-zero errcode."""
    code = json["errcode"] if "errcode" in json else 0
    if code != 0:
        raise ClientException("{0}: {1}".format(code, json["errmsg"]))
    return json
class Client(object):
"""
微信 API 操作类
主动发信息,创建自定义菜单等
"""
def __init__(self, config):
self.config = config
self._token = None
self.token_expires_at = None
@property
def appid(self):
return self.config.get("APP_ID", None)
@property
def appsecret(self):
return self.config.get("APP_SECRET", None)
@property
def token(self):
return self.get_access_token()
def request(self, method, url, **kwargs):
if "params" not in kwargs:
kwargs["params"] = {"access_token": self.token}
if isinstance(kwargs.get("data", ""), dict):
body = _json.dumps(kwargs["data"], ensure_ascii=False)
# ensure_ascii 默认 true ,会对所有 非 ASCII 转义
body = body.encode('utf8')
kwargs["data"] = body
r = requests.request(
method=method,
url=url,
**kwargs
)
r.raise_for_status() # 检查请求是否成功
r.encoding = "utf-8"
json = r.json()
print("response json {}".format(json))
if check_error(json):
return json
def get(self, url, **kwargs):
return self.request(
method="get",
url=url,
**kwargs
)
def post(self, url, **kwargs):
return self.request(
method="post",
url=url,
**kwargs
)
def grant_token(self):
"""
获取 access token
:return:
"""
print("grant_token {} {} ".format(self.appid, self.appsecret))
return self.get(
url="https://api.weixin.qq.com/cgi-bin/token",
params={
"grant_type":"client_credential",
"appid": self.appid,
"secret": self.appsecret
}
)
def get_access_token(self):
"""
判断现有token是否过期。
需要多进程或者多机器部署需要重写这个函数来自定义 token 的存储,刷新测量
:return:
"""
if self._token:
now = time.time()
print("token expires {}".format(self.token_expires_at))
if self.token_expires_at - now > 60:
return self._token
json = self.grant_token()
self._token = json["access_token"]
self.token_expires_at = int(time.time()) + json["expires_in"]
return self._token
def send_text_message(self, user_id, content):
"""
发送文本消息
:param user_id:
:param content:
:return: 返回的 Json 数据包
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser": user_id,
"msgtype": "text",
"text": {"content": content}
}
)
def send_image_message(self, user_id, media_id):
"""
发送图片消息
:param user_id:
:param media_id:
:return:
"""
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data={
"touser":user_id,
"msgtype":"image",
"image":{
"media_id":media_id
}
}
)
def get_user_info(self, user_id, lang='zh_CN'):
    """Fetch a follower's basic profile information.

    :param user_id: the follower's openid.
    :param lang: language for localized fields (default ``zh_CN``).
    :return: decoded JSON response.
    """
    return self.get(
        url="https://api.weixin.qq.com/cgi-bin/user/info",
        params={
            "access_token": self.token,
            "openid": user_id,
            "lang": lang
        }
    )
def create_menu(self, menu_data):
    """Create the official-account menu.

    :param menu_data: menu definition as a Python dict.
    :return: decoded JSON response.
    """
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/menu/create",
        data=menu_data
    )
def create_custom_menu(self, menu_data, matchrule):
    """Create a personalized (conditional) menu.

    :param menu_data: list of menu buttons.
    :param matchrule: targeting rule selecting which followers see it.
    :return: decoded JSON response.
    """
    return self.post(
        # BUGFIX: was plain http://; every other endpoint in this client
        # uses TLS, and the WeChat API is served over https.
        url="https://api.weixin.qq.com/cgi-bin/menu/addconditional",
        data={
            "button": menu_data,
            "matchrule": matchrule
        }
    )
| [
"[email protected]"
]
| |
5b8806c5816b969ef781ecc7efd146fd3438c4fe | 0872781c14769f972c141c54578990488a23ef97 | /Fewshotchestmotion/Draw_DifferentTrainSet.py | 0616d6297a54a765a978c72f4766cf362127f78d | []
| no_license | MrWang98/ChestLive | 9dcb84e128038d3bbe5eef6586f50dc55b32cbc1 | 94df5f99f1a9c68bf18206bae67542fdbaa71e61 | refs/heads/master | 2023-07-11T06:01:14.511016 | 2021-08-15T06:38:14 | 2021-08-15T06:38:14 | 339,718,379 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | import pandas as pd
# Plot the ChestLive training-set curves for five users, loaded from a
# MATLAB .mat export (one curve per user over a shared time axis).
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pylab as pl
import scipy.io as scio
# Path to the MATLAB export produced by the training pipeline.
ChestTrainResultPath = 'ChestLiveTrainingSet.mat'
ChestTrainResult = scio.loadmat(ChestTrainResultPath)
print(ChestTrainResult.keys())
# One array per user (keys U1..U5) plus the shared x-axis ('times').
User1 = ChestTrainResult['U1']
User2 = ChestTrainResult['U2']
User3 = ChestTrainResult['U3']
User4 = ChestTrainResult['U4']
User5 = ChestTrainResult['U5']
Times = ChestTrainResult['times']
# Overlay all five curves on one figure; the per-user color/width options
# were experimented with and left commented on each line.
plt.plot(Times, User1) #, color="r", linewidth=3.0
plt.plot(Times, User2) #, color="m", linewidth=3.0
plt.plot(Times, User3) #, color="y", linewidth=3.0
plt.plot(Times, User4) #, color="g", linewidth=3.0
plt.plot(Times, User5) #, color="c", linewidth=3.0
plt.show()
| [
"[email protected]"
]
| |
c3eb31bfb07ff76ae317c2d91ec0b1541e01c7c7 | 687fed3e95103b20b804a78659ea79e7918b6aa6 | /maec/bundle/capability.py | 481691ff26e4dd189d834fbd4d0658cf1b75d172 | [
"BSD-3-Clause"
]
| permissive | geliefan/python-maec | 02886af1dd3fc07bd89a5323a81920e126a960b4 | dd539b1214f5cf1f445cd5989ce3f93e4fb3b2a8 | refs/heads/master | 2021-01-18T13:04:26.768906 | 2014-07-07T17:16:56 | 2014-07-07T17:16:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,716 | py | #MAEC Capability Class
#Copyright (c) 2014, The MITRE Corporation
#All rights reserved
#Compatible with MAEC v4.1
#Last updated 02/18/2014
import maec
import maec.bindings.maec_bundle as bundle_binding
from maec.bundle.behavior_reference import BehaviorReference
from cybox.common import VocabString, String
class CapabilityObjectiveReference(maec.Entity):
    """Reference (by id) to a Capability Objective defined elsewhere."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityObjectiveReference, self).__init__()
        self.objective_idref = None  # id of the referenced objective

    def to_obj(self):
        """Serialize to the generated binding object."""
        binding_obj = bundle_binding.CapabilityObjectiveReferenceType()
        if self.objective_idref is not None:
            binding_obj.set_objective_idref(self.objective_idref)
        return binding_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        result = {}
        if self.objective_idref is not None:
            result['objective_idref'] = self.objective_idref
        return result

    @staticmethod
    def from_obj(capability_objective_reference_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_objective_reference_obj:
            return None
        reference = CapabilityObjectiveReference()
        reference.objective_idref = capability_objective_reference_obj.get_objective_idref()
        return reference

    @staticmethod
    def from_dict(capability_objective_reference_dict):
        """Deserialize from a dictionary; returns None for falsy input."""
        if not capability_objective_reference_dict:
            return None
        reference = CapabilityObjectiveReference()
        reference.objective_idref = capability_objective_reference_dict['objective_idref']
        return reference
class CapabilityReference(maec.Entity):
    """Reference (by id) to a Capability defined elsewhere in the Bundle."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityReference, self).__init__()
        self.capability_idref = None  # id of the referenced Capability

    def to_obj(self):
        """Serialize to the generated binding object."""
        binding_obj = bundle_binding.CapabilityReferenceType()
        if self.capability_idref is not None:
            binding_obj.set_capability_idref(self.capability_idref)
        return binding_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        result = {}
        if self.capability_idref is not None:
            result['capability_idref'] = self.capability_idref
        return result

    @staticmethod
    def from_obj(capability_reference_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_reference_obj:
            return None
        reference = CapabilityReference()
        reference.capability_idref = capability_reference_obj.get_capability_idref()
        return reference

    @staticmethod
    def from_dict(capability_reference_dict):
        """Deserialize from a dictionary; returns None for falsy input."""
        if not capability_reference_dict:
            return None
        reference = CapabilityReference()
        reference.capability_idref = capability_reference_dict['capability_idref']
        return reference
class CapabilityObjectiveRelationship(maec.Entity):
    """Typed relationship from a Capability Objective to other Objectives."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityObjectiveRelationship, self).__init__()
        self.relationship_type = None   # VocabString naming the relationship
        self.objective_reference = []   # list of CapabilityObjectiveReference

    def to_obj(self):
        """Serialize to the generated binding object."""
        rel_obj = bundle_binding.CapabilityObjectiveRelationshipType()
        if self.relationship_type is not None:
            rel_obj.set_Relationship_Type(self.relationship_type.to_obj())
        if self.objective_reference is not None:
            for objective_ref in self.objective_reference:
                rel_obj.add_Objective_Reference(objective_ref.to_obj())
        return rel_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        rel_dict = {}
        if self.relationship_type is not None:
            rel_dict['relationship_type'] = self.relationship_type.to_dict()
        if self.objective_reference is not None:
            rel_dict['objective_reference'] = [x.to_dict() for x in self.objective_reference]
        return rel_dict

    @staticmethod
    def from_obj(capability_obj_rel_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_obj_rel_obj:
            return None
        rel_ = CapabilityObjectiveRelationship()
        rel_.relationship_type = VocabString.from_obj(capability_obj_rel_obj.get_Relationship_Type())
        if capability_obj_rel_obj.get_Objective_Reference():
            rel_.objective_reference = [CapabilityObjectiveReference.from_obj(x) for x in capability_obj_rel_obj.get_Objective_Reference()]
        return rel_

    @staticmethod
    def from_dict(capability_obj_rel_dict):
        """Deserialize from a dictionary; returns None for falsy input.

        BUGFIX: the original instantiated CapabilityRelationship here
        (copy-paste error), returning an object of the wrong class. Also
        uses .get() so partial dictionaries no longer raise KeyError.
        """
        if not capability_obj_rel_dict:
            return None
        rel_ = CapabilityObjectiveRelationship()
        rel_.relationship_type = VocabString.from_dict(capability_obj_rel_dict.get('relationship_type'))
        if capability_obj_rel_dict.get('objective_reference'):
            rel_.objective_reference = [CapabilityObjectiveReference.from_dict(x) for x in capability_obj_rel_dict.get('objective_reference')]
        return rel_
class CapabilityRelationship(maec.Entity):
    """Typed relationship from one Capability to other Capabilities."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityRelationship, self).__init__()
        self.relationship_type = None   # VocabString naming the relationship
        self.capability_reference = []  # list of CapabilityReference targets

    def to_obj(self):
        """Serialize to the generated binding object."""
        rel_obj = bundle_binding.CapabilityRelationshipType()
        if self.relationship_type is not None:
            rel_obj.set_Relationship_Type(self.relationship_type.to_obj())
        if self.capability_reference is not None:
            for capability_ref in self.capability_reference:
                rel_obj.add_Capability_Reference(capability_ref.to_obj())
        return rel_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        rel_dict = {}
        if self.relationship_type is not None:
            rel_dict['relationship_type'] = self.relationship_type.to_dict()
        if self.capability_reference is not None:
            rel_dict['capability_reference'] = [x.to_dict() for x in self.capability_reference]
        return rel_dict

    @staticmethod
    def from_obj(capability_rel_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_rel_obj:
            return None
        rel_ = CapabilityRelationship()
        rel_.relationship_type = VocabString.from_obj(capability_rel_obj.get_Relationship_Type())
        if capability_rel_obj.get_Capability_Reference():
            rel_.capability_reference = [CapabilityReference.from_obj(x) for x in capability_rel_obj.get_Capability_Reference()]
        return rel_

    @staticmethod
    def from_dict(capability_rel_dict):
        """Deserialize from a dictionary; returns None for falsy input.

        ROBUSTNESS: uses .get() so partial dictionaries no longer raise
        KeyError (consistent with Capability.from_dict).
        """
        if not capability_rel_dict:
            return None
        rel_ = CapabilityRelationship()
        rel_.relationship_type = VocabString.from_dict(capability_rel_dict.get('relationship_type'))
        if capability_rel_dict.get('capability_reference'):
            rel_.capability_reference = [CapabilityReference.from_dict(x) for x in capability_rel_dict.get('capability_reference')]
        return rel_
class CapabilityObjective(maec.Entity):
    """A strategic or tactical objective associated with a Capability."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityObjective, self).__init__()
        self.id_ = maec.utils.idgen.create_id(prefix="capability_objective")
        self.name = None                # VocabString
        self.description = None         # plain string
        self.property = []              # CapabilityProperty entries
        self.behavior_reference = []    # BehaviorReference entries
        self.relationship = []          # CapabilityObjectiveRelationship entries

    def to_obj(self):
        """Serialize to the generated binding object."""
        objective_obj = bundle_binding.CapabilityObjectiveType()
        if self.id_ is not None:
            objective_obj.set_id(self.id_)
        if self.name is not None:
            objective_obj.set_Name(self.name.to_obj())
        if self.description is not None:
            objective_obj.set_Description(self.description.to_obj())
        if self.property:
            for prop in self.property:
                objective_obj.add_Property(prop.to_obj())
        if self.behavior_reference:
            for behavior_ref in self.behavior_reference:
                objective_obj.add_Behavior_Reference(behavior_ref.to_obj())
        if self.relationship:
            for rel in self.relationship:
                objective_obj.add_Relationship(rel.to_obj())
        return objective_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        objective_dict = {}
        if self.id_ is not None:
            objective_dict['id'] = self.id_
        if self.name is not None:
            objective_dict['name'] = self.name.to_dict()
        if self.description is not None:
            objective_dict['description'] = self.description
        if self.property:
            objective_dict['property'] = [x.to_dict() for x in self.property]
        if self.behavior_reference:
            objective_dict['behavior_reference'] = [x.to_dict() for x in self.behavior_reference]
        if self.relationship:
            objective_dict['relationship'] = [x.to_dict() for x in self.relationship]
        return objective_dict

    @staticmethod
    def from_obj(capability_objective_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_objective_obj:
            return None
        objective = CapabilityObjective()
        if capability_objective_obj.get_id():
            objective.id_ = capability_objective_obj.get_id()
        objective.name = VocabString.from_obj(capability_objective_obj.get_Name())
        objective.description = capability_objective_obj.get_Description()
        if capability_objective_obj.get_Property():
            objective.property = [CapabilityProperty.from_obj(x) for x in capability_objective_obj.get_Property()]
        if capability_objective_obj.get_Behavior_Reference():
            objective.behavior_reference = [BehaviorReference.from_obj(x) for x in capability_objective_obj.get_Behavior_Reference()]
        if capability_objective_obj.get_Relationship():
            objective.relationship = [CapabilityObjectiveRelationship.from_obj(x) for x in capability_objective_obj.get_Relationship()]
        return objective

    @staticmethod
    def from_dict(capability_objective_dict):
        """Deserialize from a dictionary; returns None for falsy input."""
        if not capability_objective_dict:
            return None
        objective = CapabilityObjective()
        if capability_objective_dict.get('id'):
            objective.id_ = capability_objective_dict.get('id')
        objective.name = VocabString.from_dict(capability_objective_dict.get('name'))
        objective.description = capability_objective_dict.get('description')
        if capability_objective_dict.get('property'):
            objective.property = [CapabilityProperty.from_dict(x) for x in capability_objective_dict.get('property')]
        if capability_objective_dict.get('behavior_reference'):
            objective.behavior_reference = [BehaviorReference.from_dict(x) for x in capability_objective_dict.get('behavior_reference')]
        if capability_objective_dict.get('relationship'):
            objective.relationship = [CapabilityObjectiveRelationship.from_dict(x) for x in capability_objective_dict.get('relationship')]
        return objective
class CapabilityProperty(maec.Entity):
    """A simple name/value property attached to a Capability or Objective."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityProperty, self).__init__()
        self.name = None   # VocabString
        self.value = None  # String

    def to_obj(self):
        """Serialize to the generated binding object."""
        prop_obj = bundle_binding.CapabilityPropertyType()
        if self.name is not None:
            prop_obj.set_Name(self.name.to_obj())
        if self.value is not None:
            prop_obj.set_Value(self.value.to_obj())
        return prop_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        prop_dict = {}
        if self.name is not None:
            prop_dict['name'] = self.name.to_dict()
        if self.value is not None:
            prop_dict['value'] = self.value.to_dict()
        return prop_dict

    @staticmethod
    def from_obj(capability_property_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_property_obj:
            return None
        prop = CapabilityProperty()
        prop.name = VocabString.from_obj(capability_property_obj.get_Name())
        prop.value = String.from_obj(capability_property_obj.get_Value())
        return prop

    @staticmethod
    def from_dict(capability_property_dict):
        """Deserialize from a dictionary; returns None for falsy input.

        ROBUSTNESS: uses .get() so partial dictionaries no longer raise
        KeyError (consistent with Capability.from_dict).
        """
        if not capability_property_dict:
            return None
        prop = CapabilityProperty()
        prop.name = VocabString.from_dict(capability_property_dict.get('name'))
        prop.value = String.from_dict(capability_property_dict.get('value'))
        return prop
class Capability(maec.Entity):
    """A Capability (high-level ability) exhibited by the malware instance.

    Aggregates properties, strategic/tactical objectives, references to the
    Behaviors that implement it, and relationships to other Capabilities.
    """

    _namespace = maec.bundle._namespace

    def __init__(self, id=None, name=None):
        # NOTE: 'id' shadows the builtin but is kept for API compatibility.
        super(Capability, self).__init__()
        self.id_ = id if id else maec.utils.idgen.create_id(prefix="capability")
        self.name = name
        self.description = None
        self.property = []
        self.strategic_objective = []
        self.tactical_objective = []
        self.behavior_reference = []
        self.relationship = []

    def add_tactical_objective(self, tactical_objective):
        """Append a tactical CapabilityObjective."""
        self.tactical_objective.append(tactical_objective)

    def add_strategic_objective(self, strategic_objective):
        """Append a strategic CapabilityObjective."""
        self.strategic_objective.append(strategic_objective)

    def to_obj(self):
        """Serialize to the generated binding object."""
        cap_obj = bundle_binding.CapabilityType()
        if self.id_ is not None:
            cap_obj.set_id(self.id_)
        if self.name is not None:
            cap_obj.set_name(self.name)
        if self.description is not None:
            cap_obj.set_Description(self.description)
        if self.property:
            for prop in self.property:
                cap_obj.add_Property(prop.to_obj())
        if self.strategic_objective:
            for strategic_obj in self.strategic_objective:
                cap_obj.add_Strategic_Objective(strategic_obj.to_obj())
        if self.tactical_objective:
            for tactical_obj in self.tactical_objective:
                cap_obj.add_Tactical_Objective(tactical_obj.to_obj())
        if self.behavior_reference:
            for behavior_ref in self.behavior_reference:
                cap_obj.add_Behavior_Reference(behavior_ref.to_obj())
        if self.relationship:
            for rel in self.relationship:
                cap_obj.add_Relationship(rel.to_obj())
        return cap_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        cap_dict = {}
        if self.id_ is not None:
            cap_dict['id'] = self.id_
        if self.name is not None:
            cap_dict['name'] = self.name
        if self.description is not None:
            cap_dict['description'] = self.description
        if self.property:
            cap_dict['property'] = [x.to_dict() for x in self.property]
        if self.strategic_objective:
            cap_dict['strategic_objective'] = [x.to_dict() for x in self.strategic_objective]
        if self.tactical_objective:
            cap_dict['tactical_objective'] = [x.to_dict() for x in self.tactical_objective]
        if self.behavior_reference:
            cap_dict['behavior_reference'] = [x.to_dict() for x in self.behavior_reference]
        if self.relationship:
            cap_dict['relationship'] = [x.to_dict() for x in self.relationship]
        return cap_dict

    @staticmethod
    def from_dict(capability_dict):
        """Deserialize from a dictionary; returns None for falsy input."""
        if not capability_dict:
            return None
        cap = Capability()
        if capability_dict.get('id'):
            cap.id_ = capability_dict.get('id')
        cap.name = capability_dict.get('name')
        cap.description = capability_dict.get('description')
        if capability_dict.get('property'):
            cap.property = [CapabilityProperty.from_dict(x) for x in capability_dict.get('property')]
        if capability_dict.get('strategic_objective'):
            cap.strategic_objective = [CapabilityObjective.from_dict(x) for x in capability_dict.get('strategic_objective')]
        if capability_dict.get('tactical_objective'):
            cap.tactical_objective = [CapabilityObjective.from_dict(x) for x in capability_dict.get('tactical_objective')]
        if capability_dict.get('behavior_reference'):
            cap.behavior_reference = [BehaviorReference.from_dict(x) for x in capability_dict.get('behavior_reference')]
        if capability_dict.get('relationship'):
            cap.relationship = [CapabilityRelationship.from_dict(x) for x in capability_dict.get('relationship')]
        return cap

    @staticmethod
    def from_obj(capability_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_obj:
            return None
        cap = Capability()
        if capability_obj.get_id():
            cap.id_ = capability_obj.get_id()
        cap.name = capability_obj.get_name()
        cap.description = capability_obj.get_Description()
        if capability_obj.get_Property():
            cap.property = [CapabilityProperty.from_obj(x) for x in capability_obj.get_Property()]
        if capability_obj.get_Strategic_Objective():
            cap.strategic_objective = [CapabilityObjective.from_obj(x) for x in capability_obj.get_Strategic_Objective()]
        if capability_obj.get_Tactical_Objective():
            cap.tactical_objective = [CapabilityObjective.from_obj(x) for x in capability_obj.get_Tactical_Objective()]
        if capability_obj.get_Behavior_Reference():
            cap.behavior_reference = [BehaviorReference.from_obj(x) for x in capability_obj.get_Behavior_Reference()]
        if capability_obj.get_Relationship():
            cap.relationship = [CapabilityRelationship.from_obj(x) for x in capability_obj.get_Relationship()]
        return cap
class CapabilityList(maec.Entity):
    """Container for Capabilities and Capability references in a Bundle."""

    _namespace = maec.bundle._namespace

    def __init__(self):
        super(CapabilityList, self).__init__()
        self.capability = []            # Capability entries
        self.capability_reference = []  # CapabilityReference entries

    def to_obj(self):
        """Serialize to the generated binding object."""
        list_obj = bundle_binding.CapabilityListType()
        if self.capability:
            for cap in self.capability:
                list_obj.add_Capability(cap.to_obj())
        if self.capability_reference:
            for cap_ref in self.capability_reference:
                list_obj.add_Capability_Reference(cap_ref.to_obj())
        return list_obj

    def to_dict(self):
        """Serialize to a plain dictionary."""
        list_dict = {}
        if self.capability:
            list_dict['capability'] = [x.to_dict() for x in self.capability]
        if self.capability_reference:
            list_dict['capability_reference'] = [x.to_dict() for x in self.capability_reference]
        return list_dict

    @staticmethod
    def from_obj(capability_list_obj):
        """Deserialize from a binding object; returns None for falsy input."""
        if not capability_list_obj:
            return None
        cap_list = CapabilityList()
        if capability_list_obj.get_Capability():
            cap_list.capability = [Capability.from_obj(x) for x in capability_list_obj.get_Capability()]
        if capability_list_obj.get_Capability_Reference():
            cap_list.capability_reference = [CapabilityReference.from_obj(x) for x in capability_list_obj.get_Capability_Reference()]
        return cap_list

    @staticmethod
    def from_dict(capability_list_dict):
        """Deserialize from a dictionary; returns None for falsy input."""
        if not capability_list_dict:
            return None
        cap_list = CapabilityList()
        if capability_list_dict.get('capability'):
            cap_list.capability = [Capability.from_dict(x) for x in capability_list_dict['capability']]
        if capability_list_dict.get('capability_reference'):
            cap_list.capability_reference = [CapabilityReference.from_dict(x) for x in capability_list_dict['capability_reference']]
        return cap_list
"[email protected]"
]
| |
e516aab52cfdc48890b5e3c8aa32812a43a1a189 | c523eff326b8bc6c0c903bf7fe16ec3b98605bff | /AejiJeon/ThisIsCT/sortingProblem/안테나/24.py | f1de39b4fc188cf214333107ab773354d6c40b55 | []
| no_license | IgoAlgo/Problem-Solving | c76fc157c4dd2afeeb72a7e4a1833b730a0b441d | 5cc57d532b2887cf4eec8591dafc5ef611c3c409 | refs/heads/master | 2023-06-26T05:12:02.449706 | 2021-07-14T06:57:00 | 2021-07-14T06:57:00 | 328,959,557 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | n = int(input())
# Antenna problem: the position minimizing the total absolute distance to
# all houses is a median of the sorted positions.
data = list(map(int, input().split()))
data.sort()
# (n-1)//2 is the lower-median index (n was read from stdin above).
print(data[(n-1)//2])
| [
"[email protected]"
]
| |
60dd8feecd68873824798a6ebff2b541ebdfc7ea | bfc38b283e2307d3cfb0b817209b7692bb602b16 | /source/wtables/io_tasks/GenerateTriples.py | 1257faae0bee6974a5a697f400ecef2d1c2a1766 | []
| no_license | wikitables/web-of-data-tables | 6fbc22293de088efd1bac39bee693fe03d5e1fed | 75891034e9ffa15a9e484ac741cb8d2197da0af7 | refs/heads/master | 2022-04-13T15:20:19.309330 | 2020-03-05T03:24:01 | 2020-03-05T03:24:01 | 116,946,910 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,555 | py | # -*- coding: utf-8 -*-
import pandas as pd
from wtables.wikidata_db.ConfigProperties import ConfigProperties
from wtables.wikidata_db.WikidataDAO import *
import sys
import time
from wtables.preprocessing.TextProcessing import TextProcessing
from nltk.stem.snowball import SnowballStemmer
import gzip
import logging
def generateTriples(file):
    """Write existing/missing relation triples for each linked entity pair.

    Reads the gzipped links file (tab separated; columns: table, row:col,
    col1##col2, link1, link2, wd1, wd2, relations) and, using the
    module-level cluster dictionaries, writes one tab-separated line per
    relation to FILE_OUT: label "1" for relations asserted for the pair,
    "0" for the remaining relations known for the same column pair in the
    table's cluster.

    :param file: path to the gzipped links file.
    """
    cont = 0
    # Counters for special column kinds (collected for diagnostics;
    # NOTE(review): they are never reported anywhere — confirm intent).
    contSpanTriples = 0
    contDescTriples = 0
    contNoteTriples = 0
    with gzip.open(FILE_OUT, 'wt') as fout:
        with gzip.open(file, "rt") as fin:
            for line in fin:
                print("Line: ", cont)
                cont += 1  # BUGFIX: the line counter was never incremented
                _line = line.replace("\n", "").split("\t")
                wd1 = _line[5]
                wd2 = _line[6]
                # Skip rows without two distinct Wikidata entities.
                if wd1 == "" or wd2 == "" or wd1 == wd2:
                    continue
                table = _line[0]
                cluster = dictTableCluster.get(table)
                if cluster is None:
                    continue
                row_col = _line[1]
                c1 = _line[2].split("##")[0]
                c2 = _line[2].split("##")[1]
                if "descrip" in c1 or "descrip" in c2:
                    contDescTriples += 1
                if "spancol" in c1 or "spancol" in c2:
                    contSpanTriples += 1
                if "note" in c1 or "note" in c2:
                    contNoteTriples += 1
                link1 = _line[3]
                link2 = _line[4]
                relations = _line[7]
                if relations is None or relations == "":
                    relations = []
                else:
                    relations = relations.replace("[", "").replace("]", "").split(",")
                    relations = set([r.strip() for r in relations])
                allrels = dictCluster.get(cluster).get(c1 + "##" + c2)
                if allrels is not None:
                    # Relations known for the cluster but not asserted here.
                    if len(relations) > 0:
                        newRelations = set(allrels) - set(relations)
                    else:
                        newRelations = allrels
                else:
                    continue
                # Positive examples: relations asserted for this pair.
                for rel in relations:
                    if rel == "":
                        continue
                    prop = wikidataDAO.getWikidataProp(rel.strip())
                    if prop is None:
                        propId = rel
                        propName = rel
                    else:
                        propId = prop.propId
                        propName = prop.propName
                    fout.write(str(
                        cluster) + "\t" + table + "\t" + row_col + "\t" + c1 + "\t" + c2 + "\t" + link1 + " :" + wd1 + "\t" + propId + " :" + propName + "\t" + link2 + " :" + wd2 + "\t" + "1" + "\n")
                # Negative examples: cluster relations not asserted here.
                for rel in newRelations:
                    if rel == "":
                        continue
                    prop = wikidataDAO.getWikidataProp(rel)
                    if prop is None:
                        propId = rel
                        propName = rel
                    else:
                        propId = prop.propId
                        propName = prop.propName
                    fout.write(str(
                        cluster) + "\t" + table + "\t" + row_col + "\t" + c1 + "\t" + c2 + "\t" + link1 + " :" + wd1 + "\t" + propId + " :" + propName + "\t" + link2 + " :" + wd2 + "\t" + "0" + "\n")
def generateTriples1(file):
    """Variant of generateTriples for the 13-column links file layout.

    Column layout: 0=table, 4=row:col, 5/6=column names, 7/8=links,
    9/10=Wikidata ids, 12=relations (a Python list literal). Writes the
    same triple format as generateTriples to FILE_OUT.

    :param file: path to the gzipped links file.
    """
    import ast  # local import: safe parsing of the relations list literal
    cont = 0
    with gzip.open(FILE_OUT, 'wt') as fout:
        with gzip.open(file, "rt") as fin:
            for line in fin:
                print("Line: ", cont)
                cont += 1
                _line = line.replace("\n", "").split("\t")
                if len(_line) < 13:
                    continue
                wd1 = _line[9]
                wd2 = _line[10]
                # Skip rows without two distinct Wikidata entities.
                if wd1 == "" or wd2 == "" or wd1 == wd2:
                    continue
                table = _line[0]
                cluster = dictTableCluster.get(table)
                if cluster is None:
                    continue
                print("cluster", cluster)
                row_col = _line[4]
                c1 = _line[5]
                c2 = _line[6]
                link1 = _line[7]
                link2 = _line[8]
                relations = _line[12]
                print("Pair:", c1 + "##" + c2)
                if relations is None or relations == "":
                    relations = []
                else:
                    # SECURITY: was eval() on file contents; literal_eval
                    # parses the same list literal without executing code.
                    relations = ast.literal_eval(relations)
                    relations = set([r.strip() for r in relations])
                allrels = dictCluster.get(cluster).get(c1 + "##" + c2)
                if allrels is not None:
                    if len(relations) > 0:
                        newRelations = set(allrels) - set(relations)
                    else:
                        newRelations = allrels
                else:
                    print("Pair columns not found.." + cluster + " " + table + " " + c1 + " " + c2)
                    continue
                # Positive examples: relations asserted for this pair.
                for rel in relations:
                    if rel == "":
                        continue
                    prop = wikidataDAO.getWikidataProp(rel.strip())
                    if prop is None:
                        print("None prop: " + rel)
                        propId = rel
                        propName = rel
                    else:
                        propId = prop.propId
                        propName = prop.propName
                    fout.write(str(
                        cluster) + "\t" + table + "\t" + row_col + "\t" + c1 + "\t" + c2 + "\t" + link1 + " :" + wd1 + "\t" + propId + " :" + propName + "\t" + link2 + " :" + wd2 + "\t" + "1" + "\n")
                # Negative examples: cluster relations not asserted here.
                for rel in newRelations:
                    if rel == "":
                        continue
                    prop = wikidataDAO.getWikidataProp(rel)
                    if prop is None:
                        propId = rel
                        propName = rel
                    else:
                        propId = prop.propId
                        propName = prop.propName
                    fout.write(str(
                        cluster) + "\t" + table + "\t" + row_col + "\t" + c1 + "\t" + c2 + "\t" + link1 + " :" + wd1 + "\t" + propId + " :" + propName + "\t" + link2 + " :" + wd2 + "\t" + "0" + "\n")
def getClusterRelations(fileCluster):
    """Load the cluster csv and index its relations.

    The file is tab separated with a header row; columns used:
    0=cluster, 1=table, 2=column pair ("c1##c2"), 4=all_relations
    (a Python list literal of relation ids).

    :param fileCluster: path to the cluster csv.
    :return: (dictTableCluster, dictCluster) where dictTableCluster maps
        table -> cluster and dictCluster maps cluster -> {col pair ->
        set of relation ids}.
    """
    import ast  # local import: safe parsing of the relations list literal
    dictTableCluster = {}
    dictCluster = {}
    cont = 0
    with open(fileCluster, "r") as fileCl:
        for line in fileCl:
            print("cont: ", cont)
            if cont == 0:
                cont += 1
                continue  # skip the header row
            cont += 1
            _line = line.replace("\n", "").split("\t")
            cluster = _line[0]
            table = _line[1]
            all_relations = _line[4]
            if all_relations is None or all_relations == "":
                continue
            # SECURITY: was eval() on file contents; literal_eval parses
            # the same list literal without executing arbitrary code.
            all_relations = ast.literal_eval(all_relations)
            all_relations = {r.strip() for r in all_relations}
            cols = _line[2]
            dictTableCluster[table] = cluster
            if dictCluster.get(cluster) is None:
                dictCluster[cluster] = {cols: all_relations}
            elif dictCluster.get(cluster).get(cols) is None:
                dictCluster[cluster][cols] = all_relations
    return dictTableCluster, dictCluster
def readLinks(input=0):
    """Pipey source stage: yield FILE_LINKS (gzipped) in ~50000-line chunks.

    :param input: unused; kept for the Pipey source-stage signature.
    """
    cont = 0
    line_out = ""
    with gzip.open(FILE_LINKS, "rt") as fi:
        for line in fi:
            if cont >= 50000:
                outyield = line_out
                # BUGFIX: the original reset line_out to "" here and so
                # silently dropped the line that triggered the flush;
                # start the next chunk with it instead.
                line_out = line
                cont = 1
                yield outyield
            else:
                line_out += line
                cont += 1
    if line_out != "":
        yield line_out
    # Sentinel consumed by the (currently disabled) Pipey pipeline.
    # NOTE(review): Pipey is not imported in this module — confirm before
    # re-enabling the pipeline.
    yield Pipey.STOP
def processLinks1(line):
    """Pipey worker stage: run generateTriples1 over one chunk of lines.

    NOTE(review): generateTriples1 writes to FILE_OUT and returns None,
    so this yields None per chunk — presumably the downstream combiner
    only counts completions; verify before relying on the yielded value.
    """
    result = generateTriples1(line)
    if result != "":
        yield result
if __name__ == '__main__':
    # CLI: <option 1|2> <cluster csv> <links csv.gz> <output csv.gz>
    args = sys.argv[1:]
    logging.basicConfig(filename="./debug.log", level=logging.DEBUG)
    params = ConfigProperties().loadProperties()
    wikidataDAO = WikidataDAO(params)
    wikidataDAO.fillPropName()
    option = args[0]
    FILE_CLUSTER = args[1]
    FILE_LINKS = args[2]
    FILE_OUT = args[3]
    dictTableCluster, dictCluster = getClusterRelations(FILE_CLUSTER)
    startTime = time.time()
    if option == "1":
        generateTriples1(FILE_LINKS)
    if option == "2":
        generateTriples(FILE_LINKS)
    # A parallel Pipey pipeline (readLinks -> processLinks* -> combiner)
    # was prototyped here and is intentionally left disabled.
    print("Time Triples: ", time.time() - startTime)
| [
"[email protected]"
]
| |
3a40a1e42f60c1c9f14a8869461d90cc62d7f560 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /tests/test_rand_affine.py | 1e1a23bc0915f7025bb7fdc388ed9593b196b866 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
]
| permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 5,638 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import RandAffine
# Cases for test_rand_affine:
# [RandAffine constructor kwargs, call kwargs, expected output array/tensor].
TEST_CASES = [
[
dict(as_tensor_output=False, device=None),
{"img": torch.arange(27).reshape((3, 3, 3))},
np.arange(27).reshape((3, 3, 3)),
],
[
dict(as_tensor_output=False, device=None, spatial_size=-1),
{"img": torch.arange(27).reshape((3, 3, 3))},
np.arange(27).reshape((3, 3, 3)),
],
[
dict(as_tensor_output=False, device=None),
{"img": torch.arange(27).reshape((3, 3, 3)), "spatial_size": (2, 2)},
np.array([[[2.0, 3.0], [5.0, 6.0]], [[11.0, 12.0], [14.0, 15.0]], [[20.0, 21.0], [23.0, 24.0]]]),
],
[
dict(as_tensor_output=True, device=None),
{"img": torch.ones((1, 3, 3, 3)), "spatial_size": (2, 2, 2)},
torch.ones((1, 2, 2, 2)),
],
[
dict(as_tensor_output=True, device=None, spatial_size=(2, 2, 2), cache_grid=True),
{"img": torch.ones((1, 3, 3, 3))},
torch.ones((1, 2, 2, 2)),
],
# Randomized affine (rotation/shear/translation); the test fixes the seed.
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
as_tensor_output=True,
padding_mode="zeros",
spatial_size=(2, 2, 2),
device=None,
),
{"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"},
torch.tensor([[[[0.3658, 1.0000], [1.0000, 1.0000]], [[1.0000, 1.0000], [1.0000, 0.9333]]]]),
],
# Same as above but exercising the cached-grid code path.
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
as_tensor_output=True,
padding_mode="zeros",
spatial_size=(2, 2, 2),
cache_grid=True,
device=None,
),
{"img": torch.ones((1, 3, 3, 3)), "mode": "bilinear"},
torch.tensor([[[[0.3658, 1.0000], [1.0000, 1.0000]], [[1.0000, 1.0000], [1.0000, 0.9333]]]]),
],
# 2D case with scaling; spatial_size passed at call time.
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
scale_range=[0.1, 0.2],
as_tensor_output=True,
device=None,
),
{"img": torch.arange(64).reshape((1, 8, 8)), "spatial_size": (3, 3)},
torch.tensor([[[18.7362, 15.5820, 12.4278], [27.3988, 24.2446, 21.0904], [36.0614, 32.9072, 29.7530]]]),
],
# Same 2D case with the grid cached at construction time.
[
dict(
prob=0.9,
rotate_range=(np.pi / 2,),
shear_range=[1, 2],
translate_range=[2, 1],
scale_range=[0.1, 0.2],
spatial_size=(3, 3),
cache_grid=True,
as_tensor_output=True,
device=None,
),
{"img": torch.arange(64).reshape((1, 8, 8))},
torch.tensor([[[18.7362, 15.5820, 12.4278], [27.3988, 24.2446, 21.0904], [36.0614, 32.9072, 29.7530]]]),
],
]
# A 1x9x10 ramp image in both numpy and torch flavors for the
# skipped-transform consistency tests below.
ARR_NUMPY = np.arange(9 * 10).reshape(1, 9, 10)
ARR_TORCH = torch.Tensor(ARR_NUMPY)
# Cartesian product of image flavor x as_tensor_output x int/float input,
# written as a comprehension over the three axes.
TEST_CASES_SKIPPED_CONSISTENCY = [
    (im, as_tensor_output, in_dtype_is_int)
    for im in (ARR_NUMPY, ARR_TORCH)
    for as_tensor_output in (True, False)
    for in_dtype_is_int in (True, False)
]
class TestRandAffine(unittest.TestCase):
    """Unit tests for the RandAffine random affine transform."""
    @parameterized.expand(TEST_CASES)
    def test_rand_affine(self, input_param, input_data, expected_val):
        """Check transform output (values and container type) against expectations."""
        g = RandAffine(**input_param)
        g.set_random_state(123)  # fixed seed so the sampled affine parameters are reproducible
        result = g(**input_data)
        if input_param.get("cache_grid", False):
            # when grid caching was requested the transform must have populated it
            self.assertTrue(g._cached_grid is not None)
        # container type (tensor vs ndarray) must match the expected value's type
        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
        if isinstance(result, torch.Tensor):
            np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
        else:
            np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
    def test_ill_cache(self):
        """cache_grid without a fully-specified spatial_size should warn, not fail."""
        with self.assertWarns(UserWarning):
            # no spatial_size at all
            RandAffine(cache_grid=True)
        with self.assertWarns(UserWarning):
            # a dynamic (-1) component makes the cached grid unusable
            RandAffine(cache_grid=True, spatial_size=(1, 1, -1))
    @parameterized.expand(TEST_CASES_SKIPPED_CONSISTENCY)
    def test_skipped_transform_consistency(self, im, as_tensor_output, in_dtype_is_int):
        """A skipped transform (prob=0) and an applied one (prob=1) must agree on output type and dtype."""
        t1 = RandAffine(prob=0, as_tensor_output=as_tensor_output)
        t2 = RandAffine(prob=1, spatial_size=(10, 11), as_tensor_output=as_tensor_output)
        # change dtype to int32 or float32
        if in_dtype_is_int:
            im = im.astype("int32") if isinstance(im, np.ndarray) else im.int()
        else:
            im = im.astype("float32") if isinstance(im, np.ndarray) else im.float()
        out1 = t1(im)
        out2 = t2(im)
        # check same type
        self.assertEqual(type(out1), type(out2))
        # check matching dtype
        self.assertEqual(out1.dtype, out2.dtype)
# allow running this test module directly
if __name__ == "__main__":
    unittest.main()
| [
"[email protected]"
]
| |
f456b65143adce6b77f50922eb465da68100c038 | 4f88840e80027503da667e8f73b6babfd1ec118b | /tests/simple_sql_test.py | 5feace18a9742c2360a4a62af0592f9edabf9118 | []
| no_license | fuyjcnbr/analyzeSQL | a29fcf97d602f63917e1ad211fba84f9381a7700 | 05ba80b360832ff3a67ac876ebff73eef2300f74 | refs/heads/master | 2023-07-04T06:14:24.940255 | 2021-08-01T17:25:10 | 2021-08-01T17:25:10 | 391,296,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from unittest import TestCase, main
from analyzeSQL.main import SqlParser, SimplifySimpleSqlTree
from lark import Lark, Transformer, v_args, Tree, Token
class SqlParserTest(TestCase):
    """Tests for the SqlParser "simple_sql" grammar and its tree simplifier."""
    def test_simple_join(self):
        """Parse a two-table inner join and compare the simplified parse tree."""
        sql = """
			select a.asd, b.asf
			from prod.foo a
			inner join dev.bar b
			on a.id = b.id
			where a.x = 12
		"""
        sql_parser = SqlParser().get_parser("simple_sql")
        p = sql_parser.parse
        tree = p(sql)
        tree2 = SimplifySimpleSqlTree().transform(tree)
        # NOTE(review): Tree("hz", []) looks like a placeholder expectation --
        # confirm the intended simplified tree before relying on this test.
        self.assertEqual(Tree("hz", []), tree2)
# allow running this test module directly
if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
05ec4db57777e9607c55cd7a78b8bdf8c9c1c38a | 08a711031b12e75235b49b48d74069ae14ecf53a | /school_api/urls.py | 77e18e37a85402dd49688b9db004153d2ab0a384 | []
| no_license | dhivya-hub/old_django-assignment | 0b74ea94e070757fcce9518016e5b64b5da5086b | f647fac70e338871f990b89e3960ec3ca05a4858 | refs/heads/master | 2023-01-14T10:54:42.897946 | 2020-11-10T17:40:17 | 2020-11-10T17:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from django.urls import path
from .views import *
# URL routes for the school_api app; route names enable reverse() lookups.
urlpatterns = [
    path('', home, name='home'),  # landing page
    path('student/', student_detail, name='detail'),  # student detail endpoint
    path('teacher_update/<int:pk>/', teacher_update, name='update'),  # update one teacher by primary key
    path('teacher_detail/', teacher_detail, name='teacher'),  # teacher detail endpoint
    path('create/', CreateUserAPIView.as_view()),  # user registration endpoint
    path('login/', authenticate_user)  # authentication/login endpoint
]
"[email protected]"
]
| |
a471721363e20e32661f35d9d756a88f94319b21 | aa288318cb22e25c0cb19e9f189a2f3351b8e8c1 | /manage.py | ea8ad7c432c038931edd0828af9957fa3481701f | []
| no_license | preethibaskar/ReferApp | 6adadbbb7c55f7f9927a9b148c1ac9468a273340 | d888299a627fd4b61775d128435c0b0e6f5001c7 | refs/heads/master | 2021-01-12T10:48:47.145340 | 2016-11-06T02:18:09 | 2016-11-06T02:18:09 | 72,710,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: select this project's settings
    # module, then hand the command line over to Django's CLI dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ReferApp.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
aead48ff3d505783f0b9a2d0eb4d0d3131c26a05 | d3f41351d9068f631bf3d6953ba0f004191c3631 | /MBD_system/remove_duplicate.py | 5662cffcca9990eb88f714d19d00474718688dbd | []
| no_license | xupeiwust/DyS | 471c5c3757fccfd4553ff72194e1347b56a1553d | 5e6a54dee662206664dde022ccca372f966b1789 | refs/heads/master | 2022-03-01T05:10:31.118284 | 2018-02-28T16:57:13 | 2018-02-28T16:57:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | '''
Created on 9. mar. 2015
@author: lskrinjar
'''
import numpy as np
def remove_duplicate(nodes):
    """Remove duplicate node vectors from a matrix of nodes.

    Args:
        nodes: a 2D array (matrix) of node coordinates, one node per row.

    Returns:
        unique_nodes: a 2D array with each distinct row exactly once, sorted
        lexicographically by column (same ordering the old structured-dtype
        view trick produced).
    """
    # np.unique(..., axis=0) compares whole rows directly, replacing the
    # fragile void-view/reshape round trip of the original implementation.
    return np.unique(np.ascontiguousarray(nodes), axis=0)
| [
"[email protected]"
]
| |
1aef2eefec3ad88d9b7f8e6eeba325b3603c0c46 | 603488a6cde44b30732260010afe12b089d01c3d | /useful_functions.py | f355b0c3bc4eb97f054038f97c85f98621d7ff92 | []
| no_license | Zahra-Kader/ksz_21cm_signal | 172707ccda3aa4b61b7de4e046c8af9e2d3c034d | 897a5e36a493f0282fb1d72acee7b0425e0f4a41 | refs/heads/master | 2021-07-09T05:22:18.673237 | 2021-03-26T18:14:01 | 2021-03-26T18:14:01 | 231,374,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,009 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 12:53:47 2018
@author: zahra
"""
import distance as cd
from scipy.interpolate import interp1d
import numpy as np
import perturbation as cp
import density as den
import constants as cc
import matplotlib.pyplot as plt
import scipy as sp
import pylab
from matplotlib.colors import LogNorm
#import perturbation as cp
# 21 cm / kSZ signal constants
b_HI=1.0  # HI bias
omega_HI=0.8e-3  # HI density parameter
n_points=100  # number of redshift samples
nu_21=1420.  # rest-frame 21 cm line frequency (presumably MHz -- TODO confirm)
# fiducial flat LCDM cosmology dict consumed by the cosmology helper modules
cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'omega_k_0':0.0, 'h':0.67, 'omega_b_0' : 0.049, 'omega_n_0' : 0.0,
'N_nu' : 0, 'n' : 1.0, 'sigma_8' : 0.9, 'baryonic_effects' : False,'X_H':.75}
H0=cc.H100_s*cosmo['h']  # Hubble constant (units set by cc.H100_s)
#z=np.logspace(-10,np.log(2000),2000)
#z=np.linspace(1e-4,10,n_points)
z=np.geomspace(1e-4,10,n_points)  # geometric redshift grid
# CAMB matter power spectrum at z=0; NOTE(review): machine-specific absolute path
kabs,P= np.genfromtxt('/home/zahra/python_scripts/kSZ_21cm_signal/camb_63347152_matterpower_z0_16000_kmax.dat', dtype=float,
unpack=True)
#interpolate the matter power spec
Mps_interpf = interp1d(kabs, P, bounds_error=False,fill_value="extrapolate")
k=np.linspace(1.e-4,10.,10)
Mps_interpf_div_ksq=interp1d(kabs, P/kabs**2, bounds_error=False,fill_value=0.)  # interpolant of P(k)/k^2
def zed(chi_in):
    """Invert the distance-redshift relation: redshift at comoving distance chi_in."""
    distances = cd.comoving_distance(z, **cosmo)
    to_redshift = interp1d(distances, z, bounds_error=False, fill_value=0.)
    return to_redshift(chi_in)
def chi(z):
    """Comoving distance to redshift z for the fiducial cosmology."""
    return cd.comoving_distance(z, **cosmo)
def H(z):
    """Hubble parameter at redshift z."""
    return cd.hubble_z(z, **cosmo)
def D_1(z):
    """Linear growth factor at redshift z."""
    return cp.fgrowth(z, cosmo['omega_M_0'], 0)
#plt.plot(z,D_1(z))
#plt.show()
# comoving distance to z=1100 (last scattering) and a radial grid out to it
chi_m=chi(1100)
chi_array=np.linspace(0,chi_m,2000)
#plt.plot(chi_array,D_1(zed(chi_array)))
#plt.show()
def f(z):
    """Linear growth rate f(z) = Omega_m(z)**gamma (gamma from the constants module)."""
    return den.omega_M_z(z, **cosmo) ** cc.gamma
#plt.plot(den.omega_M_z(z,**cosmo),f(z))
#plt.show()
def r(z):
    """Conversion factor c(1+z)^2/H(z) between dimensionless frequency and comoving distance."""
    return cc.c_light_Mpc_s * (1 + z) ** 2 / H(z)
def kpar(y, z):
    """Line-of-sight wavenumber for dimensionless frequency y at redshift z."""
    return y / r(z)
def T_mean(z):
    """Mean 21 cm brightness temperature at redshift z (micro-Kelvin)."""
    return 566. * cosmo['h'] * H0 * omega_HI * (1 + z) ** 2 / (H(z) * 0.003)
def kpar_min(z, delta_z):
    """Smallest line-of-sight wavenumber resolvable in a band of half-width delta_z around z."""
    nu_max = nu_21 / (1 + (z - delta_z))
    nu_min = nu_21 / (1 + (z + delta_z))
    # dimensionless frequency depth of the observing band
    bandwidth = (nu_max - nu_min) / nu_21
    return 2. * np.pi / r(z) / bandwidth
def ell_lims(z, Dmin, Dmax):
    """Multipole range probed by baselines Dmin..Dmax (metres) at redshift z.

    Dmin maps to the minimum multipole (kperp_min) and Dmax to the maximum.
    """
    observing_freq = nu_21 / (1 + z) * 1.e6
    c_metres = cc.c_light_cm_s / 100.
    wavelength = c_metres / observing_freq
    # u = D / lambda, ell = 2*pi*u
    return 2. * np.pi * (Dmin / wavelength), 2. * np.pi * (Dmax / wavelength)
def P_delta_delta(kperp, kpar):
    """Dimensionless density-density power k^3 P(k) on a (kperp, kpar) grid."""
    Kperp, Kpar = np.meshgrid(kperp, kpar)
    k_mag = np.sqrt(Kpar ** 2 + Kperp ** 2)
    return k_mag ** 3 * Mps_interpf(k_mag)
def P_vv(kperp, kpar, z):
    """Dimensionless velocity-velocity power k^3 P_vv on a (kperp, kpar) grid at redshift z."""
    Kperp, Kpar = np.meshgrid(kperp, kpar)
    k_mag = np.sqrt(Kpar ** 2 + Kperp ** 2)
    # cosine of the angle between k and the line of sight
    mu = Kpar / k_mag
    Pvv = f(z) ** 2 * H(z) ** 2 * Mps_interpf(k_mag) * mu ** 2 / ((1 + z) ** 2 * k_mag ** 2) / cc.c_light_Mpc_s ** 2
    return k_mag ** 3 * Pvv
    # historical alternative kept from the original:
    #return k**3*Mps_interpf(k)/k**4-----------USING THIS GIVES THE SAME AMPLITUDES THAT UE LI HAD IN HIS PAPER
def P_delta_v(kperp, kpar, z):
    """Dimensionless density-velocity cross power k^3 P_{delta v} on a (kperp, kpar) grid."""
    Kperp, Kpar = np.meshgrid(kperp, kpar)
    k_mag = np.sqrt(Kpar ** 2 + Kperp ** 2)
    mu = Kpar / k_mag
    cross = f(z) * H(z) * Mps_interpf(k_mag) * mu / ((1 + z) * k_mag) / cc.c_light_Mpc_s
    return k_mag ** 3 * cross
# NOTE(review): these module-level grids shadow the kpar() function defined
# above -- from here on, `kpar` is an array, not a callable. Rename one of
# them if the function is still needed downstream.
kpar=np.geomspace(5.e-3,1.,30)
kperp=np.geomspace(5.e-3,1.,30)
#k=np.sqrt(kpar**2+kperp**2)
#k=np.linspace(1.e-2,110,100)
#P=P_delta_delta(k)*P_vv(k,1.)+P_delta_v(k,1.)**2
#plt.semilogy(k,P)
#plt.plot(k,P_vv(k,1.))
#plt.plot(k,P_delta_v(k,1.))
'''
#plt.show()
print (P_delta_delta(kperp,kpar).max())
print (P_vv(kperp,kpar,1.).max())
pylab.pcolormesh(kperp,kpar,P_delta_delta(kperp,kpar),cmap='Blues',norm=LogNorm()) ; cbar=plt.colorbar();
plt.tick_params(axis='both', which='major');
#pylab.xlim([np.min(kperp),np.max(kperp)])
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$k_\perp$',fontsize=12); plt.ylabel(r'$k_\parallel$',fontsize=12)
plt.title(r'$P_{\delta \delta}$')
pylab.show()
pylab.pcolormesh(kperp,kpar,P_vv(kperp,kpar,1.),cmap='Blues',norm=LogNorm()) ; cbar=plt.colorbar()
#pylab.xlim([np.min(kperp),.5])
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$k_\perp$',fontsize=12); plt.ylabel(r'$k_\parallel$',fontsize=12)
plt.title(r'$P_{vv}$')
pylab.show()
pylab.pcolormesh(kperp,kpar,P_delta_v(kperp,kpar,1.),cmap='Blues',norm=LogNorm()) ; cbar=plt.colorbar()
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$k_\perp$',fontsize=12); plt.ylabel(r'$k_\parallel$',fontsize=12)
plt.title(r'$P_{\delta v}$')
pylab.show()
'''
'''
plt.loglog(k,k**3*Mps_interpf(k),label=r'$\rm{P_{\delta \delta}}$')
plt.loglog(k,k**3*P_delta_v(k,1.),label=r'$\rm{P_{\delta v}}$')
plt.loglog(k,k**3*P_vv(k,1.),label=r'$\rm{P_{vv}}$')
plt.xlabel('k')
plt.ylabel(r'$\rm{k^3 P(k,z=1)}$')
plt.legend()
plt.show()
'''
#plt.plot(z,T_mean(z))
#plt.xlabel('z')
#plt.ylabel('T(z)')
#plt.show()
##print (z)
'''
def chi_flat():
for i in enumerate(z):
chi =2*(1-(1/np.sqrt(1+z)))/H0
return chi
#chi_f=chi_flat()
##print ("Comoving distance to z is %.1f Mpc" % (chi))
##print (chi)
##print (z)
#return res
#result=zed()
##plt.loglog(chi,b(chi))
##plt.show()
##plt.loglog(chi_f,z)
##plt.show()
##print (b(chi))
#f=cp.fgrowth(b(chi), omega_M_0=0.27, unnormed=False)
##print (f)
##plt.loglog(b(chi),f)
'''
# instantaneous-reionization geometry: a shell of half-width delta_z around z_r
delta_z=2.
z_r=10.
z_ri=z_r-delta_z/2
z_rf=z_r+delta_z/2
chi_ri=chi(z_ri)
chi_rf=chi(z_rf)
delta_chi=chi_rf-chi_ri  # comoving thickness of the reionization shell
# Hubble radius at z_r (matter-dominated approximation)
r_H=2*cc.c_light_Mpc_s/(3*H0*np.sqrt(cosmo['omega_M_0'])*(1+z_r)**1.5)
#r_H=cd.light_travel_distance(z_r,0.0,**cosmo)
chi_r=chi(z_r)
theta=r_H/cd.angular_diameter_distance(z_r,0,**cosmo)  # angular size of the horizon at z_r
#print (theta)
import reionization as cr
def tau_ind(z):
    """Thomson optical depth integrated out to redshift z (fully ionized H and He)."""
    return cr.integrate_optical_depth(z, x_ionH=1.0, x_ionHe=1.0, **cosmo)
def tau_inst(z):
    """Optical depth assuming instantaneous reionization at redshift z."""
    return cr.optical_depth_instant(z, x_ionH=1.0, x_ionHe=1.0, z_rHe=None, return_tau_star=False, verbose=0, **cosmo)
#print (tau_r)
#cosmo = {'omega_M_0':0.3, 'omega_lambda_0':0.7, 'omega_k_0':0.0, 'h':0.72, 'omega_b_0' : 0.045, 'omega_n_0' : 0.0,
# 'N_nu' : 0, 'n' : 1.0, 'sigma_8' : 0.9, 'baryonic_effects' : False}
#I=cr.ionization_from_collapse(z=6, coeff_ion=1, temp_min=1e4, passed_min_mass = False,**cosmo)
| [
"[email protected]"
]
| |
06005fb2c3ba90f593ed444f209cd6a808e3114b | 907cb7612ede31418997ce7b2813c9f2192e6a30 | /phase_cells/focal_evaluate/printout_network.py | a7d12f6f8ac0762947ad20ae88fc7d697979018f | [
"MIT"
]
| permissive | shenghh2015/segmentation_models | c3a6f9f0a7fc2ac52d0d1f6b2beef1c69133bae2 | 473c528c724f62ff38ac127747dd8babb7de6b85 | refs/heads/master | 2023-08-14T05:52:36.290536 | 2021-10-19T03:02:46 | 2021-10-19T03:02:46 | 276,793,700 | 0 | 0 | null | 2020-07-03T02:57:39 | 2020-07-03T02:57:38 | null | UTF-8 | Python | false | false | 648 | py | import os
import sys
sys.path.append('../')
import segmentation_models as sm
from segmentation_models import Unet
os.environ["CUDA_VISIBLE_DEVICES"] = '1'  # pin the framework to GPU 1
backbone = 'efficientnetb4'
model = Unet(backbone, input_shape = (736,736,3))
network_layers = model.layers
# encoder activations consumed by the U-Net decoder as skip connections
feature_layers = ['block6a_expand_activation', 'block4a_expand_activation','block3a_expand_activation', 'block2a_expand_activation']
# dump every layer name with its output shape, flagging skip-connection layers
with open('network_{}.txt'.format(backbone), 'w+') as f:
	for layer in network_layers:
		f.write('{}: {}\n'.format(layer.name, layer.output.get_shape()))
		if layer.name in feature_layers:
			f.write('\nFeature extansion ---{}\n'.format(layer.name))
| [
"[email protected]"
]
| |
49c6f0b8a1e7e95ba13f0ae9a3eb23fcb4aa89c6 | bcb4adead20dd054a8a408037abe7a658fd7e631 | /UF3/calc.py | cc64540f83d9f58c6ca387a09cf0375ee353f4d8 | []
| no_license | nessx/Programacion-1 | 0b0e38621696dcaaaff3c26afe9e8f337fabcb75 | 2b32373e0455685201416b9b455cf3067f2823fa | refs/heads/master | 2023-03-25T13:13:34.283174 | 2021-03-19T23:01:49 | 2021-03-19T23:01:49 | 233,480,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | #! /usr/bin/env python
# encoding: utf-8
import sys
import os
import math
def suma(num1, num2):
	"""Return the sum of num1 and num2."""
	return num1 + num2
def resta(num1, num2):
	"""Return num1 minus num2."""
	return num1 - num2
def mult(num1, num2):
	"""Return the product of num1 and num2."""
	return num1 * num2
def div(num1, num2):
	"""Return num1 divided by num2 (ZeroDivisionError propagates to the caller)."""
	return num1 / num2
opc = ""
# main menu loop (Python 2 script): runs until the user picks 0 (quit)
while opc != "0":
	print "\n#### MENU ####"
	print "1-Sumar"
	print "2-Restar"
	print "3-Multiplicación"
	print "4-División\n"
	print "0-Salir"
	opc=raw_input("Elige una opción: ")
	if opc=="0":
		os.system("clear")
		print "=====Adiós!!!!====="
		sys.exit()
	os.system("clear")
	# read the two operands before dispatching on the chosen operation
	A = int(raw_input("Introduce el primer numero: "))
	B = int(raw_input("Introduce el segundo numero: "))
	if opc=="1":
		os.system("clear")
		print "==SUMA=="
		print "Resultado: ", suma(A,B)
	elif opc=="2":
		os.system("clear")
		print "==RESTA=="
		print "Resultado: ",resta(A,B)
	elif opc=="3":
		os.system("clear")
		print "==MULTIPLICACION=="
		print"Resultado: ", mult(A,B)
	elif opc=="4":
		os.system("clear")
		print "==DIVISION=="
		try:
			print "Resultado: ", div(A,B)
		except ZeroDivisionError:
			# division by zero is reported instead of crashing
			print "No puedes dividir entre zero einstein!!"
		except:
			print "algo esta mal :("
	else:
		os.system('clear')
		print "oops, No es la llama que buscas..."
		print "==Code: Opción erronea=="
| [
"[email protected]"
]
| |
5b3b3aa4586919012b05a07fefa8087dd34de097 | d0d45247209d3eabc1cb6bc0b01a8c23f807820d | /tests/test_utility.py | 8c0fd5031a9c46032e233084a2dbabffcb1e5ae4 | [
"MIT"
]
| permissive | yw5aj/trimesh | 2b102c5e265108ebd089023bb1c32b3217c35059 | f7dc490f7431ced7cc121369e96b9b2eeb17490d | refs/heads/master | 2021-01-20T03:25:26.772416 | 2017-04-27T16:15:10 | 2017-04-27T16:15:10 | 89,539,048 | 0 | 0 | null | 2017-04-27T00:37:43 | 2017-04-27T00:37:43 | null | UTF-8 | Python | false | false | 8,140 | py | import trimesh
import unittest
import logging
import time
import os
import sys
import inspect
import numpy as np
import json
from collections import deque
import generic as g
TEST_DIM = (100, 3)  # default (rows, columns) shape for random point sets
TOL_ZERO = 1e-9  # tolerance for "effectively zero" comparisons
TOL_CHECK = 1e-2  # looser tolerance when comparing to external truth data
log = logging.getLogger('trimesh')
log.addHandler(logging.NullHandler())  # silence the library unless a handler is configured
_QUICK = '-q' in sys.argv  # flag for abbreviated test runs
class VectorTests(unittest.TestCase):
    """Tests for vector unitization, alignment and absolute orientation."""
    def setUp(self):
        self.test_dim = TEST_DIM
    def test_unitize_multi(self):
        # a zero row cannot be unitized and must be reported as invalid
        vectors = np.ones(self.test_dim)
        vectors[0] = [0, 0, 0]
        vectors, valid = trimesh.unitize(vectors, check_valid=True)
        self.assertFalse(valid[0])
        self.assertTrue(np.all(valid[1:]))
        # all remaining rows should be unit length within tolerance
        length = np.sum(vectors[1:] ** 2, axis=1) ** 2
        length_check = np.abs(length - 1.0) < TOL_ZERO
        self.assertTrue(np.all(length_check))
    def test_align(self):
        # align_vectors should rotate any random unit vector onto the target
        log.info('Testing vector alignment')
        target = np.array([0, 0, 1])
        for i in range(100):
            vector = trimesh.unitize(np.random.random(3) - .5)
            T = trimesh.geometry.align_vectors(vector, target)
            result = np.dot(T, np.append(vector, 1))[0:3]
            aligned = np.abs(result - target).sum() < TOL_ZERO
            self.assertTrue(aligned)
    def test_horn(self):
        # absolute orientation should recover a random rigid transform exactly
        log.info('Testing absolute orientation')
        for i in range(10):
            points_A = (np.random.random(self.test_dim) - .5) * 100
            angle = 4 * np.pi * (np.random.random() - .5)
            vector = trimesh.unitize(np.random.random(3) - .5)
            offset = 100 * (np.random.random(3) - .5)
            T = trimesh.transformations.rotation_matrix(angle, vector)
            T[0:3, 3] = offset
            points_B = trimesh.transformations.transform_points(points_A, T)
            M, error = trimesh.points.absolute_orientation(
                points_A, points_B, return_error=True)
            self.assertTrue(np.all(error < TOL_ZERO))
class UtilTests(unittest.TestCase):
    """Tests for tracked arrays, bounds trees and triangle-strip conversion."""
    def test_track(self):
        # the md5 of a tracked array must change after every mutation...
        a = trimesh.util.tracked_array(np.random.random(TEST_DIM))
        modified = deque()
        modified.append(int(a.md5(), 16))
        a[0][0] = 10
        modified.append(int(a.md5(), 16))
        a[1] = 5
        modified.append(int(a.md5(), 16))
        a[2:] = 2
        modified.append(int(a.md5(), 16))
        self.assertTrue((np.diff(modified) != 0).all())
        # ...and stay stable under read-only operations (indexing/slicing)
        modified = deque()
        modified.append(int(a.md5(), 16))
        b = a[[0, 1, 2]]
        modified.append(int(a.md5(), 16))
        c = a[1:]
        modified.append(int(a.md5(), 16))
        self.assertTrue((np.diff(modified) == 0).all())
    def test_bounds_tree(self):
        for attempt in range(3):
            for dimension in [2, 3]:
                # random AABBs derived from random triangles in 2D and 3D
                t = g.np.random.random((1000, 3, dimension))
                bounds = g.np.column_stack((t.min(axis=1), t.max(axis=1)))
                tree = g.trimesh.util.bounds_tree(bounds)
                # every box must at least intersect itself
                self.assertTrue(0 in tree.intersection(bounds[0]))
    def test_strips(self):
        '''
        Test our conversion of triangle strips to face indexes.
        '''
        # test 4- triangle strip
        s = [g.np.arange(6)]
        f = g.trimesh.util.triangle_strips_to_faces(s)
        assert (f == g.np.array([[0, 1, 2],
                                 [3, 2, 1],
                                 [2, 3, 4],
                                 [5, 4, 3]])).all()
        assert len(f) + 2 == len(s[0])
        # test single triangle
        s = [g.np.arange(3)]
        f = g.trimesh.util.triangle_strips_to_faces(s)
        assert (f == g.np.array([[0, 1, 2]])).all()
        assert len(f) + 2 == len(s[0])
        # a strip of n vertices always yields n - 2 faces
        s = [g.np.arange(100)]
        f = g.trimesh.util.triangle_strips_to_faces(s)
        assert len(f) + 2 == len(s[0])
class SceneTests(unittest.TestCase):
    """Scene construction from the connected components of a mesh."""
    def setUp(self):
        # split box.STL into connected components and wrap them in a scene
        filename = os.path.join(g.dir_models, 'box.STL')
        mesh = trimesh.load(filename)
        split = mesh.split()
        scene = trimesh.scene.Scene(split)
        self.scene = scene
    def test_scene(self):
        # smoke test: duplicate-node detection should run without raising
        duplicates = self.scene.duplicate_nodes()
class IOTest(unittest.TestCase):
    """Export smoke test for the COLLADA (.dae) writer."""
    def test_dae(self):
        # exporting should not raise; the output itself is not inspected
        a = g.get_mesh('ballA.off')
        r = a.export(file_type='dae')
class ContainsTest(unittest.TestCase):
    """Point containment queries against a tessellated unit sphere."""
    def test_inside(self):
        sphere = g.trimesh.primitives.Sphere(radius=1.0, subdivisions=4)
        g.log.info('Testing contains function with sphere')
        samples = (np.random.random((1000, 3)) - .5) * 5
        radius = np.linalg.norm(samples, axis=1)
        # the margin skips points near the surface, where the tessellated
        # sphere and the ideal sphere legitimately disagree
        margin = .05
        truth_in = radius < (1.0 - margin)
        truth_out = radius > (1.0 + margin)
        contains = sphere.contains(samples)
        if not contains[truth_in].all():
            raise ValueError('contains test doesnt match truth!')
        if contains[truth_out].any():
            raise ValueError('contains test doesnt match truth!')
class MassTests(unittest.TestCase):
    """Compare computed mass properties against values exported from SolidWorks."""
    def setUp(self):
        # inertia numbers pulled from solidworks
        self.truth = g.data['mass_properties']
        self.meshes = dict()
        for data in self.truth:
            filename = data['filename']
            self.meshes[filename] = g.get_mesh(filename)
    def test_mass(self):
        def check_parameter(a, b):
            # elementwise closeness within the loose external-truth tolerance
            check = np.all(
                np.less(np.abs(np.array(a) - np.array(b)), TOL_CHECK))
            return check
        for truth in self.truth:
            calculated = self.meshes[truth['filename']].mass_properties(density=truth[
                'density'])
            parameter_count = 0
            for parameter in calculated.keys():
                # only compare parameters present in the reference data
                if not (parameter in truth):
                    continue
                parameter_ok = check_parameter(
                    calculated[parameter], truth[parameter])
                if not parameter_ok:
                    log.error('Parameter %s failed on file %s!',
                              parameter, truth['filename'])
                self.assertTrue(parameter_ok)
                parameter_count += 1
            log.info('%i mass parameters confirmed for %s',
                     parameter_count, truth['filename'])
class SphericalTests(unittest.TestCase):
    """Round-trip between unit vectors and spherical coordinates."""
    def test_spherical(self):
        v = g.trimesh.unitize(g.np.random.random((1000, 3)) - .5)
        spherical = g.trimesh.util.vector_to_spherical(v)
        v2 = g.trimesh.util.spherical_to_vector(spherical)
        # the conversion there and back must reproduce the input vectors
        self.assertTrue((np.abs(v - v2) < g.trimesh.constants.tol.merge).all())
class HemisphereTests(unittest.TestCase):
    """vector_hemisphere must map v and -v onto the same signed vector."""
    def test_hemisphere(self):
        v = trimesh.unitize(np.random.random((10000, 3)) - .5)
        # include axis-aligned edge cases on the hemisphere boundary
        v[0] = [0, 1, 0]
        v[1] = [1, 0, 0]
        v[2] = [0, 0, 1]
        # interleave each vector with its negation
        v = np.column_stack((v, -v)).reshape((-1, 3))
        resigned = trimesh.util.vector_hemisphere(v)
        # each (v, -v) pair must come back identical, so their difference is zero
        check = (abs(np.diff(resigned.reshape((-1, 2, 3)),
                             axis=1).sum(axis=2)) < trimesh.constants.tol.zero).all()
        self.assertTrue(check)
class FileTests(unittest.TestCase):
    """File-like wrapping and file-hashing utilities."""
    def test_io_wrap(self):
        # bytes and str inputs should both round-trip through wrap_as_stream
        test_b = g.np.random.random(1).tostring()
        test_s = 'this is a test yo'
        res_b = g.trimesh.util.wrap_as_stream(test_b).read()
        res_s = g.trimesh.util.wrap_as_stream(test_s).read()
        self.assertTrue(res_b == test_b)
        self.assertTrue(res_s == test_s)
    def test_file_hash(self):
        data = g.np.random.random(10).tostring()
        path = g.os.path.join(g.dir_data, 'nestable.json')
        for file_obj in [g.trimesh.util.wrap_as_stream(data),
                         open(path, 'rb')]:
            start = file_obj.tell()
            hashed = g.trimesh.util.hash_file(file_obj)
            # hashing must not move the file cursor
            self.assertTrue(file_obj.tell() == start)
            self.assertTrue(hashed is not None)
            self.assertTrue(len(hashed) > 5)
            file_obj.close()
# attach a log handler and run all test cases when executed directly
if __name__ == '__main__':
    trimesh.util.attach_to_log()
    unittest.main()
| [
"[email protected]"
]
| |
63c0bda45a2d089fa1b76c00cf8722998a78e1cd | 7ae07277c259d645585d125827a1b34984216cde | /id_to_matrix.py | bd6d4e5e83e7b6c126b166e0fa98b6b14d4880a7 | []
| no_license | MonsieurTapir/fast-cfc | c523cf645ad8dd09b4023db0ac7d493100781868 | 4d135d9306db9d4b2da8823a588e11c72027a437 | refs/heads/master | 2020-03-20T19:32:31.676016 | 2018-06-28T16:18:30 | 2018-06-28T16:18:30 | 137,642,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | import sys
if __name__ == "__main__":
    # CLI: <dimension> <instance id>. Prints a symmetric 0/1 matrix (1s on
    # the diagonal) whose strict lower triangle is the binary encoding of
    # the instance id.
    if len(sys.argv) < 3:
        print("First argument: dimension")
        print("Second argument: instance id")
        exit(1)
    dim = int(sys.argv[1])
    inst = int(sys.argv[2])
    # number of strict lower-triangle entries; integer division keeps the
    # format width an int (the original float `/` relied on int(size))
    size = dim * (dim - 1) // 2
    mat_string = format(inst, '0{}b'.format(size))
    k = 0
    for i in range(dim):
        for j in range(i):
            print(mat_string[k], end=" ")
            k += 1
        print("1")
| [
"[email protected]"
]
| |
2fdb3dfb7bc74e4888d21e09f8b2150b787d0ab2 | b40a73ab5bba51ef15b272a08642bd091ee820b8 | /botostictactoe.py | 01aff5872f8b7a8fd7f86f390037839a2ce26cb2 | []
| no_license | Matyko/codecool | 1b82829c0a099487c77fad6914a05012926eccaf | 69e72c4f9307b9557767fab17bd44e92f0e67920 | refs/heads/master | 2021-01-10T22:53:48.025571 | 2016-12-06T10:46:15 | 2016-12-06T10:46:15 | 70,338,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,659 | py | import random
import os
#Lena and Matyi TicTacToe
board = [" "] * 10  # cells 1-9 are used; index 0 is a dummy
winvalues = ([7, 8, 9], [4, 5, 6], [1, 2, 3], [7, 4, 1,], [8, 5, 2], [9, 6, 3], [7, 5, 3], [9, 5, 1])  # all winning lines
playermove = 0  # 0 -> X to move, 1 -> O/robot to move
#Draws the table
def table():
    """Print the current board: rows 7-8-9, 4-5-6, 1-2-3 from top to bottom."""
    print("\n")
    for row_start in (7, 4, 1):
        print(board[row_start], "|", board[row_start + 1], "|", board[row_start + 2])
        if row_start != 1:
            print("---------")
#Checks if there is a full board
def boardfull():
    """Return "tie" (and announce it) when no empty cell remains; otherwise print a spacer."""
    if " " not in board[1:10]:
        print("\nIt's a tie!")
        return "tie"
    print(" ")
#Checks if there is a winner
def wincheck():
    """Announce the winner and return "winner" if any line is fully 'x' or fully 'o'."""
    for a, b, c in winvalues:
        line = (board[a], board[b], board[c])
        if line == ("x", "x", "x"):
            print("\nPlayer X won!\n")
            return "winner"
        if line == ("o", "o", "o"):
            print("\nPlayer 0 won!\n")
            return "winner"
#What robot is looking for
def robotpanic(x,y,z):
    """If cells x and y hold the same player's mark and z is free, take z and return "block"."""
    both_o = board[x] == "o" and board[y] == "o"
    both_x = board[x] == "x" and board[y] == "x"
    if (both_o or both_x) and board[z] == " ":
        board[z] = "o"
        return "block"
#Robot actions
def robotmove():
    """Place one 'o' for the computer.

    For each winning line, if two cells hold the same player's mark and the
    third is free, take the free cell -- this both completes the robot's own
    lines and blocks the opponent.  The free cell is tried in the third,
    then second, then first position of each line, matching the scan order
    of the original 24 unrolled robotpanic() calls.  If no line qualifies,
    a random free cell is chosen.
    """
    for free_pos in (2, 1, 0):
        for line in winvalues:
            target = line[free_pos]
            a, b = (cell for pos, cell in enumerate(line) if pos != free_pos)
            # two identical marks ('x' or 'o') and an empty target cell
            if board[a] == board[b] != " " and board[target] == " ":
                board[target] = "o"
                return
    # nothing to complete or block: retry random cells until a free one is hit
    while True:
        rn = random.randint(1, 9)
        if board[rn] == " ":
            board[rn] = "o"
            print("random")
            return
# setup loop: show instructions and collect game mode until a valid choice
while True:
    #The game
    print("Welcome to Lena and Matyi's TicTacToe Game!")
    print("\nChoose places with number keys:\n")
    print("7 | 8 | 9 \n4 | 5 | 6 \n1 | 2 | 3")
    start = input("\nWould you like to start the game? (y/n) ")
    if start == "y":
        robot = input("\nHow many players? (1/2): ")
        if robot == "1":
            break
        if robot == "2":
            plm = input("\nPlayer 1 choose X or O: ")
            if plm == "X" or plm == "x":
                playermove = 0
                break
            if plm == "O" or plm == "o":
                playermove = 1
                break
            else:
                continue
    if start == "n":
        print("Too bad :(")
        quit()
    else:
        continue
os.system('cls' if os.name == 'nt' else 'clear')
table()
# main game loop: player X moves, then player O or the robot
while True:
    #First player
    while playermove == 0:
        n = input("\nPlayer X Choose a place: ")
        if not n.isdigit() or int(n) > 9:
            print("Please type a valid number! (1-9)")
            continue
        else:
            n = int(n)
        if board[n] != "x" and board[n] != "o":
            board[n] = "x"
            playermove += 1
        else:
            # NOTE(review): this prints the function object, not the board --
            # probably meant table() here.
            print(table)
            print("\nPlace already taken! Choose another one!")
    os.system('cls' if os.name == 'nt' else 'clear')
    table()
    # after X's move: end-of-game check with optional restart
    if wincheck() == "winner" or boardfull() == "tie":
        restart = input("\nPlay again? (y/n) ")
        if restart == "y":
            os.system('cls' if os.name == 'nt' else 'clear')
            board = [" "] * 10
            table()
        if restart == "n":
            quit()
    boardfull()
    #Second player
    if robot == "2":
        while playermove == 1:
            m = input("\nPlayer O Choose a place: ")
            if not m.isdigit() or int(m) > 9:
                print("Please type a valid number! (1-9) ")
                continue
            else:
                m = int(m)
            if board[m] != "x" and board[m] != "o":
                board[m] = "o"
                playermove -= 1
            else:
                print("\nPlace already taken! Choose another one! ")
    #Robot
    while playermove == 1:
        if robot == "1":
            robotmove()
            playermove -= 1
    os.system('cls' if os.name == 'nt' else 'clear')
    table()
    # after O/robot's move: end-of-game check with optional restart
    if wincheck() == "winner" or boardfull() == "tie":
        restart = input("\nPlay again? (y/n) ")
        if restart == "y":
            os.system('cls' if os.name == 'nt' else 'clear')
            board = [" "] * 10
            table()
        if restart == "n":
            quit()
    boardfull()
| [
"[email protected]"
]
| |
37c30cf51a0914c9689b287a43a9c935afd849a0 | cbb6c6244cb815ab5850aa23f08a7aacee8ff57b | /belt_exam/urls.py | 1f17dc1f2c3a8588ad03ec6249237b0a8da40db9 | []
| no_license | tehtertot/wishlist | 93bf2e2fca50c464822d8a9974d9053c4de51c8d | 38e2a10b362e1c92f9995fe54d911677da635adb | refs/heads/master | 2021-01-23T01:21:14.212459 | 2017-05-30T23:39:42 | 2017-05-30T23:39:42 | 92,867,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | """belt_exam URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# route everything under / to the login app and /wishlist/ to the wishlist app
urlpatterns = [
    url(r'^', include('apps.login.urls', namespace="login")),
    url(r'^wishlist/', include('apps.wishlist.urls', namespace="wishlist"))
]
| [
"[email protected]"
]
| |
2bdfcc5083faff926e98630920724f687f0fda04 | 5fd31292a63ce10df8918dafa96ddae4092b3a4f | /server/manage.py | 8c20708b0b96ade7c72542bbf8d2dc444607c525 | []
| no_license | nagkumar91/masters_project | b6279b892ac6e721c669e9b5cd2e4580bc05b8ab | 94a999e73aed2da1a6e975d5bc9aa8bb7f4fe408 | refs/heads/master | 2021-01-10T06:55:47.599003 | 2016-04-20T22:36:29 | 2016-04-20T22:36:29 | 45,661,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # minimal Django management entry point for the saferide project
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "saferide.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
7938828e76d7687b0c2f5c59462e415ef42d7e42 | a91319cc56856499700b517da763f2143ba3fd26 | /scripts/manjaro.py | 6b7ef253936f35d629700ce8cc2630c10798ddf1 | []
| no_license | albertlincoln/dotfiles | d8fd11f53884197e0d2fe550dcd2af4ec1c52f61 | 41c027aa4748516fe285873c58cfde8416d54834 | refs/heads/master | 2021-06-05T12:23:40.516400 | 2021-04-29T11:42:41 | 2021-04-29T11:42:41 | 101,452,839 | 0 | 1 | null | 2019-11-29T22:41:34 | 2017-08-26T00:55:28 | Vim script | UTF-8 | Python | false | false | 779 | py | #!/usr/bin/env python3
import sys
import os
import json
import argparse
from subprocess import call
# Build the CLI: optionally point at a custom package-set JSON file.
parser = argparse.ArgumentParser(description='Process some packages.')
parser.add_argument('--file', required=False, default='packages.json',
                    help='a custom json file')
args = parser.parse_args()
# The JSON file is resolved relative to this script, not the CWD.
package_path = os.path.join(os.path.dirname(__file__), args.file)
with open(package_path) as f:
    jsondata = json.load(f)
# Install every package of every package set; a failure of one call is
# reported but does not abort the remaining installs.  (The unused
# `packages = {}` dict of the original was dropped.)
for packageset in jsondata["packagesets"].values():
    for package in packageset["install"]:
        try:
            call(["sudo", "pacman", "-Sy", "--needed", "--noconfirm", package])
        except Exception as e:
            print(e)
| [
"[email protected]"
]
| |
6f7095e9a5365184be1851ffbd1229da8d44bed7 | feb0b131af56e0e95d9808f5a92b0fc267a54d5a | /src/human_movement_identifier/classifier.py | 15be2acfc84865a7f7e956f93dae66b09addf0f0 | []
| no_license | ferdianjovan/human_movement_identifier | e5fdffc97dabd0250d129041bd3521e9ffb6d0ff | a73bf7a5f59139d9a98095f3d2f629f29002791f | refs/heads/master | 2020-06-04T22:18:14.216524 | 2015-03-20T06:21:27 | 2015-03-20T06:21:27 | 31,667,986 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,372 | py | #!/usr/bin/env python
import sys
import random
import rospy
import math
import pymongo
import pylab
import matplotlib.pyplot as plt
from collections import namedtuple
from mpl_toolkits.axes_grid.axislines import SubplotZero
from human_trajectory.trajectory import Trajectory
from geometry_msgs.msg import Point, Quaternion, Pose, PoseStamped
from std_msgs.msg import Header
class KNNClassifier(object):
def __init__(self):
self.alpha = 0.6
self.beta = 0.4
self.k = 11
self.accuracy = 0
self.training_data = []
self.test_data = []
self.LabeledNormalizedPoses = namedtuple(
"NormalizePoses", "uuid real normal"
)
# update training and test data from database
def update_database(self):
self.training_data = []
self.test_data = []
trajs = self._retrieve_logs()
self._label_data(trajs)
# splitting training data into training and test data
def split_training_data(self, training_ratio):
temp = []
self.test_data = []
for i in self.training_data:
if random.random() < training_ratio:
temp.append(i)
else:
self.test_data.append(i)
self.training_data = temp
    # get k nearest values to a test data based on positions and velocities
    def _nearest_values_to(self, test):
        """Return the stored training entries nearest to `test`, sorted by distance.

        Distance is a weighted sum of accumulated pose-to-pose positional
        deltas (weight self.alpha) and accumulated speed differences
        (weight self.beta) over the normalised pose sequences.
        Assumes every training trajectory has at least as many normalised
        poses as it iterates over and that consecutive pose timestamps
        differ -- TODO confirm the upstream normalisation guarantees this.
        """
        index = []
        nearest = []
        test_poses = test.normal
        for i, j in enumerate(self.training_data):
            dist = 0
            vel = 0
            for k, l in enumerate(j[0].normal):
                # delta distance calculation
                dx = test_poses[k].pose.position.x - l.pose.position.x
                dy = test_poses[k].pose.position.y - l.pose.position.y
                dist += math.hypot(dx, dy)
                # delta velocity calculation
                if k >= 1:
                    # speed of the training trajectory between poses k-1 and k
                    dx = l.pose.position.x - j[0].normal[k-1].pose.position.x
                    dy = l.pose.position.y - j[0].normal[k-1].pose.position.y
                    velo_l = math.hypot(dx, dy) / (
                        (l.header.stamp.secs -
                         j[0].normal[k-1].header.stamp.secs) +
                        (l.header.stamp.nsecs -
                         j[0].normal[k-1].header.stamp.nsecs) /
                        math.pow(10, 9)
                    )
                    # speed of the test trajectory over the same step
                    dx = test_poses[k].pose.position.x - \
                        test_poses[k-1].pose.position.x
                    dy = test_poses[k].pose.position.y - \
                        test_poses[k-1].pose.position.y
                    velo_test = math.hypot(dx, dy) / (
                        (test_poses[k].header.stamp.secs -
                         test_poses[k-1].header.stamp.secs) +
                        (test_poses[k].header.stamp.nsecs -
                         test_poses[k-1].header.stamp.nsecs) /
                        math.pow(10, 9)
                    )
                    vel += abs(velo_l - velo_test)
            # NOTE(review): the first candidate is stored with its raw,
            # unweighted distance (the alpha/beta weighting below is skipped
            # while `nearest` is empty), and the final `else` appends
            # candidates even when k neighbours are already stored -- confirm
            # both behaviours are intentional.
            if nearest != []:
                dist = (self.alpha * dist) + (self.beta * vel)
                max_val = max(nearest)
                if max_val > dist and len(nearest) >= self.k:
                    # replace the current worst neighbour with this closer one
                    temp = nearest.index(max_val)
                    nearest[temp] = dist
                    index[temp] = i
                elif max_val > dist and len(nearest) < self.k:
                    nearest.append(dist)
                    index.append(i)
                else:
                    nearest.append(dist)
                    index.append(i)
        sort_data = sorted(zip(nearest, index), key=lambda i: i[0])
        return [self.training_data[i[1]] for i in sort_data]
# predict the class of the test data
def predict_class_data(self, test_data):
rospy.loginfo("Predicting class for %s", test_data.uuid)
result = None
nn = self._nearest_values_to(test_data)
human = [i for i in nn if i[1] == 'human']
nonhuman = [i for i in nn if i[1] == 'non-human']
rospy.loginfo("Vote: %d, %d", len(human), len(nonhuman))
if len(human) > len(nonhuman):
result = 'human'
else:
result = 'non-human'
rospy.loginfo("%s belongs to %s", test_data.uuid, result)
return (result, human[:1], nonhuman[:1])
# get accuracy of the overall prediction with k-fold-cross validation
def get_accuracy(self, queue=None):
rospy.loginfo("Getting the overall accuracy...")
# dividing training data into k
k_fold = 5
length = len(self.training_data) / k_fold
k_fold_list = []
preempt = False
for i in range(k_fold):
ind = i * length
k_fold_list.append(self.training_data[ind:ind+length])
# measure the accuracy
accuracy = 0
for j in k_fold_list:
rospy.loginfo("Total testing data is %d", len(j))
self.training_data = []
for i in k_fold_list:
if i != j:
self.training_data.extend(i)
counter = 0
for i in j:
if queue is not None and not queue.empty():
preempt = queue.get()['preempt']
break
result = self.predict_class_data(i[0])
rospy.loginfo("The actual class is %s", i[1])
if result[0] == i[1]:
counter += 1
accuracy += float(counter) / float(len(j))
rospy.loginfo("Accuracy for this data is %d", accuracy)
if preempt:
break
if not preempt:
self.accuracy = accuracy/float(k_fold)
return self.accuracy
# label data and put them into training set
def _label_data(self, trajs):
rospy.loginfo("Splitting data...")
for uuid, traj in trajs.iteritems():
traj.validate_all_poses()
chunked_traj = self.create_chunk(
uuid, list(zip(*traj.humrobpose)[0])
)
label = 'human'
start = traj.humrobpose[0][0].header.stamp
end = traj.humrobpose[-1][0].header.stamp
delta = float((end-start).secs + 0.000000001 * (end-start).nsecs)
if delta != 0.0:
avg_vel = traj.length[-1] / delta
else:
avg_vel = 0.0
guard = traj.length[-1] < 0.1 or avg_vel < 0.5 or avg_vel > 1.5
if guard:
label = 'non-human'
for i in chunked_traj:
self.training_data.append((i, label))
# normalize poses so that the first pose becomes (0,0)
# and the second pose becomes the base for the axis
# with tangen, cos and sin
def get_normalized_poses(self, poses):
dx = poses[1].pose.position.x - poses[0].pose.position.x
dy = poses[1].pose.position.y - poses[0].pose.position.y
if dx < 0.00001:
dx = 0.00000000000000000001
rad = math.atan(dy / dx)
for i, j in enumerate(poses):
if i > 0:
dx = j.pose.position.x - poses[0].pose.position.x
dy = j.pose.position.y - poses[0].pose.position.y
if dx < 0.00001:
dx = 0.00000000000000000001
rad2 = math.atan(dy / dx)
delta_rad = rad2 - rad
if rad2 == 0:
r = dx / math.cos(rad2)
else:
r = dy / math.sin(rad2)
x = r * math.cos(delta_rad)
y = r * math.sin(delta_rad)
poses[i].pose.position.x = x
poses[i].pose.position.y = y
poses[0].pose.position.x = poses[0].pose.position.y = 0
return poses
# chunk data for each trajectory
def create_chunk(self, uuid, poses, chunk=20):
i = 0
chunk_trajectory = []
while i < len(poses) - (chunk - 1):
normalized = list()
# can not just do poses[i:i+chunk], need to rewrite
for j in range(chunk):
position = Point(
poses[i + j].pose.position.x,
poses[i + j].pose.position.y,
poses[i + j].pose.position.z
)
orientation = Quaternion(
poses[i + j].pose.orientation.x,
poses[i + j].pose.orientation.y,
poses[i + j].pose.orientation.z,
poses[i + j].pose.orientation.w
)
pose = Pose(position, orientation)
header = Header(
poses[i + j].header.seq,
poses[i + j].header.stamp,
poses[i + j].header.frame_id
)
normalized.append(PoseStamped(header, pose))
normalized = self.get_normalized_poses(normalized)
chunk_trajectory.append(
self.LabeledNormalizedPoses(uuid, poses[i:i+chunk], normalized)
)
i += chunk
return chunk_trajectory
# retrieve trajectory from mongodb
def _retrieve_logs(self):
client = pymongo.MongoClient(
rospy.get_param("datacentre_host", "localhost"),
rospy.get_param("datacentre_port", 62345)
)
rospy.loginfo("Retrieving data from mongodb...")
trajs = dict()
rospy.loginfo("Constructing data from people perception...")
for log in client.message_store.people_perception.find():
for i, uuid in enumerate(log['uuids']):
if uuid not in trajs:
trajs[uuid] = Trajectory(uuid)
header = Header(
log['header']['seq'],
rospy.Time(log['header']['stamp']['secs'],
log['header']['stamp']['nsecs']),
log['header']['frame_id']
)
human_pose = Pose(
Point(log['people'][i]['position']['x'],
log['people'][i]['position']['y'],
log['people'][i]['position']['z']),
Quaternion(log['people'][i]['orientation']['x'],
log['people'][i]['orientation']['y'],
log['people'][i]['orientation']['z'],
log['people'][i]['orientation']['w'])
)
robot_pose = Pose(
Point(log['robot']['position']['x'],
log['robot']['position']['y'],
log['robot']['position']['z']),
Quaternion(log['robot']['orientation']['x'],
log['robot']['orientation']['y'],
log['robot']['orientation']['z'],
log['robot']['orientation']['w']))
trajs[uuid].append_pose(human_pose, header, robot_pose)
return trajs
# create a visualisation graph in cartesian coordinate
def visualize_test_between_class(self, test, human, non_human):
fig = plt.figure("Trajectories for Test, Human, and Non-Human")
ax = SubplotZero(fig, 111)
fig.add_subplot(ax)
line_style = ['r.-', 'gx-', 'bo-']
# plotting test data
x = [i.pose.position.x for i in test]
y = [i.pose.position.y for i in test]
ax.plot(x, y, line_style[0], label="Test")
# plotting human data
x = [i.pose.position.x for i in human]
y = [i.pose.position.y for i in human]
ax.plot(x, y, line_style[1], label="Human")
# plotting non-human data
x = [i.pose.position.x for i in non_human]
y = [i.pose.position.y for i in non_human]
ax.plot(x, y, line_style[2], label="Non-human")
ax.margins(0.05)
ax.legend(loc="lower right", fontsize=10)
plt.title("Chunks of Trajectories")
plt.xlabel("Axis")
plt.ylabel("Ordinate")
for direction in ["xzero", "yzero"]:
ax.axis[direction].set_axisline_style("-|>")
ax.axis[direction].set_visible(True)
for direction in ["left", "right", "bottom", "top"]:
ax.axis[direction].set_visible(False)
pylab.grid()
plt.show()
if __name__ == '__main__':
    # CLI: predictor <train_ratio> <accuracy flag>
    #   flag 1 -> run 5-fold cross validation and log the overall accuracy
    #   flag 0 -> split the data, then repeatedly classify random test
    #             chunks and visualise them until ROS shuts down
    rospy.init_node("labeled_short_poses")
    if len(sys.argv) < 3:
        rospy.logerr(
            "usage: predictor train_ratio accuracy[1/0]"
        )
        sys.exit(2)
    lsp = KNNClassifier()
    lsp.update_database()
    if int(sys.argv[2]):
        rospy.loginfo("The overall accuracy is " + str(lsp.get_accuracy()))
    else:
        lsp.split_training_data(float(sys.argv[1]))
        human_data = None
        while not rospy.is_shutdown():
            # pick a random labelled chunk from the held-out test set
            human_data = lsp.test_data[random.randint(0, len(lsp.test_data)-1)]
            prediction = lsp.predict_class_data(human_data[0])
            rospy.loginfo("The actual class is %s", human_data[1])
            # only plot when both a human and a non-human neighbour exist
            if len(prediction[1]) != 0 and len(prediction[2]) != 0:
                lsp.visualize_test_between_class(
                    human_data[0].normal,
                    prediction[1][0][0].normal,
                    prediction[2][0][0].normal
                )
| [
"[email protected]"
]
| |
e1f53e9d9c414baf21fc8962f20c6b2c150b910c | ee45e6de7188d0cc961cf24085183c95c9e325f6 | /scraper/crawl_runner.py | 21adb00ca6f86122a9fba2106a9645e96dd3cf57 | []
| no_license | ersel/catching-falling-knives | 775339fe97893c403db46dd8119048b9e93c02cd | 77cd59f015e3d5b4a7a66a354b365117bb1d6293 | refs/heads/master | 2022-12-09T12:00:10.468345 | 2018-02-22T21:02:24 | 2018-02-22T21:02:24 | 121,079,713 | 2 | 0 | null | 2022-12-08T00:53:22 | 2018-02-11T03:22:43 | Python | UTF-8 | Python | false | false | 1,297 | py | # taken from https://stackoverflow.com/a/37270442/1124076
from scrapy import signals
from scrapy.crawler import CrawlerRunner
class MyCrawlerRunner(CrawlerRunner):
    """
    Crawler object that collects items and returns output after finishing crawl.

    Scraped items come in two shapes — a 4-field base record and a 3-field
    ratio record — and are merged per stock symbol.  The Deferred returned
    by :meth:`crawl` fires with the final ``{symbol: data}`` mapping.
    """

    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        """Start a crawl; returns a Deferred firing with the symbol dict."""
        # keep all stocks scraped
        self.symbols = {}
        # create crawler (Same as in base CrawlerProcess)
        crawler = self.create_crawler(crawler_or_spidercls)
        # handle each item scraped
        crawler.signals.connect(self.item_scraped, signals.item_scraped)
        # create Twisted.Deferred launching crawl
        dfd = self._crawl(crawler, *args, **kwargs)
        # add callback - when crawl is done call return_items
        dfd.addCallback(self.return_items)
        return dfd

    def item_scraped(self, item, response, spider):
        """Collect/merge one scraped item into self.symbols by its symbol."""
        if len(item.keys()) == 4:
            # base record: store it under its symbol
            self.symbols[item['symbol']] = item
        elif len(item.keys()) == 3:
            # ratio record: merge into whatever we already hold for the
            # symbol.  Using .get() keeps this safe when the ratio record
            # arrives before its base record (the original indexed the
            # dict directly and raised KeyError in that case).
            symbol_data = self.symbols.get(item['symbol'], {})
            merged_data = {**symbol_data, **item}
            self.symbols[item['symbol']] = merged_data

    def return_items(self, result):
        """Final callback: hand back the accumulated symbol dict."""
        return self.symbols
| [
"[email protected]"
]
| |
2eeb4a4b87fc3241161976de505becfb413273f1 | cb4aa4cab1fb3264c8a58cd1b1ae81600bed38fb | /src/test/test_search.py | 62e589adbe824ec97f7320aaf71b024678354389 | []
| no_license | fabigato/address_lookup | b3c9271612de061a66b3d7dc4101a8fe1326df2c | db88159eb15f18590070e8145468b1464c058fbe | refs/heads/main | 2023-05-19T12:08:12.087743 | 2021-06-10T13:47:21 | 2021-06-10T13:47:21 | 374,792,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | from deepdiff import DeepDiff # type: ignore
from unittest import TestCase
from app.util import Address
from app.search import query_db
class TestSearch(TestCase):
    """Tests for app.search.query_db free-text address lookup."""

    def test_query_db(self):
        """Known addresses resolve to full records; unknown input gives None."""
        queries = [
            "abaigar 4, navarra",
            "calle josé maría 2, Pamplona",
            "unexisting",
        ]
        want = [
            Address(
                lon=-2.1414442,
                lat=42.6489724,
                number=4,
                street="CALLE CALLEJA",
                city="Abáigar",
                district="Abáigar",
                region="Navarra",
                postcode="31280",
            ),
            Address(
                lon=-1.6398572,
                lat=42.8015041,
                number="2 181",
                street="CALLE JOSE MARIA JIMENO JURIO",
                unit=None,
                city="Pamplona / Iruña",
                district="Pamplona / Iruña",
                region="Navarra",
                postcode=31006,
                id=125632,
                hash="7f72a12503865d51",
            ),
            None,
        ]
        got = [query_db(q) for q in queries]
        # DeepDiff yields an empty mapping when both structures match
        self.assertEqual(DeepDiff(want, got, ignore_order=True), {})
| [
"[email protected]"
]
| |
bae7db4680fc0354a644d46c840930244f86ed2a | b10b88230493c89cba76077c1593ca035dc1b2b2 | /NaiveBayes.py | 9f5ad4ad59fb08bd6f1e48faf2d160b51a257a07 | []
| no_license | rohandeb24/Text-Classification | ebea371bcd34a95375273ee41b5654251dec671e | 366a5622373f6f4dad4cfd47aab2203912d6c001 | refs/heads/master | 2020-03-23T18:27:06.333094 | 2018-07-22T16:21:58 | 2018-07-22T16:21:58 | 141,909,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
import Data
# Load the dataset via the project's Data helper and fit two multinomial
# Naive Bayes models over different text representations.
x_train, x_test, y_train, y_test = Data.process()

# model1: NB over TF-IDF features (vectorizer fitted on the training split)
vec1 = Data.tfidf(x_train)
x_train1 = vec1.transform(x_train)
model1 = MultinomialNB()
model1.fit(x_train1,y_train)

# model2: NB over bag-of-words counts
vec2 = Data.bag_of_words(x_train)
x_train2 = vec2.transform(x_train)
model2 = MultinomialNB()
model2.fit(x_train2,y_train)
def test(x=x_test):
    """Predict labels for *x* (defaults to the held-out test split).

    Returns (tfidf_predictions, bag_of_words_predictions).

    Bug fix: the original ignored its parameter and always predicted on
    the global ``x_test``; it now uses whatever is passed in, with
    unchanged behaviour for the default argument.
    """
    x_test1 = vec1.transform(x)
    x_test2 = vec2.transform(x)
    pred1 = model1.predict(x_test1)
    pred2 = model2.predict(x_test2)
    return pred1, pred2
def accuracy(predictions, y=y_test):
    """Return the fraction of *predictions* matching the true labels *y*.

    Bug fix: the original ignored the *y* parameter and always scored
    against the global ``y_test``; the default argument keeps the old
    call sites behaving identically.
    """
    return accuracy_score(y, predictions)
def train_outputs():
    """Return both models' predictions on the training set itself."""
    return model1.predict(x_train1), model2.predict(x_train2)
def predict(x):
    """Predict labels for raw documents *x* with both models.

    Returns (tfidf_predictions, bag_of_words_predictions).

    Bug fix: the original transformed *x* with the TF-IDF vectorizer for
    BOTH models, so model2 (trained on bag-of-words counts from vec2)
    received features from the wrong vectorizer.  Each model now gets
    features from its own fitted vectorizer, matching test() above.
    """
    x1 = vec1.transform(x)
    x2 = vec2.transform(x)
    pred1 = model1.predict(x1)
    pred2 = model2.predict(x2)
    return pred1, pred2
| [
"[email protected]"
]
| |
4290f33117641c516843aeaf64025823ad951026 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/f8052e4261238ff6c93465b3f0d0f22457f127ce-<container_run>-fix.py | d32a173f5a709bd873f8aaaa81b4fc29a4a7aeb0 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,425 | py | def container_run(platform: str, nvidia_runtime: bool, docker_registry: str, shared_memory_size: str, local_ccache_dir: str, command: List[str], cleanup: Cleanup, dry_run: bool=False) -> int:
    """Run *command* in the docker image selected by *platform*.

    Mounts the MXNet checkout, a build folder and a ccache directory into
    the container, streams its logs to stdout, then stops and removes it.
    Returns the container's exit status (or an internal 15x code when the
    wait/stop/remove steps themselves fail).  With ``dry_run`` the command
    is only logged.
    """
    # seconds to wait for the container's exit status before giving up
    container_wait_s = 600
    # ccache configuration handed into the container; /work/ccache is the
    # host cache directory mounted below
    environment = {
        'CCACHE_MAXSIZE': '500G',
        'CCACHE_TEMPDIR': '/tmp/ccache',
        'CCACHE_DIR': '/work/ccache',
        'CCACHE_LOGFILE': '/tmp/ccache.log',
    }
    # forward Jenkins build identifiers (when present) into the container
    jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG']
    environment.update({k: os.environ[k] for k in jenkins_env_vars if (k in os.environ)})
    # let the host environment override the ccache size limit
    environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if (k in os.environ)})
    tag = get_docker_tag(platform=platform, registry=docker_registry)
    mx_root = get_mxnet_root()
    local_build_folder = buildir()
    # make sure the host-side mount points exist before docker binds them
    os.makedirs(local_build_folder, exist_ok=True)
    os.makedirs(local_ccache_dir, exist_ok=True)
    logging.info('Using ccache directory: %s', local_ccache_dir)
    docker_client = docker.from_env()
    # equivalent docker CLI invocation, assembled only for logging
    docker_cmd_list = [get_docker_binary(nvidia_runtime), 'run', '--cap-add', 'SYS_PTRACE', '--rm', '--shm-size={}'.format(shared_memory_size), '-v', '{}:/work/mxnet'.format(mx_root), '-v', '{}:/work/build'.format(local_build_folder), '-v', '{}:/work/ccache'.format(local_ccache_dir), '-u', '{}:{}'.format(os.getuid(), os.getgid()), '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']), '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']), '-e', 'CCACHE_DIR={}'.format(environment['CCACHE_DIR']), '-e', 'CCACHE_LOGFILE={}'.format(environment['CCACHE_LOGFILE']), '-ti', tag]
    docker_cmd_list.extend(command)
    docker_cmd = ' \\\n\t'.join(docker_cmd_list)
    logging.info('Running %s in container %s', command, tag)
    logging.info('Executing the equivalent of:\n%s\n', docker_cmd)
    ret = 0
    if (not dry_run):
        # block SIGINT/SIGTERM while the container is created so the
        # cleanup bookkeeping can register it before any interrupt fires
        signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
        runtime = None
        if nvidia_runtime:
            runtime = 'nvidia'
        # detach=True: we stream logs ourselves below
        container = docker_client.containers.run(tag, runtime=runtime, detach=True, command=command, shm_size=shared_memory_size, user='{}:{}'.format(os.getuid(), os.getgid()), cap_add='SYS_PTRACE', volumes={
            mx_root: {
                'bind': '/work/mxnet',
                'mode': 'rw',
            },
            local_build_folder: {
                'bind': '/work/build',
                'mode': 'rw',
            },
            local_ccache_dir: {
                'bind': '/work/ccache',
                'mode': 'rw',
            },
        }, environment=environment)
        try:
            logging.info('Started container: %s', trim_container_id(container.id))
            cleanup.add_container(container)
            # container is registered for cleanup; signals may fire again
            signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
            # stream raw log bytes straight through to our stdout
            stream = container.logs(stream=True, stdout=True, stderr=True)
            sys.stdout.flush()
            for chunk in stream:
                sys.stdout.buffer.write(chunk)
                sys.stdout.buffer.flush()
            sys.stdout.flush()
            stream.close()
            # each teardown step gets its own try block with a distinct
            # fallback exit code so failures remain distinguishable:
            # 150 = wait failed, 151 = stop failed, 152 = remove failed
            try:
                logging.info('Waiting for status of container %s for %d s.', trim_container_id(container.id), container_wait_s)
                wait_result = container.wait(timeout=container_wait_s)
                logging.info('Container exit status: %s', wait_result)
                ret = wait_result.get('StatusCode', 200)
            except Exception as e:
                logging.exception(e)
                ret = 150
            try:
                logging.info('Stopping container: %s', trim_container_id(container.id))
                container.stop()
            except Exception as e:
                logging.exception(e)
                ret = 151
            try:
                logging.info('Removing container: %s', trim_container_id(container.id))
                container.remove()
            except Exception as e:
                logging.exception(e)
                ret = 152
            cleanup.remove_container(container)
            containers = docker_client.containers.list()
            if containers:
                logging.info('Other running containers: %s', [trim_container_id(x.id) for x in containers])
        except docker.errors.NotFound as e:
            # the container disappeared (e.g. already cleaned up elsewhere)
            logging.info('Container was stopped before cleanup started: %s', e)
    return ret
"[email protected]"
]
| |
28bb95097e2e572270acd99938a592bbc769272d | 123b437b8419a07282a79f8b0991f5bdb3523a9d | /tips 2.py | 5e64995c0af538d9c5c23d34ca32444613140648 | []
| no_license | carriegrossman/week_one | c27d7191381cda847e00042e005daf611ae6d935 | 9761bc0d1bd015874a460eb0bdedf7ab8c55883a | refs/heads/master | 2022-11-23T03:51:53.510383 | 2020-06-10T17:38:16 | 2020-06-10T17:38:16 | 269,654,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | #ex2
# Simple tip calculator: read the bill, rate the service, split the total.
bill_amount = 0
while bill_amount == 0:
    try:
        bill_amount = float(input("How much was the bill?\n"))
    except ValueError:
        print('You did not give a valid number.')

# Bug fix: the prompt offers "Good, Fair, Bad" but the comparisons were
# lower-case, so a capitalised answer silently fell through to the
# default rate; normalise the input before comparing.
service_level = input("How was the service? Good, Fair, Bad\n").strip().lower()
if service_level == "good":
    tip = bill_amount * .2
elif service_level == "fair":
    tip = bill_amount * .15
elif service_level == "bad":
    tip = bill_amount * .10
else:
    print('Incorrect value given. Default to good')
    tip = bill_amount * .2

total = bill_amount + tip
print("Tip Amount: %s" % tip)
print("Total Amount: %s" % (total))

# Ask how many ways to split; any invalid or zero answer means one person.
try:
    people = int(input("Split how many ways?\n"))
    if people == 0:
        people = 1
except ValueError:
    people = 1
    print('Invalid number. Assumes 1')

split_amount = total / people
print("This amount for each individual is: %s" % split_amount)
"[email protected]"
]
| |
719912f69370c91a4fdab0e0d9c7201a5272929d | 4c44a7281b62038f83a5caf3b8870d655db1c86c | /lists.py | aa5e4fecb4527a715ee1ff5c1958c67c45f6a95c | []
| no_license | delamora3david/ProyectoFinal | 58676c3b3e5300c36d952876ed6088f9f0b2a6f9 | b545320ae8f399453f5b1c6256e55c536c66366e | refs/heads/master | 2023-02-11T06:52:30.889833 | 2021-01-09T01:45:32 | 2021-01-09T01:45:32 | 328,012,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # Author: David De la Mora
# Date: Oct 10, 2020
# Universidad Veracruzana
# lists.py
#
# Demonstration of basic Python 2 list operations on a DNA-like sequence:
# indexing, slicing, append, item replacement, pop and concatenation.

adnseq = ["A", "C", "T", "G", "A", "T", "G", "T", "A", "C"]
adnseq2 = ["T", "G", "A", "T", "G"]

n = len(adnseq)
print adnseq
print " length = ",n
print " first position = ",adnseq[0]
print " last position = ",adnseq[n-1]
print " first 3 position = ",adnseq[0:3]    # slice: first three items
print " last 5 position = ",adnseq[-5:]     # negative index counts from the end

print "inserting one item..."
adnseq.append("T")
n = len(adnseq)   # length changed, recompute
print adnseq
print " length = ",n

print "replace first position with T"
adnseq[0] = "T"
print adnseq

print "delete last position"
adnseq.pop(n-1)   # pop by index removes (and returns) the last element
print adnseq

print " newadnseq = seq1 + seq2"
newadnseq = adnseq + adnseq2   # '+' builds a new concatenated list
print adnseq
print adnseq2
print newadnseq
print " length = ", len(newadnseq)
| [
"[email protected]"
]
| |
d99c64f3835d5a6f6e22da70c1a3608d401f8a9a | 764dca2d680311f1b08b124f2bac4e5a018ed103 | /DailyChallenge/2020/April/17.py | 20a03609fe5fe7763e59b9d16b662fcc3658b4ed | []
| no_license | mohithvegi/Leetcode | cae4a915e7ffcee69681791ab663eec769b2abb5 | 4273ad308f025579178dc177441b71c3053f0417 | refs/heads/main | 2023-06-05T14:58:30.926908 | 2021-06-24T17:40:53 | 2021-06-24T17:40:53 | 350,000,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | # https://leetcode.com/explore/challenge/card/april-leetcoding-challenge-2021/595/week-3-april-15th-april-21st/3711/
class Solution:
    def numSubmatrixSumTarget(self, matrix: List[List[int]], target: int) -> int:
        """Count non-empty submatrices whose elements sum to *target*
        (LeetCode 1074).

        Builds a 2-D prefix-sum table, then for every (top, bottom) row
        pair reduces the problem to "count subarrays with a given sum"
        over the column sums, solved with a running Counter of prefix
        values.  O(rows^2 * cols) time.
        """
        m, n = len(matrix), len(matrix[0])
        # prefix[i][j] = sum of matrix[0:i][0:j] (one extra row/col of zeros)
        prefix = [[0] * (n + 1) for _ in range(m + 1)]
        for r, row in enumerate(matrix, 1):
            for c, val in enumerate(row, 1):
                prefix[r][c] = (prefix[r - 1][c] + prefix[r][c - 1]
                                - prefix[r - 1][c - 1] + val)
        total = 0
        for top in range(m + 1):
            for bottom in range(top + 1, m + 1):
                # seen[s] = how many column prefixes between these rows sum to s
                seen = collections.Counter({0: 1})
                for c in range(1, n + 1):
                    cur = prefix[bottom][c] - prefix[top][c]
                    total += seen[cur - target]
                    seen[cur] += 1
        return total
| [
"[email protected]"
]
| |
f84368ce393ae46d76fceba737d6f61b44883dda | e9de9860f48eb2b96abdf8fd2facafe18248cf2b | /pasteleria_app/models.py | 40456361c2c6d7bd9369553510adfc6ee5520b75 | []
| no_license | josebernal321/api_basica_pasteleria | c148125c8c1f54cd1a1fc234051ccc8b03dc01ca | 2c624607e9666c4deed6fe741c47ad23802974e4 | refs/heads/main | 2023-06-23T14:57:33.401927 | 2021-07-28T04:58:51 | 2021-07-28T04:58:51 | 390,192,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Pasteles(models.Model):
    """Django model for a cake ("pastel") catalogue entry."""

    id = models.AutoField(primary_key=True)          # explicit surrogate key
    nombre = models.CharField(max_length=100)        # cake name
    descripcion = models.CharField(max_length=250)   # short description
    dificultad = models.CharField(max_length=50)     # difficulty label (free text)
    fecha = models.DateField(default=timezone.now)   # defaults to creation date
    img = models.CharField(max_length=300)           # image URL/path stored as text
    estado = models.BooleanField(default=True)       # active flag (presumably a
                                                     # soft-delete marker — confirm)
| [
"[email protected]"
]
| |
f56c695bb54f42f48fa411b078e127ed990ed482 | 9e68c9dc1cdbd3de4a86e2bfb7f08c0f3fbeebd8 | /shaw/18/sol.py | 179af80e74c17afd450e0a1bdbfed9c6bfbe2f6a | [
"MIT"
]
| permissive | swong225/advent-of-code-2017 | 9e09fdf06cf05d3366dfbaf1fa43f01e61386ebb | 79cc548a33150f7dd5589a996258985892c1d3b6 | master | 2021-09-01T11:47:28.376128 | 2017-12-26T19:53:31 | 2017-12-26T19:53:31 | 113,112,747 | 0 | 0 | null | 2017-12-05T00:52:57 | 2017-12-05T00:52:56 | null | UTF-8 | Python | false | false | 4,232 | py | with open("input") as f:
    lines = f.readlines()

# Advent of Code 2017, day 18 ("Duet").
# Part 1: interpret the assembly with a single program; report the last
#         sound played when the first 'rcv' instruction is reached.
#         NOTE(review): the puzzle spec triggers 'rcv' only for a non-zero
#         operand; this stops at the first 'rcv' unconditionally — it
#         evidently produced the right answer for this input.
# Part 2: run two copies of the program ('p' register 0 and 1) that send
#         values to each other's queue; report how many values program 1
#         sends before both programs deadlock.

regs = {}
code = []

# create registers and code
for line in lines:
    inst = line.strip().split()
    # for each param, try and change to int
    # if it fails, it's a register name
    for i in range(1, len(inst)):
        try:
            inst[i] = int(inst[i])
        except:
            regs[inst[i]] = 0
    code.append(list(inst))

# set program counter and sound value
pc = 0
sound = 0

# execute part 1
while True:
    # pull out the values; two-operand unpack fails for one-operand
    # instructions, leaving r2 holding its previous value (one-operand
    # instructions never read r2_val, so that is harmless)
    try:
        (inst, r1, r2) = code[pc]
    except:
        (inst, r1) = code[pc]
    # operands are either register names (look up) or literal ints
    if r1 in regs:
        r1_val = regs[r1]
    else:
        r1_val = r1
    if r2 in regs:
        r2_val = regs[r2]
    else:
        r2_val = r2
    if inst == 'snd':
        sound = r1_val
        pc += 1
    elif inst == 'rcv':
        regs[r1] = sound
        print("part 1", sound)
        break
        pc += 1   # NOTE(review): unreachable after the break above
    elif inst == 'set':
        regs[r1] = r2_val
        pc += 1
    elif inst == 'add':
        regs[r1] += r2_val
        pc += 1
    elif inst == 'mul':
        regs[r1] *= r2_val
        pc += 1
    elif inst == 'mod':
        regs[r1] %= r2_val
        pc += 1
    else:
        # 'jgz': jump by r2 when r1 is positive
        if r1_val > 0:
            pc += r2_val
        else:
            pc += 1

# zero out regs for part 2
for key in regs:
    regs[key] = 0

# make unique regs for each program
p0_regs = dict(regs)
p1_regs = dict(regs)

# set initial state: register 'p' holds the program id
p1_regs['p'] = 1

# create message queues (program X reads from q_to_pX)
q_to_p0 = []
q_to_p1 = []

# create send counters
send_to_p0 = 0
send_to_p1 = 0

# init program counters
p0_pc = 0
p1_pc = 0

# execute part 2: alternate the two programs, each running until it
# blocks on an empty receive queue
while True:
    # run p0 until it can't run any more (nothin to recv)
    while True:
        try:
            (inst, r1, r2) = code[p0_pc]
        except:
            (inst, r1) = code[p0_pc]
        if r1 in p0_regs:
            r1_val = p0_regs[r1]
        else:
            r1_val = r1
        if r2 in p0_regs:
            r2_val = p0_regs[r2]
        else:
            r2_val = r2
        if inst == 'snd':
            # part 2 semantics: 'snd' sends to the other program's queue
            q_to_p1.append(r1_val)
            send_to_p1 += 1
            p0_pc += 1
        elif inst == 'rcv':
            if len(q_to_p0) > 0:
                p0_regs[r1] = q_to_p0.pop(0)
                p0_pc += 1
            else:
                # nothing to recv, yield
                break
        elif inst == 'set':
            p0_regs[r1] = r2_val
            p0_pc += 1
        elif inst == 'add':
            p0_regs[r1] += r2_val
            p0_pc += 1
        elif inst == 'mul':
            p0_regs[r1] *= r2_val
            p0_pc += 1
        elif inst == 'mod':
            p0_regs[r1] %= r2_val
            p0_pc += 1
        else:
            if r1_val > 0:
                p0_pc += r2_val
            else:
                p0_pc += 1
    # run p1 until it can't run any more (same interpreter, swapped queues)
    while True:
        try:
            (inst, r1, r2) = code[p1_pc]
        except:
            (inst, r1) = code[p1_pc]
        if r1 in p1_regs:
            r1_val = p1_regs[r1]
        else:
            r1_val = r1
        if r2 in p1_regs:
            r2_val = p1_regs[r2]
        else:
            r2_val = r2
        if inst == 'snd':
            q_to_p0.append(r1_val)
            send_to_p0 += 1
            p1_pc += 1
        elif inst == 'rcv':
            if len(q_to_p1) > 0:
                p1_regs[r1] = q_to_p1.pop(0)
                p1_pc += 1
            else:
                # nothing to recv, yield
                break
        elif inst == 'set':
            p1_regs[r1] = r2_val
            p1_pc += 1
        elif inst == 'add':
            p1_regs[r1] += r2_val
            p1_pc += 1
        elif inst == 'mul':
            p1_regs[r1] *= r2_val
            p1_pc += 1
        elif inst == 'mod':
            p1_regs[r1] %= r2_val
            p1_pc += 1
        else:
            if r1_val > 0:
                p1_pc += r2_val
            else:
                p1_pc += 1
    # if both queues are empty, nothing to recv, deadlock
    if len(q_to_p1) == 0 and len(q_to_p0) == 0:
        break

# the puzzle answer is how many times program 1 sent a value
print("part 2", send_to_p0)
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.