blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a9caebabdf171d0f736e2d36d431084f893a43ac
|
8fa040a06a728ccf8f31e91f39973bb4e98b3122
|
/Python_basic/07_python_decorator.py
|
eb32b59849ccdcdc8dd2bcbecbc16d794c4ab5d2
|
[] |
no_license
|
Souuul/python
|
155ca8efe0b86c97d07863b3e024394217211f01
|
7143dcc35b98d0f1c1fe8c52d0aeb99ee5b42050
|
refs/heads/master
| 2023-01-06T04:37:32.208248 | 2020-11-10T14:24:41 | 2020-11-10T14:24:41 | 283,421,555 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,072 |
py
|
'''
decorator
decorator의 사전적 의미는 장식가, 도배업자
python에서 Decorator는 기존의 코드에 여러가지 기능을 추가하는 python구문이라고 이해하면 편해요!
Closure
first class에 대해서 알아보았어요
first class function(일급함수) : 파이썬은 일급함수를 지원하는 언어
1. 파이썬의 함수는 변수에 저장할 수 있어요!
2. 함수의 인자로 함수를 이용할 수 있어요! > decorator / 프로그램의 확정성 생산성을 높이기 위함
3. 함수의 결과값(리턴값)으로 함수를 이용할 수 있어요! > closure
'''
# # def my_outer_func(func):
# # def my_inner_func():
# # func()
# # return my_inner_func #해당함수 코드를 리턴 // ()해당함수의 실행의 결과를 리턴
# #
# # def my_func():
# # return print("my_func() 함수가 호출되었어요!!")
# #
# # decorated_my_func = my_outer_func(my_func)
# # decorated_my_func() #my_func() 함수가 호출되었어요!!
# # my_func() #my_func() 함수가 호출되었어요!!
#
# import time
#
# def my_outer_func(func): # 목적은 my_func의 기능을 확장시키기 위함!
# def my_inner_func():
# print("{} 함수 수행 시간을 계산합니다.".format(func.__name__))
# start = time.time() # 1970년 1월 1일 0시 0분 0초 0
# func()
# end = time.time()
# print("함수 수행 시간은 {} 계산합니다.".format(start-end))
# return my_inner_func #해당함수 코드를 리턴 // ()해당함수의 실행의 결과를 리턴
#
# # def my_func():
# # return print("my_func() 함수가 호출되었어요!!")
#
# # decorated_my_func = my_outer_func(my_func)
# # decorated_my_func() #my_func() 함수가 호출되었어요!! // 함수자체의 기능을 수정하지 않고 함수의 기능을 수정할 수 있
# # my_func() #my_func() 함수가 호출되었어요!!
#
# # closure vs decorator // 새로운 기능 추가되는 것을 리턴하는것
#
# @my_outer_func # decorator 기능을 추가한 my_func를 리턴
# def my_func():
# return print("my_func() 함수가 호출되었어요!!")
#
#
# my_func()
# '''
# my_func 함수 수행 시간을 계산합니다.
# my_func() 함수가 호출되었어요!!
# 함수 수행 시간은 -5.7220458984375e-06 계산합니다.
# '''
#############################################################
# def print_user_name(*args): # 인자로 들어온 사람의 이름을 출력 / 정해지지 않는
# # args는 tuple로 받아요!
# for name in args:
# print(name)
# print_user_name("홍길동", "신사임당") #이렇게도 가능
# print_user_name("홍길동", "신사임당", "유관순") #이렇게도 가능
'''
홍길동
신사임당
홍길동
신사임당
유관순
'''
# def print_user_name(**kwargs): # 관용적으로 **kwargs 표기
# # kkargs는 dict로 받아요!
# for name in kwargs.values(): # key, value로 구분
# #print(kwargs.get(name)) # get 을 통해서 추출가능
# print(name)
# print_user_name(name1 = "홍길동", name2= "신사임당") #이렇게도 가능
## 받는 함수의 인자의 개수가 다를경우
# def my_outer(func):
# def my_inner(*args, **kwargs): # decorator의 인자의 개수를 예측하기 힘들기에 *args, **kwargs 를 적용하여 인자문제 해결
# print("데코레이터!! 시작")
# func(*args, **kwargs) # *args, **kwargs
# print("데코레이터!! 끝")
#
# return my_inner
#
# @my_outer
# def my_func():
# print("이것은 소리없는 아우성!!")
# @my_outer
# def my_add(x,y):
# print("두 수의 합은 : {}".format(x+y))
#
#
# my_func()
# '''
# 데코레이터!! 시작
# 이것은 소리없는 아우성!!
# 데코레이터!! 끝
# '''
#
# my_add(1,2)
#
# '''
# 데코레이터!! 시작
# 두 수의 합은 : 3
# 데코레이터!! 끝
# '''
# 블로그!!
## 티스토리추천
##http://moon9342.github.io
|
[
"[email protected]"
] | |
ec2f894e96dc3e63e0a4c74c3de67eab7985c8bc
|
c713ba772c663849b2339ea13f3a6e407058bfc7
|
/auth/tests.py
|
796d49f3d5c9abb762a26263a4e564faf0e0c7bd
|
[
"MIT"
] |
permissive
|
aprosvetova/vas3k.club
|
3aaf31cec36a3283336e6da391766632091c1fcc
|
e667b91bb855a256bcb7760fb1555ebbb1079744
|
refs/heads/master
| 2022-12-22T16:08:27.487072 | 2020-09-14T21:59:44 | 2020-09-14T21:59:44 | 297,010,352 | 2 | 0 |
MIT
| 2020-09-20T05:43:25 | 2020-09-20T05:43:24 | null |
UTF-8
|
Python
| false | false | 5,452 |
py
|
from datetime import datetime, timedelta
import django
from django.conf import settings
from django.test import TestCase
django.setup() # todo: how to run tests from PyCharm without this workaround?
from auth.models import Code
from club.exceptions import RateLimitException, InvalidCode
from users.models.user import User
class ModelCodeTests(TestCase):
    """Tests for the `Code` auth model: creation, rate limiting and validation."""

    @classmethod
    def setUpTestData(cls):
        # Set up data for the whole TestCase
        # NOTE(review): naive datetime.now() is used here but datetime.utcnow()
        # elsewhere in this file -- confirm the model treats both consistently.
        cls.new_user: User = User.objects.create(
            email="[email protected]",
            membership_started_at=datetime.now() - timedelta(days=5),
            membership_expires_at=datetime.now() + timedelta(days=5),
        )

    def test_create_code_positive(self):
        """A fresh code stores recipient/user, has the configured length and
        expires roughly 15 minutes from now."""
        recipient = "[email protected]"
        code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        self.assertEqual(code.recipient, recipient)
        self.assertEqual(self.new_user.id, code.user_id)
        self.assertEqual(len(code.code), settings.AUTH_CODE_LENGTH)
        # delta=5 seconds of slack for slow test runs
        self.assertAlmostEqual(code.expires_at.second, (datetime.utcnow() + timedelta(minutes=15)).second, delta=5)

    def test_create_code_ratelimit(self):
        """Creating more codes than AUTH_MAX_CODE_COUNT raises RateLimitException."""
        recipient = "[email protected]"
        # override the AUTH_MAX_CODE_TIMEDELTA setting
        with self.settings(AUTH_MAX_CODE_COUNT=1):
            code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
            self.assertEqual(len(code.code), settings.AUTH_CODE_LENGTH)
            # second attempt should rise exception
            with self.assertRaises(RateLimitException):
                Code.create_for_user(user=self.new_user, recipient=recipient)

    def test_create_code_reset_ratelimit(self):
        """Codes older than AUTH_MAX_CODE_TIMEDELTA no longer count against the limit."""
        recipient = "[email protected]"
        with self.settings(AUTH_MAX_CODE_COUNT=1):
            code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
            self.assertEqual(len(code.code), settings.AUTH_CODE_LENGTH)
            # move creation time to deep enough past
            code.created_at = datetime.utcnow() - settings.AUTH_MAX_CODE_TIMEDELTA - timedelta(seconds=1)
            code.save()
            # no exception raises
            code = Code.create_for_user(user=self.new_user, recipient=recipient)
            self.assertEqual(len(code.code), settings.AUTH_CODE_LENGTH)

    def test_check_code_positive(self):
        """A valid (recipient, code) pair resolves to the issuing user."""
        recipient = "[email protected]"
        code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        user = Code.check_code(recipient=recipient, code=code.code)
        self.assertEqual(user.id, self.new_user.id)

    def test_check_code_which_is_incorrect(self):
        """An unknown code raises InvalidCode."""
        with self.assertRaises(InvalidCode):
            Code.check_code(recipient="[email protected]", code="failed")

    def test_check_code_twice(self):
        """A code is single-use: checking it a second time raises InvalidCode."""
        recipient = "[email protected]"
        code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        Code.check_code(recipient=recipient, code=code.code)  # activate first time
        with self.assertRaises(InvalidCode):
            Code.check_code(recipient=recipient, code=code.code)

    def test_check_code_which_is_not_last_one(self):
        """Presumably only the newest issued code is redeemable -- the older
        one is rejected. Verify against Code.check_code's implementation."""
        # issue few codes
        recipient = "[email protected]"
        code1: Code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        code2: Code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        # for stability test runs: push code2 into the past so code1 is the newest
        code2.created_at -= timedelta(seconds=1)
        code2.save()
        with self.assertRaises(InvalidCode):
            Code.check_code(recipient=recipient, code=code2.code)
        # first one is successful
        user = Code.check_code(recipient=recipient, code=code1.code)
        self.assertEqual(user.id, self.new_user.id)

    def test_check_code_which_is_for_other_user(self):
        """A code issued for one recipient cannot be redeemed by another."""
        recipient_right = "[email protected]"
        recipient_wrong = "[email protected]"
        code = Code.create_for_user(user=self.new_user, recipient=recipient_right, length=settings.AUTH_CODE_LENGTH)
        with self.assertRaises(InvalidCode):
            Code.check_code(recipient=recipient_wrong, code=code.code)

    def test_check_code_when_exceeded_attempts_count(self):
        """Too many failed attempts rate-limit even the correct code."""
        recipient = "[email protected]"
        code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        # override the AUTH_MAX_CODE_TIMEDELTA setting
        with self.settings(AUTH_MAX_CODE_ATTEMPTS=1):
            # first attempt
            with self.assertRaises(InvalidCode):
                Code.check_code(recipient=recipient, code="wrong_attempt")
            # second attempt should rise ratelimit exception
            with self.assertRaises(RateLimitException):
                Code.check_code(recipient=recipient, code=code.code)

    def test_check_code_which_is_expired(self):
        """An expired code raises InvalidCode."""
        recipient = "[email protected]"
        code = Code.create_for_user(user=self.new_user, recipient=recipient, length=settings.AUTH_CODE_LENGTH)
        code.expires_at = datetime.utcnow() - timedelta(seconds=1)
        code.save()
        with self.assertRaises(InvalidCode):
            Code.check_code(recipient=recipient, code=code.code)
|
[
"[email protected]"
] | |
ba5641cad8c7c2185a8bca2d2985f2d0f54439df
|
2a76ca8c01e7abe6ef64d030ecbb65e88641b278
|
/glumpy/app/window/backends/backend_glfw_deprecated.py
|
66413814abff7f25ba48933d91f337089dd6e8fb
|
[] |
permissive
|
glumpy/glumpy
|
18bfc2d76b7a5fc126fbebddf2970d95238fc66b
|
75408635bd46e48ff10939e308a71eafdaff35e8
|
refs/heads/master
| 2023-09-03T11:48:52.087002 | 2023-04-20T15:23:59 | 2023-04-20T15:23:59 | 23,520,171 | 1,228 | 225 |
BSD-3-Clause
| 2023-07-07T07:25:18 | 2014-08-31T18:30:26 |
Python
|
UTF-8
|
Python
| false | false | 17,130 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
`GLFW <http://www.glfw.org>`_ is an Open Source, multi-platform library for
creating windows with OpenGL contexts and receiving input and events. It is
easy to integrate into existing applications and does not lay claim to the main
loop.
**Usage**
.. code:: python
from glumpy import app
app.use("glfw")
window = app.Window()
**Capability**
========================== ======== ======================== ========
Multiple windows ✓ Set GL API ✓
-------------------------- -------- ------------------------ --------
Non-decorated windows ✓ Set GL Profile ✓
-------------------------- -------- ------------------------ --------
Resize windows ✓ Share GL Context ✓
-------------------------- -------- ------------------------ --------
Move windows ✓ Unicode handling ✓
-------------------------- -------- ------------------------ --------
Fullscreen ✓ Scroll event ✓
========================== ======== ======================== ========
"""
import os, sys, platform
from glumpy import gl
from glumpy.log import log
from glumpy.app import configuration
from glumpy.app.window import window
# Backend name
__name__ = "GLFW"
# Backend version (if available)
__version__ = ""
# Backend availability
__availability__ = False
# Whether the framework has been initialized
__initialized__ = False
# Active windows
__windows__ = []
# Windows scheduled to be destroyed
__windows_to_remove__ = []
# ---------------------------------------------------- convenient functions ---
def name():
    """Return the backend name."""
    return __name__


def version():
    """Return the backend version string (empty when unknown)."""
    return __version__


def available():
    """Return whether the GLFW backend could be imported."""
    return __availability__
# --------------------------------------------------------------- init/exit ---
def __init__():
    """Initialize GLFW once per process (idempotent)."""
    global __initialized__
    if not __initialized__:
        # glfw might change dir on initialization (feature, not a bug),
        # so save and restore the current working directory around it.
        cwd = os.getcwd()
        glfw.glfwInit()
        os.chdir(cwd)
        __initialized__ = True
def __exit__():
    """Terminate GLFW and mark the backend as uninitialized."""
    global __initialized__
    glfw.glfwTerminate()
    __initialized__ = False
# ------------------------------------------------------------ availability ---
try:
from glumpy.ext import glfw
__availability__ = True
__version__ = ("%d.%d.%d") % glfw.version
__init__()
__mouse_map__ = { glfw.GLFW_MOUSE_BUTTON_LEFT: window.mouse.LEFT,
glfw.GLFW_MOUSE_BUTTON_MIDDLE: window.mouse.MIDDLE,
glfw.GLFW_MOUSE_BUTTON_RIGHT: window.mouse.RIGHT }
__key_map__ = { glfw.GLFW_KEY_ESCAPE: window.key.ESCAPE,
glfw.GLFW_KEY_ENTER: window.key.ENTER,
glfw.GLFW_KEY_TAB: window.key.TAB,
glfw.GLFW_KEY_BACKSPACE: window.key.BACKSPACE,
glfw.GLFW_KEY_INSERT: window.key.INSERT,
glfw.GLFW_KEY_DELETE: window.key.DELETE,
glfw.GLFW_KEY_RIGHT: window.key.RIGHT,
glfw.GLFW_KEY_LEFT: window.key.LEFT,
glfw.GLFW_KEY_DOWN: window.key.DOWN,
glfw.GLFW_KEY_UP: window.key.UP,
glfw.GLFW_KEY_PAGE_UP: window.key.PAGEUP,
glfw.GLFW_KEY_PAGE_DOWN: window.key.PAGEDOWN,
glfw.GLFW_KEY_HOME: window.key.HOME,
glfw.GLFW_KEY_END: window.key.END,
glfw.GLFW_KEY_CAPS_LOCK: window.key.CAPSLOCK,
glfw.GLFW_KEY_LEFT_SHIFT: window.key.LSHIFT,
glfw.GLFW_KEY_RIGHT_SHIFT: window.key.RSHIFT,
glfw.GLFW_KEY_PRINT_SCREEN: window.key.PRINT,
glfw.GLFW_KEY_PAUSE: window.key.PAUSE,
glfw.GLFW_KEY_F1: window.key.F1,
glfw.GLFW_KEY_F2: window.key.F2,
glfw.GLFW_KEY_F3: window.key.F3,
glfw.GLFW_KEY_F4: window.key.F4,
glfw.GLFW_KEY_F5: window.key.F5,
glfw.GLFW_KEY_F6: window.key.F6,
glfw.GLFW_KEY_F7: window.key.F7,
glfw.GLFW_KEY_F8: window.key.F8,
glfw.GLFW_KEY_F9: window.key.F9,
glfw.GLFW_KEY_F10: window.key.F10,
glfw.GLFW_KEY_F11: window.key.F11,
glfw.GLFW_KEY_F12: window.key.F12 }
except ImportError:
__availability__ = False
__version__ = None
# -------------------------------------------------------------- capability ---
capability = {
"Window position get/set" : True,
"Window size get/set" : True,
"Multiple windows" : True,
"Mouse scroll events" : True,
"Non-decorated window" : True,
"Non-sizeable window" : True,
"Fullscreen mode" : True,
"Unicode processing" : True,
"Set GL version" : True,
"Set GL profile" : True,
"Share GL context" : True,
}
# ------------------------------------------------------- set_configuration ---
def set_configuration(config):
    """ Set gl configuration for GLFW """
    # Refresh rate 0: presumably "use the monitor default" -- TODO confirm
    # against the GLFW window-hint documentation.
    glfw.glfwWindowHint( glfw.GLFW_REFRESH_RATE, 0 )
    # Per-channel color buffer depth
    glfw.glfwWindowHint(glfw.GLFW_RED_BITS, config.red_size)
    glfw.glfwWindowHint(glfw.GLFW_GREEN_BITS, config.green_size)
    glfw.glfwWindowHint(glfw.GLFW_BLUE_BITS, config.blue_size)
    glfw.glfwWindowHint(glfw.GLFW_ALPHA_BITS, config.alpha_size)
    # Accumulation buffer is not used by glumpy
    glfw.glfwWindowHint(glfw.GLFW_ACCUM_RED_BITS, 0)
    glfw.glfwWindowHint(glfw.GLFW_ACCUM_GREEN_BITS, 0)
    glfw.glfwWindowHint(glfw.GLFW_ACCUM_BLUE_BITS, 0)
    glfw.glfwWindowHint(glfw.GLFW_ACCUM_ALPHA_BITS, 0)
    glfw.glfwWindowHint(glfw.GLFW_DEPTH_BITS, config.depth_size)
    glfw.glfwWindowHint(glfw.GLFW_STENCIL_BITS, config.stencil_size)
    glfw.glfwWindowHint(glfw.GLFW_SRGB_CAPABLE, config.srgb)
    glfw.glfwWindowHint(glfw.GLFW_SAMPLES, config.samples)
    glfw.glfwWindowHint(glfw.GLFW_STEREO, config.stereo)
    if config.api in ("ES", "es"):
        # OpenGL ES context: the desktop version/profile hints below do not apply
        glfw.glfwWindowHint(glfw.GLFW_CLIENT_API,
                            glfw.GLFW_OPENGL_ES_API)
    else:
        glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR,
                            config.major_version)
        glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR,
                            config.minor_version)
        if config.major_version >= 3 and config.profile == "core":
            glfw.glfwWindowHint(glfw.GLFW_OPENGL_PROFILE,
                                glfw.GLFW_OPENGL_CORE_PROFILE)
            # forward-compat is typically required for core profiles on macOS
            glfw.glfwWindowHint(glfw.GLFW_OPENGL_FORWARD_COMPAT, True)
        elif config.major_version >= 3 and config.profile == "compatibility":
            glfw.glfwWindowHint(glfw.GLFW_OPENGL_PROFILE,
                                glfw.GLFW_OPENGL_COMPAT_PROFILE)
        else:
            glfw.glfwWindowHint(glfw.GLFW_OPENGL_PROFILE,
                                glfw.GLFW_OPENGL_ANY_PROFILE)
# ------------------------------------------------------------------ Window ---
class Window(window.Window):
    """GLFW backend implementation of a glumpy window.

    Creates a native GLFW window, installs GLFW callbacks and translates
    them into glumpy events (``on_resize``, ``on_mouse_press``,
    ``on_key_press``, ...). Each instance registers itself in the module
    level ``__windows__`` list so that ``process()`` can drive it.
    """

    def __init__( self, width=512, height=512, title=None, visible=True, aspect=None,
                  decoration=True, fullscreen=False, screen=None, config=None, context=None, color=(0,0,0,1), vsync=False):
        """Create (and possibly show) a native GLFW window.

        Parameters mirror ``window.Window``; ``vsync`` additionally
        controls the GLFW swap interval (True = sync to vertical blank).
        """
        window.Window.__init__(self, width=width,
                               height=height,
                               title=title,
                               visible=visible,
                               aspect=aspect,
                               decoration=decoration,
                               fullscreen=fullscreen,
                               screen=screen,
                               config=config,
                               context=context,
                               color=color)

        # Whether HiDPI (retina) mode is active: the framebuffer is twice
        # the window size and mouse coordinates must be scaled accordingly.
        self._hidpi = False

        def on_error(error, message):
            log.warning(message)
        glfw.glfwSetErrorCallback(on_error)

        # Default hints first, then override per requested options
        glfw.glfwWindowHint(glfw.GLFW_RESIZABLE, True)
        glfw.glfwWindowHint(glfw.GLFW_DECORATED, True)
        glfw.glfwWindowHint(glfw.GLFW_VISIBLE, True)
        if not decoration:
            glfw.glfwWindowHint(glfw.GLFW_DECORATED, False)
        if not visible:
            glfw.glfwWindowHint(glfw.GLFW_VISIBLE, False)

        if config is None:
            config = configuration.Configuration()
        set_configuration(config)

        # Fullscreen windows take over the requested monitor at its
        # current video-mode resolution.
        monitor = glfw.glfwGetMonitors()[self._screen] if fullscreen else None
        if fullscreen:
            mode = glfw.glfwGetVideoMode(monitor)
            self._width, self._height = mode[:2]

        self._native_window = glfw.glfwCreateWindow(self._width, self._height,
                                                    self._title, monitor, None)
        if not self._native_window:
            log.critical("Window creation failed")
            __exit__()
            sys.exit()
        glfw.glfwMakeContextCurrent(self._native_window)
        glfw.glfwSwapInterval(1 if vsync else 0)

        # OSX: check framebuffer size / window size. On retina display, they
        # can be different so we try to correct window size such as having
        # the framebuffer size of the right size.
        # BUGFIX: the original tested ``platform == 'darwin'`` where
        # ``platform`` is the *module* imported at the top of the file, so
        # the comparison was always False and HiDPI was never detected.
        w, h = glfw.glfwGetFramebufferSize(self._native_window)
        if sys.platform == 'darwin' and (w != width or h != height):
            width, height = width//2, height//2
            glfw.glfwSetWindowSize(self._native_window, width, height)
            log.info("HiDPI detected, fixing window size")
            self._hidpi = True

        def on_framebuffer_resize(win, width, height):
            self._width, self._height = width, height
            self.dispatch_event('on_resize', width, height)
        glfw.glfwSetFramebufferSizeCallback(self._native_window, on_framebuffer_resize)

        def on_cursor_enter(win, entered):
            if entered:
                self.dispatch_event('on_enter')
            else:
                self.dispatch_event('on_leave')
        glfw.glfwSetCursorEnterCallback(self._native_window, on_cursor_enter)

        def on_window_close(win):
            self.close()
        glfw.glfwSetWindowCloseCallback(self._native_window, on_window_close)

        def on_keyboard(win, key, scancode, action, mods):
            symbol = self._keyboard_translate(key)
            modifiers = self._modifiers_translate(mods)
            # GLFW_REPEAT counts as a press so held keys keep firing
            if action in (glfw.GLFW_PRESS, glfw.GLFW_REPEAT):
                self.dispatch_event('on_key_press', symbol, modifiers)
            else:
                self.dispatch_event('on_key_release', symbol, modifiers)
        glfw.glfwSetKeyCallback(self._native_window, on_keyboard)

        def on_character(win, character):
            self.dispatch_event('on_character', u"%c" % character)
        glfw.glfwSetCharCallback(self._native_window, on_character)

        def on_mouse_button(win, button, action, mods):
            x, y = glfw.glfwGetCursorPos(win)
            if self._hidpi:
                x, y = 2*x, 2*y
            button = __mouse_map__.get(button, window.mouse.UNKNOWN)
            if action == glfw.GLFW_RELEASE:
                self._button = window.mouse.NONE
                self._mouse_x = x
                self._mouse_y = y
                self.dispatch_event('on_mouse_release', x, y, button)
            elif action == glfw.GLFW_PRESS:
                self._button = button
                self._mouse_x = x
                self._mouse_y = y
                self.dispatch_event('on_mouse_press', x, y, button)
        glfw.glfwSetMouseButtonCallback(self._native_window, on_mouse_button)

        def on_mouse_motion(win, x, y):
            if self._hidpi:
                x, y = 2*x, 2*y
            dx = x - self._mouse_x
            dy = y - self._mouse_y
            self._mouse_x = x
            self._mouse_y = y
            # A held button turns motion into a drag event
            if self._button != window.mouse.NONE:
                self.dispatch_event('on_mouse_drag', x, y, dx, dy, self._button)
            else:
                self.dispatch_event('on_mouse_motion', x, y, dx, dy)
        glfw.glfwSetCursorPosCallback(self._native_window, on_mouse_motion)

        def on_scroll(win, xoffset, yoffset):
            x, y = glfw.glfwGetCursorPos(win)
            if self._hidpi:
                x, y = 2*x, 2*y
            self.dispatch_event('on_mouse_scroll', x, y, xoffset, yoffset)
        glfw.glfwSetScrollCallback( self._native_window, on_scroll )

        self._width, self._height = self.get_size()
        __windows__.append(self)

    def _modifiers_translate( self, modifiers ):
        """Translate a GLFW modifier bitmask into a glumpy one.

        Note: ``self._modifiers`` caches the *raw* GLFW mask; the return
        value is the translated glumpy mask.
        """
        _modifiers = 0
        if modifiers & glfw.GLFW_MOD_SHIFT:
            _modifiers |= window.key.MOD_SHIFT
        if modifiers & glfw.GLFW_MOD_CONTROL:
            _modifiers |= window.key.MOD_CTRL
        if modifiers & glfw.GLFW_MOD_ALT:
            _modifiers |= window.key.MOD_ALT
        if modifiers & glfw.GLFW_MOD_SUPER:
            _modifiers |= window.key.MOD_COMMAND
        self._modifiers = modifiers
        return _modifiers

    def _keyboard_translate( self, code ):
        """Translate a GLFW key code into a glumpy key symbol."""
        # Printable ASCII range (and 161/162) maps to itself
        if (32 <= code <= 96) or code in [161, 162]:
            return code
        return __key_map__.get(code, window.key.UNKNOWN)

    def show(self):
        """Make the window visible and dispatch ``on_show``."""
        glfw.glfwShowWindow( self._native_window )
        self.dispatch_event('on_show')

    def hide(self):
        """Hide the window and dispatch ``on_hide``."""
        glfw.glfwHideWindow( self._native_window )
        self.dispatch_event('on_hide')

    def close(self):
        """Schedule the window for destruction and dispatch ``on_close``."""
        glfw.glfwSetWindowShouldClose(self._native_window, True)
        # NOTE(review): calling close() twice on the same window raises
        # ValueError here -- confirm callers never do that.
        __windows__.remove(self)
        __windows_to_remove__.append(self)
        # Cancel every timer attached to this window
        for i in range(len(self._timer_stack)):
            handler, interval = self._timer_stack[i]
            self._clock.unschedule(handler)
        self.dispatch_event('on_close')

    def destroy(self):
        """Destroy the underlying native GLFW window."""
        glfw.glfwDestroyWindow(self._native_window)

    def get_screen(self):
        """Return the monitor the window is fullscreen on (or NULL-equivalent)."""
        return glfw.glfwGetWindowMonitor(self._native_window)

    def set_fullscreen(self, fullscreen, screen=None):
        """Enter or leave fullscreen mode on the given screen index."""
        screen = 0 if screen is None else screen
        mode = glfw.glfwGetVideoMode(glfw.glfwGetMonitors()[screen])
        # NOTE(review): glfwSetWindowMonitor normally takes a monitor
        # *handle*, not a bare index -- confirm against the glfw wrapper.
        if fullscreen:
            glfw.glfwSetWindowMonitor(self._native_window, screen, 0, 0, mode[0], mode[1], mode[-1])
        else:
            glfw.glfwSetWindowMonitor(self._native_window, screen, 0, 0, 256, 256, mode[-1])

    def get_fullscreen(self):
        """Return whether the window is currently fullscreen."""
        return self._fullscreen

    def set_title(self, title):
        """Set the native window title and cache it."""
        glfw.glfwSetWindowTitle( self._native_window, title)
        self._title = title

    def get_title(self):
        """Return the cached window title."""
        return self._title

    def set_size(self, width, height):
        """Request a new window size; the cached size is the framebuffer size."""
        glfw.glfwSetWindowSize(self._native_window, width, height)
        self._width, self._height = glfw.glfwGetFramebufferSize(self._native_window)

    def get_size(self):
        """Return the framebuffer size (differs from window size on HiDPI)."""
        # self._width, self._height = glfw.glfwGetWindowSize(self._native_window)
        self._width, self._height = glfw.glfwGetFramebufferSize(self._native_window)
        return self._width, self._height

    def set_position(self, x, y):
        """Move the window and cache the actual resulting position."""
        glfw.glfwSetWindowPos(self._native_window, x, y)
        self._x, self._y = glfw.glfwGetWindowPos(self._native_window)

    def get_position(self):
        """Return the current window position."""
        self._x, self._y = glfw.glfwGetWindowPos(self._native_window)
        return self._x, self._y

    def swap(self):
        """Swap front and back buffers."""
        glfw.glfwSwapBuffers(self._native_window)

    def activate(self):
        """Make this window's GL context current."""
        glfw.glfwMakeContextCurrent(self._native_window)
# ----------------------------------------------------------------- windows ---
def windows():
    """Return the list of currently active backend windows."""
    return __windows__
# ----------------------------------------------------------------- process ---
def process(dt):
    """Run one iteration of the event loop.

    Polls GLFW events, dispatches draw/idle events to every active
    window, swaps buffers, then destroys windows scheduled for removal.
    Returns the number of windows still open.
    """
    # Poll for and process events
    glfw.glfwPollEvents()

    for win in __windows__:
        # Make window active
        win.activate()
        # Dispatch the main draw event
        win.dispatch_event('on_draw', dt)
        # Dispatch the idle event
        win.dispatch_event('on_idle', dt)
        # Swap buffers
        win.swap()

    # BUGFIX: the original removed items from __windows_to_remove__ while
    # iterating over that same list, which skips every other scheduled
    # window. Iterate over a snapshot instead.
    for win in list(__windows_to_remove__):
        win.destroy()
        __windows_to_remove__.remove(win)

    return len(__windows__)
|
[
"[email protected]"
] | |
2eeceb42de2ea27fde10e7e6d5c08914488ee6c5
|
b4da2201d2df789e28472aeded28720d5269ade5
|
/Komodo-Edit-7/lib/mozilla/components/koLintService.py
|
4e543d65bce81928a8d67a2192aeee69c031327d
|
[] |
no_license
|
AeonSaber/first_app
|
5ad89d4fb05d7662e2a39ce68176f43f1e618bf0
|
522fdfa6d33419fd49e431766fff85b40d21e78e
|
refs/heads/master
| 2020-06-12T17:22:09.786142 | 2013-09-09T23:57:51 | 2013-09-09T23:57:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 30,788 |
py
|
#!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os, sys
import threading
import time
import urllib2
from xpcom import components, nsError, ServerException, COMException
from xpcom._xpcom import PROXY_SYNC, PROXY_ALWAYS, PROXY_ASYNC, getProxyForObject
from xpcom.server import WrapObject, UnwrapObject
from koLintResult import KoLintResult, getProxiedEffectivePrefs
from koLintResults import koLintResults
import logging
log = logging.getLogger("koLintService")
#log.setLevel(logging.DEBUG)
class RequestQueue:
    """A blocking FIFO queue of lint requests.

    This is a modification of Python's std Queue.Queue class:
    - drop maxsize related stuff
    - calls are always blocking
    - add .prepend() and .remove_uid()

    Locking protocol: ``mutex`` guards the queue representation;
    ``esema`` is held exactly when the queue is empty, so ``get()``
    blocks on it until an item arrives.
    """
    def __init__(self):
        # Python 2 `thread` module: this file targets Komodo's Py2 runtime.
        import thread
        self._init()
        self.mutex = thread.allocate_lock()
        self.esema = thread.allocate_lock() # if acquired, then queue is empty
        self.esema.acquire()

    def put(self, item):
        """Put an item into the queue."""
        log.debug("in RequestQueue.put, acquiring mutex")
        self.mutex.acquire()
        log.debug("in RequestQueue.put, acquired mutex")
        try:
            was_empty = self._empty()
            self._append(item)
            # If we fail before here, the empty state has
            # not changed, so we can skip the release of esema
            if was_empty:
                log.debug("in RequestQueue.put, releasing esema")
                self.esema.release()
        finally:
            # Catching system level exceptions here (RecursionDepth,
            # OutOfMemory, etc) - so do as little as possible in terms
            # of Python calls.
            log.debug("in RequestQueue.put, releasing mutex")
            self.mutex.release()

    def prepend(self, item):
        """Prepend an item to the queue (it becomes the next one returned)."""
        log.debug("in RequestQueue.prepend, acquiring mutex")
        self.mutex.acquire()
        log.debug("in RequestQueue.prepend, acquired mutex")
        try:
            was_empty = self._empty()
            self._prepend(item)
            # If we fail before here, the empty state has
            # not changed, so we can skip the release of esema
            if was_empty:
                log.debug("in RequestQueue.prepend, releasing esema")
                self.esema.release()
        finally:
            # Catching system level exceptions here (RecursionDepth,
            # OutOfMemory, etc) - so do as little as possible in terms
            # of Python calls.
            log.debug("in RequestQueue.prepend, releasing mutex")
            self.mutex.release()

    def get(self):
        """Remove and return an item from the queue.

        Block if necessary until an item is available.
        """
        log.debug("in RequestQueue.get, acquiring esema")
        self.esema.acquire()
        log.debug("in RequestQueue.get, acquired esema")
        log.debug("in RequestQueue.get, acquiring mutex")
        self.mutex.acquire()
        log.debug("in RequestQueue.get, acquired mutex")
        release_esema = 1
        try:
            item = self._get()
            # Failure means empty state also unchanged - release_esema
            # remains true.
            release_esema = not self._empty()
        finally:
            if release_esema:
                log.debug("in RequestQueue.get, releasing esema")
                self.esema.release()
            log.debug("in RequestQueue.get, releasing mutex")
            self.mutex.release()
        return item

    def remove_uid(self, uid):
        """Remove all current requests with the given uid.

        Does not return anything.
        """
        log.debug("in RequestQueue.remove_uid, acquiring esema")
        if not self.esema.acquire(0): # do not block to acquire lock
            # return if could not acquire: means queue is empty and
            # therefore do not have any items to remove
            log.debug("in RequestQueue.remove_uid, did not acquire esema")
            return
        # BUGFIX: the original logged "acquired mutex" at this point even
        # though it is esema that was just acquired (the mutex is only
        # acquired below). The trace messages now match the actual locks.
        log.debug("in RequestQueue.remove_uid, acquired esema")
        log.debug("in RequestQueue.remove_uid, acquiring mutex")
        self.mutex.acquire()
        release_esema = 1
        try:
            self._remove_uid(uid)
            # Failure means empty state also unchanged - release_esema
            # remains true.
            release_esema = not self._empty()
        finally:
            if release_esema:
                log.debug("in RequestQueue.remove_uid, releasing esema")
                self.esema.release()
            log.debug("in RequestQueue.remove_uid, releasing mutex")
            self.mutex.release()

    #---- Override these methods to implement other queue organizations
    # (e.g. stack or priority queue). These will only be called with
    # appropriate locks held.

    # Initialize the queue representation
    def _init(self):
        self.queue = []

    # Check whether the queue is empty
    def _empty(self):
        return not self.queue

    # Put a new item in the queue
    def _append(self, item):
        self.queue.append(item)

    def _prepend(self, item):
        self.queue.insert(0, item)

    # Get an item from the queue
    def _get(self):
        item = self.queue[0]
        del self.queue[0]
        return item

    # Remove all requests with the given uid.
    def _remove_uid(self, uid):
        # NOTE(review): items *without* a `uid` attribute are dropped too
        # (the hasattr guard filters them out) -- confirm that is intended.
        self.queue = [item for item in self.queue
                      if hasattr(item, "uid") and item.uid != uid]
class _GenericAggregator(object):
    """Aggregate linter: runs every terminal linter registered for a
    language and merges their results into a single koLintResults."""
    _com_interfaces_ = [components.interfaces.koILinter]
    _reg_desc_ = "Komodo Generic Aggregate Linter"
    _reg_clsid_ = "{b68f4ff8-f37e-45d1-970e-88b964e7096d}"
    _reg_contractid_ = "@activestate.com/koGenericLinterAggregator;1"

    def initialize(self, languageName, koLintService):
        """Remember the language and the lint service to query for linters."""
        self._languageName = languageName
        self._koLintService = koLintService

    def lint(self, request):
        """Decode the request content and delegate to lint_with_text()."""
        text = request.content.encode(request.encoding.python_encoding_name)
        return self.lint_with_text(request, text)

    def lint_with_text(self, request, text):
        """Run each terminal linter over `text`, merging non-empty results."""
        merged = koLintResults()
        for linter in self._koLintService.getTerminalLintersForLanguage(self._languageName):
            try:
                partial = UnwrapObject(linter).lint_with_text(request, text)
            except:
                log.exception("lint_with_text exception")
                continue
            # Skip linters that produced nothing
            if not (partial and partial.getNumResults()):
                continue
            # First non-empty result is adopted as-is; later ones are merged
            merged = merged.addResults(partial) if merged.getNumResults() else partial
        return merged
class KoLintRequest:
    """A single lint request: which document to lint, with which linter,
    and where to store the results."""
    _com_interfaces_ = [components.interfaces.koILintRequest]
    _reg_desc_ = "Komodo Lint Request"
    _reg_clsid_ = "{845A872F-293F-4a82-8552-40849A92EC80}"
    _reg_contractid_ = "@activestate.com/koLintRequest;1"

    def __init__(self):
        """Create an empty request; fields are populated by the caller."""
        # identity / routing
        self.rid = None
        self.uid = ''
        self.linterType = ''
        # document payload
        self._koDoc = None
        self.cwd = ''
        self.content = None
        self.encoding = None
        # processing state
        self.linter = None
        self.results = None
        self.errorString = ''

    @property
    def document(self):
        """DEPRECATED alias of `koDoc` (since Komodo 6.0.0b1)."""
        import warnings
        warnings.warn("`koILintRequest.document` was DEPRECATED in Komodo "
                      "6.0.0b1, use `koILintRequest.koDoc`.",
                      DeprecationWarning)
        return self.koDoc

    @property
    def koDoc(self):
        """The (proxied) document attached to this request."""
        return self._koDoc

    def get_koDoc(self):
        return self._koDoc

    def set_koDoc(self, val):
        # Access to the koDoc *must* be from the main thread, otherwise
        # Komodo may crash! Hence the synchronous main-thread proxy.
        self._koDoc = getProxyForObject(1,
            components.interfaces.koIDocument, val,
            PROXY_ALWAYS | PROXY_SYNC)

    def describe(self):
        """Return a short human-readable description of this request."""
        return "<KoLintRequest: %s on uid %s>" % (self.linterType, self.uid)
class KoLintService:
_com_interfaces_ = [components.interfaces.koILintService,
components.interfaces.nsIObserver]
_reg_desc_ = "Komodo Lint Management Service"
_reg_clsid_ = "{9FD67601-CB60-411D-A212-ED21B3D25C15}"
_reg_contractid_ = "@activestate.com/koLintService;1"
def __init__(self):
log.info("KoLintService.__init__()")
self._linterCache = {} # mapping of linterCID to koILinter instance
self.requests = RequestQueue() # an item of None is the quit sentinel
self._shuttingDown = 0
self.manager = threading.Thread(target=self.run, name="Linter")
self.manager.setDaemon(True)
self.manager.start()
self._wrapped = WrapObject(self, components.interfaces.nsIObserver)
_observerSvc = components.classes["@mozilla.org/observer-service;1"].\
getService(components.interfaces.nsIObserverService)
_observerSvc.addObserver(self._wrapped, 'xpcom-shutdown', 1)
self._prefs = components.classes["@activestate.com/koPrefService;1"].\
getService(components.interfaces.koIPrefService).prefs
# dict of { 'terminals' => array of linters, 'aggregators' => array of linters }
self._linterCIDsByLanguageName = {}
# Init it now, pay the price of walking through the categories now...
catman = components.classes["@mozilla.org/categorymanager;1"].\
getService(components.interfaces.nsICategoryManager)
categoryName = 'category-komodo-linter-aggregator'
names = catman.enumerateCategory(categoryName)
while names.hasMoreElements():
nameObj = names.getNext()
rawName, fixedName = self._getCategoryNameFromNameObj(nameObj)
cid = catman.getCategoryEntry(categoryName, rawName)
if not self._linterCIDsByLanguageName.has_key(fixedName):
self._linterCIDsByLanguageName[fixedName] = {'terminals':[],
'aggregator':cid}
else:
log.warn("Possible Problem: more than one entry for linter aggregator %s (was %s), now %s",
name,
self._linterCIDsByLanguageName[fixedName]['aggregator'],
cid)
self._linterCIDsByLanguageName[fixedName]['aggregator'] = cid
categoryName = 'category-komodo-linter'
names = catman.enumerateCategory(categoryName)
while names.hasMoreElements():
nameObj = names.getNext()
rawName, fixedName = self._getCategoryNameFromNameObj(nameObj)
idx = fixedName.find("&type=")
if idx == -1:
languageName = fixedName
else:
languageName = fixedName[:idx]
cid = catman.getCategoryEntry(categoryName, rawName)
if not self._linterCIDsByLanguageName.has_key(languageName):
self._linterCIDsByLanguageName[languageName] = {'terminals':[],
'aggregator':None}
self._linterCIDsByLanguageName[languageName]['terminals'].append(cid)
#log.debug("Loaded these linters: %s", self._linterCIDsByLanguageName)
def _getCategoryNameFromNameObj(self, nameObj):
nameObj.QueryInterface(components.interfaces.nsISupportsCString)
rawName = nameObj.data
try:
fixedName = urllib2.unquote(rawName)
except:
fixedName = rawName
return rawName, fixedName
def getLinter_CID_ForLanguage(self, languageName):
return self._getLinterCIDByLanguageName(languageName)
def observe(self, subject, topic, data):
#print "file status service observed %r %s %s" % (subject, topic, data)
if topic == 'xpcom-shutdown':
log.debug("file status got xpcom-shutdown, unloading");
self.terminate()
def terminate(self):
log.info("KoLintService.terminate()")
self.requests.prepend(None) # prepend the quit sentinel
self._shuttingDown = 1
# Do NOT attempt to .join() the manager thread because it is nigh on
# impossible to avoid all possible deadlocks.
def getTerminalLintersForLanguage(self, languageName):
return [self._getLinterByCID(cid)
for cid in self._linterCIDsByLanguageName[languageName]['terminals']]
GENERIC_LINTER_AGGREGATOR_CID = "@activestate.com/koGenericLinterAggregator;1"
def _getLinterCIDByLanguageName(self, languageName):
try:
linters = self._linterCIDsByLanguageName[languageName]
except KeyError:
self._linterCIDsByLanguageName[languageName] = {'aggregator':None,
'terminals':[],
'generated':True}
return None
# If there's no explicit aggregator, return the first terminal linter.
# If there isn't one, throw the ItemError all the way to top-level
if linters['aggregator'] is not None:
return linters['aggregator']
if len(linters['terminals']) != 1:
if len(linters['terminals']) == 0:
if not linters.get('generated', False):
log.error("No terminal linters for lang %s", languageName)
return None
# Create a generic aggregator for this language.
linters['aggregator'] = (self.GENERIC_LINTER_AGGREGATOR_CID
+ ":" + languageName)
return linters['aggregator']
return linters['terminals'][0]
def getLinterForLanguage(self, languageName):
"""Return a koILinter XPCOM component of the given linterCID.
This method cache's linter instances. If there is no such linter
then an exception is raised.
Note that aggregators are favored over terminal linters.
"""
linterCID = self._getLinterCIDByLanguageName(languageName)
if linterCID is None:
return None
return self._getLinterByCID(linterCID)
def _getLinterByCID(self, linterCID):
if linterCID not in self._linterCache:
try:
if linterCID.startswith(self.GENERIC_LINTER_AGGREGATOR_CID):
languageName = linterCID[len(self.GENERIC_LINTER_AGGREGATOR_CID) + 1:]
linter = components.classes[self.GENERIC_LINTER_AGGREGATOR_CID].createInstance(components.interfaces.koILinter)
UnwrapObject(linter).initialize(languageName, self)
elif linterCID not in components.classes.keys():
linter = None
else:
linter = components.classes[linterCID].createInstance(components.interfaces.koILinter)
except COMException, ex:
errmsg = "Internal Error creating a linter with CID '%s': %s"\
% (linterCID, ex)
raise ServerException(nsError.NS_ERROR_UNEXPECTED, errmsg)
self._linterCache[linterCID] = linter
return self._linterCache[linterCID]
def addRequest(self, request):
"""Add the given request to the queue.
If there is an error (e.g. bogus linterType) an exception is raised.
"""
log.info("KoLintService.addRequest(%s)", request.describe())
# Fill out the request (because document access and component
# creation must often be done in the main thread).
request.content = request.koDoc.buffer
request.encoding = request.koDoc.encoding
if request.linterType:
request.linter = self.getLinterForLanguage(request.linterType)
self.requests.put(request)
def cancelPendingRequests(self, uid):
log.info("KoLintService.cancelPendingRequests(uid='%s')", uid)
self.requests.remove_uid(uid)
# This does nothing to stop the reporting of results from a
# possible _currently running_ lint request for this uid.
# This is currently handled on the JavaScript side via the
# koILintRequest.rid attribute.
def _getEncodingLintResults(self, content, encoding):
"""Return lint results for encoding errors in the given document.
"content" is the document content as a unicode string
"encoding" is the currently selected encoding for the document
Returns a koLintResults instance.
"""
try:
encodedString = content.encode(encoding.python_encoding_name,
"strict")
except UnicodeError, ex:
pass # errors are handled after the try/except/else block
else:
return koLintResults() # no encoding errors
# Find the specific errors by encoding with "replace" and finding
# where those replacements were.
escapedContent = content.replace('?', 'X')
encodedString = escapedContent.encode(encoding.python_encoding_name,
"replace")
offset = 0
indeces = []
while 1:
index = encodedString.find('?', offset)
if index == -1:
break
indeces.append(index)
offset = index + 1
log.debug("encoding errors at indeces %s", indeces)
results = koLintResults()
lines = content.splitlines(1) # keep line terminators
offset = 0 # the current offset in the document
for i in range(len(lines)):
line = lines[i]
while indeces and indeces[0] < offset + len(line):
index = indeces.pop(0) # this index is on this line
r = KoLintResult()
r.description = "This character cannot be represented with "\
"the current encoding: '%s'"\
% encoding.python_encoding_name
r.lineStart = i+1
r.lineEnd = i+1
r.columnStart = index - offset + 1
r.columnEnd = r.columnStart + 1
log.debug("encoding error: index=%d: %d,%d-%d,%d", index,
r.lineStart, r.columnStart, r.lineEnd, r.columnEnd)
r.severity = r.SEV_ERROR
results.addResult(r)
if not indeces:
break
offset += len(line)
else:
raise ValueError("Did not find line and column for one or "
"more indeces in content: %s" % indeces)
return results
def _addMixedEOLWarnings(self, results, content, expectedEOL):
"""Add lint results (at the WARNING level) for each line that has
an unexpected EOL.
"results" in a koILintResults to which to add mixed EOL results.
"content" is the content to analyze
"expectedEOL" is the currently configured EOL for the document,
this must be on of the EOL_LF, EOL_CR, EOL_CRLF constants.
"""
import eollib
mixedEOLs = eollib.getMixedEOLLineNumbers(content, expectedEOL)
if not mixedEOLs:
return
def collapseContinuousLineNumbers(lineNos):
"""Return a collapsed group of continuous line numbers."""
results = []
start = -10
last = -10
for lineNo in lineNos:
if lineNo == last+1:
pass
else:
if start >= 0:
results.append((start, last))
start = lineNo
last = lineNo
if start >= 0:
results.append((start, last))
return results
# Add a warning lint result for each such line.
expectedEOLStr = eollib.eol2eolPref[expectedEOL]
lines = content.splitlines(1)
# For performance reasons, we collapse groups of continuous line
# numbers into the one line result - bug 92733.
for lineStart, lineEnd in collapseContinuousLineNumbers(mixedEOLs):
r = KoLintResult()
r.description = "This line does not end with the expected "\
"EOL: '%s' (select View | View EOL Markers)"\
% expectedEOLStr
r.lineStart = lineStart+1
r.lineEnd = lineEnd+1
r.columnStart = 1
r.columnEnd = len(lines[lineEnd]) + 1
r.severity = r.SEV_WARNING
results.addResult(r)
# When a new panel is added for a language in
# pref-syntax-checking.xul, we'll need to pull the generic marker
# out of any documents that adopted it. We can either do it when
# we open the doc (although we have to wait until we know its language),
# but this way we only check when we're about to lint.
#
# Also, it's too bad that doc prefs aren't versioned.
_no_longer_generic_languages = ["Python3", "HTML5"]
def _passesGenericCheck(self, request):
prefs = request.koDoc.prefs
languageName = request.koDoc.language
genericCheck = "genericLinter:" + languageName
if not prefs.hasPref(genericCheck):
return True
if languageName in self._no_longer_generic_languages:
prefs.deletePref(genericCheck)
return True
return prefs.getBooleanPref(genericCheck)
def run(self):
"""Process lint requests serially until told to stop.
Before the requested linter is run on a document it is first checked
for encoding problems (i.e. encoding is not sufficient for current
content).
"""
TIME_LINTS = False
log.info("manager thread: start")
while 1:
try:
# wait for next request
request = self.requests.get()
# quit if request is the quit sentinel
if request is None:
log.info("manager thread: quit sentinel")
break
# process the request
if TIME_LINTS: startlint = time.clock()
log.info("manager thread: process request: %r", request)
try:
# Look for encoding errors first.
results = self._getEncodingLintResults(request.content,
request.encoding)
if TIME_LINTS: endencodinglint = time.clock()
# If there were no encoding errors, try the
# requested linter.
if not results.getNumResults() and request.linter:
#XXX This is where context-sensitive linting args should
# be passed in, but linters don't support this yet.
log.debug("manager thread: call linter.lint(request)")
try:
if self._passesGenericCheck(request):
results = request.linter.lint(request)
#results = UnwrapObject(request.linter).lint(request)
# This makes a red statusbar icon go green, but it
# might not be what we always want.
# Needs more investigation.
#if results is None:
# results = koLintResults()
except:
log.exception("Unexpected error while linting")
# This makes a red statusbar icon go green, but it
# might not be what we always want.
# Needs more investigation.
#if results is None:
# results = koLintResults()
log.debug("manager thread: linter.lint(request) returned")
if TIME_LINTS: endlintlint = time.clock()
prefset = getProxiedEffectivePrefs(request)
if prefset.getBooleanPref("lintEOLs"):
# Also look for mixed-line endings warnings.
self._addMixedEOLWarnings(results, request.content,
request.koDoc.new_line_endings)
if TIME_LINTS:
endeollint = time.clock()
print "lint of '%s': encoding=%.3fs lint=%.3fs eol=%.3fs"\
% (request.koDoc.baseName,
endencodinglint-startlint,
endlintlint-endencodinglint,
endeollint-endlintlint)
request.results = results
except (ServerException, COMException), ex:
request.errorString = str(ex)
except:
# Any exceptions that are not ServerException or
# COMException are unexpected internal errors.
try:
err = "unexpected internal error checking '%s' with '%s' linter"\
% (request.koDoc.baseName, request.linterType)
log.exception(err)
request.errorString = err
except:
err = "Unexpected error in koLintService.run"
log.error(err)
request.errorString = err
else:
log.info("manager thread: lint results for uid %s: %r",
request.uid, results)
# Notify of request completion
# Note: this is not guaranteed to properly guard the proxy
# call because a context switch could happen in between the
# condition check and body. That is ok though. At worst it
# will raise an exception that will be trapped just below.
# The point is to catch the common case. I am pretty sure
# that there is no way to do this properly without going
# to great lengths.
if not self._shuttingDown:
try:
# Proxy this so the worker thread can report results on this iface.
lintBufferProxy = getProxyForObject(1,
components.interfaces.koILintBuffer, request.lintBuffer,
PROXY_ALWAYS | PROXY_SYNC)
lintBufferProxy.reportResults(request)
except COMException, ex:
# Ignore this error, which will happen if results
# are reported after the buffer has gone away (i.e.
# the file owning that buffer was closed):
# Traceback (most recent call last):
# File "...\koLintService.py", line 370, in run
# request.lintBuffer.reportResults(request)
# File "<XPCOMObject method 'reportResults'>", line 3, in reportResults
# Exception: 0x80570021 ()
errno = ex.args[0]
if errno == 0x80570021:
pass
else:
raise
except:
# Something bad happened, but don't let this thread die.
log.exception("unexpected error in the linting thread")
log.info("manager thread: end")
if __name__ == "__main__":
    # Ad hoc smoke test for RequestQueue (Python 2: `print` statements).
    logging.basicConfig()
    import pprint
    class TestRequest:
        # Minimal stand-in for a lint request: only the uid matters here.
        def __init__(self, uid):
            self.uid = uid
        def __repr__(self):
            return "<TestRequest: uid=%s>" % self.uid
    q = RequestQueue()

    if 0:
        # Removing the only queued uid leaves the queue empty, so the
        # following get() would block; disabled by default.
        q.put(TestRequest("id_1"))
        q.remove_uid("id_1")
        print "item:"
        sys.stdout.flush()
        print q.get()

    if 1:
        # Exercise put/get, prepend of the None quit sentinel, and
        # remove_uid (including removal of a non-existent uid).
        q.put(TestRequest("id_1"))
        q.put(TestRequest("id_2"))
        pprint.pprint(q.queue)
        print "item: ", q.get()
        q.put(TestRequest("id_3"))
        q.put(TestRequest("id_4"))
        q.put(TestRequest("id_3"))
        q.prepend(None)
        pprint.pprint(q.queue)
        q.remove_uid("id_3")
        pprint.pprint(q.queue)
        q.remove_uid("id_3")
        sys.stdout.flush()
        pprint.pprint(q.queue)
        q.remove_uid("id_4")
        pprint.pprint(q.queue)
        print "item: ", q.get()
        print "item: ", q.get()
        pprint.pprint(q.queue)
|
[
"[email protected]"
] | |
615af52a6dfc9a4109231f08258cb4464601be9f
|
daa513724c547b06fdd6b52764d217db09fe6204
|
/iosapp/views.py
|
33dd5ffa2576380fb01eecca0ead1d4a0919c2e2
|
[] |
no_license
|
sunkeqiao/TMT-Server
|
4f95c75500f216f6694ce84eafab7b18b02eb772
|
e23b75a2f0cfc6960f788b3d0e6727614aea3c02
|
refs/heads/master
| 2020-09-01T10:26:50.834786 | 2019-06-19T16:14:31 | 2019-06-19T16:14:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,988 |
py
|
from django.http import Http404
from rest_framework import generics, mixins, views
from rest_framework.response import Response
from .models import *
from django.conf import settings
import json
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from .serializers import *
from .libimobiledevice import Libimobiledevice
import threading, time
# Cached handle to the Mac-side libimobiledevice bridge; None while offline.
lib_mobile = None


def keep_connect_mac():
    """Return the cached Libimobiledevice connection, creating it on demand.

    Returns the connection object when available, or False when the Mac
    server cannot be reached (the cache is reset to None in that case).
    """
    global lib_mobile
    if lib_mobile:
        return lib_mobile
    try:
        lib_mobile = Libimobiledevice()
    except Exception:
        lib_mobile = None
        return False
    return lib_mobile


# Try to establish the connection eagerly at import time.
keep_connect_mac()
class DeviceObject(views.APIView):
    """List the iOS devices visible to the current user."""
    def get(self, request):
        """Return devices that are public or owned by the session user."""
        devices = iOSDevice.objects.filter(owner__in=('public', request.session['user'].get('username')))
        serializer = iOSDeviceSerializer(devices, many=True)
        return Response({"status": True, "message": "成功", "data": serializer.data})
class DeviceScreenShotObject(views.APIView):
    """Take a screenshot of an iOS device and push it to the user."""
    def post(self, request, uuid):
        """Capture a screenshot plus device info, upload them, then notify
        the requesting user's websocket group."""
        to = request.session['user'].get('username')
        if not keep_connect_mac():
            return Response({"status": False, "message": "MAC服务器不在线", "data": ""})
        # Take the screenshot, gather device info, and upload both.
        response = lib_mobile.screenshot_device_info_then_upload(uuid, to)
        if not response.get('file_info'):
            return Response({"status": False, "message": "失败,请检查设备是否在线", "data": ""})
        # Push the uploaded-file notice to the user's websocket group.
        channel_layer = get_channel_layer()
        async_to_sync(channel_layer.group_send)(to, {
            "device_info": deal_with_device_info(response.get('device_info')),
            "message": response.get('file_info'),
            "room": f"{to}_ios",
            "to": to,
            "type": "message"
        })
        return Response({"status": True, "message": "成功", "data": ""})
class DeviceLogCatObject(views.APIView):
    """Start/stop syslog capture on an iOS device.

    The POST body carries an "action" key:
      * "start"      -- begin capturing; a watchdog thread force-stops the
                        capture after ~125 seconds.
      * "stop"       -- stop capturing, upload the log, notify the user.
      * "delay_stop" -- internal action used by the watchdog thread.
    """
    def post(self, request, uuid):
        to = request.session['user'].get('username')
        action = request.data.get('action')
        if not keep_connect_mac():
            return Response({"status": False, "message": "MAC服务器不在线", "data": ""})
        if action == 'start':
            try:
                if lib_mobile.syslog_start(uuid):
                    # Watchdog: force-stop the capture if the client never
                    # sends "stop".
                    threading.Thread(target=self.delay_stop, args=(request, uuid,)).start()
                    return Response({"status": True, "message": "成功", "data": ""})
                else:
                    return Response({"status": False, "message": "失败,请检查设备是否在线", "data": ""})
            except Exception as e:
                return Response({"status": False, "message": str(e), "data": ""})
        elif action == 'stop':
            # Stop the capture, gather device info, and upload the log.
            response = lib_mobile.syslog_device_info_then_upload(uuid, to)
            if not response.get('file_info'):
                return Response({"status": False, "message": "失败,请检查设备是否在线", "data": ""})
            # Push the uploaded-file notice to the user's websocket group.
            channel_layer = get_channel_layer()
            async_to_sync(channel_layer.group_send)(to, {
                "device_info": deal_with_device_info(response.get('device_info')),
                "message": response.get('file_info'),
                "room": f"{to}_ios",
                "to": to,
                "type": "message"
            })
            return Response({"status": True, "message": "成功", "data": ""})
        elif action == 'delay_stop':
            lib_mobile.syslog_stop(uuid)
            # Fixed: this branch previously returned None, which makes DRF
            # raise a 500 for external callers; now a Response is returned.
            return Response({"status": True, "message": "成功", "data": ""})
        else:
            return Response({"status": False, "message": "action指令不正确", "data": ""})

    def delay_stop(self, request, uuid):
        # Runs in a worker thread: wait ~2 minutes, then reuse post() with
        # the internal "delay_stop" action to stop the capture.
        time.sleep(125)
        request.data['action'] = 'delay_stop'
        self.post(request, uuid)
def deal_with_device_info(device_info):
    """Convert a raw device-info dict into the display fields sent over
    the websocket.

    Assumes `device_info` carries the keys 'ProductType',
    'ProductVersion', 'TotalDataCapacity', 'TotalDataAvailable' and
    'BatteryCurrentCapacity' -- TODO confirm against the Libimobiledevice
    output.

    NOTE(review): if 'ProductType' has no matching BaseDeviceInfo row,
    base_device_info.get(...) returns None and the attribute accesses
    below raise AttributeError -- verify upstream guarantees.
    """
    # Index BaseDeviceInfo rows by model id for O(1) lookup below.
    baseinfo = BaseDeviceInfo.objects.all()
    base_device_info = {}
    for info in baseinfo:
        base_device_info[info.model_id] = info
    return {
        "device_name": base_device_info.get(device_info.get('ProductType')).name,
        "system": f"iOS {device_info.get('ProductVersion')}",
        "screen": base_device_info.get(device_info.get('ProductType')).screen,
        "memory": f"总容量:{round(int(base_device_info.get(device_info.get('ProductType')).ram)/1024.0,2)}GB",
        "storage": f"总容量{round(int(device_info.get('TotalDataCapacity'))/1024.0/1024.0/1024.0,2)}GB,可使用{round(int(device_info.get('TotalDataAvailable'))/1024.0/1024.0/1024.0,2)}GB",
        "charge": f"{device_info.get('BatteryCurrentCapacity')}%",
    }
|
[
"[email protected]"
] | |
b8deb2b9dbb51c31d7e0bd2d422a20c182cb5525
|
a34745efeb435a93309fb789b6bf03c031a5c820
|
/compchallenge2b.py
|
f634c6885e1ef4b0b0cfcb1e62472d1af9e93340
|
[] |
no_license
|
brunoreyes/python_fundamentals
|
5e87358a10f674e5a049aa4c5ae6a0108cd72a8e
|
9fa2b341a5b5c954a6a1a77aa36ee6ef9fe70daa
|
refs/heads/master
| 2023-01-20T23:32:49.772268 | 2020-11-17T04:51:10 | 2020-11-17T04:51:10 | 302,809,977 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,835 |
py
|
import timeit
# timeit is good for comparing the speeds of multiple lines of code rather than finding the exact speed
# "setup" is preferably way to give timeit access to global variables
# garbage collection
# By default, timeit() temporarily turns off garbage collection during the timing.
# The advantage of this approach is that it makes independent timings more comparable.
setup = """\
gc.enable()
locations = {0: "You are sitting in front of a computer learning Python",
1: "You are standing at the end of a road before a small brick building",
2: "You are at the top of a hill",
3: "You are inside a building, a well house for a small stream",
4: "You are in a valley beside a stream",
5: "You are in the forest"}
exits = {0: {"Q": 0},
1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
2: {"N": 5, "Q": 0},
3: {"W": 1, "Q": 0},
4: {"N": 1, "W": 2, "Q": 0},
5: {"W": 2, "S": 1, "Q": 0}}
"""
locations = {0: "You are sitting in front of a computer learning Python",
1: "You are standing at the end of a road before a small brick building",
2: "You are at the top of a hill",
3: "You are inside a building, a well house for a small stream",
4: "You are in a valley beside a stream",
5: "You are in the forest"}
exits = {0: {"Q": 0},
1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
2: {"N": 5, "Q": 0},
3: {"W": 1, "Q": 0},
4: {"N": 1, "W": 2, "Q": 0},
5: {"W": 2, "S": 1, "Q": 0}}
# Variant 1: plain nested for loops.
def nested_loop():
    """For each location, collect the (id, description) pairs of all
    locations that have an exit leading to it."""
    result = []
    for destination in sorted(locations):
        leading_here = []
        for source in exits:
            if destination in exits[source].values():
                leading_here.append((source, locations[source]))
        result.append(leading_here)
    # Consume the result once, mirroring what the other timed variants do.
    for _ in result:
        pass
    return result
print()


# Variant 2: the inner loop becomes a list comprehension.
def loop_comp():
    """Same computation as nested_loop, with the inner loop expressed as
    a list comprehension."""
    collected = []
    for destination in sorted(locations):
        collected.append([(source, locations[source])
                          for source in exits
                          if destination in exits[source].values()])
    for _ in collected:
        pass
    return collected
# Variant 3: both loops folded into one nested comprehension.
def nested_comp():
    """Same computation as nested_loop, written as a single nested list
    comprehension."""
    exits_to_destination_3 = [[(xit, locations[xit]) for xit in exits if loc in exits[xit].values()]
                              for loc in sorted(locations)]
    # Consume the result once, mirroring the other timed variants.
    for x in exits_to_destination_3:
        pass  # recall pass doesn't do anything but is necessary to make a for loop valid
    return exits_to_destination_3
def nested_gen():
    """Same computation, but the outer comprehension is a generator
    expression.

    NOTE: the for loop below fully consumes the generator before it is
    returned, so callers iterating the return value will see nothing --
    only the timing behavior is of interest here.
    """
    exits_to_destination_3 = ([(xit, locations[xit]) for xit in exits if loc in exits[xit].values()]
                              for loc in sorted(locations))
    # Consuming the generator here is the work being timed.
    for x in exits_to_destination_3:
        pass
    return exits_to_destination_3
# result_1 = timeit.timeit(nested_loop, globals=globals(), number=1000)  # globals= makes module
# variables visible to a code-string snippet; default number is 1,000,000, reduced here for time's sake.
print(nested_loop())
print(loop_comp())
print(nested_comp())
print(nested_gen())
# When a callable is passed, the setup string still runs first but the
# callable itself closes over this module's globals.
result_1 = timeit.timeit(nested_loop, setup, number=1000) # preferred way to provide setup code
result_2 = timeit.timeit(loop_comp, setup, number=1000)
result_3 = timeit.timeit(nested_comp, setup, number=1000)
result_4 = timeit.timeit(nested_gen, setup, number=1000)
# Apply common sense rather than statistics: rankings vary run to run.
print("Nested loop:\t{}".format(result_1))
print("Loop composition:\t{}".format(result_2))
print("Nested composition:\t{}".format(result_3))
print("Nested generator:\t{}".format(result_4))
# The generator variant avoids building the outer list: it only iterates
# and yields each row, trading speed characteristics for memory efficiency.
|
[
"[email protected]"
] | |
d261d6ca537220689a62cbf436cb312e521bf1d7
|
7289ebef10cdb7b7cbd834d749f46ace65624941
|
/7- Introduction - Fonctions/fonction.py
|
3f23f0678e2dcbcd67977a84f4f4e3669d0d1298
|
[] |
no_license
|
633-1-ALGO/introduction-python-FrankTheodoloz
|
14e7157371b707dcf26688818cdbeb4090526a8b
|
20aa27aa2bc8c562260e23c8cc30f3d0dcdc5a5c
|
refs/heads/master
| 2020-07-29T20:01:15.802637 | 2019-10-06T17:51:06 | 2019-10-06T17:51:06 | 209,942,919 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 181 |
py
|
# Example function (prints, rather than returns, its results).
def multiplication(n, m, p=1):  # p defaults to 1
    """Print n*m, then n*m*p."""
    product = n * m
    print(product)
    print(product * p)
# Demo calls: first with the default p=1, then with p=2.
multiplication(3, 4)
print()
multiplication(3, 4, 2)
|
[
"[email protected]"
] | |
75ccd35c5fba5907e941b6f34fb90120507e400e
|
649d435286f4ead4ca29feea4f6766c8ae03f475
|
/src/FlaUILibrary/flaui/util/treeitemsparser.py
|
af1fa3c59c08ce665e0de02dee95aeda188ddce5
|
[
"MIT",
"Python-2.0"
] |
permissive
|
minatuyang/robotframework-flaui
|
1fe1c5555781d6d288bfe2eabbe12840050af77e
|
352851193265d62ab8282dd9ac2a416fe47e4717
|
refs/heads/main
| 2023-06-21T23:35:22.385202 | 2021-07-20T16:06:48 | 2021-07-20T20:30:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,663 |
py
|
from FlaUILibrary.flaui.exception import FlaUiError
class TreeItemsParser:
    """ Helper class which handles the management of the given location string.
        The location is used to locate the exact tree item in the tree control.
        Each '->'-separated token is either 'N:<name>' (match by item name)
        or 'I:<index>' (match by position).

        Examples:
            location = N:Nameofitem1->N:Nameofitem2->N:Nameofitem3
            location = I:indexofitem1->I:indexofitem2->I:indexofitem3
            location = N:Nameofitem1->I:indexofitem2->I:indexofitem3
    """

    def __init__(self, location):
        # One token per tree level, in root-to-leaf order.
        self.location = location.split("->")

    def get_treeitem(self, treeitems, index):
        """ Resolve self.location[index] against treeitems and return the
        matching tree item.

        Raises FlaUiError for an out-of-range 'I:' index, an unmatched
        'N:' name, or a token with neither prefix.
        """
        loc = self.location[index]
        if loc.startswith("I:"):
            loc = loc[2:]
            try:
                return treeitems[int(loc)]
            except IndexError:
                raise FlaUiError(FlaUiError.ArrayOutOfBoundException.format(int(loc))) from None
        elif loc.startswith("N:"):
            loc = loc[2:]
            for item in treeitems:
                if item.Name == loc:
                    return item
            raise FlaUiError(FlaUiError.ElementNameNotFound.format(loc))
        else:
            raise FlaUiError(FlaUiError.FalseSyntax.format(loc)) from None

    def is_last_element(self, index):
        """ Returns True if index corresponds to the last element of the
        given location series.
        """
        # Fixed typo ("Retruns") and replaced the if/return True/return
        # False pattern with a direct boolean expression.
        return index == len(self.location) - 1
|
[
"[email protected]"
] | |
aa80166792010844c80020d87de369afec96d42a
|
5eff9df4d276e83c68ce843d58868499858f701a
|
/Leetcode - FB/p0350.py
|
3780986eb5c2d856d4e29deeeacac48b9f10fdf7
|
[] |
no_license
|
arunraman/Code-Katas
|
b6723deb00caed58f0c9a1cafdbe807e39e96961
|
7fe3582fa6acf59a2620fe73e1e14bd8635bbee8
|
refs/heads/master
| 2023-03-04T17:27:44.037145 | 2023-03-02T21:09:53 | 2023-03-02T21:09:53 | 25,232,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 456 |
py
|
class p0349(object):
    """LeetCode 350 -- Intersection of Two Arrays II."""

    def intersectiontwoArrays(self, nums1, nums2):
        """Return the multiset intersection of nums1 and nums2.

        Each element appears as many times as it occurs in both lists;
        output order follows nums2.  O(len(nums1) + len(nums2)) time.
        """
        from collections import Counter
        # Counter replaces the hand-rolled count dict and avoids the
        # original's double lookup (`i in dict1 and dict1[i] > 0`).
        counts = Counter(nums1)
        ret = []
        for value in nums2:
            if counts[value] > 0:
                ret.append(value)
                counts[value] -= 1
        return ret
# Python 2 driver (`print` is a statement here, so this file targets 2.x).
S = p0349()
print S.intersectiontwoArrays([1, 2, 2, 1], [2, 2])
|
[
"[email protected]"
] | |
54b3d5db7d4193b1ac06c3eb01d62a626a47d055
|
118217dbc4d2e78f9fdf54fb652309708effa673
|
/app/app/settings.py
|
a444feed928e83d5a84443c4e23b4910660ea23e
|
[
"MIT"
] |
permissive
|
Mimicx/recipe-app-api
|
68f1d40aee64212823ff5e91a3fa3cd6e51b459f
|
4aa0ad098d414861b628e50948b741dc56a5847a
|
refs/heads/master
| 2020-12-04T15:01:06.894710 | 2020-04-27T08:10:32 | 2020-04-27T08:10:32 | 231,809,804 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,241 |
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo -- rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '(d3+ugse4m4p-(h2jy5eh#&w*++fj*1^^ifrf6j8yr4gm5i#j#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Connection parameters come from the environment (e.g. docker-compose).

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS')
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

# Use the project's custom user model instead of django.contrib.auth.User.
AUTH_USER_MODEL = 'core.User'
|
[
"[email protected]"
] | |
bd9e112b5ba6b811a80a6b9ccc5271dbdd0cc491
|
907ea8b2e3af5035ee640c95c646d6a04a192d41
|
/TTools/TTools.py
|
43b91782b12f7b6b5ed03dfe17eae3ca57566dd8
|
[] |
no_license
|
cuchy/TTools
|
8869ee47d3c489d95fa2c8b454757aee521cd705
|
37527c5a60360f0ddef7398a34296ab332810e0c
|
refs/heads/master
| 2021-01-11T10:39:05.100786 | 2016-11-05T21:25:29 | 2016-11-05T21:25:29 | 72,948,537 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,785 |
py
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# "========================================================================="
# " UDELAR - Facultad de Ingeniería "
# " Proyecto de Grado - Año 2016 "
# " Autores: Viviana Solla & Gabriel Jambrina "
# "========================================================================="
import Tkinter,tkFileDialog,ttk
import os
import sys
import ttkcalendar
class simpleapp_tk(Tkinter.Tk):
    def __init__(self,parent):
        """Create the main window and build its widgets."""
        Tkinter.Tk.__init__(self,parent)
        self.parent = parent
        self.initialize()
def initialize(self):
self.grid()
self.eval('tk::PlaceWindow %s center' % self.winfo_pathname(self.winfo_id()))
button1 = Tkinter.Button(self,text="Convertir archvio GML a XML",font=('times', 14, 'bold'), command=self.OnButtonClickB1)
button1.grid(column=0,row=1,columnspan=1,sticky='EW')
button2 = Tkinter.Button(self,text="Preparar emulación de Red", font=('times', 14, 'bold') , command=self.OnButtonClickB2)
button2.grid(column=0,row=2,columnspan=1,sticky='EW')
button3 = Tkinter.Button(self,text="Cargar Base de Datos Mysql",font=('times', 14, 'bold'), command=self.OnButtonClickB3)
button3.grid(column=0,row=3,columnspan=1,sticky='EW')
button4 = Tkinter.Button(self,text="Borrar archivos generados por bgpdump",font=('times', 14, 'bold'), command=self.OnButtonClickB4)
button4.grid(column=0,row=4,columnspan=1,sticky='EW')
button5 = Tkinter.Button(self,text="Ejecutar MiniNExt",font=('times', 14, 'bold'), command=self.OnButtonClickB5)
button5.grid(column=0,row=5,columnspan=1,sticky='EW')
button6 = Tkinter.Button(self,text="Descargar trazas BGP",font=('times', 14, 'bold'), command=self.OnButtonClickB6)
button6.grid(column=0,row=6,columnspan=1,sticky='EW')
self.image = Tkinter.PhotoImage(file="img/exit.png")
button6 = Tkinter.Button(self, text="SALIR",font=('times', 14, 'bold'), image=self.image, anchor=Tkinter.SE, compound="right", command=quit)
button6.grid(column=0,row=7, sticky='EW')
self.labelVariable = Tkinter.StringVar()
self.label = Tkinter.Label(self,textvariable=self.labelVariable,anchor="w",fg="white",bg="green")
self.label.grid(column=0,row=10,columnspan=2,sticky='EW')
self.labelVariable.set(" TTools v1.0")
self.grid_columnconfigure(0,weight=1)
self.resizable(True,False)
##################BOTON B1###########################
def OnButtonClickB1(self):
self.labelVariable.set(" Convertidor de GML a XML" )
file = tkFileDialog.askopenfile(parent=self,mode='rb',title='Selecciona el archivo a convertir')
if file != None:
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
self.root.title("Numero de Sistema Autónomo")
print "¡File OK!"
self.abs_path = os.path.abspath(file.name)
#dirActual = os.getcwd()
Tkinter.Label(self.root, text="Ingrese el número de AS", font=('arial', 12, 'bold'), width=30).pack(pady=10)
self.e = Tkinter.Entry(self.root, width=10)
self.e.pack(pady=10)
b = Tkinter.Button(self.root, text="OK",font=('times', 12, 'bold'), command=self.onClickB1)
b.pack(pady=20)
def onClickB1(self):
os.system("python GMLtoXMLconverter.py "+self.abs_path+" "+self.e.get())
print "ASN= ", self.e.get()
self.root.destroy()
self.labelVariable.set("Archivo convertido con exito")
##################BOTON B2###########################
def OnButtonClickB2(self):
self.routersName = []
self.lstASN = []
self.labelVariable.set(" Generador de archivos de configuracion" )
file = tkFileDialog.askopenfile(parent=self,mode='rb',title='Seleccione el archivo XML que representa la topología')
if file != None:
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
#self.root.resizable(width=False, height=False)
self.root.grid()
self.root.title("Seleccion routers de borde")
print "¡File OK!"
self.abs_path = os.path.abspath(file.name) #os.path.basename(file.name)
#Leo los routers de AS
linea = file.readline()
while not ("<topology>" in linea):
linea = file.readline()
while not ("</nodes>" in linea):
if ("<node id=" in linea):
auxLinea = linea.split("\"")
r = auxLinea[1]
r = '_'.join(r.split())
if not r in self.routersName:
self.routersName.append(r)
linea = file.readline()
#Muestro la lista con los routers
self.label1 = Tkinter.Label(self.root,text= "Selecciones los routers que mantienen sesiones eBGP",height=2, width=55,font=('arial', 12, 'bold'));self.label1.pack()
self.s1 = Tkinter.Scrollbar(self.root)
self.s2 = Tkinter.Scrollbar(self.root)
self.L1 = Tkinter.Listbox(self.root, height=20, font=('arial', 11))
self.L2 = Tkinter.Listbox(self.root, height=20)
self.s1.pack(side=Tkinter.LEFT, fill=Tkinter.Y)
self.s2.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
self.L1.pack(side=Tkinter.LEFT, fill=Tkinter.Y)
self.L2.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
self.s1.config(command=self.L1.yview)
self.s2.config(command=self.L2.yview)
self.L1.config(yscrollcommand=self.s1.set)
self.L2.config(yscrollcommand=self.s2.set)
for i in self.routersName:
self.L1.insert(Tkinter.END, i)
self.L1.select_set(0)
self.b3 = Tkinter.Button(self.root, text="Seleccionar =>", command=self.onClickB3, height=2, width=10, bg="green", font=('arial', 12));self.b3.pack()
self.b4 = Tkinter.Button(self.root, text="<= Quitar", command=self.onClickB4, height=2, width=10, bg="red", font=('arial', 12));self.b4.pack()
self.b2 = Tkinter.Button(self.root, text="Siguiente", command=self.onClickB2, height=2, width=10, font=('times', 12));self.b2.pack(side="bottom")
def onClickB3(self):
index = int(self.L1.curselection()[0])
self.L2.insert(Tkinter.END, self.routersName[index])
self.L2.select_set(self.L2.size()-1)
def onClickB4(self):
index = self.L2.curselection()[0]
self.L2.delete(index)
self.L2.select_set(self.L2.size()-1)
def onClickB2(self):
self.L1.pack_forget();self.s1.pack_forget();self.L2.pack_forget();self.s2.pack_forget();self.b2.pack_forget();self.b3.pack_forget();
self.b4.pack_forget();self.label1.pack_forget();
self.root.title("ASN de Vecinos eBGP")
self.label2 = Tkinter.Label(self.root,height=2,width=30,font=('arial', 15, 'bold'),text=self.L2.get(0, Tkinter.END)[0],anchor=Tkinter.CENTER);self.label2.pack()
self.e = Tkinter.Entry(self.root, font=("Calibri",12),justify="center",width=8,bg="#1E6FBA")
self.asn=65000
self.e.insert(Tkinter.END, self.asn); self.e.pack()
self.index=1;
self.b5 = Tkinter.Button(self.root, height=1, width=8, text="Siguiente", command=self.onClickB5, font=('arial', 12));self.b5.pack(pady=10)
def onClickB5(self):
self.asn+=10
self.lstASN.insert(self.index-1,self.e.get())
self.e.delete(0, Tkinter.END); self.e.insert(Tkinter.END, self.asn)
self.label2.config(text=self.L2.get(0, Tkinter.END)[self.index] )
self.index+=1
if (self.L2.size() == self.index):
self.b5.pack_forget()
self.b6 = Tkinter.Button(self.root, height=1, width=8, text="Terminar", command=self.onClickB6, font=('arial', 12)); self.b6.pack(pady=10)
def onClickB6(self):
self.lstASN.insert(self.index-1,self.e.get())
comando="python emulGen.py "+self.abs_path+" "
counter=0
for i in self.L2.get(0, Tkinter.END):
counter+=1
if (counter==self.index):
comando+=i+" "
else:
comando+=i+","
counter=0
for i in self.lstASN :
counter+=1
if (counter==self.index):
comando+=i+" "
else:
comando+=i+","
counter=0;
os.system(comando)
#print comando
self.root.destroy()
self.labelVariable.set(" Topology Tools v1.0")
#####################BOTON B3#########################
def OnButtonClickB3(self):
self.labelVariable.set("Cargar bases de datos Mysql" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion de la carpeta mininext')
if self.directory != "":
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
self.root.title("Nobre de la base de datos")
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Ingresee el nombre de la base").pack()
self.baseName = Tkinter.Entry(self.root, width=20)
self.baseName.pack(padx=50)
Tkinter.Label(self.root, height=2,width=30,font=('arial', 11, 'bold'), text="Selecione el algoritmo utilizado").pack()
self.L3 = Tkinter.Listbox(self.root, height=10)
self.L3.pack()
self.algorithm=["FM","RR_Sep", "RR_SepD", "RR_SepS", "RR_Bates", "RR_BatesY", "RR_BatesZ", "RR_Zhang"]
self.L3.insert(Tkinter.END, self.algorithm[0]);self.L3.insert(Tkinter.END, self.algorithm[1]);self.L3.insert(Tkinter.END, self.algorithm[2]);
self.L3.insert(Tkinter.END, self.algorithm[3]); self.L3.insert(Tkinter.END, self.algorithm[4]);self.L3.insert(Tkinter.END, self.algorithm[5]);
self.L3.insert(Tkinter.END, self.algorithm[6]); self.L3.insert(Tkinter.END, self.algorithm[7])
self.L3.select_set(0)
Tkinter.Button(self.root, text="Cargar BD", command=self.onClickB7, font=('arial', 12)).pack()
def onClickB7(self):
if self.baseName.get() != "" :
print "Cargar base"
print "python loadDB.py "+self.directory+" "+self.baseName.get() +" "+ self.algorithm[self.L3.curselection()[0]]
os.system("python loadDB.py "+self.directory+" "+self.baseName.get() +" "+ self.algorithm[self.L3.curselection()[0]])
self.root.destroy()
self.labelVariable.set(" Topology Tools v1.0")
else:
print "WARNING: Falto completar un campo"
#####################BOTON B4#########################
def OnButtonClickB4(self):
self.labelVariable.set("Borrar archivos temporales generados por tcpdump" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion de la carpeta mininext')
if self.directory != "":
print "Borrando archivos temporales de la carpeta "+ self.directory
os.system("python deleteTemporaryFiles.py "+self.directory)
self.labelVariable.set(" Topology Tools v1.0")
#####################BOTON B5#########################
def OnButtonClickB5(self):
self.labelVariable.set("Empezar emulación con MiniNExt" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion de la carpeta generada')
if self.directory != "":
print "Running sudo python "+ self.directory + "/start.py"
os.system("sudo python "+ self.directory + "/start.py")
#####################BOTON B6#########################
def OnButtonClickB6(self):
self.labelVariable.set("Descargar Trazas BGP desde www.ripe.net" )
self.directory = tkFileDialog.askdirectory(parent=self,title='Seleccione la ubicacion donde descargar la traza')
if self.directory != "":
self.root = Tkinter.Tk()
self.root.eval('tk::PlaceWindow %s center' % self.root.winfo_pathname(self.root.winfo_id()))
self.root.title("TRAZA BGP")
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Seleccione el origen de la traza").pack()
self.opM = ttk.Combobox(self.root, width=10, values=[ "rrc00" ,"rrc01", "rrc02", "rrc03","rrc04","rrc05","rrc06","rrc07","rrc08","rrc09","rrc10","rrc11","rrc12","rrc13","rrc14","rrc15","rrc16"])
self.opM.current(0)
self.opM.pack()
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Seleccione el día").pack()
self.calendar = ttkcalendar.Calendar(self.root)
self.calendar.pack()
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11, 'bold'), text="Seleccione la hora").pack()
lstHours=[]
cont=0;
for i in range(0,24):
for j in range(0,12):
if (j <= 1):
valor=str(0)+str(j*5)
else:
valor=str(j*5)
valor=str(i)+":"+valor
if (i <= 9):
valor=str(0)+valor
lstHours.insert(cont,valor)
cont+=1
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11), text="Hora Inicio").pack()
self.opHourMin = ttk.Combobox(self.root, width=5,values=lstHours)
self.opHourMin.current(0)
self.opHourMin.pack()
Tkinter.Label(self.root, height=2,width=40,font=('arial', 11), text="Hora Fin").pack()
self.opHourMax = ttk.Combobox(self.root, width=5,values=lstHours)
self.opHourMax.current(0)
self.opHourMax.pack()
buttonCalendar = Tkinter.Button(self.root, text="Aceptar", command=self.onClickCalendar).pack(side=Tkinter.RIGHT)
buttonCancel = Tkinter.Button(self.root, text="Cancelar", command=self.root.destroy).pack(side=Tkinter.LEFT)
def onClickCalendar(self):
date= str(self.calendar.selection).split(" ")[0]
#Ej: python downloadFromRipe.py rrc00 2014-02-15 16:45 17:10 /home/
print "python downloadFromRipe.py "+self.opM.get()+" "+date+" "+self.opHourMin.get()+" "+self.opHourMax.get()+" "+self.directory
os.system("python downloadFromRipe.py "+self.opM.get()+" "+date+" "+self.opHourMin.get()+" "+self.opHourMax.get()+" "+self.directory)
self.root.eval('::ttk::CancelRepeat')
self.root.destroy()
self.labelVariable.set(" Topology Tools v1.0")
if __name__ == "__main__":
    # Build and run the launcher window.
    app = simpleapp_tk(None)
    app.title('Topology Tools')
    app.mainloop()
|
[
"[email protected]"
] | |
7b0c4083d029a92441704bd296c1aef0ebbf84f2
|
2d4ab8e3ea9fd613ec0ae0c1956b68874c9b5f06
|
/tests/pipelines/cnv_calling/test_xhmm_pca.py
|
e9dc13feb4ca41c6220481e9e7105e1e72bce443
|
[] |
no_license
|
biocodices/paip
|
4abd39cbbd372a68592da87177c70c403d5a661d
|
040a62c11e5bae306e2de4cc3e0a78772ee580b3
|
refs/heads/master
| 2021-01-17T20:48:28.642255 | 2019-07-26T14:30:58 | 2019-07-26T14:30:58 | 62,604,413 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,013 |
py
|
from unittest.mock import MagicMock
import pytest
from paip.pipelines.cnv_calling.xhmm_pca import XhmmPCA, EmptyInputMatrix
@pytest.fixture
def task(cohort_task_factory):
    """An XhmmPCA task instance built by the shared cohort-task factory."""
    return cohort_task_factory(XhmmPCA)
def test_check_matrix(task):
    """check_matrix() must reject an empty input matrix."""
    # NOTE: Run this test before the next one, because the tested method
    # check_matrix() will be mocked in test_run().
    matrix_path = pytest.helpers.file('empty_matrix.txt')
    with pytest.raises(EmptyInputMatrix):
        task.check_matrix(matrix_path)
def test_run(task, mock_rename):
    """run() must check the matrix, call `xhmm --PCA` and rename outputs."""
    task.check_matrix = MagicMock()

    task.run()

    task.check_matrix.assert_called_once()

    (command, ), kwargs = task.run_command.call_args
    for fragment in ('xhmm --PCA',
                     'DATA.filtered_centered.RD.txt',
                     'DATA-temp.RD_PCA'):
        assert fragment in command

    assert mock_rename.call_count == 3
    rename_args = mock_rename.call_args[0]
    assert 'DATA-temp.RD_PCA' in rename_args[0]
    assert 'DATA.RD_PCA' in rename_args[1]
|
[
"[email protected]"
] | |
7ef805e48e0b8adaf86af6ff894ad57d90a8dabe
|
334e7e8b9162cd74e1c9dd115a6e293f01051454
|
/src/profiles/admin.py
|
2ded111a3ce3a2b08cab6f2e78f67dd609b0352c
|
[
"MIT"
] |
permissive
|
contactr2m/remote_repo
|
dec0dff9c299ab665cd36642a757ae9fa35950c3
|
5665c55b794929fd40645264c5c149e64d172097
|
refs/heads/master
| 2021-01-10T13:13:47.359357 | 2016-04-26T14:23:49 | 2016-04-26T14:23:49 | 53,814,820 | 0 | 0 | null | 2016-03-20T19:37:37 | 2016-03-14T00:13:06 |
Python
|
UTF-8
|
Python
| false | false | 1,084 |
py
|
from __future__ import unicode_literals
from django.contrib import admin
#from authtools.admin import NamedUserAdmin
from .models import Profile
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from accounts.admin import EmailUserAdmin
User = get_user_model()
class UserProfileInline(admin.StackedInline):
    """Edit the related Profile on the same admin page as the user."""
    model = Profile
class NewUserAdmin(EmailUserAdmin):
    """User admin extended with an inline Profile editor and a profile link."""
    inlines = [UserProfileInline]
    list_display = ('is_active', 'email', 'first_name', 'last_name', 'display_name', 'permalink',
                    'is_superuser', 'is_staff',)

    # 'View on site' didn't work since the original User model needs to
    # have get_absolute_url defined. So showing on the list display
    # was a workaround.
    def permalink(self, obj):
        """Render an HTML link to the user's public profile page."""
        url = reverse("profiles:show",
                      kwargs={"slug": obj.profile.slug})
        # Unicode hex b6 is the Pilcrow sign
        return '<a href="{}">{}</a>'.format(url, '\xb6')
    permalink.allow_tags = True
# Swap Django's default User admin for the profile-aware one above.
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
|
[
"[email protected]"
] | |
015c735e062ac63dde157d1b06e700b8009e14ce
|
8a1241ac8ad91672aec81c878f2165a7678a1ad6
|
/Web/Applications/Visualizer/server/pv_web_visualizer.py
|
84ef98ae22d8c269ffca4d47cdd4e0a31d3dd2f0
|
[
"MIT",
"LicenseRef-scancode-paraview-1.2",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"BSD-3-Clause"
] |
permissive
|
lmynsberge/ParaView
|
d9fbd0f4da197bc96172be8697ced76fe73852bf
|
2a68ee496949becf499742dfdbecb41b1eda81a7
|
refs/heads/master
| 2021-01-22T16:18:25.241194 | 2013-11-11T15:01:02 | 2013-11-11T15:01:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,203 |
py
|
r"""
This module is a ParaViewWeb server application.
The following command line illustrate how to use it::
$ pvpython .../pv_web_visualizer.py --data-dir /.../path-to-your-data-directory
--data-dir is used to list that directory on the server and let the client choose a file to load.
--load-file try to load the file relative to data-dir if any.
--ds-host None
Host name where pvserver has been started
--ds-port 11111
Port number to use to connect to pvserver
--rs-host None
Host name where renderserver has been started
--rs-port 22222
Port number to use to connect to the renderserver
Any ParaViewWeb executable script comes with a set of standard arguments that
can be overridden if need be::
--port 8080
Port number on which the HTTP server will listen to.
--content /path-to-web-content/
Directory that you want to server as static web content.
By default, this variable is empty which mean that we rely on another server
to deliver the static content and the current process only focus on the
WebSocket connectivity of clients.
--authKey vtkweb-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. The client will assume if none is given that the
server expect "vtkweb-secret" as secret key.
"""
# import to process args
import os
# import paraview modules.
from paraview.web import wamp as pv_wamp
from paraview.web import protocols as pv_protocols
from vtk.web import server
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
import _argparse as argparse
# =============================================================================
# Create custom Pipeline Manager class to handle clients requests
# =============================================================================
class _PipelineManager(pv_wamp.PVServerProtocol):
    """WAMP server protocol wiring together the ParaViewWeb sub-protocols.

    The class attributes below are populated from the command line in the
    __main__ block before the web server starts.
    """
    dataDir = None
    authKey = "vtkweb-secret"
    dsHost = None
    dsPort = 11111
    rsHost = None
    # NOTE(review): the module docstring advertises 22222 as the render-server
    # port default, but the code default here is 11111 -- confirm intent.
    rsPort = 11111
    fileToLoad = None

    def initialize(self):
        # Bring used components
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebStartupRemoteConnection(_PipelineManager.dsHost, _PipelineManager.dsPort, _PipelineManager.rsHost, _PipelineManager.rsPort))
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebStateLoader(_PipelineManager.fileToLoad))
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebPipelineManager(_PipelineManager.dataDir, _PipelineManager.fileToLoad))
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebRemoteConnection())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileManager(_PipelineManager.dataDir))

        # Update authentication key to use
        self.updateSecret(_PipelineManager.authKey)
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
    # Create argument parser
    parser = argparse.ArgumentParser(description="ParaView/Web Pipeline Manager web-application")

    # Add default arguments
    server.add_arguments(parser)

    # Add local arguments
    parser.add_argument("--data-dir", default=os.getcwd(), help="path to data directory to list", dest="path")
    parser.add_argument("--load-file", default=None, help="File to load if any based on data-dir base path", dest="file")
    parser.add_argument("--ds-host", default=None, help="Hostname to connect to for DataServer", dest="dsHost")
    parser.add_argument("--ds-port", default=11111, type=int, help="Port number to connect to for DataServer", dest="dsPort")
    parser.add_argument("--rs-host", default=None, help="Hostname to connect to for RenderServer", dest="rsHost")
    parser.add_argument("--rs-port", default=11111, type=int, help="Port number to connect to for RenderServer", dest="rsPort")

    # Extract arguments
    args = parser.parse_args()

    # Configure our current application: class attributes are read by
    # _PipelineManager.initialize() once the server starts.
    _PipelineManager.authKey = args.authKey
    _PipelineManager.dataDir = args.path
    _PipelineManager.dsHost = args.dsHost
    _PipelineManager.dsPort = args.dsPort
    _PipelineManager.rsHost = args.rsHost
    _PipelineManager.rsPort = args.rsPort

    if args.file:
        # The file path is interpreted relative to --data-dir.
        _PipelineManager.fileToLoad = args.path + '/' + args.file

    # Start server
    server.start_webserver(options=args, protocol=_PipelineManager)
|
[
"[email protected]"
] | |
e40f1eef29e7514039f8878f249ee57807933519
|
a1d986433707e5a645347921f0b941176319ec15
|
/venv/bin/easy_install
|
6f048b0a8a3450324440a6e904b07daf6a076319
|
[] |
no_license
|
ezanat1/WineML
|
edba7ffe3d0353144f5f769e20e75af79b0ea234
|
7f77c70d0da9660ce60b9f5f94796b452024c870
|
refs/heads/master
| 2020-05-16T23:13:37.708689 | 2018-12-07T01:26:10 | 2018-12-07T01:26:10 | 183,357,538 | 1 | 0 | null | 2019-04-25T04:40:53 | 2019-04-25T04:40:52 | null |
UTF-8
|
Python
| false | false | 281 |
#!/Users/ezanatesfaye/Desktop/WineRecommendation/venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated by setuptools: strip any "-script.py(w)" or
# ".exe" suffix from argv[0], then delegate to easy_install's main().
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | ||
d4ad983bb64d5751121ab96f0b31f78bdb862868
|
b544a6d23a19f19a5a4ba41f3e002dc6666ec220
|
/tyche/route/__init__.py
|
1b2bd234a0a622503b44f05816266b86b76b1e58
|
[] |
no_license
|
EternalZing/Tyche-Server
|
2429b7a0cf952ae972f8f2de0ac085220984968a
|
b29b4fd3b5aea23127f3198c464e3c6421ae9c96
|
refs/heads/master
| 2021-05-05T12:56:09.266103 | 2018-02-08T07:55:36 | 2018-02-08T07:55:36 | 118,345,047 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 26 |
py
|
from .rules_r import rule
|
[
"[email protected]"
] | |
c77fc7c74aee8e33dbb0111a5d71f763ecb0cb21
|
b0015342f71f027a63630b0e3d76c8b1a24088a2
|
/srnet/utils/__init__.py
|
f498b8ecabd11961a34814af7e37aa1f1b32a691
|
[] |
no_license
|
sean-rice/srnet
|
476804f1f20d2e2b9d124e849cc6804e2b5ea878
|
6cf1b0232c081e1e8e02073402cd4f6910100255
|
refs/heads/master
| 2023-09-05T01:36:21.727479 | 2021-05-24T22:33:22 | 2021-05-24T22:33:22 | 283,088,184 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 59 |
py
|
from . import _utils, image, image_list, patchable_wrapper
|
[
"[email protected]"
] | |
bb883369d18920a27cc801735cf6b29fead3ce45
|
58c4acb298cfd1c47157d5ec657829f7465b64e5
|
/cable/tests.py
|
0f012306a8f13a4c0098fb07e71ed66f04ba8bae
|
[] |
no_license
|
AlexsandroMO/Cables_Projcts
|
c5b72e75a58e155dd7b17971793fcddef5fe4526
|
6ede9efc591d638769ded1ac75fa41d43e804496
|
refs/heads/master
| 2020-12-05T01:33:07.409735 | 2020-01-11T23:41:58 | 2020-01-11T23:41:58 | 231,967,765 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,054 |
py
|
from django.test import TestCase
# Create your tests here.
#https://www.geeksforgeeks.org/textfield-django-models/
# Unresolved git merge-conflict markers removed (they were a SyntaxError);
# both branches' reference links are kept:
#https://realpython.com/django-redirects/#passing-parameters-with-redirects
#https://www.w3schools.com/css/css_table.asp
#pip3 install django-crispy-forms
#Zerar senha do admin
#python manage.py shell
#from django.contrib.auth.models import User
#User.objects.filter(is_superuser=True)
#usr = User.objects.get(username='nome-do-administrador')
#usr.set_password('nova-senha')
#usr.save()
'''Upload documents on Github
git clone <nome>
<entra na pasta criada>
git add .
git commit -m "texto"
git push
git pull
'''
'''git checkout -b nome cria uma branch
git checkout nome entra na branch
git branch - verifica as branchs
git checkout master - entra na master
git merge origin "nome"
git push origin master - subir commit
git branch -D "nome"- deletar branch
'''
#Heroku
#https://github.com/Gpzim98/django-heroku
#git add .gitignore
#colocar no gitignore
'''.idea
.pyc
.DS_Store
*.sqlite3'''
'''
Publishing the app
git add .
git commit -m "Configuring the app"
git push heroku master --force
'''
'''
def newTask(request):
if request.method == 'POST':
form = ResidencDimensForm(request.POST)
if form.is_valid():
task = form.save(commit=False)
task.total_va = (task.potencia_va * task.quant)
task.corrente_a = (task.total_va / task.tensa_va)
#queda = task.sessao_condutor
#test = main.read_sql_queda(queda)
#task.queda_tensao_ckt = ((((test['queda_tesao'] * task.corrente_a) * task.comprimento) / 1000) / task.total_va)
task.save()
return redirect('/')
else:
form = ResidencDimensForm()
return render(request, 'cable/add-task.html', {'form': form})
'''
#urls ID
#https://stackoverflow.com/questions/15608295/passing-an-id-in-django-url
|
[
"[email protected]"
] | |
e5ace796de9aa6a3eed2fe626a7ccff896fcd712
|
c73f43dde251d83cf889f0d056559e601fe134e2
|
/test/baike_spider/url_manager.py
|
511a948ae3cbfa4bc8c2ef308da751d598b27ead
|
[] |
no_license
|
zhangyang183487/awesome-python3-webapp
|
bedd2bd2689abb130720afcfbd80f1a7adcab4c7
|
96e8e02a6fdb75a57a92259ad2c78c3593ef535a
|
refs/heads/master
| 2020-04-28T21:27:49.228349 | 2019-06-19T07:56:02 | 2019-06-19T07:56:02 | 175,582,604 | 0 | 0 | null | 2019-06-19T07:56:03 | 2019-03-14T08:46:26 |
Python
|
UTF-8
|
Python
| false | false | 867 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class UrlManager(object):
    """Tracks crawl URLs, split into a pending set and an already-seen set."""

    def __init__(self):
        # Sets give O(1) membership tests and automatic de-duplication.
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        """Queue one URL unless it is None or has been seen before."""
        if url is None:
            return
        if not (url in self.new_urls or url in self.old_urls):
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Queue a batch of URLs, skipping any that are already known."""
        if urls is None or len(urls) == 0:
            return
        for candidate in urls:
            if not (candidate in self.new_urls or candidate in self.old_urls):
                self.new_urls.add(candidate)

    def has_new_url(self):
        """Return True while unvisited URLs remain queued."""
        return len(self.new_urls) > 0

    def get_new_url(self):
        """Pop an arbitrary pending URL, mark it visited and return it."""
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url
|
[
"[email protected]"
] | |
ec639ec794b8162d801d6bc692154bb82195da7c
|
5294915919042b56505a01ed64b579a2c3788647
|
/DS_SJE_utils.py
|
f4526d5a19652cf71e6686319c002428521a39dd
|
[] |
no_license
|
JinHyeopLee/DS_SJE_tensorflow
|
bd67a6765015b3602f13ce0dd5220835f4523239
|
06f17e822bee1bddfa58fa91fc91305462d1078d
|
refs/heads/master
| 2020-04-07T07:04:52.643075 | 2018-12-05T15:05:03 | 2018-12-05T15:05:03 | 158,162,731 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 598 |
py
|
import numpy as np
def append_nparr(arr1, arr2, axis=0):
    """Append `arr2` to `arr1` along `axis`; if `arr1` is None, return `arr2`.

    Handy for accumulating batches into one array inside a loop.
    """
    if arr1 is None:
        return arr2
    return np.append(arr1, arr2, axis=axis)
def random_select(class_num, num_entity_each_class):
    """Pick a uniform-random global index of an entity in class `class_num`.

    Entities are laid out class by class, so the global index is the total
    count of all preceding classes plus a random offset within the class.
    """
    random_offset = np.random.randint(0, num_entity_each_class[class_num])
    class_base_num = sum(num_entity_each_class[:class_num])
    return class_base_num + random_offset
# def int_to_float(tuple):
# return tuple[0], np.float32(tuple[1]), tuple[2]
|
[
"[email protected]"
] | |
1cc46aac55864041c24fc7764d0bacccf6e6b983
|
c5ccca7b5af562e10c91a4dfe451da0990e73edf
|
/Test2-1/users/migrations/0006_auto_20200402_1057.py
|
e21fdce120ed0f09058b2ad3cd83f4ed31c5a5a0
|
[] |
no_license
|
170400529/zwj
|
ac29e788d47f0c1381527bbc101711c656d29e29
|
66257a8028fefb9a016e0dc04b99c9d2daed07ee
|
refs/heads/master
| 2021-05-20T20:25:30.200353 | 2020-04-02T09:05:49 | 2020-04-02T09:05:49 | 252,406,215 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 482 |
py
|
# Generated by Django 3.0.4 on 2020-04-02 02:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: updates the choices on Question.level.

    dependencies = [
        ('users', '0005_auto_20200402_1056'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='level',
            field=models.CharField(choices=[('1', 'easy'), ('3', 'difficult'), ('2', 'general')], max_length=10, null=True, verbose_name='等级'),
        ),
    ]
|
[
"[email protected]"
] | |
68ecfff4ba3f11e8600cdf732af4fbb73db1d9a6
|
e07fc6fb419c1ce0616478ae1a59f9d70e353984
|
/src/hqmanager/parser.py
|
0db9f6249ad53d892fa9f580b66afaa49bef0d56
|
[
"MIT"
] |
permissive
|
herqles-io/hq-manager
|
9e647bf874411279cb47f162c0c2049f984ce6c5
|
ec4a37760a7d0a52128b66eb264eb25998c6a9d1
|
refs/heads/master
| 2021-01-23T08:38:32.203593 | 2015-08-11T18:50:42 | 2015-08-11T18:50:42 | 40,308,480 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 343 |
py
|
import argparse
import hqmanager
description = """Some description here"""
epilog = """Some epilog here"""

# Module-level parser so callers can import and extend it before parsing.
parser = argparse.ArgumentParser(
    description=description,
    epilog=epilog)

parser.add_argument('-c', '--config', required=True, help='Config file to use')

# Default handler attribute: callers invoke args.func(args) after parsing.
parser.set_defaults(func=hqmanager.main)

# args = parser.parse_args()
|
[
"[email protected]"
] | |
4b2654ba6bffd9e20cf44a960e8ed5166476ba81
|
749aca95edfaad9e7d8b84dc2c6f62038595efc3
|
/mandala.py
|
dac1d0eae959c6a652cc1f391088ca60e9419b56
|
[] |
no_license
|
xmduhan/mandala
|
efe72b116ec829457cd2286b88b4544d5538861c
|
eafea6c9ebd0ca913c070f0bf2cbf72a6566b0a7
|
refs/heads/master
| 2021-06-30T16:30:49.410637 | 2017-09-20T09:44:53 | 2017-09-20T09:44:53 | 104,153,412 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,494 |
py
|
#!/usr/bin/env python
# encoding: utf-8
import dataset
from pyfiglet import Figlet
from termcolor import cprint
from prompt_toolkit import prompt as _prompt
from prompt_toolkit.history import InMemoryHistory
from itertools import count
from treelib import Tree
from pandas import DataFrame
# Module-level singletons: one shared prompt history, one SQLite-backed
# dataset connection, and an always-open transaction that commit()/rollback()
# re-open after closing.
history = InMemoryHistory()
db = dataset.connect('sqlite:///db.sqlite')
table = db['relation']
db.begin()


def commit():
    """Commit the pending transaction and immediately start a new one."""
    db.commit()
    db.begin()
    print u'保存成功!'


def rollback():
    """Discard the pending transaction and immediately start a new one."""
    db.rollback()
    db.begin()
    print u'操作撤销'


def save(w0, w1):
    """Insert a w0 -> w1 relation row and echo it in color."""
    table.insert({'w0': w0, 'w1': w1})
    # print u'%s --> %s: ' % (w0, w1)
    cprint(' |-- ', 'green', end='')
    cprint('%s --> %s: ' % (w0, w1), color='blue', end='')
    cprint('+1', 'red')


def prompt(text):
    """prompt_toolkit prompt with shared history; returns stripped input."""
    return _prompt(text, history=history).strip()
def star(w0=None):
    """Star mode: repeatedly link new words to one fixed keyword.

    Stops at the first empty input.
    """
    if w0 is None:
        w0 = prompt(u'关键词:')
    if len(w0) == 0:
        return
    for i in count(start=1, step=1):
        w1 = prompt(u'%s --> (%d):' % (w0, i))
        if len(w1) == 0:
            break
        save(w0, w1)


def chain(w0=None):
    """Chain mode: each saved word becomes the source of the next link."""
    if w0 is None:
        w0 = prompt(u'关键词:')
    if len(w0) == 0:
        return
    for i in count(start=1, step=1):
        w1 = prompt(u'%s --> (%d):' % (w0, i))
        if len(w1) == 0:
            break
        save(w0, w1)
        # The word just saved is the source of the next prompt.
        w0 = w1


def readLevel():
    """Ask for the maximum recursion depth (default 3); retry until an int."""
    while True:
        levelString = prompt(u'最大递归级数(3):')
        if len(levelString) == 0:
            levelString = 3
        try:
            level = int(levelString)
            return level
        except Exception:
            print u'输入有误, 必须是整数!'
def lookup():
    """Query mode: print a tree of relations rooted at a user-given keyword."""
    w0 = prompt(u'关键字:')
    level = readLevel()
    # Aggregate edge multiplicities once, then expand in memory.
    qs = db.query('select w0, w1, count(*) n from relation group by w0, w1')
    df = DataFrame(list(qs))
    tree = Tree()
    tree.create_node(w0, w0)
    appendList = []

    def append(w0, level=5):
        # Depth-first expansion; appendList prevents revisiting a word,
        # level bounds recursion depth.
        if w0 in appendList or level == 0:
            return
        appendList.append(w0)
        for i, row in df[df['w0'] == w0].iterrows():
            w1 = row['w1']
            n = row['n']
            # print w0, '-->', w1
            if w1 not in tree:
                title = '%s[%d]' % (w1, n)
                tree.create_node(title, w1, parent=w0)
            else:
                # Cycle: the word is already a node, so add a starred
                # duplicate using the DataFrame index as a unique node id.
                title = '%s[%d](*)' % (w1, n)
                tree.create_node(title, i, parent=w0)
            append(w1, level - 1)

    append(w0, level)
    tree.show()
def quit():
    """Discard the open transaction and terminate the program."""
    print u'再见!'
    db.rollback()
    exit()


def help():
    """Print the list of available commands."""
    print u'star: 星型添加'
    print u'chain: 链式添加'
    print u'commit: 保存'
    print u'rollback: 取消'
    print u'lookup: 查找'
    print u'quit: 退出'
    print u'help: 帮助'


# Command-name -> handler dispatch table used by main().
commands = {
    'star': star,
    'chain': chain,
    'lookup': lookup,
    'commit': commit,
    'rollback': rollback,
    'quit': quit,
    'help': help,
}
def main():
    """REPL: show the banner, then read and dispatch commands until Ctrl-C."""
    # Print the ASCII-art logo.
    f = Figlet(font='slant')
    print f.renderText('Mandala')
    # Read and execute commands.
    try:
        while True:
            cmd = prompt(u'mandala>')
            if cmd in commands:
                commands[cmd]()
            else:
                print u'无效命令'
    except KeyboardInterrupt:
        # Ctrl-C exits cleanly, rolling back any uncommitted work.
        quit()


if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
e2f166eb27eec77732a009850684325baf47550c
|
148f5fb80cb7640dbd4419617f1f002cd6b641bf
|
/MP4-HadoopMapReduce/TopTitleStatisticsMapper.py
|
43f9404508a4053c503e19c3962ab525dd6dd767
|
[] |
no_license
|
nhtrinh2/Cloud-Computing-and-Big-Data
|
7e6ec7811f42188ed181bb72b3be7768f7546480
|
c51e48e96660d7ed67f9812017124d30453a6f0a
|
refs/heads/master
| 2023-03-17T02:57:58.801203 | 2020-04-19T19:21:57 | 2020-04-19T19:21:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 172 |
py
|
#!/usr/bin/env python3
import sys

# Mapper step: re-key every incoming "<word>\t<value>" record under the
# constant key "count" so the reducer receives all values together.
for record in sys.stdin:
    _word, value = record.strip().split('\t')
    print('%s\t%s' % ('count', value))  # pass this output to reducer
|
[
"[email protected]"
] | |
26788b2b41f458fe7a22cf558196deec2b03846b
|
8a870098382dce7bf59ed046908ed369e5045662
|
/Problem_Solving/Algorithms/Implementation/8_Migratory_Birds/Solution.py
|
0a2caf5509caf83a3adb2d89c9229e6dfabb8d49
|
[
"MIT"
] |
permissive
|
CFLSousa/HackerRank
|
794318b3c4903f9f625848dfcd00d7d52b0bf748
|
29ed039634e88d72981b2ecd619e5c65d37111e4
|
refs/heads/master
| 2021-08-17T10:31:54.739339 | 2020-04-13T16:11:36 | 2020-04-13T16:11:36 | 159,576,300 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 692 |
py
|
import math
import os
import random
import re
import sys
def migratoryBirds(arr):
    """Return the 1-based id of the most frequently sighted bird type.

    Ids are expected to lie in 1..5; ties are broken in favour of the
    smallest id, and an empty survey yields 1.
    """
    tallies = [0] * 5
    for bird_type in arr:
        tallies[bird_type - 1] += 1
    best_index, best_count = 0, 0
    for index, count in enumerate(tallies):
        if count > best_count:
            best_index, best_count = index, count
    return best_index + 1
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the path given in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First input line is the declared array size (not needed by migratoryBirds).
    arr_count = int(input().strip())
    arr = list(map(int, input().rstrip().split()))
    result = migratoryBirds(arr)
    fptr.write(str(result) + '\n')
    fptr.close()
|
[
"[email protected]"
] | |
3dcaa933be418416fe87dd55f3819f52ea48c329
|
4460086c7817c8f952d9f532cbbc01770a8f7d83
|
/hello.py
|
103ce431c728a675fd31464b1e1075071aeab4b4
|
[
"MIT"
] |
permissive
|
qianjing2020/twitoff
|
d3c926f153513a83df728aa8722412ef14856070
|
a223e8f4a3dfecd582c18c92f8ac9212a01d4570
|
refs/heads/main
| 2023-04-17T05:39:41.990743 | 2021-05-02T02:52:04 | 2021-05-02T02:52:04 | 311,975,367 | 0 | 0 |
MIT
| 2020-11-28T03:25:16 | 2020-11-11T13:14:38 | null |
UTF-8
|
Python
| false | false | 228 |
py
|
# hello.py
from flask import Flask
# Flask application object; __name__ lets Flask locate resources for this module.
app = Flask(__name__)
@app.route("/")
def index():
x = 2 + 2
return f"Hello World! {x}"
@app.route("/about")
def about():
return "About me"
|
[
"[email protected]"
] | |
27ddd3555fa9a367607271950e9516a2c79efd64
|
0ae54260b86968dbb2748496629775d03732359c
|
/controller/action.py
|
60f11dd52f310b7f7d927cc3bf4d6fec2c0278ad
|
[
"MIT"
] |
permissive
|
sosterwalder/bti7501p
|
f9593bdf440af25cd40c7f3347e973dd77a0c6b2
|
6da28ac4f6c63205a2b6e8708b01c65fda6c3ce5
|
refs/heads/master
| 2021-01-10T19:56:54.759971 | 2014-06-13T21:28:00 | 2014-06-13T21:28:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,647 |
py
|
#!/usr/bin/env python
# action.py module
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Thi Thuy-Duc Dao ([email protected]), Sven Osterwalder ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# System imports
# Project imports
class Action(object):
    """One game action: a piece travelling from a source square to a destination.

    Besides the two coordinate pairs, an action carries its type (move,
    capture or undo) and, for captures, the piece that was taken.
    """

    ACTION_UNDO = 'UNDO'
    ACTION_MOVE = 'MOVE'
    ACTION_CAPTURE = 'CAPTURE'

    def __init__(
        self,
        action_type,
        source,
        destination,
        captured=None,
    ):
        self.type_ = action_type
        self.source = source
        self.destination = destination
        self.captured = captured

    def undo(self):
        """Build the inverse action: the same squares, travelled backwards."""
        return Action(
            self.ACTION_UNDO,
            self.destination,
            self.source,
            self.captured,
        )

    def copy(self):
        """Return a new Action carrying identical fields."""
        return Action(
            self.type_,
            self.source,
            self.destination,
            self.captured,
        )

    def __len__(self):
        # An action always counts as a single step.
        return 1

    def __eq__(self, other):
        """Field-wise equality; comparing against None is never equal."""
        if other is None:
            return False
        return (self.type_ == other.type_
                and self.source == other.source
                and self.destination == other.destination
                and self.captured == other.captured)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "{0} :: <{1}, {2}> -> <{3}, {4}>".format(
            self.type_,
            self.source[0],
            self.source[1],
            self.destination[0],
            self.destination[1]
        )
|
[
"[email protected]"
] | |
f9ff82f3afadebdc0f2fa82b4a0f19227d7cf918
|
67a10f3384d5048bbc0e46c0535b0c113d78c2fa
|
/examples/implicit_orientation_learning/train.py
|
0e435a1228d1816889983ad832d23c56eaed8537
|
[
"MIT"
] |
permissive
|
DeepanChakravarthiPadmanabhan/fer
|
af9bc6b65bf6d265c63d107b0f11ab0c09002390
|
920268633aa0643416551212ec2d70f3591b5001
|
refs/heads/master
| 2023-09-05T03:04:50.468845 | 2021-11-09T23:42:54 | 2021-11-09T23:42:54 | 426,337,368 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,135 |
py
|
import os
import glob
import json
import argparse
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from paz.backend.image import write_image
from paz.abstract import GeneratingSequence
from paz.optimization.callbacks import DrawInferences
from paz.pipelines import AutoEncoderPredictor
from scenes import SingleView
from pipelines import DomainRandomization
from model import AutoEncoder
# Command-line interface: rendering, optimisation and output options.
description = 'Training script for learning implicit orientation vector'
root_path = os.path.join(os.path.expanduser('~'), '.keras/paz/')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-op', '--obj_path', type=str, help='Path of 3D OBJ model',
                    default=os.path.join(
                        root_path,
                        'datasets/ycb/models/035_power_drill/textured.obj'))
parser.add_argument('-cl', '--class_name', default='035_power_drill', type=str,
                    help='Class name to be added to model save path')
parser.add_argument('-id', '--images_directory', type=str,
                    help='Path to directory containing background images',
                    default=os.path.join(
                        root_path, 'datasets/voc-backgrounds/'))
parser.add_argument('-bs', '--batch_size', default=32, type=int,
                    help='Batch size for training')
parser.add_argument('-lr', '--learning_rate', default=0.001, type=float,
                    help='Initial learning rate for Adam')
# NOTE(review): the short flags below look swapped relative to the long names
# ('-is' -> latent_dimension, '-ld' -> image_size); confirm before changing,
# as existing invocations may rely on them.
parser.add_argument('-is', '--latent_dimension', default=128, type=int,
                    help='Latent dimension of the auto-encoder')
parser.add_argument('-ld', '--image_size', default=128, type=int,
                    help='Size of the side of a square image e.g. 64')
parser.add_argument('-sp', '--stop_patience', default=7, type=int,
                    help='Number of epochs before doing early stopping')
parser.add_argument('-pp', '--plateau_patience', default=3, type=int,
                    help='Number of epochs before reducing learning rate')
parser.add_argument('-e', '--max_num_epochs', default=10000, type=int,
                    help='Maximum number of epochs before finishing')
parser.add_argument('-st', '--steps_per_epoch', default=1000, type=int,
                    help='Steps per epoch')
parser.add_argument('-sh', '--top_only', default=0, choices=[0, 1], type=int,
                    help='Flag for full sphere or top half for rendering')
parser.add_argument('-ls', '--loss', default='binary_crossentropy', type=str,
                    help='tf.keras loss function name to be used')
parser.add_argument('-r', '--roll', default=3.14159, type=float,
                    help='Threshold for camera roll in radians')
parser.add_argument('-s', '--shift', default=0.05, type=float,
                    help='Threshold of random shift of camera')
parser.add_argument('-d', '--depth', nargs='+', type=float,
                    default=[0.3, 0.5],
                    help='Distance from camera to origin in meters')
parser.add_argument('-fv', '--y_fov', default=3.14159 / 4.0, type=float,
                    help='Field of view angle in radians')
parser.add_argument('-l', '--light', nargs='+', type=float,
                    default=[.5, 30],
                    help='Light intensity from poseur')
parser.add_argument('-oc', '--num_occlusions', default=2, type=int,
                    help='Number of occlusions')
parser.add_argument('-sa', '--save_path',
                    default=os.path.join(
                        os.path.expanduser('~'), '.keras/paz/models'),
                    type=str, help='Path for writing model weights and logs')
args = parser.parse_args()

# setting optimizer and compiling model
latent_dimension = args.latent_dimension
model = AutoEncoder((args.image_size, args.image_size, 3), latent_dimension)
optimizer = Adam(args.learning_rate, amsgrad=True)
model.compile(optimizer, args.loss, metrics=['mse'])
model.summary()

# setting scene (synthetic renderer producing single views of the OBJ model)
renderer = SingleView(args.obj_path, (args.image_size, args.image_size),
                      args.y_fov, args.depth, args.light, bool(args.top_only),
                      args.roll, args.shift)

# creating sequencer: domain-randomized images over the background set
image_paths = glob.glob(os.path.join(args.images_directory, '*.png'))
processor = DomainRandomization(
    renderer, args.image_size, image_paths, args.num_occlusions)
sequence = GeneratingSequence(processor, args.batch_size, args.steps_per_epoch)

# making directory for saving model weights and logs
model_name = '_'.join([model.name, str(latent_dimension), args.class_name])
save_path = os.path.join(args.save_path, model_name)
if not os.path.exists(save_path):
    os.makedirs(save_path)

# setting callbacks (CSV log, early stop, LR plateau, best-checkpoint save)
log = CSVLogger(os.path.join(save_path, '%s.log' % model_name))
stop = EarlyStopping('loss', patience=args.stop_patience, verbose=1)
plateau = ReduceLROnPlateau('loss', patience=args.plateau_patience, verbose=1)
model_path = os.path.join(save_path, '%s_weights.hdf5' % model_name)
save = ModelCheckpoint(
    model_path, 'loss', verbose=1, save_best_only=True, save_weights_only=True)

# setting drawing callbacks: dump one generated batch for visual reference
images = (sequence.__getitem__(0)[0]['input_image'] * 255).astype('uint8')
for arg, image in enumerate(images):
    image_name = 'image_%03d.png' % arg
    image_path = os.path.join(save_path, 'original_images/' + image_name)
    write_image(image_path, image)
inferencer = AutoEncoderPredictor(model)
draw = DrawInferences(save_path, images, inferencer)

# saving hyper-parameters and model summary as text files
print(save_path)
with open(os.path.join(save_path, 'hyperparameters.json'), 'w') as filer:
    json.dump(args.__dict__, filer, indent=4)
with open(os.path.join(save_path, 'model_summary.txt'), 'w') as filer:
    model.summary(print_fn=lambda x: filer.write(x + '\n'))

# model optimization
# NOTE(review): fit_generator is deprecated in recent tf.keras in favour of
# model.fit; confirm the installed TensorFlow version before migrating.
model.fit_generator(
    sequence,
    steps_per_epoch=args.steps_per_epoch,
    epochs=args.max_num_epochs,
    callbacks=[stop, log, save, plateau, draw],
    verbose=1,
    workers=0)
|
[
"[email protected]"
] | |
e28726924a187a1dcb5a4094e5ec1bb966898601
|
1777f5e6f3129e5e2df75161f669eb0531355a0b
|
/myweb/mysite/mysite/urls.py
|
d8a926e25371899e3a82b7ef589d0b22daf50f5b
|
[] |
no_license
|
HakimdarC/CRUD-project-Django-Django
|
ca52b3420a3e25fcebea7f855102a9e306dcbb19
|
4fe51989e1be7940331ddb89ccc7992a6a49559a
|
refs/heads/master
| 2022-10-08T04:01:41.530990 | 2019-07-26T12:04:02 | 2019-07-26T12:04:02 | 195,555,184 | 0 | 1 | null | 2022-10-03T14:09:50 | 2019-07-06T15:33:10 |
Python
|
UTF-8
|
Python
| false | false | 982 |
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from myapp import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# URL routes: site index, the myapp application (its own urlconf), and admin.
urlpatterns = [
    path('', views.index, name='index'),
    path('myapp/', include("myapp.urls"), name='myapp'),
    path('admin/', admin.site.urls)
]
# Serve static files through the staticfiles app's URL patterns.
urlpatterns += staticfiles_urlpatterns()
|
[
"[email protected]"
] | |
dd1240297f91dad4af698a16309dfc8c8652efa8
|
09fe5ec73326265ccdc05778baa35dd59acb1dd4
|
/Missions_to_Mars/scrape_mars.py
|
0085ecad12f50add3ebbe7c9cf64a8e3faa915b2
|
[] |
no_license
|
Alvin1359/missions-to-mars-webscraping
|
b1b51cad9e2065b3528cf89880c476c4911a995e
|
8880d79db63dda1ab2118ded406ff0c524428e4f
|
refs/heads/main
| 2023-05-27T19:59:19.442287 | 2021-06-14T15:21:59 | 2021-06-14T15:21:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,372 |
py
|
import pandas as pd
from splinter import Browser
from bs4 import BeautifulSoup
import time
from webdriver_manager.chrome import ChromeDriverManager
def scrape_info():
    """Scrape several Mars sites and return the results as one dictionary.

    Drives a Chrome browser through redplanetscience.com (latest news),
    spaceimages-mars.com (featured image), galaxyfacts-mars.com (facts
    table rendered to HTML) and marshemispheres.com (hemisphere images).

    :return: dict with keys NewsTitle, NewsPara, FeaturedImg, MarsFacts
        and Hemispheres (a list of {"title", "image_url"} dicts).
    """
    # Splinter setup
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)

    # Visit URL
    url = ('https://redplanetscience.com/')
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')

    # Scrape news title (first, i.e. latest, article)
    news_title = soup.find_all('div', class_='content_title')[0].text

    # Scrape news paragraph
    news_p = soup.find_all('div', class_='article_teaser_body')[0].text

    # Visit URL for the featured image; the href is relative, so prepend the site URL
    url = 'https://spaceimages-mars.com/'
    browser.visit(url)
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    partial_url = soup.find_all('a',class_="showimg")[0]['href']
    featured_image_url = url + partial_url

    # Mars Facts table: promote the first row to header, then render to HTML
    url = 'https://galaxyfacts-mars.com/'
    mars_facts = pd.read_html(url)
    mars_facts_df = mars_facts[0]
    header_row = 0
    mars_facts_df.columns = mars_facts_df.iloc[header_row]
    mars_facts_df = mars_facts_df.drop(header_row)
    mars_facts_df = mars_facts_df.reset_index(drop=True)
    mars_facts_html = mars_facts_df.to_html(index=False, classes="table table-striped table-responsive")

    # Visit URL; hemisphere titles are all <h3> tags except the last one
    url = 'https://marshemispheres.com/'
    browser.visit(url)
    soup = BeautifulSoup(browser.html, 'html.parser')
    titles = soup.find_all('h3')[:-1]
    title_ls = []
    for title in titles:
        title_ls.append(title.text)
    url_ls = []
    for title in title_ls:
        # Re-visit the index page, then follow the link for this hemisphere.
        # NOTE(review): click_link_by_partial_text is deprecated in newer
        # Splinter releases (browser.links.find_by_partial_text); confirm the
        # pinned Splinter version before changing.
        url = 'https://marshemispheres.com/'
        browser.visit(url)
        browser.click_link_by_partial_text(title)
        html = browser.html
        soup = BeautifulSoup(browser.html, 'html.parser')
        image_url = soup.find_all('li')[0].a["href"]
        dictionary = {"title": title,"image_url":url + image_url}
        url_ls.append(dictionary)

    # Store data in a dictionary
    mars_data = {
        "NewsTitle": news_title,
        "NewsPara": news_p,
        "FeaturedImg": featured_image_url,
        "MarsFacts": mars_facts_html,
        "Hemispheres": url_ls,
    }

    # Close the browser after scraping
    browser.quit()

    # Return results
    return mars_data
|
[
"[email protected]"
] | |
3fa5adf81091340c3211190e48597e99715da11a
|
0723805aecf730d6762456651430ac222b5548b2
|
/codechef/MARCH13_TOTR.py
|
5e1cc0bae539be9ab3f3c7829ad028d2f7aeb10a
|
[] |
no_license
|
nemausus/topcoder
|
4979ba2b626cd23423891b5e126a30dbfc47960f
|
946bccc7847a6ac3177a5a7bb70917980a2912ee
|
refs/heads/master
| 2020-12-29T02:40:15.540203 | 2017-06-09T06:01:38 | 2017-07-07T21:45:21 | 7,567,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
// https://www.codechef.com/MARCH13/status/TOTR,nemausus
// https://www.codechef.com/viewplaintext/1890676
import string
a,b = raw_input().split()
t = int(a)
translation = str(b)
dictionary = {'_':' '}
allTheLetters = string.lowercase
for i in range(26):
dictionary[allTheLetters[i]] = translation[i]
dictionary[allTheLetters[i].upper()] = translation[i].upper()
for i in range(t):
inp = list(str(raw_input()))
length = len(inp)
for i in range(length):
try:
inp[i] = dictionary[inp[i]]
except KeyError:
dictionary[inp[i]] = inp[i]
print "".join(inp)
|
[
"[email protected]"
] | |
205e2c6f3f8e1f3fd358d21e4ccbb1da32701a93
|
021a3dff055d4b3e40aafc63f0029dc280466233
|
/db_scripts/curw_fcst/rfield/gen_rfield_kelani_basin_parallelized_optimized.py
|
e2bed1eb35b657a3592bea9d212fe72a3c8b6482
|
[] |
no_license
|
shadhini/curw_helpers
|
45efe90d887c702b3a3f5877163647e220d230e4
|
101d896f8b589b478ef146b5b4dd99ec24f2dc84
|
refs/heads/master
| 2021-07-03T02:53:13.398052 | 2020-10-28T03:39:58 | 2020-10-28T03:39:58 | 185,217,580 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,854 |
py
|
#!/home/uwcc-admin/curw_rfield_extractor/venv/bin/python3
import traceback
import pymysql
import json
import getopt
import sys
import os
import re
import multiprocessing as mp
from datetime import datetime, timedelta
# connection params
HOST = ""
USER = ""
PASSWORD = ""
DB =""
PORT = ""
VALID_MODELS = ["WRF_A", "WRF_C", "WRF_E", "WRF_SE"]
VALID_VERSIONS = ["v3", "v4", "4.0"]
SIM_TAGS = ["evening_18hrs"]
root_directory = '/var/www/html'
bucket_root = '/mnt/disks/wrf_nfs'
def read_attribute_from_config_file(attribute, config):
    """
    :param attribute: key name of the config json file
    :param config: loaded json file
    :return: the configured value; exits the process when missing or empty
    """
    value = config.get(attribute, "")
    if value != "":
        return value
    print("{} not specified in config file.".format(attribute))
    exit(1)
def write_to_file(file_name, data):
    """Write the given sequence of strings to *file_name*, one per line."""
    with open(file_name, 'w+') as out:
        out.write('\n'.join(data))
def create_rfield(connection, wrf_model, version, sim_tag, timestamp):
    """Write one rfield text file for the given model/version/tag/timestamp.

    Calls the get_d03_rfield_kelani_basin_rainfall stored procedure and
    writes one rainfall value per line under
    <root_directory>/wrf/<version>/<sim_tag>/rfield/kelani_basin/.
    """
    # rfield = [['latitude', 'longitude', 'rainfall']]
    rfield = []
    with connection.cursor() as cursor0:
        cursor0.callproc('get_d03_rfield_kelani_basin_rainfall', (wrf_model, version, sim_tag, timestamp))
        results = cursor0.fetchall()
        for result in results:
            # Rows come back as dicts (DictCursor connection); keep only the value.
            rfield.append('{}'.format(result.get('value')))
    write_to_file('{}/wrf/{}/{}/rfield/kelani_basin/{}_{}_{}_rfield.txt'
                  .format(root_directory, version, sim_tag, wrf_model, version, timestamp.strftime('%Y-%m-%d_%H-%M')), rfield)
#############################
# Raw WRF RFIELD GENERATION #
#############################
def gen_rfield_d03_kelani_basin(wrf_model, version, sim_tag):
    """Generate 15-minute rfield files for one WRF model/version/sim_tag.

    Removes stale rfield files first, then, when the stored forecast extends
    at least one day past today, writes one file per 15-minute timestamp
    between the timeseries start and end.

    :return: True on success, False on a database/extraction error, and
        None (kept falsy, as before) when the series does not reach far
        enough into the future.
    """
    # remove outdated rfields
    try:
        os.system("sudo rm {}/wrf/{}/{}/rfield/kelani_basin/{}_{}_*".format(root_directory, version, sim_tag, wrf_model, version))
    except Exception:
        traceback.print_exc()

    # "Today" at midnight, shifted by +5:30 (Sri Lanka local time).
    now = datetime.strptime((datetime.now()+timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d 00:00:00'), '%Y-%m-%d %H:%M:%S')

    connection = None  # bug fix: keeps the finally block safe if connect() itself fails
    try:
        # Connect to the database
        connection = pymysql.connect(host=HOST, user=USER, password=PASSWORD, db=DB,
                                     cursorclass=pymysql.cursors.DictCursor)

        # Extract timeseries start time and end time
        with connection.cursor() as cursor1:
            cursor1.callproc('get_TS_start_end', (wrf_model, version, sim_tag))
            result = cursor1.fetchone()
            start_time = result.get('start')
            end_time = result.get('end')

        if end_time > (now + timedelta(days=1)):
            # Extract rfields at 15-minute steps over the whole series.
            timestamp = start_time
            while timestamp <= end_time:
                create_rfield(connection=connection, wrf_model=wrf_model, version=version, sim_tag=sim_tag,
                              timestamp=timestamp)
                timestamp = datetime.strptime(str(timestamp), '%Y-%m-%d %H:%M:%S') + timedelta(minutes=15)
            return True
    except Exception:
        traceback.print_exc()
        return False
    finally:
        if connection is not None:  # previously this closed an unbound name on connect failure
            connection.close()
        print("Process finished")
def usage():
    """Print command-line usage information for this script."""
    help_text = """
    Usage: python gen_rfield_kelani_basin_parallelized_optimized_with_past_future.py -m WRF_X1,WRF_X2,WRF_X3 -v vX -s "evening_18hrs"
    -h  --help          Show usage
    -m  --wrf_model     List of WRF models (e.g. WRF_A, WRF_E). Compulsory arg
    -v  --version       WRF model version (e.g. v4, v3). Compulsory arg
    -s  --sim_tag       Simulation tag (e.g. evening_18hrs). Compulsory arg
    """
    print(help_text)
if __name__=="__main__":
my_pool = None
try:
wrf_models = None
version = None
sim_tag = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:m:v:s:",
["help", "wrf_model=", "version=", "sim_tag="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-m", "--wrf_model"):
wrf_models = arg.strip()
elif opt in ("-v", "--version"):
version = arg.strip()
elif opt in ("-s", "--sim_tag"):
sim_tag = arg.strip()
print(wrf_models, version, sim_tag)
print(VALID_MODELS, VALID_VERSIONS, SIM_TAGS)
# load connection parameters
config = json.loads(open('/home/uwcc-admin/curw_rfield_extractor/db_config.json').read())
# connection params
HOST = read_attribute_from_config_file('host', config)
USER = read_attribute_from_config_file('user', config)
PASSWORD = read_attribute_from_config_file('password', config)
DB = read_attribute_from_config_file('db', config)
PORT = read_attribute_from_config_file('port', config)
wrf_model_list = wrf_models.split(',')
for wrf_model in wrf_model_list:
if wrf_model is None or wrf_model not in VALID_MODELS:
usage()
exit(1)
if version is None or version not in VALID_VERSIONS:
usage()
exit(1)
if sim_tag is None or sim_tag not in SIM_TAGS:
usage()
exit(1)
rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(root_directory, version, sim_tag)
try:
os.makedirs(rfield_home)
except FileExistsError:
# directory already exists
pass
gfs_data_hour =re.findall(r'\d+', sim_tag)[0]
bucket_rfield_home = "{}/wrf/{}/{}/rfield/kelani_basin".format(bucket_root, version, gfs_data_hour)
try:
os.makedirs(bucket_rfield_home)
except FileExistsError:
# directory already exists
pass
# copy file containing xy coordinates to the rfield home
try:
os.system("cp kelani_basin_xy.csv {}/xy.csv".format(rfield_home))
except Exception:
pass
mp_pool = mp.Pool(mp.cpu_count())
results = mp_pool.starmap(gen_rfield_d03_kelani_basin,
[(wrf_model, version, sim_tag) for wrf_model in wrf_model_list])
# results = mp_pool.starmap_async(gen_rfield_d03_kelani_basin,
# [(wrf_model, version, sim_tag) for wrf_model in wrf_model_list]).get()
print("results: ", results)
except Exception as e:
print('JSON config data loading error.')
traceback.print_exc()
finally:
if my_pool is not None:
mp_pool.close()
os.system("tar -czvf {}/rfield.tar.gz {}/*".format(bucket_rfield_home, rfield_home))
|
[
"[email protected]"
] | |
973d2f5af01158d20e8d8e0401dd4a38bffe70c1
|
a4fd8c01606641424faca66cae651b2670a863d7
|
/postfinancecheckout/models/customers_presence.py
|
904e7a50b40d1e730de795f474b2417c870457c4
|
[
"Apache-2.0"
] |
permissive
|
pfpayments/python-sdk
|
c435a4519a5a95a46cb6e446a4a8c83aeb9dcc2d
|
2d6b1429f5a4cafe61dcf5ea2c2a698848a837e0
|
refs/heads/master
| 2023-08-07T17:05:20.864000 | 2023-07-20T14:41:34 | 2023-07-20T14:41:34 | 251,532,627 | 2 | 0 |
Apache-2.0
| 2022-10-26T08:40:23 | 2020-03-31T07:38:00 |
Python
|
UTF-8
|
Python
| false | false | 207 |
py
|
# coding: utf-8
from enum import Enum, unique
@unique
class CustomersPresence(Enum):
    """Closed set of customer-presence states; each value equals its member name."""

    NOT_PRESENT = "NOT_PRESENT"
    VIRTUAL_PRESENT = "VIRTUAL_PRESENT"
    PHYSICAL_PRESENT = "PHYSICAL_PRESENT"
|
[
"[email protected]"
] | |
d1f9c5d8fe6a52dd2e130204f45e94850dfa5e0f
|
33f86c1678d2f5e15da77885e0bf770f405201a4
|
/tcamp/local_settings.example.py
|
b5b48f86971536c25ec25d5c61d13c2805a1304e
|
[
"BSD-3-Clause"
] |
permissive
|
imclab/tcamp
|
5410c9549ed7731575e7312acfed7b8e4cd0c58d
|
111cabab90b2c8cf651ee480520bc43a33f30844
|
refs/heads/master
| 2021-01-18T12:15:58.484183 | 2014-03-05T21:36:00 | 2014-03-05T21:36:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,832 |
py
|
# Example local settings for the tcamp Django project; copy and fill in the
# blanks per deployment. Fix applied: TWITTER_CONSUMER_KEY/SECRET were each
# defined twice (identical empty values) -- duplicates removed.
DEBUG = True

ADMINS = (
    ('', ''),
)
MANAGERS = ADMINS

INTERNAL_IPS = ('127.0.0.1', )

SECRET_KEY = ''

DATABASES = {
    'local': {
        'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    },
    'staging': {
        'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    },
    'production': {
        'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Which of the above environments this host actually uses.
DATABASES['default'] = DATABASES['local']

# Site assets / branding.
FAVICON = ''
APPLE_TOUCH_ICON = ''
SHARING_IMAGE = ''
FB_APP_ID = ''
GOOGLE_ANALYTICS_ID = ''

# Static/compressed asset storage (S3 credentials + locations).
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
ASSET_SITE_VERSION = '1.0'
COMPRESS_URL = ''
COMPRESS_STORAGE = ''
STATICFILES_STORAGE = COMPRESS_STORAGE
STATIC_URL = COMPRESS_URL

# Outgoing email via Postmark.
POSTMARK_API_KEY = ''
POSTMARK_SENDER = ''

GOOGLEAUTH_DOMAIN = ''
GOOGLEAUTH_REALM = ''

# Social-auth application credentials.
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
DISQUS_CLIENT_ID = ''
DISQUS_CLIENT_SECRET = ''

AKISMET_KEY = ''

# Twitter API access tokens for posting (consumer key/secret defined above).
TWITTER_ACCESS_KEY = ''
TWITTER_ACCESS_SECRET = ''

DISQUS_SHORTNAME = ''
BRAINSTORM_USE_DISQUS = True
BRAINSTORM_LOGIN_OPTIONS = (
    ('Twitter', '/login/twitter/'),
    ('Facebook', '/login/facebook/'),
    ('Google', '/login/google-oauth2/'),
    ('Github', '/login/github/'),
)

VARNISH_MANAGEMENT_ADDRS = ()

TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''

RAVEN_CONFIG = {
    'dsn': '',
}
|
[
"[email protected]"
] | |
67124ec1d96da05dd8371cdfe96260d5600890e8
|
45ffc0be0c6952cd7b503485e5c50fdc619b0601
|
/venv/settings.py
|
a304eab9e003e7a27169e4da389635a7237ab2f6
|
[] |
no_license
|
Quanny02/Alien_invasion
|
8f86854ea445e165b3ed28bd2c042c654c8f0803
|
7d8639bce39faecf2f388652389e51920bf47894
|
refs/heads/master
| 2020-03-16T22:25:06.412758 | 2018-05-29T11:59:55 | 2018-05-29T11:59:55 | 133,038,358 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 506 |
py
|
class Settings():
    """A class to store all settings for Alien Invasion."""

    def __init__(self):
        """Initialize the game's settings."""
        # Screen settings
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230, 230, 230)

        # Ship settings
        self.ship_speed_factor = 1.5

        # Bullet settings
        # Bug fix: the original attribute name is misspelled ("facotr"); it is
        # kept so existing callers keep working, with a correctly spelled
        # alias added alongside it.
        self.bullet_speed_facotr = 1
        self.bullet_speed_factor = self.bullet_speed_facotr
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = 60, 60, 60
|
[
"[email protected]"
] | |
d020f7a59f0738fc32cb65ab2d97b3c8c707060d
|
5c5beb07756060ce7ccc9c4256d4c5521f4acee9
|
/SinGAN/editing.py
|
32221515e948beb759f9377ecae96fee275d20b5
|
[] |
no_license
|
JaeDukSeo/SinGAN-extension
|
a2e5fb3ae442a28cebe77592a3668367347e3422
|
23afc01ebe568f93e01c377b0a94e0dad26ac64a
|
refs/heads/master
| 2022-07-03T12:04:56.295212 | 2020-05-13T15:24:48 | 2020-05-13T15:24:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,301 |
py
|
from .config import get_arguments
from .SinGAN.manipulate import *
from .SinGAN.training import *
from .SinGAN.imresize import imresize
from .SinGAN.imresize import imresize_to_shape
from .SinGAN import functions
if __name__ == '__main__':
    # CLI: input/reference images and the pyramid scale at which the edited
    # reference is injected into the trained SinGAN generators.
    parser = get_arguments()
    parser.add_argument('--input_dir', help='input image dir', default='Input\\Images')
    parser.add_argument('--input_name', help='training image name', required=True)
    parser.add_argument('--ref_dir', help='input reference dir', default='Input\\Editing')
    parser.add_argument('--ref_name', help='reference image name', required=True)
    parser.add_argument('--editing_start_scale', help='editing injection scale', type=int, required=True)
    parser.add_argument('--mode', help='task to be done', default='editing')
    opt = parser.parse_args()
    opt = functions.post_config(opt)
    Gs = []
    Zs = []
    reals = []
    NoiseAmp = []
    dir2save = functions.generate_dir2save(opt)
    if dir2save is None:
        print('task does not exist')
    #elif (os.path.exists(dir2save)):
    #    print("output already exist")
    else:
        try:
            os.makedirs(dir2save)
        except OSError:
            pass
        real = functions.read_image(opt)
        real = functions.adjust_scales2image(real, opt)
        # Load the trained generator pyramid for this input image.
        Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
        if (opt.editing_start_scale < 1) | (opt.editing_start_scale > (len(Gs)-1)):
            print("injection scale should be between 1 and %d" % (len(Gs)-1))
        else:
            # The mask file is named "<ref_name>_mask<ext>" next to the reference.
            ref = functions.read_image_dir(os.path.join(opt.ref_dir, opt.ref_name), opt)
            mask = functions.read_image_dir(os.path.join(opt.ref_dir,'{}_mask{}'.format(opt.ref_name[:-4],opt.ref_name[-4:])), opt)
            # Resize reference and mask to the real image's spatial size if needed.
            if ref.shape[3] != real.shape[3]:
                '''
                mask = imresize(mask, real.shape[3]/ref.shape[3], opt)
                mask = mask[:, :, :real.shape[2], :real.shape[3]]
                ref = imresize(ref, real.shape[3] / ref.shape[3], opt)
                ref = ref[:, :, :real.shape[2], :real.shape[3]]
                '''
                mask = imresize_to_shape(mask, [real.shape[2],real.shape[3]], opt)
                mask = mask[:, :, :real.shape[2], :real.shape[3]]
                ref = imresize_to_shape(ref, [real.shape[2],real.shape[3]], opt)
                ref = ref[:, :, :real.shape[2], :real.shape[3]]
            mask = functions.dilate_mask(mask, opt)
            # Downscale the reference to the injection scale, then generate
            # from that scale upward through the pyramid.
            N = len(reals) - 1
            n = opt.editing_start_scale
            in_s = imresize(ref, pow(opt.scale_factor, (N - n + 1)), opt)
            in_s = in_s[:, :, :reals[n - 1].shape[2], :reals[n - 1].shape[3]]
            in_s = imresize(in_s, 1 / opt.scale_factor, opt)
            in_s = in_s[:, :, :reals[n].shape[2], :reals[n].shape[3]]
            out = SinGAN_generate(Gs[n:], Zs[n:], reals, NoiseAmp[n:], opt, in_s, n=n, num_samples=1)
            plt.imsave(os.path.join(dir2save,'start_scale={}.png'.format(opt.editing_start_scale)), functions.convert_image_np(out.detach()), vmin=0, vmax=1)
            # Keep the original pixels outside the (dilated) mask.
            out = (1-mask)*real+mask*out
            plt.imsave(os.path.join(dir2save,'start_scale={}_masked.png'.format(opt.editing_start_scale)), functions.convert_image_np(out.detach()), vmin=0, vmax=1)
|
[
"[email protected]"
] | |
f9dce4240433ce38d9194cea922badddef39792a
|
391bca5157a1de3f94cb4dfbd7d3cd83665d0be5
|
/audioDatasetHdf.py
|
b6ee1017bdaa7b7193040ec9c2d6cbd5a98d442b
|
[] |
no_license
|
yogurtss/dual_path
|
69470ff1ebba63d32c95b988d51e8f73bd21b702
|
479bba58326058af6e41018b2e0037319fe7ee8c
|
refs/heads/master
| 2021-05-19T14:10:28.672095 | 2020-03-31T21:58:44 | 2020-03-31T21:58:44 | 251,749,967 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,711 |
py
|
import musdb
import soundfile
import os
import librosa as lib
from tqdm import tqdm
import numpy as np
import torch
from sortedcontainers import SortedList
import h5py
import torch.nn as nn
from dataset.util import *
def getMUSDB(database_path):
    """Export MUSDB stems to WAV files and split tracks into train/val/test.

    For every track in both MUSDB subsets, writes the four stems, an
    accompaniment mix (all stems except vocals) and the full mixture as
    16-bit PCM WAVs, then splits the "train" subset 75/remainder into
    train/val with a fixed seed.

    :param database_path: root directory of the (non-wav) MUSDB dataset
    :return: dict with keys 'train', 'val', 'test', each a list of dicts
        mapping stem name to the written WAV path
    """
    # Load the dataset (stems decoded on access, is_wav=False).
    mus = musdb.DB(root=database_path, is_wav=False)
    subsets = list()
    for subset in ["train", "test"]:
        tracks = mus.load_mus_tracks(subset)
        samples = list()
        # Go through tracks
        for track in tracks:
            # Skip track if mixture is already written, assuming this track is done already
            # track_path = track.path[:-4]
            # NOTE(review): SAVE_PATH is not defined in this file -- presumably
            # it comes from `dataset.util` via the star import; confirm.
            track_path = SAVE_PATH + subset + '/' + track.name
            if not os.path.exists(track_path):
                os.mkdir(track_path)
            mix_path = track_path + "/mix.wav"
            acc_path = track_path + "/accompaniment.wav"
            if os.path.exists(mix_path):
                print("WARNING: Skipping track " + mix_path + " since it exists already")
                # Add paths and then skip
                paths = {"mix": mix_path, "accompaniment": acc_path}
                paths.update({key: track_path + "_" + key + ".wav" for key in ["bass", "drums", "other", "vocals"]})
                samples.append(paths)
                continue
            rate = track.rate
            # Go through each instrument
            paths = dict()
            stem_audio = dict()
            for stem in ["bass", "drums", "other", "vocals"]:
                path = track_path + '/' + stem + ".wav"
                audio = track.targets[stem].audio.T
                soundfile.write(path, audio, rate, "PCM_16")
                stem_audio[stem] = audio
                paths[stem] = path
            # Add other instruments to form accompaniment
            acc_audio = np.clip(sum([stem_audio[key] for key in list(stem_audio.keys()) if key != "vocals"]), -1.0, 1.0)
            soundfile.write(acc_path, acc_audio, rate, "PCM_16")
            paths["accompaniment"] = acc_path
            # Create mixture
            mix_audio = track.audio.T
            soundfile.write(mix_path, mix_audio, rate, "PCM_16")
            paths["mix"] = mix_path
            diff_signal = np.abs(mix_audio - acc_audio - stem_audio["vocals"])
            print("Maximum absolute deviation from source additivity constraint: " + str(
                np.max(diff_signal)))  # Check if acc+vocals=mix
            print("Mean absolute deviation from source additivity constraint: " + str(np.mean(diff_signal)))
            samples.append(paths)
        subsets.append(samples)
    train_val_list = subsets[0]
    test_list = subsets[1]
    # Fixed seed so the train/val split is reproducible across runs.
    np.random.seed(42)
    train_list = np.random.choice(train_val_list, 75, replace=False)
    val_list = [elem for elem in train_val_list if elem not in train_list]
    dataset = {'train': train_list,
               'val': val_list,
               'test': test_list}
    return dataset
class AudioDataset(nn.Module):
    """Windowed dataset over an HDF5 file of pre-processed songs.

    Each item is a fixed-length excerpt (``shapes['length']`` samples) cut
    from one song's "inputs" array (the mixture) and "targets" array (the
    stacked source stems), zero-padded where the excerpt runs past the end
    of the song.

    NOTE(review): subclassing ``nn.Module`` is unusual for a dataset --
    ``torch.utils.data.Dataset`` is the conventional base class; confirm
    nothing relies on Module behaviour here.
    """

    def __init__(self, partition, instruments, sr, channels, out_channels, random_hops, hdf_dir, shapes, audio_transform=None, in_memory=False):
        """
        :param partition: split name; selects "<partition>.hdf5" inside hdf_dir
        :param instruments: dict of instrument name -> bool; key order defines
            the channel layout of the "targets" array
        :param sr: sample rate (stored; not used inside this class)
        :param channels: audio channels per instrument in the "targets" array
        :param out_channels: number of output channels assembled in __getitem__
        :param random_hops: if True, draw a random window start per item;
            otherwise windows tile each song deterministically
        :param hdf_dir: directory containing the HDF5 partition files
        :param shapes: dict with at least 'length' (window size in samples)
        :param audio_transform: optional callable (audio, sources) ->
            (audio, sources), e.g. data augmentation
        :param in_memory: stored but never read -- the file is always opened
            with driver="core" (fully mapped into memory); TODO confirm intent
        """
        super(AudioDataset, self).__init__()
        self.hdf_dir = os.path.join(hdf_dir, partition + ".hdf5")
        self.random_hops = random_hops
        self.sr = sr
        self.channels = channels
        self.audio_transform = audio_transform
        self.in_memory = in_memory
        self.instruments = instruments
        self.shapes = shapes
        self.out_channels = out_channels

        print('Preparing {} dataset...'.format(partition))

        # Go through HDF and collect lengths of all audio files
        with h5py.File(self.hdf_dir, "r") as f:
            lengths = [f[str(song_idx)].attrs["target_length"] for song_idx in range(len(f))]

        # Subtract input_size from lengths and divide by hop size to determine number of starting positions
        # (each song contributes target_length // window_length + 1 items).
        lengths = [(l // self.shapes['length']) + 1 for l in lengths]

        # Cumulative item counts: start_pos[i] is the first global item index
        # belonging to song i+1, so bisecting maps a global index to a song.
        self.start_pos = SortedList(np.cumsum(lengths))
        self.length = self.start_pos[-1]
        # driver="core" loads the whole HDF5 file into memory on open.
        self.dataset = h5py.File(self.hdf_dir, 'r', driver="core")

    def __len__(self):
        # Total number of windows across all songs.
        return self.length

    def __getitem__(self, idx):
        """Return (audio, targets) float tensors for global window ``idx``."""
        # Find out which slice of targets we want to read
        audio_idx = self.start_pos.bisect_right(idx)
        if audio_idx > 0:
            # Convert the global index into a window offset within this song.
            idx = idx - self.start_pos[audio_idx - 1]

        # Check length of audio signal
        audio_length = self.dataset[str(audio_idx)].attrs["length"]
        target_length = self.dataset[str(audio_idx)].attrs["target_length"]

        # Determine position where to start targets
        if self.random_hops:
            start_target_pos = np.random.randint(0, max(target_length - self.shapes['length'] + 1, 1))
        else:
            # Map item index to sample position within song
            start_target_pos = idx * self.shapes['length']

        start_pos = start_target_pos
        if start_pos < 0:
            # Pad manually since audio signal was too short
            # NOTE(review): start_target_pos cannot be negative as computed
            # above, so this branch looks unreachable as written -- confirm.
            pad_front = abs(start_pos)
            start_pos = 0
        else:
            pad_front = 0

        end_pos = start_target_pos + self.shapes['length']
        if end_pos > audio_length:
            # Pad manually since audio signal was too short
            pad_back = end_pos - audio_length
            end_pos = audio_length
        else:
            pad_back = 0

        # Read and return
        audio = self.dataset[str(audio_idx)]["inputs"][:, start_pos:end_pos].astype(np.float32)
        if pad_front > 0 or pad_back > 0:
            audio = np.pad(audio, [(0, 0), (pad_front, pad_back)], mode="constant", constant_values=0.0)

        targets = self.dataset[str(audio_idx)]["targets"][:, start_pos:end_pos].astype(np.float32)
        if pad_front > 0 or pad_back > 0:
            targets = np.pad(targets, [(0, 0), (pad_front, pad_back)], mode="constant", constant_values=0.0)

        # Split the stacked target array into one entry per instrument
        # (each instrument owns `channels` consecutive rows).
        sources = {}
        for id, inst in enumerate(self.instruments.keys()):
            sources[inst] = targets[id * self.channels:(id + 1) * self.channels]
        del targets

        if hasattr(self, "audio_transform") and self.audio_transform is not None:
            # Optional augmentation hook (hasattr check is always true here,
            # since the attribute is set in __init__).
            audio, sources = self.audio_transform(audio, sources)

        # Reassemble the requested output channels from the source dict.
        idx_temp = 0
        targets = np.zeros([self.out_channels, self.shapes['length']], dtype=np.float32)
        if self.out_channels == 1:
            # Single-output mode: the target is the accompaniment mix.
            targets = sources['accompaniment']
        else:
            for k in sources.keys():
                # 'other' is skipped when stacking multi-channel targets.
                if k == 'other':
                    continue
                targets[idx_temp] = sources[k]
                idx_temp += 1
        return torch.tensor(audio).squeeze(), torch.tensor(targets)
if __name__ == '__main__':
    # Smoke test: build a training AudioDataset and fetch two items.
    partition = 'train'
    # Stems to use as targets.
    INSTRUMENTS = {"bass": True,
                   "drums": True,
                   "other": True,
                   "vocals": True,
                   "accompaniment": True}
    # Frame/window geometry in samples.
    # NOTE(review): AudioDataset reads shapes['length'], which is absent
    # from this dict -- as written this block raises KeyError; confirm
    # against the intended shapes plan.
    shapes = {'start_frame': 6140,
              'end_frame': 51201,
              'output_len': 45061,
              'input_len': 57341}
    sr = 22050
    channels = 1
    # Augmentation: random gain in [0.7, 1.0]; crop_func is built but unused.
    augment_func = lambda mix, targets: random_amplify(mix, targets, 0.7, 1.0, shapes)
    crop_func = lambda mix, targets: crop(mix, targets, shapes)
    dataset = AudioDataset(partition, INSTRUMENTS, sr, channels, 2, True, hdf_dir='../H5/', shapes=shapes, audio_transform=augment_func)
    dataset[0]
    dataset[1]
    print('test')
|
[
"[email protected]"
] | |
84a7a06840ab94d52c126cf03c787d21cc39ba7c
|
66b332e1bc377db177f388a9adf04082113bc7a8
|
/k-means/Kmeans.py
|
f4cd72c11bf9e6f3174767ece3abfe780454d0f7
|
[
"MIT"
] |
permissive
|
JamieMa/ML_APIs
|
68d56263197c7be377f11996a63eaca4376feacd
|
364912bff7e33056de7b8d735ce4d8ab3ed53f81
|
refs/heads/master
| 2021-08-31T00:22:59.409764 | 2017-12-20T00:25:55 | 2017-12-20T00:25:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,089 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 21:30:38 2017
@author: mjq
"""
import numpy as np
import pandas as pd
from sklearn import datasets
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import distance
from numpy import linalg as LA
def kMeans(X,n_clusters=8,max_iter=300,minWCSS=10000):
    '''
    Lloyd's k-means: assign each record to its nearest centroid, then
    recompute centroids as cluster means, until assignments stop changing
    or max_iter is reached.

    :type X: 2D np array,each row is a record
         n_clusters: positive int
         max_iter: positive int
         minWCSS:positive int (NOTE(review): accepted but never used)
    :rtype: dictionary with keys cluster_centers_, labels_, WCSS_ and
            max_iter_; returns None when n_clusters exceeds len(X)
    '''
    if n_clusters>len(X):
        print "Number of cluster exceeds number of input records"
        print "Please select K again."
        return

    def WCSS(X,centroids,cluster_result):
        # Within-cluster sum of euclidean distances to each centroid.
        sum_WCSS=0
        for k in range(len(centroids)):
            WCSS_cluster = 0
            II = (cluster_result==k)    # boolean mask of cluster k's members
            for j in range(np.sum(II)):
                WCSS_cluster+=distance.euclidean(X[II][j],centroids[k])
            sum_WCSS+=WCSS_cluster
        return sum_WCSS

    #randomly select initial centroids (distinct rows of X)
    idx = np.random.choice([i for i in range(len(X))],size=n_clusters,replace=False)
    centroids = X[idx,:]
    # NOTE(review): if X has an integer dtype, the centroid update below
    # truncates the cluster means when written back -- confirm X is float.
    cluster_result = np.zeros(len(X))
    pre_cluster_result=None
    i=0
    while i<=max_iter:
        #calculate distance: assign every record to its nearest centroid
        for j in range(len(X)):
            min_distance = distance.euclidean(X[j],centroids[0])
            num_cluster = 0
            for k in range(1,n_clusters):
                cur_distance=distance.euclidean(X[j],centroids[k])
                if cur_distance<min_distance:
                    min_distance=cur_distance
                    num_cluster=k
            cluster_result[j]=num_cluster
        #check if assignment no longer change
        # NOTE(review): the two prints below look like debug leftovers.
        print np.sum(pre_cluster_result==cluster_result)
        print np.all(pre_cluster_result==cluster_result)
        if pre_cluster_result is not None and np.all(pre_cluster_result==cluster_result):
            break
        #update centroids as the mean of each cluster's members
        for k in range(n_clusters):
            II = (cluster_result==k)
            centroids[k]= np.mean(X[II],axis=0)
        #deep copy cluster_result to pre_cluster_result
        pre_cluster_result = np.copy(cluster_result)
        i+=1
        cur_WCSS=WCSS(X,centroids,cluster_result)
        print "The %d's iterative with WCSS: %f "%(i,cur_WCSS)
    final_WCSS=WCSS(X,centroids,cluster_result)
    kmeans_result={"cluster_centers_":centroids,
                   "labels_":cluster_result,
                   "WCSS_":final_WCSS,
                   "max_iter_":i}
    return kmeans_result
if __name__ == '__main__':
    # Run 2-means on the SCLC study matrix.
    in_file_name = "SCLC_study_output_filtered.csv"
    dataIn = pd.read_csv(in_file_name)
    # Drop the first column -- presumably a non-feature identifier column;
    # confirm against the CSV layout.
    X = dataIn.drop(dataIn.columns[0], axis=1)
    k=2
    myKmeansResults = kMeans(X.values,n_clusters=k)
    labels=myKmeansResults['labels_']
|
[
"[email protected]"
] | |
80ba85293b15a3a17058013b70da20e139f12445
|
bd69b2c0737b25344f40d34870ebe59a70dde19f
|
/exploratory.py
|
a224cefb2e6de87f3001f0e667dff62a0b75eb39
|
[] |
no_license
|
shalisap/thesis-code
|
0e15a9014cbfdc61d40ce39c87422e507ca3f491
|
049aa1b469f6ccf02d7c6b2f3c7724d17656b7cc
|
refs/heads/master
| 2021-05-28T23:15:37.173771 | 2015-04-30T16:41:26 | 2015-04-30T16:41:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,086 |
py
|
"""
Create visualizations for time series data from parsed, windowed log files.
Supports horizon plots and color plots for random samples, histogram of
distribution characteristics, and single series time plots.
Syntax: python exploratory.py -summarize logfile.pickle
python exploratory.py -horizon logfile.pickle rand_seed
python exploratory.py -colorplots logfile.pickle rand_seed
python exploratory.py -timeplot logfile.pickle ts_ident
rand_seed determines which 1000 series random sample is taken from the record set.
ts_ident is the identifier of the single series to view, in the
format 'circ_id,ip_slug'.
@author: Julian Applebaum
@author: Shalisa Pattarawuttiwong
Last Edited: 08/04/14
-modified do_timeplot to plot multiple line graphs
"""
from scipy.stats import skew, pearsonr
from numpy import mean, std, median, linspace, correlate
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
from matplotlib.patches import Rectangle
from sklearn.cluster import k_means
from pprint import pprint
from math import sqrt
from sequence_utils import trim_inactive_preprocess, flatten
import matplotlib.pyplot as plt
from matplotlib import cm
import pylab
import sys, cPickle, subprocess, random
from glob import glob
N_HIST_BINS = 100
N_CLUSTERS = 6
VMAX = 700
MAX_OBS = 1500
artist_ident_map = {}
def draw_sample(data, n=1000):
    """
    Draw a random sample from a list.

    The input list is shuffled in place (side effect on the caller's
    list); the first n elements of the shuffled list form the sample.
    If the list holds fewer than n elements, all of them are returned.

    @param data: the list (shuffled in place)
    @param n: the size of the sample
    @return: a size n random sample of data
    """
    random.shuffle(data)
    sampled = data[:n]
    return sampled
def on_pick(event):
    """
    Pull up the time plot for a series when the user clicks on it,
    by spawning a second exploratory.py process in -timeplot mode.

    NOTE(review): two likely bugs -- (1) `filepath` is never defined in
    this module (the __main__ block stores the path in `inpath`), so this
    handler raises NameError when fired; (2) the plotting functions
    populate artist_ident_map as ident -> artist, yet it is indexed here
    by artist.  Confirm both before relying on picking.

    @param event: The picking event fired by the click
    """
    ident = artist_ident_map[event.artist]
    print "Loading time series for circuit %s" % ident
    bash_call = "python exploratory.py -timeplot %s 1000 %i,%i" % (filepath,
        ident[0], ident[1])
    subprocess.call(bash_call, shell=True)
def autocorrelate(series, lag=1):
    """
    Perform Pearson autocorrelation on a time series.

    The series is correlated against a copy of itself shifted by ``lag``
    positions; the final element of the series is excluded from the
    overlap (mirroring the original slicing).

    @param series: the time series
    @param lag: the lag
    @return: the autocorrelation coefficient
    """
    end = len(series) - 1
    lagged = series[lag:end]
    base = series[:end - lag]
    return pearsonr(lagged, base)[0]
def acorr_plot(series, ax):
    """
    Draw a correlogram for a series over every possible lag value.

    @param series: the time series
    @param ax: the axes object to draw the bar plot on
    """
    corrs = [autocorrelate(series, lag) for lag in xrange(1, len(series)-1)]
    plt.title("Correlogram")
    plt.xlabel("Lag")
    plt.ylabel("Pearson Correlation")
    ax.bar(range(1, len(corrs)+1), corrs, width=1)
    plt.ylim([-1, 1])
def summarize(values, name):
    """
    Print out summary stats (mean, min, max, std dev) for a list of
    values, framed by an asterisk border sized to fit the title line.

    @param values: The values to summarize
    @param name: The name to display in the printed text
    """
    border_len = len(name) + 8      # width of '*** <name> ***' plus padding
    print "*" * border_len
    print "***", name, "***"
    print "Mean:", mean(values)
    print "Min:", min(values)
    print "Max:" , max(values)
    print "Std Dev:", std(values)
    print "*" * border_len, "\n"
def discretize(relay_series, k=N_CLUSTERS):
    """
    Cluster the observations in relay_series into k bins, and replace each
    observation with its cluster label.

    Fix: the original body referenced the undefined names ``k`` and
    ``labels`` (the k-means call had evidently been deleted), so it raised
    NameError on every call.  The clustering step is restored here using
    the module's ``k_means`` import, and ``k`` is exposed as a
    backward-compatible keyword parameter defaulting to N_CLUSTERS.

    @param relay_series: the list of series to discretize (modified in place)
    @param k: the number of clusters to create
    @return: (relay_series, cluster_ranges). relay_series is the list of
        discretized series. cluster_ranges is a list of cluster mins and
        maxs s.t. cluster_ranges[i] = (min(cluster_i), max(cluster_i))
    """
    cluster_maxes = [0 for _ in xrange(0, k)]
    cluster_mins = [float("inf") for _ in xrange(0, k)]
    # Pool every observation from every series and vectorize for k-means.
    all_window_counts = reduce(list.__add__, relay_series, [])
    vectorized = [[o] for o in all_window_counts]
    # Cluster the pooled observations; labels[i] is the bin of observation i.
    centroids, labels, inertia = k_means(vectorized, k)
    idx = 0
    for series in relay_series:
        for i in xrange(0, len(series)):
            label = labels[idx]
            cluster_maxes[label] = max(cluster_maxes[label], series[i])
            cluster_mins[label] = min(cluster_mins[label], series[i])
            series[i] = label
            idx += 1
    cluster_ranges = zip(sorted(cluster_mins), sorted(cluster_maxes))
    return (relay_series, cluster_ranges)
def do_summarize(records):
    """
    Display summary histograms for the series in records.

    For every circuit, aggregates per-window cell-count statistics
    (mean/median/min/max/std plus the raw per-window counts) separately
    for the inbound (r[0]) and outbound (r[1]) directions, plus circuit
    length in seconds and percent of time active.  Prints max summaries
    and draws a 4x2 grid of log-scaled histograms.

    @param records: the circuit records
    """
    circ_len_aggr = []
    in_mean_cells_per_window_aggr = []
    in_min_cells_per_window_aggr = []
    in_max_cells_per_window_aggr = []
    in_median_cells_per_window_aggr = []
    in_stddev_cells_per_window_aggr = []
    in_inst_counts_aggr = []
    #unique_vals_aggr = []
    percent_active_aggr = []
    time_active_aggr = []
    out_mean_cells_per_window_aggr = []
    out_min_cells_per_window_aggr = []
    out_max_cells_per_window_aggr = []
    out_median_cells_per_window_aggr = []
    out_stddev_cells_per_window_aggr = []
    out_inst_counts_aggr = []
    for record in records:
        # record['relays'] is a sequence of (inbound, outbound) count pairs.
        relays = record['relays']
        in_relays = [r[0] for r in relays]
        out_relays = [r[1] for r in relays]
        # Circuit lifetime: destroy/create timestamps are in milliseconds.
        circ_len_aggr.append((record['destroy'] - record['create'])/1000.0)
        in_mean_cells_per_window_aggr.append(1.0*sum(in_relays)/len(in_relays))
        out_mean_cells_per_window_aggr.append(1.0*sum(out_relays)/len(out_relays))
        in_median_cells_per_window_aggr.append(median(in_relays))
        out_median_cells_per_window_aggr.append(median(out_relays))
        in_min_cells_per_window_aggr.append(min(in_relays))
        out_min_cells_per_window_aggr.append(min(out_relays))
        in_max_cells_per_window_aggr.append(max(in_relays))
        out_max_cells_per_window_aggr.append(max(out_relays))
        in_stddev_cells_per_window_aggr.append(std(in_relays))
        out_stddev_cells_per_window_aggr.append(std(out_relays))
        # Raw per-window counts pooled across all circuits.
        in_inst_counts_aggr += in_relays
        out_inst_counts_aggr += out_relays
        # unique_vals_aggr.append(len(set(filter(lambda o: o > 2, relays))))
        time_active = len(trim_inactive_preprocess(relays))
        percent_active_aggr.append(100.0*time_active/len(relays))
        # time_active_aggr.append(time_active)
    fig = plt.figure()
    # Console summaries for the max statistics only.
    summarize(in_max_cells_per_window_aggr, "Max IN")
    summarize(out_max_cells_per_window_aggr, "Max OUT")
    meansplot = fig.add_subplot(421)
    plt.title("Mean Cells/Window")
    plt.xlabel("Mean Cells/Window")
    plt.ylabel("Frequency")
    plt.yscale('log')
    meansplot.hist(in_mean_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
    meansplot.hist(out_mean_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
    cellsplot = fig.add_subplot(422)
    plt.title("Median Cells/Window")
    plt.xlabel("Median Cells/Window")
    plt.ylabel("Frequency")
    plt.yscale('log')
    cellsplot.hist(in_median_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
    cellsplot.hist(out_median_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
    minsplot = fig.add_subplot(423)
    plt.title("Min Cells/Window")
    plt.xlabel("Min Cells/Window")
    plt.ylabel("Frequency")
    plt.yscale('log')
    minsplot.hist(in_min_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
    minsplot.hist(out_min_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
    maxsplot = fig.add_subplot(424)
    plt.title("Max Cells/Window")
    plt.xlabel("Max Cells/Window")
    plt.ylabel("Frequency")
    plt.yscale('log')
    maxsplot.hist(in_max_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label="in")
    maxsplot.hist(out_max_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label="out")
    stddevsplot = fig.add_subplot(425)
    plt.title("Std Dev. of Cells/Window")
    plt.xlabel("Std Dev. of Cells/Window")
    plt.ylabel("Frequency")
    plt.yscale('log')
    stddevsplot.hist(in_stddev_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
    stddevsplot.hist(out_stddev_cells_per_window_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
    cellsplot = fig.add_subplot(426)
    plt.title("Single Window Cell Count")
    plt.xlabel("Single Window Cell Count")
    plt.ylabel("Frequency")
    plt.yscale('log')
    cellsplot.hist(in_inst_counts_aggr, bins=N_HIST_BINS, alpha=0.5, label='in')
    cellsplot.hist(out_inst_counts_aggr, bins=N_HIST_BINS, alpha=0.5, label='out')
    lenplot = fig.add_subplot(427)
    plt.title("Circuit Length (seconds)")
    plt.xlabel("Circuit Length (seconds)")
    plt.ylabel("Frequency")
    plt.yscale('log')
    lenplot.hist(circ_len_aggr, bins=N_HIST_BINS)
    # uniqueplot = fig.add_subplot(338)
    # plt.title("Number of Unique Values > 1")
    # plt.xlabel("Number of Unique Values > 1")
    # plt.ylabel("Frequency")
    # uniqueplot.hist(unique_vals_aggr, bins=N_HIST_BINS)
    # timeactiveplot = fig.add_subplot(428)
    # plt.title("Percent of Time in Active State")
    # plt.xlabel("Percent of Time")
    # plt.ylabel("Frequency")
    # timeactiveplot.hist(percent_active_aggr, bins=N_HIST_BINS)
    fig.tight_layout()
def do_horizon(records, window_size, ylim=None):
    """
    Display a horizon plot for a size 1000 random sample of records,
    with an inbound panel (top) and an outbound panel (bottom).  Each
    circuit's series is drawn as a translucent filled area; artists are
    made pickable and registered in artist_ident_map.

    @param records: the circuit records
    @param window_size: the size of the cell count windows
    @param ylim: optional upper y-axis limit applied after layout
    """
    sample = draw_sample(records)
    fig = plt.figure()
    fig.canvas.mpl_connect('pick_event', on_pick)
    ax = fig.add_subplot(2,2,1)
    plt.title("Inbound Horizon Plot (n=%i)" % len(sample))
    plt.xlabel("Window # (%i ms windows)" % window_size)
    plt.ylabel("Ingoing Relay Cells/Window")
    plt.grid(True)
    for record in sample:
        s = record['relays']
        series = [i[0] for i in s]      # inbound counts
        # use fill_between to avoid some rendering bugs
        artist = ax.fill_between(range(0, len(series)), series, [0]*len(series),
            alpha=.2, color='black', edgecolor='none', picker=True)
        artist_ident_map[record['ident']] = artist
    ay = fig.add_subplot(2,2,3)
    plt.title("Outbound Horizon Plot (n=%i)" % len(sample))
    plt.xlabel("Window # (%i ms windows)" % window_size)
    plt.ylabel("Outgoing Relay Cells/Window")
    plt.grid(True)
    for record in sample:
        s = record['relays']
        series = [i[1] for i in s]      # outbound counts
        # use fill_between to avoid some rendering bugs
        artist = ay.fill_between(range(0, len(series)), series, [0]*len(series),
            alpha=.2, color='black', edgecolor='none', picker=True)
        artist_ident_map[record['ident']] = artist
    fig.tight_layout()
    if ylim is not None:
        pylab.ylim([0, ylim])
def do_timeplot(records, window_size, ts_ident_list):
    """
    Display line plots (inbound and outbound counts) for multiple time
    series, one subplot per identifier, all sharing the first subplot's
    axis scales.

    @param records: the list of circuits records containing the series
    @param window_size: the size of the cell count windows
    @param ts_ident_list: list of identifier strings shaped like
        "(rstr,circ_id),ip_slug"; parentheses are stripped before matching
        against record['ident'] == ((rstr, circ_id), ip_slug)
    """
    subplot_size = 421      # 4x2 grid; incremented for each plotted series
    fig = plt.figure()
    # have to do this once first to be able to scale the subplots to the same scale
    rstr, cstr, ipstr = ts_ident_list[0].split(",")
    rstr = rstr.replace("(", "")
    cstr = cstr.replace(")", "")
    fig.canvas.set_window_title("%s-%i-%i-%i" % (rstr, int(cstr),
        int(ipstr), window_size))
    timeplot = fig.add_subplot(subplot_size)
    for record in records:
        if record['ident'] == ((rstr, int(cstr)), int(ipstr)):
            s = record['relays']
            in_series = [i[0] for i in s]
            out_series = [i[1] for i in s]
            plt.plot(in_series)
            plt.plot(out_series)
    subplot_size += 1
    # Remaining series get their own subplots sharing the first one's scale.
    for ident in ts_ident_list[1:]:
        rstr, cstr, ipstr = ident.split(",")
        rstr = rstr.replace("(","")
        cstr = cstr.replace(")","")
        # NOTE(review): the window title is overwritten on every iteration,
        # so only the last identifier survives -- confirm intent.
        fig.canvas.set_window_title("%s-%i-%i-%i" % (rstr, int(cstr), int(ipstr), window_size))
        timeplot1 = fig.add_subplot(subplot_size, sharex=timeplot, sharey=timeplot)
        # acorrplot = fig.add_subplot(122)
        for record in records:
            if record['ident'] == ((rstr, int(cstr)), int(ipstr)):
                plt.xlabel("Window # (%i ms windows)" % window_size)
                plt.ylabel("Ingoing Relay Cell Count")
                s = record['relays']
                in_series = [i[0] for i in s]
                # line graphs
                plt.plot(in_series)
                plt.xlabel("Window # (%i ms windows)" % window_size)
                plt.ylabel("Outgoing Relay Cell Count")
                out_series = [i[1] for i in s]
                # line graphs
                plt.plot(out_series)
                # timeplot.fill_between(range(0, len(series)), series, [0]*len(series),
                #   color='grey')
                # acorr_plot(series, acorrplot)
        subplot_size += 1
    # Shared figure-level axis labels.
    fig.text(0.5, 0.04, 'Window # (%i ms windows)'% window_size, ha='center', va='center')
    fig.text(0.06, 0.5, 'Outgoing Relay Cell Count', ha='center', va='center', rotation='vertical')
# outbound only
def do_colorplot(records, window_size, ax=None, ay=None, no_chrome=False,
        sample_size=1000):
    """
    Display a luminance color plot (outbound direction only) for a random
    sample of records, one horizontal strip of grayscale squares per
    circuit, sorted by series length then by combined mean activity.

    NOTE(review): `fig` is only defined when ax is None, yet the final
    colorbar call uses it -- passing ax with no_chrome=False raises
    NameError.  Also, the locally built `cmap` and `vmax` are unused:
    the scatter hardcodes cmap=cm.gray and vmax=40.  Confirm both.

    @param records: the circuit records
    @param window_size: the size of the cell count windows
    @param ax: optional axes to draw on (a fresh pickable figure is made
        when None)
    @param ay: unused in this variant
    @param no_chrome: suppress titles/labels/colorbar when True
    @param sample_size: number of records to sample
    """
    def rec_cmp(rec_1, rec_2):
        # Order by series length first, then by in+out mean activity.
        relays_1, relays_2 = rec_1['relays'], rec_2['relays']
        m_r1, m_r2= ((mean([i[0] for i in relays_1]) +
            mean([i[1] for i in relays_1])),
            (mean([i[0] for i in relays_2]) +
            mean([i[1] for i in relays_2])))
        if len(relays_1) == len(relays_2):
            if m_r1 == m_r2: return 0
            elif m_r1 > m_r2: return 1
            else: return -1
        elif len(relays_1) > len(relays_2):
            return 1
        else:
            return -1
    sample = draw_sample(records, sample_size)
    sample.sort(cmp=rec_cmp)
    N_CLUSTERS = 6
    colors =[(1.0*i/N_CLUSTERS,)*3 for i in xrange(1, N_CLUSTERS+1)]
    cmap = ListedColormap(colors)
    relay_series = [record['relays'] for record in sample]
    # Keep only the outbound (second) element of each window pair.
    out_relay_series = []
    for r in relay_series:
        newTupOut = []
        for tup in r:
            newTupOut.append(tup[1])
        out_relay_series.append(newTupOut)
    vmin = 0
    vmax = VMAX
    if ax is None:
        fig = plt.figure()
        fig.canvas.mpl_connect('pick_event', on_pick)
        ax = fig.add_subplot(111)
    ax.get_yaxis().set_ticks([])
    if not no_chrome:
        plt.title("Outbound Luminance Plot (n=%i)" % len(sample))
        plt.xlabel("Window # (%i ms windows)" % window_size)
        plt.ylabel("Client")
        # legend_rects = [Rectangle((0, 0), 1, 1, fc=c) for c in reversed(colors)]
        # legend_labels = ["%i-%i cells" % c for c in reversed(cluster_ranges)]
        # plt.legend(legend_rects, legend_labels, loc=4)
    # One scatter row per circuit, two y-units apart.
    n = 0
    for i in xrange(0, len(sample)):
        series = out_relay_series[i]
        ident = sample[i]['ident']
        artist = ax.scatter(range(0, len(series)), [n]*len(series),
            c=series, marker="s", edgecolors='none', vmin=vmin, vmax=40,
            cmap=cm.gray, picker=True)
        n += 2
        artist_ident_map[ident] = artist
    if not no_chrome:
        fig.colorbar(artist)
# inbound and outbound
def do_colorplot_both(records, window_size, ax=None, ay=None, no_chrome=False,
        sample_size=1000):
    """
    Display inbound (top) and outbound (bottom) luminance color plots for
    a random sample of records, one grayscale strip per circuit, sorted
    by series length then by combined mean activity.

    NOTE(review): `fig` is only defined when ax is None, yet it is used
    unconditionally later (subplot for ay, tight_layout, colorbar) --
    calling this with a pre-built ax raises NameError.  Confirm.

    @param records: the circuit records
    @param window_size: the size of the cell count windows
    @param ax: optional axes for the inbound panel
    @param ay: optional axes for the outbound panel
    @param no_chrome: suppress titles/labels/colorbar when True
    @param sample_size: number of records to sample
    """
    def rec_cmp(rec_1, rec_2):
        # Order by series length first, then by in+out mean activity.
        relays_1, relays_2 = rec_1['relays'], rec_2['relays']
        m_r1, m_r2= ((mean([i[0] for i in relays_1]) +
            mean([i[1] for i in relays_1])),
            (mean([i[0] for i in relays_2]) +
            mean([i[1] for i in relays_2])))
        if len(relays_1) == len(relays_2):
            if m_r1 == m_r2: return 0
            elif m_r1 > m_r2: return 1
            else: return -1
        elif len(relays_1) > len(relays_2):
            return 1
        else:
            return -1
    sample = draw_sample(records, sample_size)
    sample.sort(cmp=rec_cmp)
    N_CLUSTERS = 6
    colors =[(1.0*i/N_CLUSTERS,)*3 for i in xrange(1, N_CLUSTERS+1)]
    cmap = ListedColormap(colors)
    relay_series = [record['relays'] for record in sample]
    # Split the (inbound, outbound) pairs into two parallel series lists.
    in_relay_series = []
    out_relay_series = []
    for r in relay_series:
        newTupIn = []
        newTupOut = []
        for tup in r:
            newTupIn.append(tup[0])
            newTupOut.append(tup[1])
        in_relay_series.append(newTupIn)
        out_relay_series.append(newTupOut)
    vmin = 0
    vmax = VMAX
    if ax is None:
        fig = plt.figure()
        fig.canvas.mpl_connect('pick_event', on_pick)
        ax = fig.add_subplot(221)
    ax.get_yaxis().set_ticks([])
    if not no_chrome:
        plt.title("Inbound Luminance Plot (n=%i)" % len(sample))
        plt.xlabel("Window # (%i ms windows)" % window_size)
        plt.ylabel("Client")
        # legend_rects = [Rectangle((0, 0), 1, 1, fc=c) for c in reversed(colors)]
        # legend_labels = ["%i-%i cells" % c for c in reversed(cluster_ranges)]
        # plt.legend(legend_rects, legend_labels, loc=4)
    # Inbound panel: one scatter row per circuit, two y-units apart.
    n = 0
    for i in xrange(0, len(sample)):
        series = in_relay_series[i]
        ident = sample[i]['ident']
        artist = ax.scatter(range(0, len(series)), [n]*len(series),
            c=series, marker="s", edgecolors='none', vmin=vmin, vmax=vmax,
            cmap=cm.gray, picker=True)
        n += 2
        artist_ident_map[ident] = artist
    if ay is None:
        ay = fig.add_subplot(223)
    ay.get_yaxis().set_ticks([])
    if not no_chrome:
        plt.title("Outbound Luminance Plot (n=%i)" % len(sample))
        plt.xlabel("Window # (%i ms windows)" % window_size)
        plt.ylabel("Client")
        # legend_rects = [Rectangle((0, 0), 1, 1, fc=c) for c in reversed(colors)]
        # legend_labels = ["%i-%i cells" % c for c in reversed(cluster_ranges)]
        # plt.legend(legend_rects, legend_labels, loc=4)
    # Outbound panel, same layout.
    n = 0
    for i in xrange(0, len(sample)):
        series = out_relay_series[i]
        ident = sample[i]['ident']
        artist = ay.scatter(range(0, len(series)), [n]*len(series),
            c=series, marker="s", edgecolors='none', vmin=vmin, vmax=vmax,
            cmap=cm.gray, picker=True)
        n += 2
        artist_ident_map[ident] = artist
    fig.tight_layout()
    if not no_chrome:
        # Shared colorbar in a manually placed axes between the panels.
        fig.subplots_adjust(right=1.0)
        cbar_ax = fig.add_axes([0.55, 0.15, 0.025, 0.7])
        fig.colorbar(artist, cax=cbar_ax)
if __name__ == "__main__":
graphing_mode = sys.argv[1]
inpath = sys.argv[2]
if len(sys.argv) > 4: seed = int(sys.argv[3])
else: seed = 0
print "Random seed =", seed
random.seed(seed)
ts_ident_list = []
with open(inpath) as data_file:
print "Reading data..."
data = cPickle.load(data_file)
window_size, records = data['window_size'], data['records']
if graphing_mode == "-timeplot":
for arg in range(len(sys.argv)):
if arg >= 5:
ts_ident_list.append(sys.argv[arg])
elif graphing_mode == "-agg-colorplots":
k_val = int(sys.argv[4])
records = filter(lambda r: r['ident'] == (k_val, k_val), records)
print "%i series" % len(records)
print "Graphing..."
if graphing_mode == '-summarize':
do_summarize(records)
elif graphing_mode == '-horizon':
do_horizon(records, window_size)
elif graphing_mode == '-timeplot':
do_timeplot(records, window_size, ts_ident_list)
elif graphing_mode == '-colorplots':
do_colorplot(records, window_size)
elif graphing_mode == '-agg-colorplots':
do_colorplot(records, window_size)
do_horizon(records, window_size, MAX_OBS)
else:
print "ERROR: Invalid graphing mode selected"
plt.show()
|
[
"[email protected]"
] | |
dd8e7febbbd200e6c38975d76ce0e0646f3d7a4f
|
d5ef8f9ba3fc9a3af95c9c480eca3453606ed2d2
|
/isup_parameters/RedirectingNumberFlag.py
|
cde06ec3cad46959bfcbcc871df73e9314bef8a2
|
[] |
no_license
|
sureshrasa/isupds
|
ab0b643dc64923b55b679e2a19b88b7b11ab1c6b
|
cc4bb976dfcdd30719766051485956dfb6da169a
|
refs/heads/master
| 2020-03-21T15:27:08.613505 | 2018-06-26T09:57:21 | 2018-06-26T09:57:21 | 138,713,217 | 0 | 0 | null | null | null | null |
WINDOWS-1252
|
Python
| false | false | 473 |
py
|
#
# Author(s): Suresh Rasakulasuriar
#
# Copyright: © Resilientplc.com Ltd. 2018 - All Rights Reserved
#
from enum import IntEnum, unique
@unique
class RedirectingNumberFlag(IntEnum):
    """Flag ordinals for the ISUP Redirecting Number parameter."""
    OddAddressSignals = 0
    # Sentinel for ordinals with no defined meaning.
    UNRECOGNISED = 0xFFFF
class RedirectingNumberFlagDict:
    """Lookup table mapping raw ordinals to RedirectingNumberFlag members."""

    # Known ordinals; anything else passes through lookup() unchanged.
    _dict = {0: RedirectingNumberFlag.OddAddressSignals}

    @staticmethod
    def lookup(ordinal):
        """Return the flag for ``ordinal``, or ``ordinal`` itself if unknown."""
        known = RedirectingNumberFlagDict._dict
        return known.get(ordinal, ordinal)
|
[
"[email protected]"
] | |
a15aa9381f0639460207512eace0c0e66ea54b4b
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4327/codes/1602_2049.py
|
1ce019700e7801903c6df341e812f94f4b2cb946
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 302 |
py
|
# Read a dividend and a divisor from stdin, echo both, then print the
# integer quotient and the remainder, one value per line.
dividend = int(input("informe o dividendo: " ))
divisor = int(input("informe o divisor: " ))
print(dividend)
print(divisor)
print(dividend // divisor)
print(dividend % divisor)
|
[
"[email protected]"
] | |
0f02c8e716a5430048e7a6238850eacba01fa71a
|
f9c1c3523905e511d206187321b53ad39bbe5751
|
/import.py
|
9ddd2be3508219ec3a8d5a6575fac8ef8d1b9dcd
|
[] |
no_license
|
amrrsharaff/daisy-hacks
|
4f5acc0313a8740633a65ce6086649ddb720b35e
|
c69c5fb287c62b90981acdf8ae366efca14acb84
|
refs/heads/master
| 2021-05-09T02:23:34.629595 | 2018-02-05T23:11:08 | 2018-02-05T23:11:08 | 119,207,392 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,358 |
py
|
import tensorflow as tf
import csv
import pandas
# Metadata describing the text columns
COLUMNS = ['date_', 'store',
'department', 'item',
'unit_price','on_promotion', 'promotion_type', 'quantity']
FIELD_DEFAULTS = [[0.0], [0.0], [0.0], [0.0], [0], [0], [0], [0]]
def _parse_line(line):
    """Parse one CSV line tensor into (features, label).

    Decodes ``line`` with the per-column defaults in FIELD_DEFAULTS, zips
    the decoded fields with COLUMNS into a feature dict, and pops the
    'quantity' column out as the label.

    Fix: removed the leftover debug ``print(line)`` (it printed the raw
    line tensor on every call) and the dead commented-out print.

    @param line: scalar string tensor holding one CSV record
    @return: (features dict of column name -> tensor, label tensor)
    """
    # Decode the line into its fields
    fields = tf.decode_csv(line, FIELD_DEFAULTS)
    # Pack the result into a dictionary
    features = dict(zip(COLUMNS, fields))
    # Separate the label from the features
    label = features.pop('quantity')
    return features, label
def train_input_fn(features, labels, batch_size):
    """An input function for training.

    Builds a tf.data pipeline over (features, labels): shuffle with a
    1000-element buffer, repeat indefinitely, batch, and return the
    next-element op of a one-shot iterator.
    """
    # Convert the inputs to a Dataset.
    examples = (dict(features), labels)
    pipeline = tf.data.Dataset.from_tensor_slices(examples)
    # Shuffle, repeat, and batch the examples.
    pipeline = pipeline.shuffle(1000)
    pipeline = pipeline.repeat()
    pipeline = pipeline.batch(batch_size)
    # Build the Iterator, and return the read end of the pipeline.
    iterator = pipeline.make_one_shot_iterator()
    return iterator.get_next()
if __name__ == "__main__":
# All the inputs are numeric
dataframe = pandas.read_csv('/Users/ASharaf/Desktop/hackathon_data/trial.csv', header=0)
features = pandas.read_csv('/Users/ASharaf/Desktop/hackathon_data/trial.csv', header=0)
label = features.pop('quantity')
|
[
"[email protected]"
] | |
ddcda8626fa93c0e39ab42cbd537075f0c2726a5
|
0fe5a7ede2c3e8c4d47a85eebe959e2ab749568b
|
/kits19.py
|
2735abe5d5da8a3f71086197a6fd9c09999151e8
|
[] |
no_license
|
FlyGlider/Kits19
|
2ad9e1befb44f7e23f56c8b42b1b81d350361620
|
20efc3327c7a74431bb53fc3d58e453b791df49e
|
refs/heads/master
| 2020-06-25T23:03:51.926141 | 2019-07-29T12:35:42 | 2019-07-29T12:35:42 | 199,448,545 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,893 |
py
|
from utils.trainer import *
from utils.evaluator import Evaluator
# from utils.tester import Tester
plt.switch_backend('agg')
def get_args():
    """Parse command-line arguments for training/evaluation.

    Returns an argparse.Namespace with epoch, batchsize, lr, gpu_ids,
    val, resume, num_workers, pre_load, train_samples, val_samples and
    stage.

    Fix: ``--pre_load`` previously used ``type=bool``, which argparse
    resolves as ``bool(<nonempty string>) == True`` -- any supplied value
    (including "False") became True, as the original comment admitted.
    A string-to-bool converter is used instead, so '-p False' now really
    yields False, while the default remains False.
    """
    def str2bool(value):
        # Accept the usual truthy spellings; everything else maps to False.
        return str(value).lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--epoch', dest='epoch', default=100, type=int,
                        help='number of epochs')
    parser.add_argument('-b', '--batch-size', dest='batchsize', default=1,
                        type=int, help='batch size')
    parser.add_argument('-l', '--learning-rate', dest='lr', default=0.0003,
                        type=float, help='learning rate')
    parser.add_argument('-g', '--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
    parser.add_argument('-v', '--val', dest='val', default=0,
                        type=int, help='choose which validation')
    parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')  # checkpoints_2d/model_best_0_200.pth
    parser.add_argument('-w', '--num_workers', dest='num_workers', default=0,
                        type=int, help='how many subprocesses to use for data loading')
    parser.add_argument('-p', '--pre_load', dest='pre_load', default=False,
                        type=str2bool, help='whether to pre-load dataset')
    parser.add_argument('--ts', dest='train_samples', default=1000,
                        type=int, help='how many train sample in one epoch')
    parser.add_argument('--vs', dest='val_samples', default=100,
                        type=int, help='how many val sample in one epoch')
    parser.add_argument('-s', '--stage', dest='stage', default='train',
                        type=str, help='choose the best model in which stage')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # Paths for training data, model checkpoints and predictions.
    dir_h5_train = 'h5_data_train_2d/'
    dir_checkpoint = 'checkpoints/2d/'
    dir_prediction = 'predictions/2d/'
    create_dir(dir_checkpoint)
    create_dir(dir_prediction)
    # Pooling kernel sizes come from the pre-computed 2D dataset plan.
    dataset_props = load_pickle('dataset_props.pkl')
    pool_layer_kernel_sizes = dataset_props['plan_2d']['pool_layer_kernel_sizes']
    args = get_args()
    # Train with deep supervision enabled.
    model = ResUNet(in_ch=1, base_num_features=30, num_classes=3, norm_type='batch', nonlin_type='relu', pool_type='max',
                    pool_layer_kernel_sizes=pool_layer_kernel_sizes, deep_supervision=True, mode='2D')
    trainer = Trainer(model, dir_h5_train, dir_checkpoint, args)
    trainer.run()
    # Evaluate with a fresh model built without deep supervision.
    model = ResUNet(in_ch=1, base_num_features=30, num_classes=3, norm_type='batch', nonlin_type='relu', pool_type='max',
                    pool_layer_kernel_sizes=pool_layer_kernel_sizes, deep_supervision=False, mode='2D')
    evaluator = Evaluator(model, dir_h5_train, dir_checkpoint, dir_prediction, args)
    evaluator.run()
|
[
"[email protected]"
] | |
736d14a11bc8f5c100eaeef9314edb90e4aed66d
|
775a51872851cd8d60ecf7b6c5f2a46103bc0bbc
|
/Dict-Hashing.py
|
68b469e28de0edb396baf1b468ec90496094f53a
|
[] |
no_license
|
Prabhanda-Akiri/Data-Structures-Implementation
|
08e285c29f7ed6794f937cf79e6d8f3cc7b18b99
|
d184b01889cbe68ec9ed033a353867a12e7f6edd
|
refs/heads/master
| 2021-01-20T10:46:45.927412 | 2018-01-22T08:43:46 | 2018-01-22T08:43:46 | 101,647,843 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,494 |
py
|
class LinkedList:
    """Defines a Singly Linked List with a sentinel head node.

    Nodes carry a numeric `value` and a key `s`; operations locate nodes
    by key.
    attributes: head
    """
    def __init__(self):
        """Create a new list with a Sentinel Node (key 'a', value 0)."""
        self.head=ListNode('a')
    def insert(self,x,s):
        """Insert a node with value x and key s at the FRONT of the list
        (immediately after the sentinel head)."""
        self.temp=ListNode(s)
        self.temp.value=x
        self.temp.next=self.head.next
        self.head.next=self.temp
    def delete(self,p):
        """Delete the first node whose key equals p.

        NOTE(review): if no node has key p, searc falls back to the
        sentinel head and the FIRST real node is deleted instead (and an
        empty list raises AttributeError) -- confirm intent.
        """
        temp=self.searc(p)
        temp.next=temp.next.next
    def printlist(self):
        """Print each node's value and key, one per line, front to back."""
        self.temp=ListNode('a')
        self.temp=self.head.next
        while self.temp != None :
            print (self.temp.value)
            print(self.temp.s)
            self.temp=self.temp.next
    def search(self,x):
        """Return the 0-based index of the first node whose key equals x.

        NOTE(review): if no node matches, the index of the LAST node
        (len-1) is returned, so callers cannot distinguish "found at the
        end" from "not found" -- confirm intent.
        """
        self.temp=ListNode('a')
        self.temp=self.head
        c=-1
        while self.temp.next!=None :
            self.temp=self.temp.next
            c=c+1
            if self.temp.s == x :
                return c
        return c
    def searc(self,x):
        """Return the node PRECEDING the first node whose key equals x,
        or the sentinel head if no node matches."""
        self.temp=ListNode('a')
        self.temp=self.head
        while self.temp.next!=None :
            if self.temp.next.s == x :
                return self.temp
            self.temp=self.temp.next
        return self.head
    def len(self):
        """Return the length (the number of elements) in the Linked List."""
        self.temp=ListNode('a')
        self.temp=self.head.next
        self.count=0
        while self.temp!=None:
            self.count=self.count+1
            self.temp=self.temp.next
        return self.count
class ListNode:
    """One node of the singly linked list.

    value -- numeric hash key, initialised to 0 (set later by the list)
    s     -- the stored string label
    next  -- the following node, or None at the tail
    """

    def __init__(self, s):
        self.value = 0
        self.s = s
        self.next = None
class HashTable:
    """Separate-chaining hash table with 30 fixed buckets.

    A string hashes to the sum of its character codes modulo 30, so
    anagrams (e.g. 'cat' / 'act' / 'tac') always collide into one bucket.
    """

    def __init__(self):
        # One LinkedList chain per bucket.
        self.T = [LinkedList() for _ in range(30)]

    def _raw_key(self, s):
        """Sum of the character codes of s (hash value before the modulo)."""
        return sum(ord(ch) for ch in s)

    def ins(self, s):
        """Insert string s into its bucket and print that bucket."""
        k = self._raw_key(s)
        h_k = k % 30
        print('the list in T[', h_k, '] is')
        self.T[h_k].insert(k, s)
        self.T[h_k].printlist()

    def dele(self, s):
        """Remove the node labelled s from its bucket and print the bucket."""
        k = self._raw_key(s)
        h_k = k % 30
        self.T[h_k].delete(s)
        print('the list in T[', h_k, '] is')
        self.T[h_k].printlist()

    def sear(self, s):
        """Report the bucket index of s and return its position in the chain."""
        k = self._raw_key(s)
        h_k = k % 30
        print('the string', s, 'is present in', h_k, 'th column of hash table and in the following position of linked list')
        return self.T[h_k].search(k)
def main():
    """Smoke test: insert six strings (three are anagrams of 'cat', so they
    collide into one bucket), search one, then delete one."""
    h=HashTable()
    h.ins('apple')
    h.ins('ball')
    h.ins('cat')
    h.ins('act')
    h.ins('tac')
    h.ins('atc')
    # Prints the chain position returned by HashTable.sear().
    print(h.sear('cat'))
    h.dele('tac')
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f37668bdf7c079415ac4d8ece79b2a23765d21aa
|
71449d985dd2d9f9133822952664a19b0dbfaf91
|
/combine/files/utils.py
|
f8d1268f777119a44ce6aab38f97b2b86f8fbe75
|
[
"MIT"
] |
permissive
|
dropseed/combine
|
f7e6387f2a2a18420398022622afe9c1188fcced
|
7bf2b513877ddbc9911f0e79f9b82c3a770cece7
|
refs/heads/master
| 2023-07-05T22:35:23.107892 | 2023-07-04T01:51:25 | 2023-07-04T01:51:25 | 137,275,296 | 11 | 3 |
MIT
| 2023-09-04T20:14:40 | 2018-06-13T21:50:06 |
Python
|
UTF-8
|
Python
| false | false | 164 |
py
|
import os


def create_parent_directory(path: str) -> None:
    """Ensure that the parent directory of *path* exists.

    Fixes two defects of the original exists()/makedirs() pair:
    - a bare filename (empty dirname) made ``os.makedirs("")`` raise
      FileNotFoundError; it is now a no-op, and
    - the check-then-create sequence raced with concurrent callers;
      ``exist_ok=True`` makes creation atomic and idempotent.
    """
    path_dir = os.path.dirname(path)
    if path_dir:
        os.makedirs(path_dir, exist_ok=True)
|
[
"[email protected]"
] | |
af0ff074d35191259400a9937db81997e7772ffd
|
d52cb4c2e880875944b14da0b8a9542235942ac8
|
/geeksforgeeks/heap/6_Find_median_in_stream.py
|
521a8f79468f59a0c175f5766c7681ae8d0a619c
|
[] |
no_license
|
saparia-data/data_structure
|
fbd61535b68f92143b2cb2679377c0f56f424670
|
2e8700cfdaeefe0093e5b4fb2704b1abcd300d02
|
refs/heads/master
| 2023-05-08T18:54:52.250941 | 2021-06-04T05:44:29 | 2021-06-04T05:44:29 | 296,071,146 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,382 |
py
|
'''
Given an input stream of N integers.
The task is to insert these numbers into a new stream and find the median of the stream formed by each insertion of X to the new stream.
Example 1:
Input:
N = 4
X[] = 5,15,1,3
Output:
5
10
5
4
Explanation:Flow in stream : 5, 15, 1, 3
5 goes to stream --> median 5 (5)
15 goes to stream --> median 10 (5,15)
1 goes to stream --> median 5 (5,15,1)
3 goes to stream --> median 4 (5,15,1 3)
'''
import heapq

# Upper half of the stream (min-heap: smallest of the large values on top).
min_heap = []
# Lower half of the stream (stored negated, since heapq is a min-heap).
max_heap = []


def balanceHeaps():
    """Rebalance so the two halves never differ in size by more than one.

    Moves the root of the larger heap to the other heap.  Called after every
    insertHeaps() so getMedian() can read the median straight off the tops.
    """
    if abs(len(min_heap) - len(max_heap)) <= 1:
        return  # already balanced
    if len(min_heap) > len(max_heap):
        # Smallest upper-half value moves down to the lower half (negated).
        heapq.heappush(max_heap, -heapq.heappop(min_heap))
    else:
        # Largest lower-half value moves up to the upper half (un-negated).
        heapq.heappush(min_heap, -heapq.heappop(max_heap))


def getMedian():
    """Return the median of all values inserted so far.

    Odd count: root of the larger heap.  Even count: integer average of the
    two middle values (floor division matches the expected output, e.g.
    (3 + 5) // 2 == 4).

    Improvement over the original: the heap tops are peeked directly via
    index 0 instead of a heappop()/heappush() round-trip, which was O(log n)
    and needlessly mutated the heaps.
    """
    if len(max_heap) > len(min_heap):
        return -max_heap[0]
    if len(min_heap) > len(max_heap):
        return min_heap[0]
    # Even number of elements: average the two middle values.
    return (min_heap[0] + (-max_heap[0])) // 2


def insertHeaps(x):
    """Insert x into the correct half.

    x goes to the upper half when it is >= the smallest upper-half value;
    the -1 sentinel for an empty upper half preserves the original
    behaviour (the very first non-negative value lands in min_heap).
    """
    least_upperhalf = min_heap[0] if min_heap else -1
    if x >= least_upperhalf:
        heapq.heappush(min_heap, x)
    else:
        heapq.heappush(max_heap, -x)  # negated: max-heap on top of heapq


arr = [5, 15, 1, 3]
n = len(arr)
for i in range(n):
    insertHeaps(arr[i])
    balanceHeaps()
    print(getMedian())
|
[
"[email protected]"
] | |
d0be4c85c1a19f7385e5175292eca406cec92339
|
c8158a7ae28e457e564f1a72b3e82d406aad8c98
|
/cfgs/test_config.py
|
8ca243856ed3a8070dfd268445e5715b8804b742
|
[] |
no_license
|
Wei2624/blender_datasets
|
aad26c7fdfc36c07f8bca3cbab57e16ae489be7a
|
8300b7b35ab5082eeaa313892ad099de74305acc
|
refs/heads/master
| 2020-04-15T19:21:54.028224 | 2019-01-10T02:01:59 | 2019-01-10T02:01:59 | 164,947,207 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,021 |
py
|
# Blender scene objects that belong to the fixed stage (camera, lamps,
# ground planes) and must never be duplicated into generated scenes.
no_need_to_copy = ['Camera','Lamp','Lamp.001','Lamp.002','skp_camera_Last_Saved_SketchUp_View','background'\
,'Plane','Plane.001','Plane.002','Plane.003','Plane.004']
# static_classes = ['background','table']
# static_classes_color = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
# Object classes placed on the floor (not on the table), with their
# segmentation-mask RGB colours (parallel lists).
off_table_classes = ['chair']
off_table_classes_color = [(1.0, 0, 1.0)]
# Classes that stay fixed per scene, with their mask colours.
static_classes = ['table']
static_classes_color = [(1.0, 0.0, 0.0)]
# Alpha used when rendering objects (1.0 = fully opaque).
obj_alpha = 1.0
# changing this can remove certain object from appearing in the scene
# dynamic_classes = ['book', 'keyboard', 'mug', 'detergent', 'bottle', 'pringles']
# dynamic_classes_color = [(0.0, 1.0, 0.0), (0.0, 1.0, 1.0), (0.0, 0.0, 0.5), (0.0, 0.0, 1.0),(1.0, 1.0, 0.0), (1.0, 0.0, 1.0)]
# Classes randomly placed on the table top each scene, with mask colours.
dynamic_classes = ['cerealbox', 'bowl', 'mug', 'can', 'cap']
dynamic_classes_color = [(0.0, 1.0, 0.0), (0.0, 1.0, 1.0), (0.0, 0.0, 0.5), (0.0, 0.0, 1.0),(1.0, 1.0, 0.0)]
# dynamic_classes = ['chair']
# dynamic_classes_color = [(0.0, 1.0, 1.0)]
# Table dimension parameters (Blender scene units).
table_height = 1.
table_dim = (1.5, 1., 1.5)
# How many dynamic objects to place on the table top per scene.
table_top_num_obj = 4
# The table top is subdivided into area_div regions for object placement.
area_div = 4
# Placement bounds and jitter step sizes (vertical / horizontal).
v_bound = 0.51
h_bound = 0.51
h_div_unit = 0.03
v_div_unit = 0.03
# Lights parameters: three fixed lamps; only height is randomised
# (within light_z_range, stepped by range_unit) when randomize_xyz is False.
num_of_lights = 3
randomize_xyz = False
lamp_xy = [(1.3, -0.75),(-1.3, -0.75),(0, 1.5)]
plane_scale = 20
light_z_range = (1.8, 3)
range_unit = 0.1
# texture information
# Maps a mesh identifier to the list of usable texture indices for it.
# Keys appear to follow '<class><modelIdx>_<meshIdx>' (e.g. 'mug4_7');
# presumably the values index into each model's material/texture slots --
# TODO confirm against the loader code.
tex_idx_dict = {'table0_0':[0],\
'book0_2': [0],'book1_1':[0],'book2_3': [0],'book3_0': [0],'book4_0': [0,1,2,3,4],\
'table1_0':[0],\
'bottle0_4': [0],'bottle1_4': [0],'bottle2_0': [0],'bottle3_11':[0],'bottle4_10':[0],'bottle4_15':[0],'bottle5_1': [0],'bottle5_9': [0],'bottle6_0': [0],\
'table2_0': [0],\
'detergent0_1':[0],'detergent1_0': [0],'detergent2_4': [0],'detergent3_3': [0],'detergent3_5': [0],'detergent4_0': [0],'detergent5_7': [0],\
'table3_0':[2],\
'keyboard0_24':[0],'keyboard1_190':[0],'keyboard2_72':[0],'keyboard3_99':[0],'keyboard4_484':[0],'keyboard5_289':[0],\
'table4_0':[0],\
'mug0_0': [0],'mug1_3': [0],'mug2_5': [0],'mug3_0': [0],'mug4_7': [0],'mug5_2': [0],'mug6_0': [0],'mug7_2':[0], 'mug8_6':[0],'mug9_4':[0],'mug10_3':[0],\
'table5_0':[0],'table6_0':[1],'table7_0': [0],'chair8_0': [0],'chair9_0':[0],\
'pringles1_1': [0],'pringles1_2': [0],'pringles2_2': [0],\
'cerealbox0_0': [0],'cerealbox0_1': [0],'cerealbox0_3': [0],'cerealbox0_5': [0],'cerealbox0_6': [0],'cerealbox0_7': [0],'cerealbox0_8': [0],\
'cerealbox1_71': [0],'cerealbox1_166': [0],'cerealbox1_66': [0],'cerealbox1_123': [0],\
'cerealbox2_493': [0],'cerealbox2_302': [0],'cerealbox2_349': [0],'cerealbox2_256': [0],'cerealbox2_194': [0],'cerealbox2_494': [0],'cerealbox2_225': [0],'cerealbox2_257': [0], 'cerealbox2_203': [0],\
'cerealbox3_1': [0],'cerealbox3_5': [0],'cerealbox3_4': [0],'cerealbox3_0': [0],'cerealbox3_2': [0],\
'cerealbox4_42': [0], \
'cerealbox5_0': [0], 'cerealbox5_1': [0],'cerealbox5_2': [0],'cerealbox5_4': [0],\
'cerealbox6_6': [0], 'cerealbox7_0': [0], 'cerealbox8_5':[0],'cerealbox8_0':[0],'cerealbox8_1':[0],'cerealbox8_6':[0],\
'cerealbox9_5':[0],\
'bowl0_2': [0],'bowl1_0': [0],'bowl2_2': [0], 'bowl3_1': [0], 'bowl4_1': [0], 'bowl5_0': [0], 'bowl6_0':[0],\
'can0_3': [0], 'can1_8': [0], 'can2_0': [0], 'can3_0': [0], 'can4_5': [0], 'can5_1': [0],\
'cap0_0': [0],'cap1_60': [0], 'cap1_102': [0], 'cap2_6': [0], 'cap3_2': [0], 'cap4_21': [0], 'cap5_10':[0], 'cap6_15':[0], 'cap6_11':[0],\
'cap7_0': [0], 'cap8_16':[0], 'cap8_0': [0],\
'chair0_0': [2],'chair1_0':[0], 'chair2_0': [2], 'chair3_0': [0,1,2,3,4],'chair4_0':[0],'chair5_0':[1],'chair6_0': [0],'chair7_0':[2],'chair8_0':[3],'chair9_0':[3]
}
# Gaussian samples: mean/stddev used for random placement jitter.
normal_m = 0
normal_s = 0.2
# position of background in range on x y
background_range = (1.2, 2.7)
# interval (in frames) at which the script reloads/regenerates a scene
change_scene_interval = 4
# Camera keyframe parameters: the camera orbits the target point in
# num_degrees steps of degree_interval degrees, at a random height and
# radius drawn from the ranges below.
degree_interval = 30
num_degrees = 12
cam_height_range = (1.3, 2.7)
cam_xy_range = (1.5, 2.5)
# Total animation frames rendered per scene.
total_frames = 100
# Point (x, y, z) the camera always looks at.
target_point = (0,0,1.2)
|
[
"[email protected]"
] | |
516a6530d09f3f2717a8b0cf0e85c849bb9f4ad0
|
f63907d2115becd64704ef1881f3bfcb7ba9047d
|
/sandbox/test/testTemplate.py
|
91ba4b483092ee7a004dca1be860007bfd13cdaa
|
[] |
no_license
|
AseiSugiyama/NZMATH-Python3
|
d456610f72071a654531583228e439ffa8a4db0c
|
f65b176be2e58fafa0eea91f399c9ab17f3f478b
|
refs/heads/master
| 2020-05-21T07:26:51.434191 | 2019-04-27T09:52:18 | 2019-04-27T09:52:18 | 185,959,644 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 989 |
py
|
import unittest
import sandbox.hoge as hoge
class HogeTest (unittest.TestCase):
    """
    Template test case for this project.

    Test classes must inherit unittest.TestCase and have a name suffixed
    with 'Test' so that suite() below collects them automatically.
    """
    def setUp(self):
        """
        setUp is run before each test method run.
        """
        pass
    def tearDown(self):
        """
        tearDown is run after each test method run.
        """
        pass
    def testHuga(self):
        """
        Every test method has a name prefixed with 'test'.
        Demonstrates the two most common assertion styles.
        """
        # asserting something
        self.assert_(hoge.ishoge(), "optional message string")
        # asserting equality
        self.assertEqual(1, hoge.huga)
# The following part is always unedited.
def suite(suffix="Test"):
    """Collect every class in this module whose name ends with *suffix*
    into a single unittest.TestSuite (test methods prefixed 'test')."""
    bundle = unittest.TestSuite()
    module_names = globals()
    matching = [name for name in module_names if name.endswith(suffix)]
    for name in matching:
        bundle.addTest(unittest.makeSuite(module_names[name], "test"))
    return bundle


if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
|
[
"devnull@localhost"
] |
devnull@localhost
|
eaa1694453e2fb1d8f4e20c3a6a0852dc8c2f92c
|
bec66ec0c920939547466b2b8f9d65813d560d1d
|
/noxious/__init__.py
|
f007d1198e0435f72d773eb479f29a48d9534092
|
[] |
no_license
|
mbr/noxious
|
cbb3be2ca725a0282db390520306da7ebba75339
|
6c48fe84867d80614defa6bdce4d4640ce657ae5
|
refs/heads/master
| 2023-06-06T20:42:08.079423 | 2015-08-30T10:54:52 | 2015-08-30T10:54:52 | 41,625,389 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,557 |
py
|
import xml.etree.ElementTree as ET
def from_file(fn):
    """Parse the XML file *fn* and wrap its root element in a Noxious node."""
    root = ET.parse(fn).getroot()
    return Noxious(root)
class Noxious(object):
    """Convenience wrapper around an ElementTree element.

    Child tags are reached via item access (``node['child']``), XML
    attributes via attribute access (``node.attr``), and the element text
    via ``str(node)``.  ``repr()`` shows the access path from the root.
    An element is truthy when it has text or child elements.
    """

    def __init__(self, elem, parent=None):
        self._parent = parent
        self._elem = elem

    def _all(self):
        """All sibling elements (including this one) sharing this tag."""
        cls = self.__class__
        return [cls(match)
                for match in self._parent._elem.findall(self._elem.tag)]

    def _get_path(self):
        """Build the access path from the root, e.g. ``root['a']['b']``."""
        segments = []
        node = self
        while node:
            segments.insert(0, node._elem.tag)
            node = node._parent
        first = segments.pop(0)
        return first + ''.join('[{!r}]'.format(seg) for seg in segments)

    def _text(self):
        return self._elem.text

    def __add__(self, other):
        return str(self) + other

    def __bool__(self):
        elem = self._elem
        return bool(elem.text or list(elem))

    def __float__(self):
        return float(str(self))

    def __int__(self):
        return int(str(self))

    def __getitem__(self, name):
        found = self._elem.find(name)
        if found is None:
            raise KeyError('No child {} on {!r}'.format(name, self))
        return self.__class__(found, self)

    def __getattr__(self, name):
        # Only called for names not found on the instance, i.e. XML attributes.
        if name not in self._elem.attrib:
            raise AttributeError('No attribute {} on {!r}'.format(name, self))
        return self._elem.attrib[name]

    # py2 compatibility: the truth-value protocol is __nonzero__ there.
    __nonzero__ = __bool__

    def __radd__(self, other):
        return other + str(self)

    def __str__(self):
        return self._text()

    def __repr__(self):
        return self._get_path()
|
[
"[email protected]"
] | |
cb49bec2a5718c4576520ccf546458370bb74044
|
a8f73dcf71eb3be1387a4c5fc766ad2e14a64fd4
|
/recorder/scripts/socket_server.py
|
cdeaa0ede4b5933db55dd0fb9eb94743ff33cd30
|
[] |
no_license
|
ZhengYi0310/eve_workspace
|
3a192ab797fe21130ba45a9a09431ddc8f5662a5
|
5c73cebfaf37820ba4d1f1354632370434816921
|
refs/heads/master
| 2021-01-19T02:10:18.657989 | 2017-06-30T19:14:05 | 2017-06-30T19:14:05 | 87,264,944 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
# Python 2 script: accept one TCP client, handshake, then receive a file.
# first of all import the socket library
import socket
# Resolve the server's own hostname to an IP (printed for diagnostics only).
ip = socket.gethostbyname('bml-ALL-SERIES')
print "The client hostname is %s." %(ip)
# next create a socket object
s_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "socket on eve successfully created."
# reserve a port on the pc, just set it to 12345
# NOTE(review): the comment says 12345 but the code binds 9999 -- confirm.
port = 9999
# Next bind to the port
# we have not typed any ip in the ip field
# instead we have inputted an empty string
# this makes the server listen to requests
# coming from other computers on the network
s_server.bind(("eve", port))
print "socket binded to %s." %(port)
s_server.listen(10)
s_client, address = s_server.accept()
print "socket is listening."
# Handshake: read the client's greeting, reply, then receive the file.
print s_client.recv(8192)
s_client.send('Hello Joy !')
print "handshake ! now starting transfer file !"
f = open("test.json", "wb")
# NOTE(review): the first read uses an 8192-byte buffer but the loop reads
# 1024 bytes at a time; the loop ends when recv returns '' (peer closed).
l = s_client.recv(8192)
while (l):
    f.write(l)
    l = s_client.recv(1024)
    #print "receiving"
# NOTE(review): this send only works if the client half-closed the
# connection (shutdown(SHUT_WR)) rather than closing it fully -- confirm.
s_client.send('file received by Eve!')
print "file received !"
f.close()
s_client.close()
s_server.close()
|
[
"[email protected]"
] | |
35da38996a54cfbccf733b5859960068514b4714
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2332/60592/271480.py
|
f602a8c01f31dbba291aa53971306002fff48fef
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 630 |
py
|
# Reads a base and a target from stdin, then computes a cost `te` for two
# greedy strategies of expressing `tar` with powers of `base` (the second
# strategy also allows over-shooting with negative remainders), printing
# the cheaper of the two.
base = int(input())
tar = int(input())
res = 0   # NOTE(review): never used after initialisation
fun = []  # collects the cost of each strategy
te = 0
tem = tar
# Strategy 1: repeatedly subtract the largest power of `base` <= tem.
while tem != 0:
    i = 0
    if tem == 1:
        te += 1
        break
    mark = 0
    # After this loop, mark is the smallest power of base that EXCEEDS tem.
    while mark <= tem:
        mark = pow(base,i)
        i+=1
    # NOTE(review): cost increments of i-3 here vs i-2 in strategy 2 look
    # asymmetric -- confirm against the intended cost model.
    te+=i-3
    # Step down one power: the largest power <= tem.
    # NOTE(review): true division makes mark (and then tem) a float.
    mark/=base
    tem-=mark
    if tem!= 0:
        te+=1
fun.append(te)
te = 0
tem = tar
# Strategy 2: always take the smallest power >= |tem|, allowing the
# remainder to go negative (over-shoot then correct).
while tem != 0:
    i = 0
    if tem == 1 or tem == -1:
        te+=1
        break
    mark = 0
    while mark < abs(tem):
        mark = pow(base,i)
        i+=1
    te+=i-2
    if tem < 0:
        tem+=mark
    elif tem>0:
        tem-=mark
    if tem != 0:
        te+=1
fun.append(te)
print(min(fun))
|
[
"[email protected]"
] | |
b83e8246703e0232b938a556394aff5db7517139
|
e7b87b4377660adf9872a0fd361b2f66ef2f4cfa
|
/advent_2019/ChristmaSSE_KeyGen/reverse.py
|
1d66bf5fad064d0e78c99dbd0d0e4cc245d83362
|
[] |
no_license
|
passwd0/ctf
|
a215dbabfb0a97723e7e805c4938b833c454c166
|
e5a6e4769308ed6283e7e20c6852683bdac5cedd
|
refs/heads/master
| 2020-12-01T15:04:08.186238 | 2019-12-28T22:12:24 | 2019-12-28T22:12:24 | 230,649,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,513 |
py
|
#!/bin/python
from cachetools import cached, LRUCache, TTLCache
import time
import numpy as np
import struct
from unicorn import *
from unicorn.x86_const import *
#def internalCycle():
# return 0
# #m6 = m4
# #pcmpgtd xmm6, xmm3
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm3, xmm6
# #movdqa xmm6, xmm4
# #pcmpgtd xmm6, xmm2
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm2, xmm6
# #movdqa xmm6, xmm4
# #pcmpgtd xmm6, xmm1
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm1, xmm6
# #movdqa xmm6, xmm4
# #pcmpgtd xmm6, xmm0
# #paddd xmm6, xmm5
# #pmulld xmm6, xmm4
# #psubd xmm0, xmm6
# #add edx, 0xffffffff
#
@cached(cache={})
def pshufd(src, order):
    """Software model of the SSE PSHUFD instruction.

    Treats *src* as a 128-bit value (four 32-bit dwords, dword 0 being the
    least significant) and builds the result by picking one source dword
    per 2-bit selector in the 8-bit immediate *order* (highest selector
    chooses the result's highest dword).
    """
    bits = bin(src)[2:].rjust(128, "0")
    width = 32
    # dwords[0] is the least-significant dword, as in hardware.
    dwords = [bits[pos:pos + width] for pos in range(0, len(bits), width)][::-1]
    sel_bits = bin(order)[2:].rjust(8, "0")
    selectors = [sel_bits[pos:pos + 2] for pos in range(0, len(sel_bits), 2)]
    shuffled = "".join(dwords[int(sel, 2)] for sel in selectors)
    return int(shuffled, 2)
@cached(cache={})
def pmulld(val1,val2):
    """Software model of SSE PMULLD: lane-wise 32-bit multiply of two
    128-bit values.

    NOTE(review): each lane product is rendered as a DECIMAL string, padded
    to 32 characters, and the concatenation is finally parsed with
    int(res, 16) (hex).  Mixing decimal digits with a hex parse -- and not
    truncating products wider than 32 digits -- does not match a faithful
    PMULLD (which keeps the low 32 bits per lane).  Confirm whether this is
    an intended keygen quirk before changing it.
    """
    line=bin(val1)[2:]
    line=line.rjust(128,"0")
    n=32
    # Split into 32-bit lanes, most-significant lane first.
    val1=[line[i:i+n] for i in range(0, len(line), n)]
    line=bin(val2)[2:].rjust(128,"0")
    n=32
    val2=[line[i:i+n] for i in range(0, len(line), n)]
    #print(val1,val2)
    res=""
    for i,j in zip(val1,val2):
        res+=str(int(i,2)*int(j,2)).rjust(32,"0")
    return int(res,16)
@cached(cache={})
def paddd(val1,val2):
    """Software model of SSE PADDD: lane-wise 32-bit add of two 128-bit
    values.

    NOTE(review): same quirk as pmulld -- lane sums are rendered as DECIMAL
    strings padded to 32 chars, then the concatenation is parsed as hex via
    int(res, 16), with no 32-bit wrap-around per lane.  Confirm intent
    before touching.
    """
    line=bin(val1)[2:]
    line=line.rjust(128,"0")
    n=32
    # Split into 32-bit lanes, most-significant lane first.
    val1=[line[i:i+n] for i in range(0, len(line), n)]
    line=bin(val2)[2:].rjust(128,"0")
    n=32
    val2=[line[i:i+n] for i in range(0, len(line), n)]
    #print(val1,val2)
    res=""
    for i,j in zip(val1,val2):
        res+=str(int(i,2)+int(j,2)).rjust(32,"0")
    return int(res,16)
@cached(cache={})
def m_fun(s1, s2, s3):
    """Lane-wise multiply-accumulate: paddd(pmulld(s1, s2), s3)."""
    return paddd(pmulld(s1, s2), s3)
@cached(cache={})
def fun(s1, s2, s3, s4):
    """m_fun(s1, s2, s3) with a second lane-wise addend s4."""
    return paddd(m_fun(s1, s2, s3), s4)
@cached(cache={})
def mainFake():
    """Naive emulation of the keygen's SSE loop (kept for reference only).

    Loads the challenge blob, derives the four 128-bit state registers with
    pshufd, then iterates the multiply/accumulate round `counter` times.
    NOTE(review): counter is ~1.2e18, so this loop can never finish -- the
    matrix-exponentiation path in main() below is the practical solver.
    The @cached decorator is pointless here (no arguments, never returns).
    """
    start_time = time.time()
    data = open('reverse_data', 'rb').read()
    # 128-bit seed value at offset 64 of the blob.
    res = int.from_bytes(data[64:80], byteorder='little')
    i0 = pshufd(res, 0x15)
    i1 = pshufd(res, 0x45)
    i2 = pshufd(res, 0x51)
    i3 = pshufd(res, 0x54)
    # print(hex(i0),hex(i1),hex(i2),hex(i3))
    # ----------------------------------
    # i = [
    # [1, 0, 0, 0],
    # [0, 1, 0, 0],
    # [0, 0, 1, 0],
    # [0, 0, 0, 1]
    # ]
    counter = 0x112210f47de98115
    rax = 0
    # d9 = int.from_bytes(data[:16], byteorder='little')
    # d10 = int.from_bytes(data[16:32], byteorder='little')
    # d13 = int.from_bytes(data[32:48], byteorder='little')
    # d15 = int.from_bytes(data[48:64], byteorder='little')
    #
    # s00 = pshufd(d9, 0)
    # s01 = pshufd(d9, 0x55)
    # sss5= pshufd(d9, 0xaa)
    # s03 = pshufd(d9, 0xff)
    #
    # s10 = pshufd(d10, 0)
    # s5 = pshufd(d10, 0x55)
    # s12 = pshufd(d10, 0xaa)
    # s13 = pshufd(d10, 0xff)
    #
    # s20 = pshufd(d13, 0)
    # s21 = pshufd(d13, 0x55)
    # s22 = pshufd (d13, 0xaa)
    # s23 = pshufd(d13, 0xff)
    #
    # s30 = pshufd(d15, 0)
    # s31 = pshufd(d15, 0x55)
    # s32 = pshufd(d15, 0xaa)
    # s33 = pshufd(d15, 0xff)
    #
    # print(hex(s00 ), hex(s01), hex(sss5), hex(s03 ))
    # print(hex(s10 ), hex(s5 ), hex(s12), hex(s13 ))
    # print(hex(s20), hex(s21 ), hex(s22 ), hex(s23))
    # print(hex(s30), hex(s31 ), hex(s32 ), hex(s33))
    #
    # #---------------------------------
    # Test coefficients standing in for the values read from the blob above.
    s00 = 1 ; s01 = 2 ; s02 = 3 ; s03 = 4 ;
    s10 = 5 ; s11 = 6 ; s12 = 7 ; s13 = 8 ;
    s20 = 9 ; s21 = 10; s22 = 11; s23 = 12;
    s30 = 13; s31 = 14; s32 = 15; s33 = 16;
    # s = [
    # [1 , 2, 3, 4],
    # [5 , 6, 7, 8],
    # [9 ,10,11,12],
    # [13,14,15,16]
    # ]
    while(rax != counter):
        # first column
        m6 = pmulld(s00, i3)
        m8 = pmulld(s10, i3)
        m11 = pmulld(s20, i3)
        m14 = pmulld(s30, i3)
        #--------------------
        m12 = m_fun(s01, i2, m6) #xmm12s * xmm2 * xmm6 = xmm12
        m5 = pmulld(s11, i1) #xmm5s *xmm1 = xmm5
        i3 = fun(s03, i0, m5, m12)
        mm5 = m_fun(s11, i2, m8)
        m7 = m_fun(s21, i2, s20)
        mm6 = pmulld(s12, i1) #64c
        i2 = fun(s13, i0, mm6, mm5) #662
        mmm5 = pmulld(s22, i1)
        mmm6 = pmulld(s32, i1) #680
        i1 = fun(s23, i0, mmm5, m7)
        m4 = m_fun(s31, i2, m14)
        i0 = fun(s33, i0, mmm6, m4)
        #internal cycle
        # m4 = pshufd(sres, 0xaa)
        # m5 = pshufd(i0, 0)
        # edx = 0x3e8
        # internalCycle()
        rax += 1
        # Progress heartbeat every 10M rounds.
        if rax % 10000000 == 0:
            print(time.time()-start_time)
def emulation(matrix):
    """Run the challenge's decryption stub under Unicorn (x86-64).

    Loads the four matrix rows into XMM0-XMM3, maps the encrypted flag at
    FLAG+0x1090, executes the embedded machine code, and prints the 0x20
    decrypted bytes.  The stub appears to spill the XMM registers to the
    stack and XOR the flag bytes against them -- confirm against a
    disassembly if this matters.
    """
    BASE = 0x400000            # where the code stub is mapped
    STACK = 0x7ffcaf000000     # scratch stack for the movdqa spills
    FLAG = 0x00600000          # base of the region holding the flag blob
    mu = Uc(UC_ARCH_X86, UC_MODE_64)
    mu.mem_map(BASE, 1024*4)
    mu.mem_map(STACK, 1024*4)
    mu.mem_map(FLAG, 1024*1024)
    # 69 bytes of x86-64 machine code extracted from the target binary.
    code = struct.pack ("69B", *[
    0x66,0x0f,0x7f,0x1c,0x24,0x66,0x0f,0x7f,0x54,0x24,0x10,
    0x66,0x0f,0x7f,0x4c,0x24,0x20,0x66,0x0f,0x7f,0x44,0x24,
    0x30,0x31,0xc0,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,
    0x00,0x00,0x0f,0x1f,0x00,0x0f,0xb6,0x0c,0x44,0x30,0x88,
    0x90,0x10,0x60,0x00,0x0f,0xb6,0x4c,0x44,0x01,0x30,0x88,
    0x91,0x10,0x60,0x00,0x48,0x83,0xc0,0x02,0x48,0x83,0xf8,
    0x20,0x75,0xe1])
    # The 40-byte encrypted flag blob from the target.
    flag = struct.pack ("40B", *[
    0xfc,0x14,0xeb,0x09,0xbc,0xae,0xe7,0x47,0x4f,0xe3,0x7c,
    0xc1,0x52,0xa5,0x02,0x8e,0x89,0x71,0xc8,0x8d,0x96,0x23,
    0x01,0x6d,0x71,0x40,0x5a,0xea,0xfd,0x46,0x1d,0x23,0x00,
    0x00,0x00,0x00,0x00,0x00,0x00,0x00])
    mu.reg_write(UC_X86_REG_RSP, STACK)
    # Pack each 4-element matrix row into one 128-bit XMM register.
    mu.reg_write(UC_X86_REG_XMM0, (matrix[0][0]<<96) + (matrix[0][1]<<64) + (matrix[0][2]<<32) + (matrix[0][3]))
    mu.reg_write(UC_X86_REG_XMM1, (matrix[1][0]<<96) + (matrix[1][1]<<64) + (matrix[1][2]<<32) + (matrix[1][3]))
    mu.reg_write(UC_X86_REG_XMM2, (matrix[2][0]<<96) + (matrix[2][1]<<64) + (matrix[2][2]<<32) + (matrix[2][3]))
    mu.reg_write(UC_X86_REG_XMM3, (matrix[3][0]<<96) + (matrix[3][1]<<64) + (matrix[3][2]<<32) + (matrix[3][3]))
    mu.mem_write(FLAG+0x1090, flag)
    mu.mem_write(BASE, code)
    mu.emu_start(BASE, BASE + len(code), 2 * UC_SECOND_SCALE)
    # The first 0x20 bytes at the blob address now hold the decrypted flag.
    print(mu.mem_read(FLAG+0x1090, 0x20))
def moltiplication(exponent):
    """Return the fixed 4x4 seed matrix raised to the power 2**exponent.

    Squares the matrix *exponent* times in place (np.dot(a, a, out=a));
    dtype=object keeps arbitrary-precision Python ints so products never
    wrap.  overflow() reduces the entries after each squaring.
    NOTE(review): overflow() reads a module-level `data` that is never
    defined at module scope (see overflow below), so this currently raises
    NameError when called.
    """
    a = np.array([ \
    [16,15,14,13], \
    [12,11,10, 9], \
    [8 , 7, 6, 5], \
    [4 , 3, 2, 1] \
    ], dtype=object)
    for i in range(exponent):
        np.dot(a,a,a)
        a = overflow(a)
    return a
def overflow(a):
    """Reduce every entry of the 4x4 matrix modulo `sres` (a 32-bit value
    read from the challenge blob), mimicking register wrap-around.

    NOTE(review): `data` is not defined at module level -- it is local to
    mainFake() -- so this raises NameError.  Presumably
    data = open('reverse_data', 'rb').read() was meant to run at module
    scope; confirm before fixing.
    NOTE(review): the while-loop body is a single `%=`, so it runs at most
    once, and entries exactly equal to sres are left unreduced (`>` not `>=`).
    """
    sres = int.from_bytes(data[72:76], byteorder='little')
    for x in range(4):
        for y in range(4):
            while a[x][y] > sres:
                a[x][y] %= sres
    return a
def calculate_exponents(s=1234567890123456789):
    """Decompose *s* into powers of two, greedily from the largest.

    Returns the exponents e (descending) such that sum(2**e) == s, i.e.
    the positions of the set bits of *s*.  Generalized: *s* is now a
    parameter; the default keeps the original zero-argument behaviour
    (the keygen's magic loop counter, 0x112210f47de98115).
    """
    res = []
    i = 0
    while s > 0:
        if 2 ** i > s:
            # 2**(i-1) is the largest power of two not exceeding s.
            res.append(i - 1)
            s -= 2 ** (i - 1)
            i = 0  # restart the scan for the remainder
        else:
            i += 1
    return res
def main():
    """Solve the keygen: build M**counter by binary exponentiation.

    Starts from the identity, multiplies in moltiplication(a) == M**(2**a)
    for every set bit a of the counter (calculate_exponents), reducing the
    entries after each step, then feeds the result to the Unicorn stub.
    """
    matrix = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]
    ], dtype=object)
    x = calculate_exponents()
    print("lista: ", x)
    for a in x:
        # BUG FIX: the original called the undefined name `moltiplicazione`
        # (NameError at runtime); the function defined above is
        # `moltiplication`.
        np.dot(matrix, moltiplication(a), matrix)
        overflow(matrix)
    print("matrix: ", matrix)
    emulation(matrix)

main()
|
[
"[email protected]"
] | |
fc8f57dcba1a88d5f1ae5e3963414387e47a3e5e
|
2a4fccb212b70bbef7634271b3a0992e8927abd8
|
/shop/migrations/0003_promo_date_fin.py
|
89d89e8e1abad1c780b0d1a8de33a0159d058fb4
|
[] |
no_license
|
snipercode221/cbshop
|
dd9eca3400ea500122289658592af52a81e99f6c
|
d139f27a540a458bc011c175e0899f4716d7ebb2
|
refs/heads/master
| 2022-11-29T23:43:47.304965 | 2020-08-02T00:08:00 | 2020-08-02T00:08:00 | 284,359,608 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-23 21:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable `date_fin` (promo end timestamp) column to `promo`."""

    dependencies = [
        ('shop', '0002_promo_pub'),
    ]

    operations = [
        migrations.AddField(
            model_name='promo',
            name='date_fin',
            # Nullable so existing promo rows need no default value.
            field=models.DateTimeField(null=True, verbose_name='fin promo'),
        ),
    ]
|
[
"[email protected]"
] | |
c142161627e8cc6cd58d55944d32e4c84a8c57d9
|
123d70d21419fbdf6939135e09b263f145f4174e
|
/new/plomrogue/commands.py
|
744d471fac100bc5e326813899d93466a353566e
|
[] |
no_license
|
plomlompom/plomrogue2-experiments
|
53aaf24ca88bc49bc21d13616de5126fa488ee31
|
a466115714f7da37c45d3fd0d054d67f85a725f0
|
refs/heads/master
| 2021-01-20T00:28:33.601033 | 2019-05-13T11:13:26 | 2019-05-13T11:13:26 | 89,135,617 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,931 |
py
|
from plomrogue.misc import quote
def cmd_GEN_WORLD(game, yx, seed):
    """Create a fresh world of map size yx from the given PRNG seed."""
    game.make_new_world(yx, seed)
cmd_GEN_WORLD.argtypes = 'yx_tuple:pos int:nonneg'

def cmd_GET_GAMESTATE(game, connection_id):
    """Send game state to caller."""
    game.send_gamestate(connection_id)

def cmd_SEED(game, seed):
    # Reseed the deterministic PRNG (used when replaying a save file).
    game.rand.prngod_seed = seed
cmd_SEED.argtypes = 'int:nonneg'

def cmd_MAP_SIZE(game, size):
    """Set the size of individual maps (a y,x tuple)."""
    game.map_size = size
cmd_MAP_SIZE.argtypes = 'yx_tuple:pos'

def cmd_MAP(game, map_pos):
    """Ensure (possibly empty/'?'-filled) map at position map_pos."""
    game.get_map(map_pos)
cmd_MAP.argtypes = 'yx_tuple'
def cmd_THING_TYPE(game, i, type_):
    """Replace thing i with a fresh thing of type_, keeping its position
    and in-inventory flag (other attributes are reset to type defaults)."""
    t_old = game.get_thing(i)
    t_new = game.thing_types[type_](game, i)
    # Commented-out generic attribute copying, kept for reference: it tried
    # to carry over every shared attribute instead of just the two below.
    #attr_names_of_old = [name for name in dir(t_old) where name[:2] != '__']
    #attr_names_of_new = [name for name in dir(t_new) where name[:2] != '__']
    #class_new = type(t_new)
    #for attr_name in [v for v in attr_names_of_old if v in attr_names_of_new]:
    #    if hasattr(class_new, attr_name):
    #        attr_new = getattr(class_new, attr_name)
    #        if type(attr_new) == property and attr_new.fset is None:
    #            continue # ignore read-only properties on t_new
    #    attr_old = getattr(t_old, attr_name)
    #    attr_new = getattr(t_new, attr_name)
    #    if type(attr_old) != type(attr_new):
    #        continue
    #    setattr(t_new, attr_name, attr_old)
    t_new.position = t_old.position
    t_new.in_inventory = t_old.in_inventory
    # Swap the new thing into the old one's slot so ordering is preserved.
    t_old_index = game.things.index(t_old)
    game.things[t_old_index] = t_new
cmd_THING_TYPE.argtypes = 'int:nonneg string:thingtype'

def cmd_THING_POS(game, i, big_yx, small_yx):
    """Place thing i at (big map coordinate, position within that map)."""
    t = game.get_thing(i)
    t.position = (big_yx, small_yx)
cmd_THING_POS.argtypes = 'int:nonneg yx_tuple yx_tuple:nonneg'
def cmd_THING_INVENTORY(game, id_, ids):
    """Set thing id_'s inventory to ids; carried things are flagged and
    moved to the carrier's position."""
    carrier = game.get_thing(id_)
    carrier.inventory = ids
    for id_ in ids:
        t = game.get_thing(id_)
        t.in_inventory = True
        t.position = carrier.position
cmd_THING_INVENTORY.argtypes = 'int:nonneg seq:int:nonneg'

def cmd_THING_HEALTH(game, id_, health):
    """Set thing id_'s health to the given non-negative value."""
    t = game.get_thing(id_)
    t.health = health
cmd_THING_HEALTH.argtypes = 'int:nonneg int:nonneg'

def cmd_GET_PICKABLE_ITEMS(game, connection_id):
    """Send the IDs of items the player can pick up; ',' alone if none
    (the bare comma keeps the reply parseable as an empty sequence)."""
    pickable_ids = game.player.get_pickable_items()
    if len(pickable_ids) > 0:
        game.io.send('PICKABLE_ITEMS %s' %
                     ','.join([str(id_) for id_ in pickable_ids]))
    else:
        game.io.send('PICKABLE_ITEMS ,')
def cmd_TERRAIN_LINE(game, big_yx, y, terrain_line):
    """Overwrite row y of the map at big_yx with terrain_line."""
    game.maps[big_yx].set_line(y, terrain_line)
cmd_TERRAIN_LINE.argtypes = 'yx_tuple int:nonneg string'

def cmd_PLAYER_ID(game, id_):
    """Declare which thing ID is the player-controlled thing."""
    # TODO: test whether valid thing ID
    game.player_id = id_
cmd_PLAYER_ID.argtypes = 'int:nonneg'

def cmd_TURN(game, n):
    """Set the current turn counter (used when loading a save)."""
    game.turn = n
cmd_TURN.argtypes = 'int:nonneg'

def cmd_SWITCH_PLAYER(game):
    """Hand control to the next thing (wrapping around), leaving the old
    player waiting, then advance the game."""
    game.player.set_task('WAIT')
    thing_ids = [t.id_ for t in game.things]
    player_index = thing_ids.index(game.player.id_)
    if player_index == len(thing_ids) - 1:
        game.player_id = thing_ids[0]
    else:
        game.player_id = thing_ids[player_index + 1]
    game.proceed()
def cmd_SAVE(game):
    """Write the full game state as a replayable command script.

    The save file is a sequence of the very commands defined in this
    module (TURN, SEED, MAP, THING_*, SET_TASK:..., PLAYER_ID), so loading
    is just re-running them.
    """
    def write(f, msg):
        # One command per line.
        f.write(msg + '\n')
    save_file_name = game.io.game_file_name + '.save'
    with open(save_file_name, 'w') as f:
        write(f, 'TURN %s' % game.turn)
        write(f, 'SEED %s' % game.rand.prngod_seed)
        write(f, 'MAP_SIZE %s' % (game.map_size,))
        # Declare all maps first, then their terrain rows.
        for map_pos in game.maps:
            write(f, 'MAP %s' % (map_pos,))
        for map_pos in game.maps:
            for y, line in game.maps[map_pos].lines():
                write(f, 'TERRAIN_LINE %s %5s %s' % (map_pos, y, quote(line)))
        for thing in game.things:
            write(f, 'THING_TYPE %s %s' % (thing.id_, thing.type_))
            write(f, 'THING_POS %s %s %s' % (thing.id_, thing.position[0],
                                             thing.position[1]))
            # health / task are optional attributes, hence the hasattr guards.
            if hasattr(thing, 'health'):
                write(f, 'THING_HEALTH %s %s' % (thing.id_, thing.health))
            if len(thing.inventory) > 0:
                write(f, 'THING_INVENTORY %s %s' %
                      (thing.id_,','.join([str(i) for i in thing.inventory])))
            else:
                # Bare comma encodes an empty sequence for the parser.
                write(f, 'THING_INVENTORY %s ,' % thing.id_)
            if hasattr(thing, 'task'):
                task = thing.task
                if task is not None:
                    task_args = task.get_args_string()
                    # Recover the command name from the task's class.
                    task_name = [k for k in game.tasks.keys()
                                 if game.tasks[k] == task.__class__][0]
                    write(f, 'SET_TASK:%s %s %s %s' % (task_name, thing.id_,
                                                       task.todo, task_args))
        write(f, 'PLAYER_ID %s' % game.player_id)
# Never record SAVE itself into a save file.
cmd_SAVE.dont_save = True
|
[
"[email protected]"
] | |
45e084179e4aa3dbbf8c2f260fc7c5bd9989286e
|
daf802cfd22614f98596691220c08e15e76fa994
|
/Website/community/rcmOnUser/admin.py
|
389d4206e36951bdbe8c52aac0f01db0cd5d07cd
|
[] |
no_license
|
ee08b397/Community-Recommendation
|
c3b3a534a01622314ea382b4a1d13056537c36a8
|
557e774eeb6045637599f98c130e465bfe345f29
|
refs/heads/master
| 2021-01-17T01:09:28.298491 | 2015-08-15T03:54:04 | 2015-08-15T03:54:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 126 |
py
|
from django.contrib import admin
from rcmOnUser.models import score

# Expose the `score` model in the Django admin site.
admin.site.register(score)
|
[
"[email protected]"
] | |
f319f75c6eda9733cf3e2eea7ec0aba1aac4bbf7
|
43188f92d61c427b34b3806adee717a7f445e60d
|
/mdb/files/views.py
|
647b51ca298e3762ef9906d583995b8dc042e59d
|
[
"MIT"
] |
permissive
|
idbac/maldidb
|
c71205dddae7f337669b9c8b0abb245adc95dfb8
|
b926397c1d5f166d80f1eea5b3ba99dcef9b44dc
|
refs/heads/master
| 2023-08-01T03:31:24.358835 | 2021-09-11T16:37:07 | 2021-09-11T16:37:07 | 332,827,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,160 |
py
|
from django.shortcuts import render
from .models import UserFile
from django_tables2 import SingleTableView
from .tables import *
# ~ from .forms import *
from django.views.generic.list import ListView
from spectra_search.forms import FileLibraryForm
from chat.models import Library
class UserFilesListView(SingleTableView):
    """django-tables2 view listing the logged-in user's uploaded files,
    newest upload first."""
    model = UserFile
    table_class = UserFileTable
    template_name = 'files/user_files.html'
    def get_queryset(self, *args, **kwargs):
        # Only the requesting user's own files, most recent upload first.
        return UserFile.objects.filter(owner = self.request.user) \
          .order_by('-upload_date') # (was ordered by last_modified)
class FileUpload(ListView):
    """Upload page: lists UserFile rows and exposes a FileLibraryForm whose
    library choices are restricted to the requesting user's own libraries."""
    model = UserFile
    template_name = 'files/file_upload.html'

    def get_context_data(self, **kwargs):
        """Add `upload_form` to the template context, with its
        `library_select` queryset limited to libraries the current user
        created (excluding the internal 'user-uploads' lab type)."""
        context = super().get_context_data(**kwargs)
        context['upload_form'] = FileLibraryForm(request = self.request)
        u = self.request.user
        # Removed leftover debug print() calls that leaked the request and
        # user objects to stdout on every page load.
        # Own libraries (library_select shares this queryset).
        q = Library.objects.filter(created_by__exact = u)\
          .exclude(lab__lab_type = 'user-uploads')
        context['upload_form'].fields['library_select'].queryset = q
        return context
|
[
"[email protected]"
] | |
2b900473f8ebad3774236008a4ce12609bd077c4
|
c4af67db4c523d20f2d55aef90ba77db1fb53c38
|
/validation/tests/test_validation.py
|
c1128b9d609b6db323abf0d49d809d2207be7177
|
[] |
no_license
|
dtgit/dtedu
|
e59b16612d7d9ea064026bf80a44657082ef45a3
|
d787885fe7ed0de6f9e40e9b05d852a0e9d60677
|
refs/heads/master
| 2020-04-06T05:22:50.025074 | 2009-04-08T20:13:20 | 2009-04-08T20:13:20 | 171,351 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,060 |
py
|
from Testing import ZopeTestCase
from Products.Archetypes.tests.atsitetestcase import ATSiteTestCase
from Testing.ZopeTestCase import doctest
from Products.validation import validation
class TestValidation(ATSiteTestCase):
    """Exercises the named validators registered with the validation
    service; a validator returns 1 on success.

    NOTE: failUnlessEqual / failIfEqual / failUnless are the pre-2.7
    unittest aliases of assertEqual / assertNotEqual / assertTrue.
    """
    def test_inNumericRange(self):
        v = validation.validatorFor('inNumericRange')
        self.failUnlessEqual(v(10, 1, 20), 1)
        self.failUnlessEqual(v('10', 1, 20), 1)  # numeric strings are coerced
        self.failIfEqual(v(0, 4, 5), 1)
    def test_isPrintable(self):
        v = validation.validatorFor('isPrintable')
        self.failUnlessEqual(v('text'), 1)
        self.failIfEqual(v('\u203'), 1)  # non-printable escape rejected
        self.failIfEqual(v(10), 1)       # non-strings rejected
    def test_isSSN(self):
        v = validation.validatorFor('isSSN')
        self.failUnlessEqual(v('111223333'), 1)
        # `ignore` strips matching characters before validation.
        self.failUnlessEqual(v('111-22-3333', ignore=r'-'), 1)
    def test_isUSPhoneNumber(self):
        v = validation.validatorFor('isUSPhoneNumber')
        self.failUnlessEqual(v('(212) 555-1212',
                               ignore=r'[\s\(\)\-]'), 1)
        self.failUnlessEqual(v('2125551212',
                               ignore=r'[\s\(\)\-]'), 1)
        # Also accepted without an explicit ignore pattern.
        self.failUnlessEqual(v('(212) 555-1212'), 1)
    def test_isURL(self):
        v = validation.validatorFor('isURL')
        self.failUnlessEqual(v('http://foo.bar:8080/manage'), 1)
        self.failUnlessEqual(v('https://foo.bar:8080/manage'), 1)
        self.failUnlessEqual(v('irc://[email protected]:6667/#plone'), 1)
        self.failUnlessEqual(v('fish://tiran:password@myserver/~/'), 1)
        self.failIfEqual(v('http://\n'), 1)   # embedded newline rejected
        self.failIfEqual(v('../foo/bar'), 1)  # relative paths rejected
    def test_isEmail(self):
        v = validation.validatorFor('isEmail')
        self.failUnlessEqual(v('[email protected]'), 1)
        self.failIfEqual(v('@foo.bar'), 1)  # missing local part
        self.failIfEqual(v('me'), 1)        # missing domain
    def test_isMailto(self):
        v = validation.validatorFor('isMailto')
        self.failUnlessEqual(v('mailto:[email protected]'), 1)
        self.failIfEqual(v('[email protected]'), 1)   # scheme required
        self.failIfEqual(v('mailto:@foo.bar'), 1)
        self.failIfEqual(v('@foo.bar'), 1)
        self.failIfEqual(v('mailto:'), 1)   # empty address rejected
        self.failIfEqual(v('me'), 1)
    def test_isUnixLikeName(self):
        v = validation.validatorFor('isUnixLikeName')
        self.failUnlessEqual(v('abcd'), 1)
        self.failUnless(v('a_123456'), 1)
        self.failIfEqual(v('123'), 1)       # must not start with a digit
        self.failIfEqual(v('ab.c'), 1)
        self.failIfEqual(v('ab,c'), 1)
        self.failIfEqual(v('aaaaaaaab'), 1) # too long
    def test_isValidId(self):
        v = validation.validatorFor("isValidId")
        self.failIfEqual(v("a b", object()), 1)  # spaces are invalid in ids
        # TODO: more tests require a site
def test_suite():
    """Assemble the unittest suite plus package doctest modules."""
    from unittest import TestSuite, makeSuite
    suite = TestSuite()
    suite.addTest(makeSuite(TestValidation))
    # Modules whose docstrings contain runnable doctest examples.
    doctests = (
        'Products.validation.validators.ExpressionValidator',
        )
    for module in doctests:
        suite.addTest(doctest.DocTestSuite(module))
    return suite
|
[
"[email protected]"
] | |
b0f222bd5a142c4071329ebd4c463e0851986566
|
d9a1999622bed85264ac65c57e90368d1a6e0e3e
|
/IoT/Flask/Exercise2.py
|
07ed4802fe801fde1e0b9bc9755ec8a7692341d5
|
[] |
no_license
|
dev-saem/IoT
|
38769f1906429c039e310daa1f8da74549c6e3e2
|
43ba904dd013c3d49ca8bfaa93cf2d6385d64927
|
refs/heads/main
| 2023-08-20T02:08:36.760510 | 2021-10-21T05:11:40 | 2021-10-21T05:11:40 | 419,588,914 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,244 |
py
|
import RPi.GPIO as GPIO
from flask import Flask
from time import sleep
app = Flask(__name__)
@app.route('/')
def hello():
    # Simple landing / health-check endpoint.
    return "hello world"
@app.route('/fan/<onoff>/<time>')
def fanonoff(onoff, time):
    """Turn the fan on for 1-3 seconds.

    URL parameters:
        onoff -- must be "on" to activate the fan
        time  -- duration in seconds, as the string "1", "2" or "3"

    Returns the confirmation string; any other combination returns None
    (Flask then reports an error), matching the original behaviour.
    Fixed: three nearly-identical copy-pasted branches collapsed into one.
    """
    words = {1: 'one', 2: 'two', 3: 'three'}
    if onoff == "on" and time in ("1", "2", "3"):
        seconds = int(time)
        print("FAN on for %s second" % words[seconds])
        # GPIO18 high / GPIO27 low drives the fan motor forward.
        GPIO.output(18, 1)
        GPIO.output(27, 0)
        sleep(float(seconds))
        # Stop the fan.
        GPIO.output(18, 0)
        GPIO.output(27, 0)
        unit = "second" if seconds == 1 else "seconds"
        return "FAN on for %d %s" % (seconds, unit)
if __name__ == "__main__":
    # BCM pin numbering; all output pins start low.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(4,GPIO.OUT, initial = GPIO.LOW)  # NOTE(review): pin 4 is configured but never driven above
    GPIO.setup(18, GPIO.OUT, initial = GPIO.LOW)
    GPIO.setup(27, GPIO.OUT, initial = GPIO.LOW)
    # Listen on all interfaces; debug=True is for development only.
    app.run(host = '0.0.0.0', port = 5000, debug = True)
|
[
"[email protected]"
] | |
edd7334352747e1e9b08be0af986b1239e3ee6fe
|
5a25edcf994a760688dc7c933e8071bf4ff24df3
|
/exercises/ja/solution_01_08_01.py
|
01762ddd77ee431a33af88413c4449ddfc5b02bc
|
[
"CC-BY-NC-4.0",
"MIT"
] |
permissive
|
heyMP/spacy-course
|
8762990ed6179011680730d9c24d5d34c0a8d954
|
3740c717f0d1090b01c1b0fe23f8e30af3bf0101
|
refs/heads/master
| 2022-11-07T21:52:15.479840 | 2020-06-25T18:13:44 | 2020-06-25T18:13:44 | 275,202,487 | 1 | 0 |
MIT
| 2020-06-26T16:39:32 | 2020-06-26T16:39:31 | null |
UTF-8
|
Python
| false | false | 476 |
py
|
import spacy
nlp = spacy.load("en_core_web_sm")
text = "It’s official: Apple is the first U.S. public company to reach a $1 trillion market value"
# Process the text
doc = nlp(text)
for token in doc:
    # Get the token text, part-of-speech tag and dependency label
    token_text = token.text
    token_pos = token.pos_
    token_dep = token.dep_
    # Format and print them in aligned columns
    print(f"{token_text:<12}{token_pos:<10}{token_dep:<10}")
|
[
"[email protected]"
] | |
bc77d77f9880d9fc4b4e175cf37ac58d606f29d5
|
1879ada1df11f83dc0bedc69d06aea619adaf166
|
/collections/lists_01.py
|
c9f2ea884b9417ccaf46ae10ffecbe7c5832b192
|
[] |
no_license
|
larajorge11/python-davor-training
|
0ef4fdb6a07d570b2a233960842ce6bbec84af2d
|
5c14109a53896ead9eb5acac9e6fac1c83f5c406
|
refs/heads/main
| 2023-08-26T19:53:10.168900 | 2021-11-04T22:15:31 | 2021-11-05T16:04:45 | 339,914,851 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 904 |
py
|
# Lists are mutable
nombres = ['Juan', 'Carla', 'Stefani', 'Jacobo', 'Bianca']
# Print the list of names
print(nombres)
# Accessing elements
print(nombres[0])
print(nombres[-1]) # last element of the list
# Print a slice (range of elements)
print(nombres[0:2])
print(nombres[:3])
print(nombres[2:])
nombres[1] = 'Davor'
for nombre in nombres:
    print(nombre)
# Ask for the length of a list
print(f'elementos de la lista {len(nombres)}')
# Append a new element
nombres.append('Brandon')
print(nombres)
# Insert at a specific index
nombres.insert(1, 'Octavio')
print(nombres)
# Remove an element by value
nombres.remove('Octavio')
print(nombres)
# Remove the last element of the list
nombres.pop()
print(nombres)
# Remove a specific element by index
del nombres[0]
print(nombres)
# Clear the list
nombres.clear()
print(nombres)
# Remove the list from memory
del nombres
# Fixed: printing `nombres` here raised NameError, because after `del`
# the name no longer exists.
# print(nombres)
|
[
"[email protected]"
] | |
85a7bdd7a9ae0094ba0970d2c19c7db253851acb
|
4b866626551640b701274cc1f6a8ea8966199c2d
|
/2019/Day_07/day7_Amplification_Circuit_Class.py
|
75f27a0663e2174a2a88387170563beb80085fc8
|
[] |
no_license
|
MaxTechniche/AdventOfCode
|
5a496b03c4348214a3abd990c5de8e10bd3a4fff
|
8d67cc151abc686cb06770422b795307f7cff170
|
refs/heads/main
| 2023-02-03T00:55:35.057710 | 2022-12-18T23:21:47 | 2022-12-18T23:21:47 | 307,274,063 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,408 |
py
|
og = [3,8,1001,8,10,8,105,1,0,0,21,46,67,76,101,118,199,280,361,442,99999,3,9,1002,9,4,9,1001,9,2,9,102,3,9,9,101,3,9,9,102,2,9,9,4,9,99,3,9,1001,9,3,9,102,2,9,9,1001,9,2,9,1002,9,3,9,4,9,99,3,9,101,3,9,9,4,9,99,3,9,1001,9,2,9,1002,9,5,9,101,5,9,9,1002,9,4,9,101,5,9,9,4,9,99,3,9,102,2,9,9,1001,9,5,9,102,2,9,9,4,9,99,3,9,1002,9,2,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,1,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,2,9,9,4,9,99,3,9,101,1,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,102,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,101,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,2,9,9,4,9,99,3,9,1001,9,1,9,4,9,3,9,1002,9,2,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,1,9,9,4,9,3,9,102,2,9,9,4,9,3,9,1001,9,1,9,4,9,3,9,1002,9,2,9,4,9,3,9,1001,9,1,9,4,9,3,9,101,1,9,9,4,9,3,9,101,2,9,9,4,9,99,3,9,1002,9,2,9,4,9,3,9,1001,9,1,9,4,9,3,9,101,2,9,9,4,9,3,9,101,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,1001,9,2,9,4,9,99,3,9,102,2,9,9,4,9,3,9,102,2,9,9,4,9,3,9,101,2,9,9,4,9,3,9,101,1,9,9,4,9,3,9,101,2,9,9,4,9,3,9,1001,9,2,9,4,9,3,9,1001,9,2,9,4,9,3,9,101,2,9,9,4,9,3,9,1002,9,2,9,4,9,3,9,101,2,9,9,4,9,99]
og = [3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0]
class Amp():
    """One Intcode amplifier (Advent of Code 2019, day 7).

    Executes its own program copy; the first opcode-3 input request is
    answered with `phase`, every later one with the value passed to run().
    """
    def __init__(self, phase, sequence=og):
        # NOTE(review): `sequence=og` is a mutable default shared across
        # instances; callers avoid aliasing by passing og.copy().
        self.phase = phase
        self.sequence = sequence
        self.phase_used = False  # phase is consumed by the first opcode 3
        self.four_value = None   # last value emitted by opcode 4
    def run(self, input_):
        """Execute until opcode 99; return the last opcode-4 output."""
        self.pos = 0
        self.input_ = input_
        while self.pos < len(self.sequence):
            # Zero-pad the instruction to 5 digits: three parameter-mode
            # digits followed by the 2-digit opcode.
            x = str(self.sequence[self.pos])
            while len(x) < 5:
                x = '0' + x
            ones = int(x[-3])    # mode of parameter 1
            twos = int(x[-4])    # mode of parameter 2
            threes=int(x[-5])    # mode of parameter 3
            code = int(x[-2:])
            if code == 99:
                # Halt: report the last output produced.
                return self.four_value
            # Resolve each parameter to an index into `sequence`:
            # mode 0 (position) -> the cell holds an address;
            # mode 1 (immediate) -> the cell itself is used.
            if ones == 0:
                try:
                    one_spot = self.sequence[self.pos+1]
                except IndexError:
                    one_spot = None
                else:
                    one_spot = self.pos+1
            if twos == 0:
                try:
                    two_spot = self.sequence[self.pos+2]
                except IndexError:
                    two_spot = None
            else:
                two_spot = self.pos+2
            if threes == 0:
                try:
                    three_spot = self.sequence[self.pos+3]
                except IndexError:
                    three_spot = None
            else:
                three_spot = self.pos+3
            self.spots = (0, one_spot, two_spot, three_spot)
            self.process_code(code)
    def process_code(self, code):
        """Dispatch a single opcode; each handler advances self.pos."""
        print(self.sequence)  # NOTE(review): noisy debug print — remove?
        if code == 1:
            self.one()
        elif code == 2:
            self.two()
        elif code == 3:
            self.three()
        elif code == 4:
            self.four()
        elif code == 5:
            self.five()
        elif code == 6:
            self.six()
        elif code == 7:
            self.seven()
        elif code == 8:
            self.eight()
    def one(self):
        # Opcode 1: addition.
        self.sequence[self.spots[3]] = self.sequence[self.spots[1]] + \
            self.sequence[self.spots[2]]
        self.pos += 4
    def two(self):
        # Opcode 2: multiplication.
        self.sequence[self.spots[3]] = self.sequence[self.spots[1]] * \
            self.sequence[self.spots[2]]
        self.pos += 4
    def three(self):
        # Opcode 3: input — phase setting first, then the run() argument.
        if self.phase_used:
            self.sequence[self.spots[1]] = self.input_
        else:
            self.sequence[self.spots[1]] = self.phase
            self.phase_used = True
        self.pos += 2
    def four(self):
        # Opcode 4: output. NOTE(review): a zero output is skipped by this
        # truthiness test — confirm that is intended.
        if self.sequence[self.spots[1]]:
            self.four_value = self.sequence[self.spots[1]]
        self.pos += 2
    def five(self):
        # Opcode 5: jump-if-true.
        if self.sequence[self.spots[1]] != 0:
            self.pos = self.sequence[self.spots[2]]
        else:
            self.pos += 3
    def six(self):
        # Opcode 6: jump-if-false.
        if self.sequence[self.spots[1]]:
            self.pos += 3
        else:
            self.pos = self.sequence[self.spots[2]]
    def seven(self):
        # Opcode 7: less-than — stores 1/0.
        if self.sequence[self.spots[1]] < self.sequence[self.spots[2]]:
            self.sequence[self.spots[3]] = 1
        else:
            self.sequence[self.spots[3]] = 0
        self.pos += 4
    def eight(self):
        # Opcode 8: equals — stores 1/0.
        if self.sequence[self.spots[1]] == self.sequence[self.spots[2]]:
            self.sequence[self.spots[3]] = 1
        else:
            self.sequence[self.spots[3]] = 0
        self.pos += 4
from itertools import permutations
input_1 = 0      # initial signal into the first amplifier
best_thrust = 0
for combo in permutations([5, 6, 7, 8, 9], 5):  # feedback-loop phase settings
    print(combo)
    amp_list = []
    for phase in combo:
        # Each amplifier runs on its own copy of the program.
        amp_list.append(Amp(phase, og.copy()))
    combo_score = 0
    x = 0        # index of the amplifier currently running
    output = amp_list[x].run(input_1)
    combo_score += output
    while output:
        # Advance to the next amplifier, wrapping 4 -> 0.
        if x > 3:
            x = 0
        else:
            x += 1
        # NOTE(review): `output == True` only matches the int 1, not any
        # other truthy value — this looks like a fragile exit condition.
        if output == True:
            break
        # NOTE(review): the puzzle scores only the final output of the
        # last amplifier; summing intermediate outputs may be a bug.
        combo_score += output
        output = amp_list[x].run(output)
    best_thrust = max(best_thrust, combo_score)
print(best_thrust)
|
[
"[email protected]"
] | |
3e74f996fc5b64ab1e959cc5a5966dd6c8d6023d
|
8cbb7291096499bd2cefc0bfe9acfdfa0baa1fcc
|
/script_main.py
|
b25feb3f1ae4ad8a1bda1a8f345e4b594389d45c
|
[] |
no_license
|
a1015970/SSE_Exercise2
|
944cbae3c9458ae038fba5e97f27b4fd96873d1c
|
377b3cd496f0ed6bdd446913c980d626a416ac8f
|
refs/heads/master
| 2020-07-18T18:13:41.365392 | 2019-09-29T08:04:04 | 2019-09-29T08:04:04 | 206,290,408 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,487 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 22:45:02 2019
@author: Chris Crouch - a1015970
"""
import analyze_git_commit
from git import Repo, RemoteProgress
import os
class Progress(RemoteProgress):
    """Prints GitPython clone/fetch progress lines to stdout."""
    def update(self, op_code, cur_count, max_count=None, message=''):
        # _cur_line is the latest raw progress line parsed by RemoteProgress.
        print(self._cur_line)
#%%
def _clone_and_analyze(local_link, remote_link, fixing_commit):
    """Clone the repository if missing, print its origin URL, and run the
    security-fix commit analysis on `fixing_commit`."""
    if not os.path.isdir(local_link):
        Repo.clone_from(remote_link, local_link, progress=Progress())
    repo = Repo(local_link)
    print("\n\n", repo.git.remote('get-url', 'origin'))
    analyze_git_commit.analyze_git_commit(local_link, fixing_commit)


# Fixed: the same clone/print/analyze sequence was copy-pasted three
# times; it is now driven by a table.
# NOTE(review): the first and third hashes are 39 hex chars — a full
# SHA-1 is 40; confirm they were not truncated.
_TARGETS = [
    ("../camel", "https://github.com/apache/camel",
     "235036d2396ae45b6809b72a1983dee33b5ba32"),
    ("../junit-plugin", "https://github.com/jenkinsci/junit-plugin",
     "15f39fc49d9f25bca872badb48e708a8bb815ea7"),
    ("../jackson-databind", "https://github.com/FasterXML/jackson-databind",
     "7487cf7eb14be2f65a1eb108e8629c07ef45e0a"),
]

for _local, _remote, _commit in _TARGETS:
    _clone_and_analyze(_local, _remote, _commit)
|
[
"[email protected]"
] | |
0c14069b57089a2f4811e42046ae71152a6bdbd6
|
f066a92934f4d6c64d20bc3c455ee1100624983b
|
/run/stdio.py
|
4c26791e53b62afe6a488d1086c07419ea38a737
|
[] |
no_license
|
BitOpenFPGA/fpga_test_soc
|
229a15f0ff7280546a6a250dab624db04b196dd8
|
fe7c5f7b655371ed3be7f2171301608e24180491
|
refs/heads/master
| 2022-04-07T12:49:55.872758 | 2020-03-21T10:49:15 | 2020-03-21T10:49:15 | 304,899,091 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,166 |
py
|
#!/usr/bin/env python
import sys
import atexit
import termios
import os
orig_term = None
##################################################################
# stdio_init
##################################################################
def stdio_init():
    """Switch stdin to unbuffered, non-echoing, non-blocking mode.

    Saves the current terminal attributes in the module-global
    `orig_term` so stdio_close (registered via atexit) can restore
    them on exit.
    """
    atexit.register(stdio_close)
    global orig_term
    orig_term = termios.tcgetattr(sys.stdin)
    new_settings = termios.tcgetattr(sys.stdin)
    # Disable echo and canonical (line-buffered) input.
    new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON)
    # VMIN=0 / VTIME=0: reads return immediately, possibly with no data.
    new_settings[6][termios.VMIN] = 0
    new_settings[6][termios.VTIME] = 0
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
##################################################################
# stdio_close
##################################################################
def stdio_close():
    """Restore the terminal attributes saved by stdio_init (no-op if
    stdio_init was never called)."""
    if orig_term:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_term)
##################################################################
# stdio_read
##################################################################
def stdio_read():
    """Read a single byte from stdin; return None when no data is
    available (stdin is in non-blocking mode after stdio_init)."""
    data = os.read(sys.stdin.fileno(), 1)
    return data if data else None
|
[
"[email protected]"
] | |
0e3874a0fdfbd8a253c22505d38822612cf2a233
|
78c25dc9d381286209565e94ab24fe10a0924137
|
/bigneuron_app/fleet/tasks.py
|
58f2a8b4209f809a15c437c0550b8ecd4916a62a
|
[] |
no_license
|
Freshwood/vaa3d-api
|
c8c5d044c355685395d7b433e90d7759cad88b08
|
aeb3f22b2ab384f98d8ed0657645008430e78135
|
refs/heads/master
| 2020-05-27T23:59:34.053831 | 2016-02-06T18:58:01 | 2016-02-06T18:59:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 479 |
py
|
import time
import traceback
from bigneuron_app import tasks_log
from bigneuron_app.fleet import fleet_manager
FLEET_UPDATE_SLEEP=60
FLEET_UPDATE_MAX_RUNS=20
def update_fleet():
    """Periodically refresh fleet capacity (Python 2 code).

    Runs at most FLEET_UPDATE_MAX_RUNS iterations, sleeping
    FLEET_UPDATE_SLEEP seconds after each attempt; failures are
    logged and the loop continues.
    """
    count = 0
    while count < FLEET_UPDATE_MAX_RUNS:
        try:
            tasks_log.info("Update Jobs and JobItems Fleets. Attempt " + str(count))
            fleet_manager.update_fleet_capacity()
        except Exception, err:  # Python 2 except syntax; `err` is unused
            tasks_log.error(traceback.format_exc())
        finally:
            # Count and sleep even on failure so the loop terminates.
            count += 1
            time.sleep(FLEET_UPDATE_SLEEP)
|
[
"[email protected]"
] | |
40b5a35183c69290d55869b4fcaa55d82c2b2f6b
|
a4956b95dea8412758af71fde5b06bee3575a06f
|
/Python/venv/Scripts/pip3-script.py
|
dee4588a446332bfe46f715e02399790b54880e8
|
[
"AFL-3.0"
] |
permissive
|
gzy23/myexercise
|
7188b9ba65562be118775502c4a63c4b5b6d8e67
|
471e45919c44efcb1a879dcade2ff5a87310e372
|
refs/heads/master
| 2022-11-18T22:42:36.622220 | 2020-07-08T10:05:33 | 2020-07-08T10:05:33 | 276,901,623 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 392 |
py
|
#!F:\gitwork\Python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper for `pip3`;
# do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" from argv[0] so pip reports
    # its own name correctly on Windows.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
|
[
"[email protected]"
] | |
d8068915b32c07cb896a8397d6b917f876d3b5fe
|
4549c02dac55f2b8ed113ddacf95264630d91698
|
/Predict/__init__.py
|
022ca012e6428d9d89aadfa04b3b27bb059bca6b
|
[] |
no_license
|
kiminh/GPA
|
43077bb59797a096e3660b226642c5fe091a9663
|
29c5ffd8d7aa1bc3ebe6d83d1e55a997a04e4b60
|
refs/heads/master
| 2021-02-10T20:45:35.581410 | 2020-01-14T08:59:00 | 2020-01-14T08:59:00 | 244,417,955 | 1 | 0 | null | 2020-03-02T16:23:00 | 2020-03-02T16:23:00 | null |
UTF-8
|
Python
| false | false | 98 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/19 10:58
# @Author : zxl
# @FileName: __init__.py.py
|
[
"[email protected]"
] | |
e607164ee72ed5d0071b455388700dbe366a225e
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_windlasses.py
|
6ee113296ad40900fcef0fed2db7fb643eaa9caf
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 232 |
py
|
#calss header
class _WINDLASSES():
def __init__(self,):
self.name = "WINDLASSES"
self.definitions = windlass
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['windlass']
|
[
"[email protected]"
] | |
eaed1474671f49ae000323f1a00774c7a2c81b26
|
6d54a7a75768fe25dcdc56958f6ab215ac8a92d9
|
/to_do_app/settings.py
|
8bf0d1509732b00159e8c1429647634ce9838e4a
|
[] |
no_license
|
yigitkarabiyik/To-Do-App
|
f3362729230b0af0299a4969df3cff9ebe344457
|
b18e68b9b5ae4b547fab7a0a196a4879aa0055bd
|
refs/heads/main
| 2023-03-11T02:30:23.656828 | 2021-03-02T09:08:48 | 2021-03-02T09:08:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,427 |
py
|
"""
Django settings for to_do_app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates/')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ylp!o9@47kfr$ql!5#he4fv-k96a30o$$-aa_853hfd2_)(x(0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

# Fixed: '127.0.0.0' is a network address, not a host — the loopback
# host Django should accept is '127.0.0.1'.
ALLOWED_HOSTS = ['127.0.0.1', '0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app_to_do_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware'
]
ROOT_URLCONF = 'to_do_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'to_do_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorege'
|
[
"[email protected]"
] | |
4b62cb9c5ec11f7f8d43be007fc17be6e900bf2e
|
56909e729bc02088a79ac1d46111de7a52e4c9a6
|
/venv/dbms.py
|
d747ad4c5c69e6f417d24ff9428ebde68d2bb826
|
[] |
no_license
|
prasanthtummala/ArtGallery-DataBase
|
a1a623ace9554139a6a43f8102d9b5dd96b2d609
|
293ac756c64208a2382df82d861de072797b3fa0
|
refs/heads/master
| 2020-06-12T12:55:07.283929 | 2019-06-28T16:46:54 | 2019-06-28T16:46:54 | 194,305,653 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 622 |
py
|
import pymysql
# Open database connection
# NOTE(review): credentials are hardcoded — move to config/env.
db = pymysql.connect("localhost","root","123456","artgallery" )
# prepare a cursor object using cursor() method
cursor = db.cursor()
# NOTE(review): the query below is missing its table name, so execute()
# always fails and the bare `except` below silently masks that error.
sql = "SELECT * FROM "
try:
    # Execute the SQL command
    cursor.execute(sql)
    # Fetch all the rows in a list of lists.
    results = cursor.fetchall()
    for row in results:
        MNAME = row[0]
        gross = row[1]
        # Now print fetched result
        print ("{0} {1} ".format(MNAME,gross))
except:
    # NOTE(review): bare except hides the real failure; catch
    # pymysql.Error and log the exception instead.
    print ("Error: unable to fetch data")
# disconnect from server
db.close()
|
[
"[email protected]"
] | |
2f7b555b8a023acfc59b3616b78949d6bc53ab5f
|
3349a0d44da04fd9fae7728ce1315ccf0c82285e
|
/556A - case of zeroes and ones.py
|
c96ebf9ebc0e1aad3e01b362c37be5bd17da4cdb
|
[] |
no_license
|
umairnsr87/Data_Structures_Python
|
959848e546fd4f98959bc14470c26ce91bfb5c9c
|
05b5803521ed2ec7f64d95f08e2f014471dfdfd4
|
refs/heads/master
| 2023-07-18T12:11:55.245699 | 2023-07-16T17:01:09 | 2023-07-16T17:01:09 | 294,360,086 | 0 | 0 | null | 2023-07-16T17:01:10 | 2020-09-10T09:11:11 |
Python
|
UTF-8
|
Python
| false | false | 567 |
py
|
from collections import Counter

# Codeforces 556A: repeatedly deleting adjacent "01"/"10" pairs always
# leaves |count('0') - count('1')| characters, i.e.
# len(s) - 2 * min(zeros, ones). The three original branches (equal
# counts -> 0, one count zero -> len, otherwise len - 2*min) are all
# special cases of that single closed form.
test = int(input())   # declared string length (consumed to keep input order)
strings = input()

x = Counter(strings)
print(len(strings) - 2 * min(x['0'], x['1']))
|
[
"[email protected]"
] | |
afcad36fd990355b106a0e1875b71d72f6cde7f2
|
09911dfbc5b9e994599f15cac7d330582a8e7761
|
/guess.py
|
6870e1ff8bc64281f9fc02dc742c07278dd510fd
|
[] |
no_license
|
Xemicolon/NumberGuessingGame
|
2b05163495598f250f91aaf3ab12da443761ea6b
|
fada9df2420a6171f9e8075b4b8dd76596cf1991
|
refs/heads/master
| 2022-04-24T20:24:12.517192 | 2020-04-21T21:29:58 | 2020-04-21T21:29:58 | 257,325,504 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 808 |
py
|
from random import randint
def guessTheNumber(difficulty, range, chances):
    """Play one round of guess-the-number on stdin/stdout.

    Arguments:
        difficulty -- label shown to the player
        range      -- inclusive upper bound of the secret number
        chances    -- number of wrong guesses allowed

    NOTE: the parameter name `range` shadows the builtin; it is kept
    unchanged for interface compatibility.
    Fixed: removed the no-op `chances = chances` and narrowed the try
    block to the single line that can raise ValueError.
    """
    computer = randint(1, range)
    level = str(difficulty)
    print(f'Difficulty level: {level}\nChances: {chances}')
    while True:
        try:
            user_input = int(input(f'Guess the number between 1 - {range}: '))
        except ValueError:
            # Non-numeric input does not cost a chance.
            print(f'⚠ Please enter a whole number or integer.')
            continue
        chances -= 1
        print(f'Chances you have left: {chances}')
        if user_input == computer:
            print(f'✅ You got it right!')
            break
        print(f'❌ That was wrong.')
        if chances == 0:
            print(f'Game over! You exhausted your chances 😖\n')
            break
|
[
"[email protected]"
] | |
f3f53ccefea07832363a3f36b3f24a3daba97a46
|
5898a72d228beb1779ec68247ab89c3391c84ebd
|
/cgi-bin/getWaitingGame.py
|
49b4dbfd7587db29b40a73b4bf55fa6d897686d9
|
[] |
no_license
|
sirramsalott/boggle
|
1a2f1035fc219b3df6edcafaadead0375e62037a
|
242eca21ea982ed7fafec499ff9b18d7e1cd70d6
|
refs/heads/master
| 2022-12-14T06:01:38.561493 | 2020-09-04T08:36:10 | 2020-09-04T08:36:10 | 249,944,772 | 0 | 0 | null | 2022-12-12T16:02:38 | 2020-03-25T10:08:22 |
Python
|
UTF-8
|
Python
| false | false | 509 |
py
|
#!/usr/bin/python
import cgi, sys, json, cgitb
from boggleUser import Pupil
def response(pupilID):
    """Build the raw CGI response for a pupil polling for a waiting game.

    The JSON payload always carries a "found" flag; when a game exists
    its fields are merged in and the pupil is marked as no longer
    waiting.
    """
    g = Pupil.getWaitingGame(pupilID)
    out = {"found": bool(g)}
    if g:
        # A game was found — clear the pupil's waiting flag.
        Pupil(pupilID=pupilID).isWaiting(False)
        out.update(g)
    outJ = json.dumps(out)
    # NOTE(review): CGI requires a blank line between the headers and the
    # body; confirm the template below contains one (formatting may have
    # been lost in transit).
    return """Status: 200 OK
Content-Type: application/json
Content-Length: {}
{}""".format(len(outJ), outJ)
if __name__ == '__main__':
    # Python 2 CGI entry point: enable traceback pages, read the POSTed
    # pupilID and emit the response.
    cgitb.enable()
    post = cgi.FieldStorage()
    print response(int(post['pupilID'].value))
|
[
"[email protected]"
] | |
b8ca63d31d28bcd4eed0f383b52286e292bb95cb
|
50f8a39a43c2f20953e77690d102eabefcb8c348
|
/venv/bin/gunicorn
|
ce4411beceb66e192ef16f8bdeb55039cf5ba357
|
[
"MIT"
] |
permissive
|
asandelarvine/News_App
|
0f5d5917643ded67f770ff60772197064f2e734b
|
393d8314586053024a0924b91d2e1912d12df2f4
|
refs/heads/main
| 2023-09-03T20:22:47.514495 | 2021-11-03T00:55:52 | 2021-11-03T02:00:05 | 422,500,708 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 246 |
#!/home/moringa/random/News_App/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for gunicorn; do not edit by hand.
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Normalize argv[0] (strip "-script.py"/".exe" suffixes) before
    # handing control to gunicorn's WSGI application runner.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"[email protected]"
] | ||
9a78b5d66a23140e8ef2d9c21dd84cfc729271a3
|
701d7413adc68a4f05fbb96e7981646e7bb3bee8
|
/fh/opl/Solutions-session5/Solutions-new/ex4_perfectsupto.py
|
1359ad1d660fc5f4bd7d0338157b238316a8e3a7
|
[] |
no_license
|
emildekeyser/tutoring
|
ee9311a0b65879284d33cdf154de3dac6b735f03
|
512593cd96247ae84c619a64279d0051c3ac16f9
|
refs/heads/master
| 2020-11-27T01:39:48.486085 | 2019-12-20T12:25:35 | 2019-12-20T12:25:35 | 197,552,157 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 590 |
py
|
# Calculate the sum of all divisors of n
# Calculate the sum of all proper divisors of n
def sum_of_proper_divisors(n):
    """Return the sum of the proper divisors of n (divisors strictly < n).

    Fixed edge case: the original returned 1 for n < 2, but 1 has no
    proper divisors (and 0/negatives have none here) — return 0 instead.
    """
    if n < 2:
        return 0
    s = 1  # 1 divides every n >= 2
    for i in range(2, n):
        # Check if i divides n and if it does, add it to the sum s
        if n % i == 0:
            s += i
    return s
def is_perfect(n):
    """A number is perfect when it equals the sum of its proper divisors."""
    return sum_of_proper_divisors(n) == n
def perfects_up_to(n):
    """Print every perfect number in [2, n), tab-separated on one line."""
    for candidate in range(2, n):
        if is_perfect(candidate):
            print(candidate, end="\t")
def main():
    # Read the upper bound and list all perfect numbers below it.
    n = int(input("Enter a number: "))
    print("All perfect numbers up to %d are : " % n)
    perfects_up_to(n)
main()
|
[
"[email protected]"
] | |
8c8e0126b4969636ebe2d414567e598beb70bf2c
|
e9a9955da9bee9be6580f1b1a75f97a1f99d0289
|
/login/migrations/0016_auto_20190803_1452.py
|
eb4f2ea18f1fff82b8ba290db60a29457a52f715
|
[] |
no_license
|
Manjunatha1997/project_IT
|
bdb36142256b9d4eb1b75a76994d801dd3c33013
|
fe58a30d033d4f4ed818c0282a802fafcf3aaff5
|
refs/heads/master
| 2021-02-28T04:17:13.872903 | 2020-03-07T15:48:49 | 2020-03-07T15:48:49 | 245,661,299 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 637 |
py
|
# Generated by Django 2.1.7 on 2019-08-03 14:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the Profile dob/doj defaults."""

    dependencies = [
        ('login', '0015_auto_20190803_0435'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='dob',
            # NOTE(review): the default is the fixed timestamp captured
            # when the migration was generated, not evaluated per row —
            # typical of auto-generated Django defaults; confirm intended.
            field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693918)),
        ),
        migrations.AlterField(
            model_name='profile',
            name='doj',
            field=models.DateField(default=datetime.datetime(2019, 8, 3, 14, 52, 29, 693948)),
        ),
    ]
|
[
"[email protected]"
] | |
40654b027f3ff750ac5224e44425c099db471f46
|
8ed759d055c99133eb58d229f291295e77f98286
|
/tcpServer.py
|
43a185103fe5cf01da6efda01f6f455eaa40aa4f
|
[] |
no_license
|
anjali-92/Python
|
67b6fca149604cc6ac81b553c8775f9c9c62f0ab
|
353355c0f95dae0ea956c77111314435e4f30d59
|
refs/heads/master
| 2020-12-28T21:06:38.782775 | 2020-11-19T11:39:59 | 2020-11-19T11:39:59 | 33,402,958 | 0 | 0 | null | 2016-09-23T05:31:33 | 2015-04-04T11:11:00 |
Python
|
UTF-8
|
Python
| false | false | 434 |
py
|
# Echo server program (Python 2)
import socket
HOST = ''                 # Symbolic name meaning all available interfaces
PORT = 50011              # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
# Serve exactly two client connections, echoing one message each.
for i in range(0,2):
    conn ,addr = s.accept()
    print 'Connected by',addr
    data = conn.recv(1024)
    # NOTE(review): `break` on empty data exits the loop without closing
    # `conn` — the client socket leaks on that path.
    if not data: break
    conn.sendall(data)
    conn.close()
s.close()
|
[
"[email protected]"
] | |
01a663efab3a49d161c2793fb093704b775a3579
|
32d9ca9bda3d132179e952e3bd94f99f5de2d0cb
|
/train.py
|
420b4f7a24b35b604145e6f28bac12ecdc80322f
|
[] |
no_license
|
mcao516/PyTorch-ESIM
|
280efbae434d680ff0cce4b31ac00f772a9626de
|
5f01cebae70a8401cec61c89939d32da7c7a01df
|
refs/heads/master
| 2022-02-19T12:28:16.041379 | 2019-10-04T15:30:44 | 2019-10-04T15:30:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,146 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import MultiStepLR
from sklearn.metrics import classification_report
from ESIM import ESIM
from utils import Progbar, to_device
class Model:
"""Enhanced Sequential Inference Model (ESIM) for natural language inference.
"""
def __init__(self, args):
"""Class initialization.
"""
self.args = args
self.logger = args.logger
# initialziation
self.model = self._build_model()
self.model.to(args.device)
self._initialize_model(self.model)
self.optimizer = self._get_optimizer(self.model)
self.scheduler = self._get_scheduler(self.optimizer)
self.criterion = self._get_criterion()
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
self.logger.info("- Let's use {} GPUs !".format(torch.cuda.device_count()))
self.model = nn.DataParallel(self.model)
else:
self.logger.info("- Train the model on single GPU :/")
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
self.logger.info("- Let's do distributed training !")
self.model = nn.parallel.DistributedDataParallel(self.model,
device_ids=[args.local_rank],
output_device=args.local_rank)
# tensorboard
if args.write_summary and args.local_rank in [-1, 0]:
self.logger.info("- Let's use tensorboard on local rank {} device :)".format(args.local_rank))
self.writer = SummaryWriter(self.args.summary_path)
def _build_model(self):
"""Build ESIM model.
"""
return ESIM(self.args.vector_size,
self.args.vocab_size,
self.args.hidden_size,
self.args.class_num,
self.args.dropout)
def _initialize_model(self, model):
"""Initialize ESIM model paramerters.
"""
for p in model.parameters():
if p.dim() > 1:
nn.init.uniform_(p, a=-0.1, b=0.1)
def initialize_embeddings(self, vectors):
"""Load pre-trained word embeddings.
"""
if isinstance(self.model, nn.DataParallel):
self.model.module.load_embeddings(vectors)
else:
self.model.load_embeddings(vectors)
def _set_parameter_requires_grad(self):
"""Specify which parameters need compute gradients.
"""
# we don't need this right now
pass
def _get_optimizer(self, model):
"""Get optimizer for model training.
"""
if self.args.optimizer == 'sgd':
optimizer = optim.SGD(model.parameters(),
lr=self.args.lr,
momentum=self.args.momentum)
elif self.args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=self.args.lr)
else:
self.logger.info("Unknow optimizer: {}, exiting...".format(self.args.optimizer))
exit()
return optimizer
def _get_scheduler(self, optimizer):
"""Get scheduler for adjusting learning rate.
"""
return MultiStepLR(optimizer, milestones=[25], gamma=0.1)
def _get_criterion(self):
"""Loss function.
"""
return nn.CrossEntropyLoss()
def load_weights(self, model_dir):
"""Load pre-trained model weights.
"""
self.model.load_state_dict(torch.load(os.path.join(model_dir, "esim.pickle")))
def save_model(self, model_dir=None):
"""Save model's weights.
"""
if not model_dir:
model_dir = self.args.model_dir
torch.save(self.model.state_dict(), os.path.join(model_dir, "esim.pickle"))
self.logger.info("- ESIM model is saved at: {}".format(
os.path.join(model_dir, "esim.pickle")))
def loss_batch(self, p, h, labels, criterion, optimizer=None):
    """Run one forward pass (and optionally one optimizer step) on a batch.

    Arguments:
        p {torch.Tensor} -- premise [batch, seq_len]
        h {torch.Tensor} -- hypothesis [batch, seq_len]
        labels {torch.Tensor} -- gold labels [batch]
        criterion {torch.nn.Loss} -- loss function

    Keyword Arguments:
        optimizer {torch.optim.Optimizer} -- PyTorch optimizer; when None
            the batch is forward-only and no parameters are updated.

    Returns:
        loss {float} -- scalar loss value for this batch
        logits {torch.Tensor} -- raw, unnormalized scores for each class
            with shape [batch, class_num], detached from the graph
    """
    logits = self.model(p, h)
    loss = criterion(logits, labels)
    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training
    if optimizer is not None:
        with torch.set_grad_enabled(True):
            loss.backward()  # compute gradients
            # Clip the global gradient norm before stepping.
            nn.utils.clip_grad_norm_(self.model.parameters(),
                                     self.args.max_grad_norm)
            optimizer.step()  # update model parameters
            optimizer.zero_grad()  # clean all gradients
    return loss.item(), logits.detach()
def train_epoch(self, train_iter, criterion, optimizer, epoch):
    """Train the model for one single epoch.

    Arguments:
        train_iter -- iterable of training batches; each batch is a dict
            with 'premise', 'hypothesis' and 'label' entries
        criterion -- loss function
        optimizer -- optimizer used for the parameter updates
        epoch {int} -- current epoch index (used as tensorboard x-offset)

    Returns:
        epoch_loss {float} -- batch loss averaged over the epoch
    """
    self.model.train()  # set the model to training mode
    prog = Progbar(target=len(train_iter))
    train_loss = 0.0
    for i, batch in enumerate(train_iter):
        batch_data = to_device(batch, self.args.device)
        batch_loss, _ = self.loss_batch(batch_data['premise'],
                                        batch_data['hypothesis'],
                                        batch_data['label'],
                                        criterion,
                                        optimizer=optimizer)
        train_loss += batch_loss
        prog.update(i + 1, [("train loss", batch_loss)])
        # Per-batch loss is logged only on the master process.
        if self.args.local_rank in [-1, 0] and self.writer:
            self.writer.add_scalar('batch_loss', batch_loss, epoch*len(train_iter) + i + 1)
    # compute the average loss (batch loss)
    epoch_loss = train_loss / len(train_iter)
    # update scheduler
    self.scheduler.step()
    return epoch_loss
def evaluate(self, dev_iter, criterion):
    """Evaluate the model on a validation set (no gradient updates).

    Arguments:
        dev_iter -- iterable of validation batches
        criterion -- loss function

    Returns:
        avg_loss {float} -- loss averaged over batches
        avg_acc {torch.Tensor} -- fraction of correct predictions over
            the whole dataset (double scalar tensor)
    """
    self.model.eval()  # set the model to evaluation mode
    with torch.no_grad():
        eval_loss, eval_corrects = 0.0, 0.0
        for _, batch in enumerate(dev_iter):
            batch_data = to_device(batch, self.args.device)
            batch_loss, outputs = self.loss_batch(batch_data['premise'],
                                                  batch_data['hypothesis'],
                                                  batch_data['label'],
                                                  criterion,
                                                  optimizer=None)
            _, preds = torch.max(outputs, 1)  # preds: [batch_size]
            eval_loss += batch_loss
            eval_corrects += torch.sum(preds == (batch_data['label'])).double()
    avg_loss = eval_loss / len(dev_iter)
    avg_acc = eval_corrects / len(dev_iter.dataset)
    return avg_loss, avg_acc
def fit(self, train_iter, dev_iter):
    """Full training loop: per-epoch training, evaluation and checkpointing.

    The model is saved whenever the evaluation accuracy matches or beats
    the best accuracy seen so far. Evaluation, tensorboard logging and
    saving happen only on the master process (local_rank -1 or 0).
    """
    best_acc = 0.
    num_epochs = self.args.num_epochs
    for epoch in range(num_epochs):
        self.logger.info('Epoch {}/{}'.format(epoch + 1, num_epochs))
        # training
        train_loss = self.train_epoch(train_iter, self.criterion, self.optimizer, epoch)
        self.logger.info("Traing Loss: {}".format(train_loss))
        # evaluation, only on the master node
        if self.args.local_rank in [-1, 0]:
            eval_loss, eval_acc = self.evaluate(dev_iter, self.criterion)
            self.logger.info("Evaluation:")
            self.logger.info("- loss: {}".format(eval_loss))
            self.logger.info("- acc: {}".format(eval_acc))
            # monitor loss and accuracy
            if self.writer:
                self.writer.add_scalar('epoch_loss', train_loss, epoch)
                self.writer.add_scalar('eval_loss', eval_loss, epoch)
                self.writer.add_scalar('eval_acc', eval_acc, epoch)
                # self.writer.add_scalar('lr', self.scheduler.get_lr()[0])
            # save the model
            if eval_acc >= best_acc:
                best_acc = eval_acc
                self.logger.info("New best score!")
                self.save_model()
def predict(self, premise, hypothesis):
    """Predict a class id for each premise/hypothesis pair.

    Arguments:
        premise {torch.Tensor} -- [batch, seq_len]
        hypothesis {torch.Tensor} -- [batch, seq_len]

    Returns:
        preds {torch.Tensor} -- predicted class ids, shape [batch]
    """
    self.model.eval()  # evaluation mode
    with torch.no_grad():
        scores = self.model(premise, hypothesis)  # [batch, num_classes]
        preds = scores.argmax(dim=1)
    return preds
def get_report(self, dataset, target_names=None):
    """Run the model over *dataset* and log a classification report.

    Returns:
        (pred_class, label_class) -- parallel lists of predicted and
        gold class ids, in dataset iteration order.
    """
    predictions = []
    gold = []
    for batch in dataset:
        moved = to_device(batch, self.args.device)
        batch_preds = self.predict(moved['premise'], moved['hypothesis'])
        predictions.extend(batch_preds.tolist())
        gold.extend((moved['label']).tolist())
    self.logger.info('\n')
    self.logger.info(classification_report(gold, predictions,
                                           target_names=target_names))
    return predictions, gold
|
[
"[email protected]"
] | |
5b5c8f567d442b7d3f5b766199727996bd5699bd
|
e994e9c74a7f4b95ed28ce53e86d5715769f8137
|
/admin/feeds/urls.py
|
3de1c6809fcd64f66f348291763e01d4c58179d6
|
[] |
no_license
|
wechulimaven/lutterD-jangoA-PI
|
0773c07a78d3a78c46d374499b82251beaffa6e7
|
c932d1fd0969cdb7d1f0057c6a4f284768e0bef3
|
refs/heads/main
| 2023-03-22T09:16:29.873629 | 2021-03-20T23:19:40 | 2021-03-20T23:19:40 | 349,767,897 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
from django.urls import path
from .views import UserRecordView, feedEditAddPostList, feedPostsList

# URL routes for the feeds app.
urlpatterns = [
    path('', feedPostsList.as_view()),  # root: feed posts collection
    path('<int:pk>/', feedEditAddPostList.as_view()),  # single post addressed by primary key
    path('user/', UserRecordView.as_view(), name='users'),  # user records endpoint
]
|
[
"[email protected]"
] | |
a34b60332da3cc2b916a9aa39e89a5d34abc28c3
|
59527a2f36edfec1cb3a06cd93864555d62cd7eb
|
/PythonCoreProgramming/chapter3/makeTextFile.py
|
c15ae4fa6348f7f662ae3c222006404dae7a182f
|
[] |
no_license
|
xiao2mo/MachineLearningInAction-1
|
e4559fdf54128316e5655f15269f371b687b959a
|
48fb1faa16827cd0fbeac6ae05358e5fdf5f9e90
|
refs/heads/master
| 2021-01-21T21:05:54.175565 | 2017-06-10T05:53:56 | 2017-06-10T05:53:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 660 |
py
|
#coding:utf-8
# Interactively create a new text file (Python 2 script: uses
# raw_input and print statements).
import os

if __name__ == '__main__':
    # Keep asking until the user supplies a file name that does not exist.
    while True:
        fname = raw_input('Enter file name: ');
        if os.path.exists(fname):
            print 'file: %s has already existed' %fname
        else:
            break;
    fileContent = []
    print '\n Enter file content, if you want quit, type .'
    # Collect lines until the user enters a single '.'.
    while True:
        entry = raw_input('>')
        if entry == '.':
            break;
        else:
            fileContent.append(entry)
    # Write the collected lines from fileContent to the file.
    fw = open(fname, 'w')  # open in write mode
    fw.write('\n'.join(fileContent))
    fw.flush()
    fw.close()
    print 'Done!'
|
[
"codercuixin@gmail"
] |
codercuixin@gmail
|
095d0bb99bf369d801675140f71509f3c30b20fc
|
2c28ecef182aab6f57f8ca1195aca33ae5061cc4
|
/AutomateTheBoringStuff/commaCode.py
|
574ce040aa387c04db5862594c8eb0149a52b47c
|
[] |
no_license
|
MarcHiggins/LearningPython
|
b5b39ec80b1f6959bf24d55cb5c86200ab82fd67
|
dba2fac74b37ac0038b109c00720a71c0b57109b
|
refs/heads/master
| 2022-09-17T22:04:55.753942 | 2020-06-05T18:10:03 | 2020-06-05T18:10:03 | 267,947,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 547 |
py
|
##Aim is to print a list with all items separated with a comma
## and a space, with and inserted before the last item
##Test one with a simple, user defined list
spam = ['apples', 'bananas', 'tofu', 'cats']
print(spam)
def andlist(x):
    """Print the items of *x* separated by ", " with "and" before the last.

    Example: ['a', 'b', 'c'] -> "a, b, and c".

    Fixes two bugs in the original: it inserted 'and' as a list element,
    producing a stray comma after it ("..., and, cats"), and it mutated
    the caller's list in place. Empty and single-item lists are handled.
    """
    if not x:
        print()
    elif len(x) == 1:
        print(x[0])
    else:
        print(", ".join(str(item) for item in x[:-1]) + ", and " + str(x[-1]))
andlist(spam)
print(*spam)
## Test two: generate a long list and call the function on this
import random
randomDNA = []
dna = ["A", "T", "G", "C"]
# Build a random 100-character DNA sequence, one base at a time.
for i in range (0,100):
    randomDNA +=random.choice(dna)
print(randomDNA)
andlist(randomDNA)
|
[
"[email protected]"
] | |
ea641622136b336a1f7196b18e51f101df6df097
|
d45bb44b0dfabfeff37c21a6ac0be1362782e39a
|
/utils/import_bookmarks.py
|
ea763b006243bdea76577f71ce07e8fba1168997
|
[] |
no_license
|
SyJarvis/BookmarkManager
|
c25f9df8cb0d0719de805f8080a7ae78c5ac529c
|
dc3baf06fd47c4514b148134ee3d3fa03f7f1571
|
refs/heads/master
| 2023-03-26T17:14:17.776441 | 2021-03-21T14:58:58 | 2021-03-21T14:58:58 | 322,634,112 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 647 |
py
|
from pyquery import PyQuery as pq
class BookmarksTodb():
    """Parse an exported browser-bookmarks HTML file with PyQuery."""

    def __init__(self, filename='utils/bookmarks_2020_5_5_win.html'):
        # Read the whole export once and build a PyQuery document from it.
        with open(filename, 'r+', encoding='utf-8') as file:
            self.html = file.read()
        self.doc = pq(self.html)

    def get_cage_list(self):
        """Return the folder (H3) titles as a list of strings."""
        return [folder.text for folder in self.doc('H3')]

    def get_url_list(self):
        """Return one {'url', 'title'} dict per bookmark link (A tag)."""
        records = []
        for link in self.doc('A').items():
            record = {'url': link.attr('href'), 'title': link.text()}
            print(record)
            records.append(record)
        return records
|
[
"[email protected]"
] | |
ce9e940c44787a1a33a27df098898d8d46a918ce
|
eb8e13ec2997b6047be54ac9b859ece6f0b9baa3
|
/File_Reading.py
|
c6ca8e2d55ce02d5663653d420de9b8f62e23c18
|
[] |
no_license
|
Zidane786/Learning-Python
|
2a7f1eb2fd80a105afe5f73dc29fbc134f0e1208
|
8038582748263fe905f3412eb9209f1299719bd1
|
refs/heads/master
| 2020-12-06T07:19:39.047544 | 2020-05-02T09:45:57 | 2020-05-02T09:45:57 | 232,387,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,885 |
py
|
# ***Open File and Reading it as text file***:-
# open_file=open("Zidane.txt", 'rt') # In this rt is Default where r is read and t is text
# Read_File=open_file.read()
# print(Read_File)
# open_file.close() # It is good habbit to close the file when work is done Because we need to free Memomy when our work is done
"""
OUTPUT:-
Zidane is Great
Zidane is Really Great
Zidane is Really Really Great
Zidane is Really Really Really Great
Zidane is Really Really Really Really Great
"""
# ***Read And Binary*** :-
# open_file1=open("Zidane.txt","rb")
# Read_File1=open_file1.read()
# print(Read_File1) # Give Output in Binary String Form
"""
OUTPUT:-
b'Zidane is Great\nZidane is Really Great\nZidane is Really Really Great\nZidane is Really Really Really Great\nZidane is Really Really Really Really Great'
"""
# ***passing Argument in read functoion***:-
# Open=open("Zidane.txt")
# Read=Open.read(2)
# print(Read)
# Read=Open.read(3)
# print(Read) # pehle jab read mai 2 paas kiya to Zi ho gaya tha ab 3 paas kiya to uske aage se lege i.e dan See Output for better understanding
"""
OUTPUT:-
Zi
dan
"""
# ***If we want to print Content of file line by line we can do by following way***:-
# Open=open("Zidane.txt")
# for line in Open:
# print(line,end="")
"""
OUTPUT:-
Zidane is Great
Zidane is Really Great
Zidane is Really Really Great
Zidane is Really Really Really Great
Zidane is Really Really Really Really Great
"""
# *** readline() Function***:-
# Open=open("Zidane.txt")
# print(Open.readline(),end="") #It read a line agar hame multiple line read kerni hai to isse multiple time read kerna padega readline()=read a single line
# print(Open.readline(),end="") # uske saath new line character bhi read kerta hai isliye hame end="" lagayege
# print(Open.readline(),end="")
# print(Open.readline(),end="")
# print(Open.readline(),end="")
"""
OUTPUT:-
Zidane is Great
Zidane is Really Great
Zidane is Really Really Great
Zidane is Really Really Really Great
Zidane is Really Really Really Really Great
"""
# *** readlines() Function (it creates a list) ***:-
Open=open("Zidane.txt")
print(Open.readlines()) # every line is stored in a list entry, each keeping its trailing newline character
"""
OUTPUT:-
['Zidane is Great\n', 'Zidane is Really Great\n', 'Zidane is Really Really Great\n', 'Zidane is Really Really Really Great\n', 'Zidane is Really Really Really Really Great']
"""
Open.close() # good habit to close the file when work is done
|
[
"[email protected]"
] | |
d8b19ae42d91ddaf031dcf7c3a036edde31b414e
|
41aa400850af639e8d2ef4da7c65a566e3ff5a54
|
/Object_Oriented_Programs/Inventory_data.py
|
2696c04edc2fe332e64355e2774c02d351a90c2e
|
[] |
no_license
|
ravindraindia4u/Python
|
e1159df5eeea6b93ad42063644eaca375ca96f72
|
b1915958a524f4f2e6b80cff423cd7bc72834fa8
|
refs/heads/master
| 2022-04-27T17:45:35.653000 | 2020-05-01T21:15:30 | 2020-05-01T21:15:30 | 259,142,416 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
"""
Create the JSON from Inventory Object and output the JSON String
"""
import json
def main():
    """Load the ``inventory`` JSON file and pretty-print its contents.

    The file is expected to map section names to lists of dicts; each
    dict's key/value pairs are printed indented under its section.
    """
    # ``with`` guarantees the file handle is closed even if parsing fails.
    with open("inventory", 'r') as file:
        data = json.load(file)
    for key, value in data.items():
        print(f"The {key} details are as follows: ")
        for dictionary in value:
            for keys, values in dictionary.items():
                print(f"\t\t{keys} : {values}")
        print()


if __name__ == "__main__":
    # Guarded so importing this module no longer runs main() as a side effect.
    main()
|
[
"[email protected]"
] | |
5ea85b5d4ab127194e35ce410732ed286cdba869
|
0c7f3810d822595b55f70f2c9d87f5f2c98054e6
|
/fondos/download.py
|
01b3cc38e731884bfa1e9013bb78fd5c1822ae4e
|
[
"MIT"
] |
permissive
|
elsonidoq/fondos
|
c1b406ed85c1499546352bb704ad4c12dea78f6c
|
7d673865b929da6aba0757962e7d287c82edb1b6
|
refs/heads/master
| 2021-04-29T02:06:47.748040 | 2017-05-20T18:32:23 | 2017-05-20T18:32:23 | 78,062,720 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 373 |
py
|
import os
from datetime import datetime
from invertironline import InvertirOnlineDownloader
here = os.path.abspath(os.path.dirname(__file__))
def main():
    """Download quote data for each configured bond."""
    bonds = [
        'aa17',
        'ay24'
    ]
    # Parenthesized print works on both Python 2 and Python 3; the original
    # bare print statement was Python-2-only.
    print("Downloading {}".format(datetime.now()))
    for bond in bonds:
        InvertirOnlineDownloader(bond).execute()

if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
f17669184ef2e9e58cc9613ffd6e8add89126ea3
|
09e8c92187ff8d7a726727041e2dd80850dcce3d
|
/leetcode/028_implement_strStr_TRICKY.py
|
7154dcc9281455ccd29a545cb11042da6c8c43ad
|
[] |
no_license
|
kakru/puzzles
|
6dd72bd0585f526e75d026f3ba2446b0c14f60e0
|
b91bdf0e68605f7e517446f8a00b1e0f1897c24d
|
refs/heads/master
| 2020-04-09T09:47:31.341475 | 2019-05-03T21:24:41 | 2019-05-03T21:24:41 | 160,246,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,846 |
py
|
#/usr/bin/env python3
import unittest
class Solution:
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack.

        :type haystack: str
        :type needle: str
        :rtype: int -- first match index, 0 for an empty needle, -1 if absent

        Delegates to str.find, which runs an optimized C implementation and
        already has exactly the required semantics (including the tricky
        "mississippi"/"issippi" overlap case the manual scan must handle).
        """
        return haystack.find(needle)
#
# There is a problem with a step by step solution it's easy to forget about:
# haystack="mississippi", needle="issippi"
# mississippi
# issippi --> X
# mississippi
# issippi --> OK
# the loop index on the haystack cannot go back to 0 !!
class BasicTest(unittest.TestCase):
    """Unit tests for Solution.strStr: present, absent, empty, long and
    overlapping-prefix needles."""

    def test_1(self):
        # needle present in the middle of the haystack
        input_ = "hello", "ll"
        expected_output = 2
        output = Solution().strStr(*input_)
        self.assertEqual(output, expected_output)

    def test_2(self):
        # needle absent
        input_ = "helo", "ll"
        expected_output = -1
        output = Solution().strStr(*input_)
        self.assertEqual(output, expected_output)

    def test_3(self):
        # empty needle matches at index 0 by convention
        input_ = "abc", ""
        expected_output = 0
        output = Solution().strStr(*input_)
        self.assertEqual(output, expected_output)

    def test_4(self):
        # long haystack; match spans the "abc"/"abc" boundary
        input_ = "abc"*100000, "cab"
        expected_output = 2
        output = Solution().strStr(*input_)
        self.assertEqual(output, expected_output)

    def test_5(self):
        # single-character exact match
        input_ = "a", "a"
        expected_output = 0
        output = Solution().strStr(*input_)
        self.assertEqual(output, expected_output)

    def test_6(self):
        # tricky case: a partial match must not stop the scan (see note above)
        input_ = "mississippi", "issippi"
        expected_output = 4
        output = Solution().strStr(*input_)
        self.assertEqual(output, expected_output)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"[email protected]"
] | |
44f7d5e6d9055b7acb7c3147d5e6aa735fc3ce3e
|
a09e70355b756bd5cba55246e17eb0480af6257b
|
/examples/ble_demo_central.py
|
eb56a9cb9b54270e50eb0709aed3104e43dfecc4
|
[
"MIT"
] |
permissive
|
devoh747/Adafruit_CircuitPython_BLE
|
9735381dc3481661af54ac32d89ec40e006edc5b
|
7566483e2dbdb1bf6c71d5629a2ed37b113c7cff
|
refs/heads/master
| 2020-08-09T04:14:59.774817 | 2019-10-10T21:11:07 | 2019-10-10T21:11:07 | 213,995,226 | 0 | 0 |
MIT
| 2019-10-09T18:33:32 | 2019-10-09T18:33:32 | null |
UTF-8
|
Python
| false | false | 1,319 |
py
|
"""
Demonstration of a Bluefruit BLE Central. Connects to the first BLE UART peripheral it finds.
Sends Bluefruit ColorPackets, read from three potentiometers, to the peripheral.
"""
import time
import board
from analogio import AnalogIn
#from adafruit_bluefruit_connect.packet import Packet
# Only the packet classes that are imported will be known to Packet.
from adafruit_bluefruit_connect.color_packet import ColorPacket
from adafruit_ble.scanner import Scanner
from adafruit_ble.uart_client import UARTClient
def scale(value):
    """Scale a value from 0-65535 (AnalogIn range) to 0-255 (RGB range)."""
    fraction = value / 65535
    return int(fraction * 255)
scanner = Scanner()
uart_client = UARTClient()

# Three potentiometers, one per RGB channel.
a3 = AnalogIn(board.A3)
a4 = AnalogIn(board.A4)
a5 = AnalogIn(board.A5)

while True:
    uart_addresses = []
    # Keep trying to find a UART peripheral
    while not uart_addresses:
        uart_addresses = uart_client.scan(scanner)
    uart_client.connect(uart_addresses[0], 5)
    while uart_client.connected:
        # Read the three pots and map each to 0-255.
        r = scale(a3.value)
        g = scale(a4.value)
        b = scale(a5.value)
        color = (r, g, b)
        print(color)
        color_packet = ColorPacket(color)
        try:
            uart_client.write(color_packet.to_bytes())
        except OSError:
            # Ignore a failed write; the outer loop reconnects if the
            # link dropped.
            pass
        time.sleep(0.3)
|
[
"[email protected]"
] | |
d73cb9b3bb88001eb961512b064822c5c3bc29b3
|
2214265044a4c141b9743e74cb04b87a2d08ce5a
|
/luminoth/utils/homedir.py
|
1e4a8098a8f7488ac2171c99c76ffa474d329f7c
|
[
"BSD-3-Clause"
] |
permissive
|
Chunde/luminoth
|
163bb4850d9526675e5e02414b07055a894cb794
|
4c1523460d41a69dc548262051779dc5037afb8d
|
refs/heads/master
| 2021-04-27T02:39:30.272615 | 2018-03-02T18:04:46 | 2018-03-02T18:54:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 602 |
py
|
"""Luminoth home (~/.luminoth) management utilities."""
import os
import tensorflow as tf
DEFAULT_LUMINOTH_HOME = os.path.expanduser('~/.luminoth')


def get_luminoth_home(create_if_missing=True):
    """Returns Luminoth's homedir.

    The default (~/.luminoth) can be overridden through the LUMI_HOME
    environment variable; the result is always an absolute path. The
    directory is created on demand unless ``create_if_missing`` is False.
    """
    path = os.path.abspath(os.environ.get('LUMI_HOME', DEFAULT_LUMINOTH_HOME))
    if create_if_missing and not os.path.exists(path):
        tf.gfile.MakeDirs(path)
    return path
|
[
"[email protected]"
] | |
347412fdfa9287d03bb131ce4771ae5ddacded33
|
4d5721457ae7856c11c869c5966a36416e1144c3
|
/djagno_pg_trgm/urls.py
|
1a25a241b46a0c0c72b051c4439ebc836c906d89
|
[] |
no_license
|
Zlira/django_pg_trgm
|
dd65df6caf372f2777db73d85c815eab661abd9d
|
59ad1b95a8a08783af856b200b53a793d1d8b403
|
refs/heads/master
| 2016-09-13T12:54:17.202890 | 2016-05-02T11:29:44 | 2016-05-02T11:29:44 | 57,662,880 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 283 |
py
|
from django.conf.urls import patterns, include, url

from django.contrib import admin


# NOTE: patterns() is the pre-Django-1.8 URLconf style; kept as-is for
# compatibility with the project's Django version.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djagno_pg_trgm.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
)
|
[
"[email protected]"
] | |
c2dfa472424134a661b3b929c19173f3627a69ca
|
50edc4d858276ae080a40561f68d64e2a9c8bb1d
|
/ExampleHydraulicComparison.py
|
0dcbeff5edf4853c8d47dac7b82b9c34d43c643f
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
iordach1/ICE-Coding
|
eef59eb043565c10208e197ce4109ba8ea86b11e
|
fb72df738e812f228fb4947f8fd3990754850c71
|
refs/heads/master
| 2020-03-28T03:54:50.491205 | 2018-08-16T17:40:40 | 2018-08-16T17:40:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,174 |
py
|
# Example Jython script demonstrating how DSS data can be accessed
# within execution of HEC-DSSVue and obtain hydraulic results from
# a HEC-RAS DSS file.
def GetMaxValueIndex(hydrograph):
    # Gets the index of the entry in the supplied hydrograph's values
    # array with the largest value (first index wins on ties; 0 for an
    # empty array, matching the original behavior).
    #
    # float('-inf') replaces float(-sys.maxint): 'sys' was never imported
    # in this script (NameError at runtime) and sys.maxint no longer
    # exists on Python 3; float('-inf') works on both Python 2 and 3.
    idx = 0
    best = float('-inf')
    for i in range(0, len(hydrograph.values)):
        if hydrograph.values[i] > best:
            best = hydrograph.values[i]
            idx = i
    return idx
from hec.heclib.dss import HecDss
# obtain configuration details for HEC applications for
# python and jython scripts
import src.hecConfig
reload(src.hecConfig)
config = src.hecConfig.HecConfig()

# Open the HEC-RAS results DSS file for this project/plan.
dssFilePath=config.getRasProjectPath() + "/" + config.rasProjectName + ".dss"
dss = HecDss.open(dssFilePath)
hydraulicResults = {}

# Ditch names are handed over by the calling Python script through a
# pickle file (Jython <-> Python data exchange).
import pickle
dtf = open(config.getDataTransferFilePath(),'r+')
ditchNames = pickle.load(dtf)
dtf.close()

for ditchName in ditchNames:
    # get some flow data from the DSS file - obtain peak values
    dataPath = "/" + ditchName + "/FLOW/01DEC2006/1HOUR/" + config.rasPlanName + "/"
    dataHydrograph = dss.get(dataPath.upper(), True); # true ensures entire time series is loaded
    maxValueIdx = GetMaxValueIndex(dataHydrograph)
    peakFlowValue = dataHydrograph.values[maxValueIdx]
    peakFlowTime = dataHydrograph.times[maxValueIdx]
    #print dataHydrograph.values
    #print maxValueIdx

    # Stage peak only needs the value, not its timing.
    dataPath = "/" + ditchName + "/STAGE/01DEC2006/1HOUR/" + config.rasPlanName + "/"
    dataHydrograph = dss.get(dataPath.upper(), True); # true ensures entire time series is loaded
    peakStageValue = max(dataHydrograph.values)

    hydraulicValues = {"peakFlowRate": peakFlowValue, "peakFlowTiming": peakFlowTime, "peakStage": peakStageValue}
    hydraulicResults[ditchName] = hydraulicValues;

# Write results to a intermediate file that can be read within the calling
# Python script as communicaton between Jython called from HEC software and
# Python is somewhat limited
#print hydraulicResults
dtf = open(config.getDataTransferFilePath(),'w')
dtf.write(str(hydraulicResults))
dtf.close()
|
[
"[email protected]"
] | |
d766d4c5a78e044a4c3b133cff4e77e12c3e4d4f
|
ce7b21a1a9a79322db296a7ffb5dca23c3f53e8d
|
/exam/bin/django-admin.py
|
a5aa180a78eeba16a34a8bf08830073c2cec94ca
|
[] |
no_license
|
letyougo/exam
|
c8f11f25c4d4e2ab18fdbbf853f13939a41a7079
|
a6832790927abf36a80263d4c23efede0976fcc7
|
refs/heads/master
| 2021-08-19T08:16:34.385755 | 2017-11-25T12:12:40 | 2017-11-25T12:12:40 | 112,000,871 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170 |
py
|
#!/Users/xiaoxiaosu/Documents/miui-2017/exam/exam/bin/python3.5
# Virtualenv-generated django-admin entry point: delegates straight to
# Django's command-line management utility.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"[email protected]"
] | |
623d7b812b2e43e3541b088ba3b63a76c0ac5857
|
20569ea832802e5dadde2cbc4c144ff02d85d355
|
/utils/BenchmarkRunner.py
|
fa230c9af28272b4a1fb6624842c5d4cd34f65b6
|
[
"MIT"
] |
permissive
|
bwhub/CONVJSSP
|
6017359f7bf21a1e257df121baf3f83fc22a80ba
|
dd9084dc370e6113df749b247ee05670e46fca3f
|
refs/heads/master
| 2023-02-01T15:18:43.057072 | 2020-12-14T17:10:26 | 2020-12-14T17:10:26 | 299,891,152 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,125 |
py
|
import subprocess
from tqdm import tqdm
import itertools as it
import os
import multiprocessing as mp
class BenchmarkRunner:
    """Runs a job-shop benchmark executable over the cartesian product of
    parameter options and problem instances, writing one log file per run.

    Fix: ``_run_single_command`` was defined twice with identical bodies;
    the dead duplicate has been removed. Dead commented-out code dropped.
    """

    def __init__(self, log_dir, exec_path="../src/job-shop-experiment/job-shop", repeat_time=1, warmup_seconds=0):
        self.exec_path = exec_path + " "
        self.log_dir = log_dir
        self.repeat_time = repeat_time
        # Ask the executable itself which options/instances it supports.
        self.available_parameter_dict, self.available_instance_list = self._get_avaialbe_benchmark_options()
        print("# Length of the available instance list is {}".format(len(self.available_instance_list)))
        self.test_parameter_dict = {}
        self.test_instance_list = []
        self.warmup_seconds = warmup_seconds

    def _run_single_command(self, cmd_str):
        """Execute one command and return its combined stdout/stderr text."""
        p = subprocess.Popen(cmd_str.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, err = p.communicate()
        return output.decode('utf-8')

    def _build_testing_list(self):
        """Expand registered parameters/instances into per-run argument lists."""
        all_parameter_names = sorted(self.test_parameter_dict)
        parameter_combination_list = list(it.product(
            *(self._add_key_2_every_val(para_name, self.test_parameter_dict[para_name])
              for para_name in all_parameter_names)))
        return [(list(para) + [instance])
                for para in parameter_combination_list
                for instance in self.test_instance_list]

    def _warmup(self):
        """Optionally stress one CPU before benchmarking (stabilizes clocks)."""
        if self.warmup_seconds > 0:
            print("Staring warmup for {} seconds".format(self.warmup_seconds))
            self._run_single_command('stress --cpu 1 --timeout ' + str(self.warmup_seconds))
            print("Finished warmup. Now starting the benchmark.")

    def run_test(self, log_dir=None):
        """Run every parameter/instance combination sequentially.

        Each combination is repeated ``repeat_time`` times; runs whose log
        file already exists are skipped. Returns the raw outputs of the
        runs that were actually executed.
        """
        if log_dir is None:
            log_dir = self.log_dir
        testing_list = self._build_testing_list()
        result_list = []
        self._warmup()
        for i in range(self.repeat_time):
            for test in tqdm(testing_list):
                result = self._run_single_JSSP_instance(test, i)
                if result is not None:
                    result_list.append(result)
        return result_list

    def run_test_parallel(self, log_dir=None, process_count=mp.cpu_count()):
        """Run all combinations in parallel with a process pool.

        NOTE: unlike run_test, combinations are not repeated; repeat
        index 0 is used for every log file name.
        """
        if process_count > mp.cpu_count():
            print('number of of process should be smaller than cpu_count.')
            process_count = mp.cpu_count()
        if log_dir is None:
            log_dir = self.log_dir
        testing_list = self._build_testing_list()
        self._warmup()
        print("The first element in testing list is {}".format(testing_list[0]))
        with mp.Pool(process_count) as pool:
            result_list = pool.starmap_async(self._run_single_JSSP_instance,
                                             [(test, 0) for test in testing_list]).get()
        return result_list

    def _run_single_JSSP_instance(self, test, repeat_time):
        """Run one benchmark command and write its log file.

        Returns the command output, or None when the log file already
        exists (the run is skipped).
        """
        cmd = self.exec_path + " ".join(test)
        log_file_name = self.log_dir + " ".join(test).replace(" ", "_").replace("-", "") + "_run_" + str(repeat_time) + ".log"
        if not os.path.isfile(log_file_name):
            result = self._run_single_command(cmd)
            with open(log_file_name, "w") as log_file:
                log_file.write(cmd)
                log_file.write('\n')
                log_file.write(result)
            return result

    def add_testing_instances(self, instance_list):
        """Register the instances to benchmark (no validation performed)."""
        print("Start add_testing_instances")
        for instance in instance_list:
            self.test_instance_list.append(instance)

    def _add_key_2_every_val(self, key, val_list):
        """Prefix every value with its parameter flag: ('-t', ['5']) -> ['-t 5']."""
        return [(key + " " + v) for v in val_list]

    def add_parameter_options(self, para_dict):
        """Add benchmark values for one parameter, e.g. {'-t': ['1', '5']}."""
        assert len(para_dict) == 1, 'Please add one parameter at a time'
        key = list(para_dict.keys())[0]
        assert key in self.available_parameter_dict, 'Parameter {} is not avaiable'.format(key)
        self.test_parameter_dict[key] = para_dict[key]

    def get_current_test_instances(self):
        """Return the currently registered test instances."""
        return self.test_instance_list

    def get_current_test_parameters(self):
        """Return the currently registered parameter options."""
        return self.test_parameter_dict

    def get_available_instance_list(self):
        """Return the instance names reported by the executable."""
        return self.available_instance_list

    def _get_avaialbe_benchmark_options(self, exec_path=None, help_cmd=' --help'):
        """Parse the executable's --help output.

        Returns (parameter_dict, instance_list): a dict mapping each option
        flag to an empty value list, and the instance names listed after
        the 'instances:' marker.
        """
        if exec_path is None:
            exec_path = self.exec_path
        help_str = self._run_single_command(exec_path + help_cmd)
        help_list = help_str.replace('\t', ' ').replace('\n', ' ').split(' ')
        # Option flags start with '-'; the first three hits belong to the
        # usage/header lines and are skipped, as before.
        parameter_list = [x for x in help_list if x.startswith('-') and len(x) > 1][3:]
        parameter_dict = {option: [] for option in parameter_list}
        # Instance names follow the 'instances:' marker, comma-separated
        # (the trailing empty split entry is dropped).
        instance_list = "".join(help_list[help_list.index('instances:') + 1:]).split(',')[:-1]
        return parameter_dict, instance_list
|
[
"[email protected]"
] | |
a9d884d274c01dd78cceab6370265d03f5fafe07
|
c7ab842b33195cf026b3a93ef38814ce3ea6309e
|
/Reed-Solomon/main.py
|
aff0b3b81d06e0755c525ab2998b20c8cff029d5
|
[] |
no_license
|
kizcko/Number-Theory
|
7f2429d7d1685b38338073f6603bce91e51106a5
|
aed1dba7a06a91eb0341d4fdc0bf1577d0809bc3
|
refs/heads/main
| 2023-04-20T18:55:06.584995 | 2021-05-17T15:09:59 | 2021-05-17T15:09:59 | 368,226,516 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 509 |
py
|
import decoding
from encoding import encoding
from decoding import *
m = 29
p = 11
s = 1
k = 3
def convert_to_ascii(text):
    """Concatenate the decimal ASCII codes of every character in *text*."""
    codes = [str(ord(ch)) for ch in text]
    return "".join(codes)
# The message must be numeric for encoding; map characters to their
# ASCII codes when a string is supplied.
if isinstance(m, str):
    m = int(convert_to_ascii(m))

def main(m,p,s,k):
    # Demonstrate one full Reed-Solomon encode/decode round trip.
    print("\nEncoding\n",20*'=')
    print('\nm:', m)
    initial = encoding(m, p, s, k)
    print("\n\nDecoding\n", 20 * '=')
    z = generate_z(initial,p)
    print("\nZ:",z)
    polynomial(p, z, k,s,initial)

main(m,p,s,k)
|
[
"[email protected]"
] | |
501c95b652f857f01708a965721f3e01f47337a9
|
f07b7f546278c86ec91fe9fdacbe4acc266b5ef0
|
/blog/blog/wvenv/Lib/site-packages/pyls/_version.py
|
1da11f8698d1b8e9027b628af4386644dc9ed2ca
|
[] |
no_license
|
CankayaUniversity/ceng-407-408-2019-2020-Patent-Comparison-System
|
0386a6d8651a9ce875a9cf56013c19d8242204c9
|
d9c0f2d84d90932b962a0618b01652f3bd560f25
|
refs/heads/master
| 2020-08-18T09:55:23.676188 | 2020-06-27T21:19:20 | 2020-06-27T21:19:20 | 215,772,427 | 2 | 4 | null | 2020-06-27T21:26:31 | 2019-10-17T11:08:50 | null |
UTF-8
|
Python
| false | false | 498 |
py
|
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
{
"date": "2020-01-21T17:27:49+0000",
"dirty": false,
"error": null,
"full-revisionid": "fc2ab66c5e447fb5fbd5941bfc9e070906689969",
"version": "0.31.7"
}
''' # END VERSION_JSON
def get_versions():
    # Deserialize the versioneer-generated JSON blob above into a dict.
    # (This file is auto-generated; avoid hand-editing the code.)
    return json.loads(version_json)
|
[
"[email protected]"
] | |
6b35c592f8a2cbebcbf7547605a3603a6085d533
|
c042600d495d90a70fce0ce8dc24b88951994ed7
|
/LiteratureBooks/pipelines.py
|
14616d164683728b418f3055016487aba968b913
|
[] |
no_license
|
JC5137/distributeSpider
|
b8c938be3033f77f06c8ae57c96a81c9a2d2439a
|
9c068503f34b3bc14e055f70903b3c81b32e09d0
|
refs/heads/master
| 2021-04-12T11:50:17.689045 | 2017-07-12T13:44:37 | 2017-07-12T13:44:37 | 94,546,730 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,543 |
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb
import MySQLdb.cursors
import logging
from twisted.enterprise import adbapi
class LiteraturebooksPipeline(object):
    """Scrapy pipeline persisting book items into MySQL through a Twisted
    adbapi connection pool (non-blocking inserts/updates).

    Fixes: ``_handle_error`` took a parameter named ``failue`` but logged
    ``failure``, raising a NameError whenever a DB error actually occurred;
    the SQL is now parameterized instead of %-interpolated, so scraped
    titles containing quotes can no longer break (or inject into) the
    statements.
    """

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline with a connection pool from scrapy settings."""
        mysql_args = dict(
            host=settings['MYSQL_HOST'],
            db=settings['MYSQL_DBNAME'],
            user=settings['MYSQL_USER'],
            passwd=settings['MYSQL_PASSWD'],
            charset='utf8',
            cursorclass=MySQLdb.cursors.DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool('MySQLdb', **mysql_args)
        return cls(dbpool)

    # called by scrapy for every scraped item
    def process_item(self, item, spider):
        """Dispatch by spider: Amzon spiders insert, Jd spiders update."""
        if "Amzon" in spider.name:
            d = self.dbpool.runInteraction(self._do_insert, item, spider)
            d.addErrback(self._handle_error, item, spider)
            d.addBoth(lambda _: item)
            return d
        if "Jd" in spider.name:
            d = self.dbpool.runInteraction(self._do_update, item, spider)
            d.addErrback(self._handle_error, item, spider)
            d.addBoth(lambda _: item)
            return d

    # insert an Amazon book row
    def _do_insert(self, conn, item, spider):
        sql = """insert into book_info
                     (book_id_amzon,
                     book_url_amzon,
                     book_name,
                     book_comments_num_amzon,
                     book_price_amzon
                     )
                     values (%s, %s, %s, %s, %s)"""
        conn.execute(sql, (item["book_id_amzon"],
                           item["book_url_amzon"],
                           item['book_name'],
                           item['book_comments_num_amzon'],
                           item['book_price_amzon']))

    # update a row with JD data, matched by book name and Amazon id
    def _do_update(self, conn, item, spider):
        sql = """update book_info set
                    book_url_jd = %s,
                    book_comments_sum_jd = %s,
                    book_price_jd = %s
                    where book_name = %s and book_id_amzon = %s"""
        conn.execute(sql, (item["book_url_jd"],
                           item["book_comments_num_jd"],
                           item["book_price_jd"],
                           item["book_name"],
                           item["book_id_amzon"]))

    # error handler for the Twisted deferreds above
    def _handle_error(self, failure, item, spider):
        logger = logging.getLogger("DB")
        logger.error(failure)
|
[
"[email protected]"
] | |
15f12151f6b78ab44e92344a457d76919163a256
|
e2af805086a86ec5e3526c503e56aacf291bff7d
|
/pv_mcts.pyx
|
f6cb74e6357e7d2dcb9361b56fee7187c63aed81
|
[] |
no_license
|
keisuke-777/CythonVerAlphaZeroForGeister
|
e21c7e44e83f58e63b4ae35487deba6d75b9b4cc
|
abf9b124fc2d335cf6086a98223d60883719cfc9
|
refs/heads/master
| 2023-03-22T11:33:05.838507 | 2021-03-10T02:21:48 | 2021-03-10T02:21:48 | 315,944,517 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,206 |
pyx
|
# ====================
# モンテカルロ木探索の作成
# ====================
# パッケージのインポート
from game import State
from dual_network import DN_INPUT_SHAPE
from math import sqrt
from tensorflow.keras.models import load_model
from pathlib import Path
import numpy as np
# パラメータの準備
PV_EVALUATE_COUNT = 50 # 1推論あたりのシミュレーション回数
# 推論
def predict(model, state):
# 推論のための入力データのシェイプの変換
a, b, c = DN_INPUT_SHAPE
x = np.array(state.pieces_array())
x = x.reshape(c, a, b).transpose(1, 2, 0).reshape(1, a, b, c)
# 推論
y = model.predict(x, batch_size=1)
# 方策の取得
policies = y[0][0][list(state.legal_actions())] # 合法手のみ
policies /= sum(policies) if sum(policies) else 1 # 合計1の確率分布に変換
# 価値の取得
value = y[1][0][0]
return policies, value
# ノードのリストを試行回数のリストに変換
def nodes_to_scores(nodes):
scores = []
for c in nodes:
scores.append(c.n)
return scores
# モンテカルロ木探索のスコアの取得
def pv_mcts_scores(model, state, temperature):
# モンテカルロ木探索のノードの定義
class Node:
# ノードの初期化
def __init__(self, state, p):
self.state = state # 状態
self.p = p # 方策
self.w = 0 # 累計価値
self.n = 0 # 試行回数
self.child_nodes = None # 子ノード群
# 局面の価値の計算
def evaluate(self):
# ゲーム終了時
if self.state.is_done():
# 勝敗結果で価値を取得
value = -1 # 負けも引き分けも-1(要調整)
# value = -1 if self.state.is_lose() else 0
# 累計価値と試行回数の更新
self.w += value
self.n += 1
return value
# 子ノードが存在しない時
if not self.child_nodes:
# ニューラルネットワークの推論で方策と価値を取得
policies, value = predict(model, self.state)
# 累計価値と試行回数の更新
self.w += value
self.n += 1
# 子ノードの展開
self.child_nodes = []
for action, policy in zip(self.state.legal_actions(), policies):
self.child_nodes.append(Node(self.state.next(action), policy))
return value
# 子ノードが存在する時
else:
# アーク評価値が最大の子ノードの評価で価値を取得
value = -self.next_child_node().evaluate()
# 累計価値と試行回数の更新
self.w += value
self.n += 1
return value
# アーク評価値が最大の子ノードを取得
def next_child_node(self):
# アーク評価値の計算
C_PUCT = 1.0
t = sum(nodes_to_scores(self.child_nodes))
pucb_values = []
for child_node in self.child_nodes:
pucb_values.append(
(-child_node.w / child_node.n if child_node.n else 0.0)
+ C_PUCT * child_node.p * sqrt(t) / (1 + child_node.n)
)
# アーク評価値が最大の子ノードを返す
return self.child_nodes[np.argmax(pucb_values)]
# 現在の局面のノードの作成
root_node = Node(state, 0)
# 複数回の評価の実行
for _ in range(PV_EVALUATE_COUNT):
root_node.evaluate()
# 合法手の確率分布
scores = nodes_to_scores(root_node.child_nodes)
if temperature == 0: # 最大値のみ1
action = np.argmax(scores)
scores = np.zeros(len(scores))
scores[action] = 1
else: # ボルツマン分布でバラつき付加
scores = boltzman(scores, temperature)
return scores
# モンテカルロ木探索で行動選択
def pv_mcts_action(model, temperature=0):
def pv_mcts_action(state):
scores = pv_mcts_scores(model, state, temperature)
return np.random.choice(state.legal_actions(), p=scores)
return pv_mcts_action
# ボルツマン分布
def boltzman(xs, temperature):
xs = [x ** (1 / temperature) for x in xs]
return [x / sum(xs) for x in xs]
# 動作確認
if __name__ == "__main__":
# モデルの読み込み
path = sorted(Path("./model").glob("*.h5"))[-1]
model = load_model(str(path))
# 状態の生成
state = State()
# モンテカルロ木探索で行動取得を行う関数の生成
next_action = pv_mcts_action(model, 1.0)
# ゲーム終了までループ
while True:
# ゲーム終了時
if state.is_done():
break
# 行動の取得
action = next_action(state)
# 次の状態の取得
state = state.next(action)
# 文字列表示
print(state)
|
[
"[email protected]"
] | |
9fa2c72d698228f49d9a9ff9bacf0479f901071d
|
a79faf54ba4e6b8a1223d17b9b242fc7c52e7342
|
/test/tagger_test/test_ec2_tagger.py
|
ce170114b362d92fbd80e140bc549d14d19b538e
|
[
"Apache-2.0"
] |
permissive
|
sahajsoft/LUAU
|
773193cb549752a202a03d388e463ad11181af2e
|
1578fb971fb745d8ca4c42ff9c1ee0a1c7480194
|
refs/heads/master
| 2021-06-25T23:49:03.730066 | 2020-10-16T13:01:28 | 2020-10-16T13:01:28 | 153,218,839 | 1 | 1 |
Apache-2.0
| 2019-02-06T07:44:53 | 2018-10-16T03:45:03 |
Python
|
UTF-8
|
Python
| false | false | 709 |
py
|
import unittest
import json
import boto3
import os
from mock import MagicMock
from moto import mock_autoscaling, mock_ec2
from tagger.ec2_tagger import EC2Tagger
class TestEC2Tagger(unittest.TestCase):
@mock_ec2
def setUp(self):
json_data = open('./test/example_events/run_instances.json').read()
self.event = json.loads(json_data)
self.region = 'us-west-2'
os.environ['AWS_REGION'] = 'us-west-2'
@mock_ec2
def test_start(self):
self.tagger = EC2Tagger(self.event, None)
response = self.tagger.start()
response_metadata = response['ResponseMetadata']
self.assertEqual(response_metadata['HTTPStatusCode'], 200)
|
[
"[email protected]"
] | |
3652ed9c9aa0576a74edaf5107cd392b4e4156b3
|
85c873074683ce54ab6056c42ca745f672867d72
|
/quora/lstm_lvm/model.py
|
415184aa3fd978ae5dcc8e9172689184ed6ff217
|
[] |
no_license
|
jihunchoi/cross-sentence-lvm-public
|
46dbbec5c5cba3db38d42437f7f30dd4e4659fab
|
c48f890dc994fb538b47bea864c5bc3d182b622e
|
refs/heads/master
| 2022-11-25T08:19:14.639728 | 2020-07-28T05:25:51 | 2020-07-28T05:25:51 | 283,109,097 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,256 |
py
|
from typing import Any, Dict, Optional, Union, List
import torch
from torch import nn
from torch.distributions import Categorical
from torch.nn import functional
from allennlp.common.params import Params
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.activations import Activation
from allennlp.nn.util import (
get_text_field_mask, sequence_cross_entropy_with_logits
)
from allennlp.training.metrics import CategoricalAccuracy
from modules.code_generators import GaussianCodeGenerator, VmfCodeGenerator
from utils.metrics import ScalarMetric
class SeparatedQuoraModel(Model):
_NUM_LABELS = 2
def __init__(self,
params: Params,
vocab: Vocabulary) -> None:
super().__init__(vocab=vocab)
enc_hidden_dim = params.pop_int('enc_hidden_dim', 300)
gen_hidden_dim = params.pop_int('gen_hidden_dim', 300)
disc_hidden_dim = params.pop_int('disc_hidden_dim', 1200)
disc_num_layers = params.pop_int('disc_num_layers', 1)
code_dist_type = params.pop_choice(
'code_dist_type', ['gaussian', 'vmf'],
default_to_first_choice=True)
code_dim = params.pop_int('code_dim', 300)
tie_embedding = params.pop_bool('tie_embedding', False)
emb_dropout = params.pop_float('emb_dropout', 0.0)
disc_dropout = params.pop_float('disc_dropout', 0.0)
l2_weight = params.pop_float('l2_weight', 0.0)
self.emb_dropout = nn.Dropout(emb_dropout)
self.disc_dropout = nn.Dropout(disc_dropout)
self._l2_weight = l2_weight
self._token_embedder = Embedding.from_params(
vocab=vocab, params=params.pop('token_embedder'))
self._encoder = PytorchSeq2VecWrapper(
nn.LSTM(input_size=self._token_embedder.get_output_dim(),
hidden_size=enc_hidden_dim, batch_first=True))
self._generator = PytorchSeq2SeqWrapper(
nn.LSTM(input_size=(self._token_embedder.get_output_dim()
+ code_dim),
hidden_size=gen_hidden_dim, batch_first=True))
self._generator_projector = nn.Linear(
in_features=self._generator.get_output_dim(),
out_features=vocab.get_vocab_size())
if tie_embedding:
self._generator_projector.weight = self._token_embedder.weight
if code_dist_type == 'vmf':
vmf_kappa = params.pop_int('vmf_kappa', 150)
self._code_generator = VmfCodeGenerator(
input_dim=self._encoder.get_output_dim(),
code_dim=code_dim, kappa=vmf_kappa)
elif code_dist_type == 'gaussian':
self._code_generator = GaussianCodeGenerator(
input_dim=self._encoder.get_output_dim(),
code_dim=code_dim)
else:
raise ValueError('Unknown code_dist_type')
self._discriminator = FeedForward(
input_dim=2 * self._code_generator.get_output_dim(),
hidden_dims=[disc_hidden_dim]*disc_num_layers + [self._NUM_LABELS],
num_layers=disc_num_layers + 1,
activations=[Activation.by_name('relu')()] * disc_num_layers
+ [Activation.by_name('linear')()],
dropout=disc_dropout)
self._kl_weight = 1.0
self._discriminator_weight = params.pop_float(
'discriminator_weight', 0.1)
self._gumbel_temperature = 1.0
# Metrics
self._metrics = {
'generator_loss': ScalarMetric(),
'kl_divergence': ScalarMetric(),
'discriminator_accuracy': CategoricalAccuracy(),
'discriminator_loss': ScalarMetric(),
'loss': ScalarMetric()
}
def get_regularization_penalty(self):
sum_sq = sum(p.pow(2).sum() for p in self.parameters())
l2_norm = sum_sq.sqrt()
return self.l2_weight * l2_norm
@property
def l2_weight(self):
return self._l2_weight
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, value):
self._kl_weight = value
@property
def discriminator_weight(self):
return self._discriminator_weight
@discriminator_weight.setter
def discriminator_weight(self, value):
self._discriminator_weight = value
def embed(self, tokens: torch.Tensor) -> torch.Tensor:
return self._token_embedder(tokens)
def encode(self,
inputs: torch.Tensor,
mask: torch.Tensor,
drop_start_token: bool = True) -> torch.Tensor:
if drop_start_token:
inputs = inputs[:, 1:]
mask = mask[:, 1:]
enc_hidden = self._encoder(inputs.contiguous(), mask)
return enc_hidden
def sample_code_and_compute_kld(self,
hidden: torch.Tensor) -> torch.Tensor:
return self._code_generator(hidden)
def discriminate(self,
premise_hidden: torch.Tensor,
hypothesis_hidden: torch.Tensor) -> torch.Tensor:
disc_input = torch.cat(
[premise_hidden + hypothesis_hidden,
(premise_hidden - hypothesis_hidden).abs()],
dim=-1)
disc_input = self.disc_dropout(disc_input)
disc_logits = self._discriminator(disc_input)
return disc_logits
def construct_generator_inputs(self,
embeddings: torch.Tensor,
code: torch.Tensor) -> torch.Tensor:
batch_size, max_length, _ = embeddings.shape
code_expand = code.unsqueeze(1).expand(
batch_size, max_length, -1)
inputs = torch.cat([embeddings, code_expand], dim=-1)
return inputs
def generate(self,
code: torch.Tensor,
max_length: torch.Tensor) -> torch.Tensor:
start_index = self.vocab.get_token_index('<s>')
end_index = self.vocab.get_token_index('</s>')
pad_index = 0
done = torch.zeros_like(max_length).long()
max_max_length = max_length.max().item()
prev_word = (torch.empty_like(done).long().unsqueeze(1)
.fill_(start_index))
generated = []
self._generator.stateful = True
self._generator.reset_states()
for t in range(max_max_length):
if done.byte().all():
break
prev_word_emb = self.embed(prev_word)
input_t = self.construct_generator_inputs(
embeddings=prev_word_emb, code=code)
hidden_t = self._generator(input_t, 1 - done.unsqueeze(1))
pred_t = self._generator_projector(hidden_t).argmax(2)
pred_t.masked_fill_(done.byte(), pad_index)
generated.append(pred_t)
done.masked_fill_(pred_t.eq(end_index).squeeze(1), 1)
done.masked_fill_(max_length.le(t + 1), 1)
prev_word = pred_t
self._generator.stateful = False
generated = torch.cat(generated, dim=1)
return generated
def convert_to_readable_text(self,
generated: torch.Tensor) -> List[List[str]]:
sequences = [seq.cpu().tolist() for seq in generated.unbind(0)]
readable_sequences = []
for seq in sequences:
readable_seq = []
for word_index in seq:
if word_index != 0:
word = self.vocab.get_token_from_index(word_index)
readable_seq.append(word)
readable_sequences.append(readable_seq)
return readable_sequences
def compute_generator_loss(self,
embeddings: torch.Tensor,
code: torch.Tensor,
targets: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
inputs = self.construct_generator_inputs(
embeddings=embeddings, code=code)
hiddens = self._generator(inputs.contiguous(), mask)
logits = self._generator_projector(hiddens)
weights = mask.float()
loss = sequence_cross_entropy_with_logits(
logits=logits, targets=targets.contiguous(), weights=weights,
average=None)
return loss
def forward(self,
premise: Dict[str, torch.Tensor],
hypothesis: Dict[str, torch.Tensor],
label: Optional[torch.Tensor] = None) -> Dict[str, Any]:
"""
premise and hypothesis are padded with
the BOS and the EOS token.
"""
pre_mask = get_text_field_mask(premise)
hyp_mask = get_text_field_mask(hypothesis)
pre_tokens = premise['tokens']
hyp_tokens = hypothesis['tokens']
pre_token_embs = self.embed(pre_tokens)
hyp_token_embs = self.embed(hyp_tokens)
pre_token_embs = self.emb_dropout(pre_token_embs)
hyp_token_embs = self.emb_dropout(hyp_token_embs)
output_dict = {}
pre_hidden = self.encode(
inputs=pre_token_embs, mask=pre_mask, drop_start_token=True)
hyp_hidden = self.encode(
inputs=hyp_token_embs, mask=hyp_mask, drop_start_token=True)
pre_code, pre_kld = self.sample_code_and_compute_kld(pre_hidden)
hyp_code, hyp_kld = self.sample_code_and_compute_kld(hyp_hidden)
pre_kld = pre_kld.mean()
hyp_kld = hyp_kld.mean()
pre_gen_mask = pre_mask[:, 1:]
hyp_gen_mask = hyp_mask[:, 1:]
pre_gen_loss = self.compute_generator_loss(
embeddings=pre_token_embs[:, :-1], code=pre_code,
targets=pre_tokens[:, 1:], mask=pre_gen_mask)
hyp_gen_loss = self.compute_generator_loss(
embeddings=hyp_token_embs[:, :-1], code=hyp_code,
targets=hyp_tokens[:, 1:], mask=hyp_gen_mask)
pre_gen_loss = pre_gen_loss.mean()
hyp_gen_loss = hyp_gen_loss.mean()
gen_loss = pre_gen_loss + hyp_gen_loss
kld = pre_kld + hyp_kld
loss = gen_loss + self.kl_weight*kld
if label is not None:
disc_logits = self.discriminate(premise_hidden=pre_code,
hypothesis_hidden=hyp_code)
disc_loss = functional.cross_entropy(
input=disc_logits, target=label)
loss = loss + self.discriminator_weight*disc_loss
output_dict['discriminator_loss'] = disc_loss
self._metrics['discriminator_loss'](disc_loss)
self._metrics['discriminator_accuracy'](
predictions=disc_logits, gold_labels=label)
output_dict['generator_loss'] = gen_loss
output_dict['kl_divergence'] = kld
output_dict['loss'] = loss
self._metrics['generator_loss'](gen_loss)
self._metrics['kl_divergence'](kld)
self._metrics['loss'](loss)
return output_dict
def get_metrics(self, reset: bool = False
) -> Dict[str, Union[float, Dict[str, float]]]:
metrics = {k: v.get_metric(reset=reset)
for k, v in self._metrics.items()}
metrics['kl_weight'] = self.kl_weight
metrics['discriminator_weight'] = self.discriminator_weight
return metrics
def test_labeled():
from pprint import pprint
params = Params({
'token_embedder': {
'num_embeddings': 4,
'embedding_dim': 3
},
'code_dist_type': 'vmf',
'vmf_kappa': 100
})
vocab = Vocabulary()
while True:
vocab_size = vocab.get_vocab_size()
if vocab_size == 4:
break
vocab.add_token_to_namespace('a' + str(vocab_size))
model = SeparatedQuoraModel(params=params, vocab=vocab)
premise = {'tokens': torch.randint(low=0, high=4, size=(5, 6))}
hypothesis = {'tokens': torch.randint(low=0, high=4, size=(5, 7))}
label = torch.randint(low=0, high=3, size=(5,))
output = model(premise=premise, hypothesis=hypothesis, label=label)
pprint(output)
pprint(model.get_metrics())
def test_unlabeled():
from pprint import pprint
params = Params({
'token_embedder': {
'num_embeddings': 4,
'embedding_dim': 3
},
'code_dist_type': 'gaussian'
})
vocab = Vocabulary()
while True:
vocab_size = vocab.get_vocab_size()
if vocab_size == 4:
break
vocab.add_token_to_namespace('a' + str(vocab_size))
model = SeparatedQuoraModel(params=params, vocab=vocab)
premise = {'tokens': torch.randint(low=0, high=4, size=(5, 6))}
hypothesis = {'tokens': torch.randint(low=0, high=4, size=(5, 7))}
output = model(premise=premise, hypothesis=hypothesis, label=None)
pprint(output)
pprint(model.get_metrics())
if __name__ == '__main__':
test_labeled()
test_unlabeled()
|
[
"[email protected]"
] | |
26abf2b58ee4ed7a69f2c069c5026e46fd6d5427
|
419873dd3b7412f704b1a7907b64a60b44cedf39
|
/python/树/103. 二叉树的锯齿形层次遍历.py
|
b3b9739640c5bbaeecf8e7c3f913e970275761a9
|
[] |
no_license
|
Weless/leetcode
|
0585c5bfa260713f44dabc51fa58ebf8a10e7814
|
0566622daa5849f7deb0cfdc6de2282fb3127f4c
|
refs/heads/master
| 2021-11-13T07:59:20.299920 | 2021-10-25T02:09:53 | 2021-10-25T02:09:53 | 203,720,668 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 818 |
py
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
from collections import deque
queue = deque()
queue.append(root)
res = []
level = 1
while queue:
tmp = []
for _ in range(len(queue)):
node = queue.popleft()
tmp.append(node.val)
if node.left: queue.append(node.left)
if node.right: queue.append(node.right)
if level % 2 == 0:
res.append(tmp[::-1])
else:
res.append(tmp)
level += 1
return res
|
[
"[email protected]"
] | |
7199f038ea4f16a7389261bd5f9003a535c2f491
|
1892a473b7eed6aaa712bc2959a1aca48beec284
|
/domains/gym_taxi/utils/config.py
|
afa1e60798fc34e0d28312b85cf4f1ba3ae3c071
|
[
"MIT"
] |
permissive
|
AndrewPaulChester/sage-code
|
d3753bc894f21ce057c1a273e54926e368529e2b
|
9fe676bfbcbc6f642eca29b30a1027fba2a426a0
|
refs/heads/main
| 2023-05-05T19:08:21.655463 | 2021-05-27T05:21:54 | 2021-05-27T05:21:54 | 371,245,286 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,501 |
py
|
"""
.. module:: config
:synopsis: Contains config parameters for the SDRL taxi world.
"""
import numpy as np
from numpy.random import randint as rand
import matplotlib.pyplot as pyplot
MAX_EPISODE_LENGTH = 500
DISCRETE_ENVIRONMENT_STATES = 500
FIXED_GRID_SIZE = 5
LOCS = [(0, 0), (4, 0), (0, 4), (3, 4)]
MAP = [
"+---------+",
"|R: | : :G|",
"| : : : : |",
"| : : : : |",
"| | : | : |",
"|Y| : |B: |",
"+---------+",
]
MISSING_EDGES = [
((1, 0), (2, 0)),
((0, 3), (1, 3)),
((0, 4), (1, 4)),
((2, 3), (3, 3)),
((2, 4), (3, 4)),
]
OPEN = {
"size": FIXED_GRID_SIZE,
"wall_locations": [],
"passenger_destinations": LOCS,
"passenger_locations": LOCS,
"delivery_limit": 1,
"concurrent_passengers": 1,
"timeout": MAX_EPISODE_LENGTH,
"random_walls": False,
}
ORIGINAL = {
"size": FIXED_GRID_SIZE,
"wall_locations": MISSING_EDGES,
"passenger_destinations": LOCS,
"passenger_locations": LOCS,
"delivery_limit": 1,
"concurrent_passengers": 1,
"timeout": MAX_EPISODE_LENGTH,
"random_walls": False,
}
EXPANDED = {
"size": 20,
"delivery_limit": 1,
"concurrent_passengers": 1,
"timeout": MAX_EPISODE_LENGTH,
"passenger_creation_probability": 1,
"random_walls": True,
}
MULTI = {
"size": 20,
"delivery_limit": 100,
"concurrent_passengers": 5,
"timeout": MAX_EPISODE_LENGTH,
"passenger_creation_probability": 0.05,
"random_walls": True,
}
PREDICTABLE = {
"size": 20,
"delivery_limit": 100,
"concurrent_passengers": 1,
"timeout": 2000,
"passenger_creation_probability": 0.04,
"random_walls": True,
"passenger_locations": [
(0, 0),
(0, 1),
(0, 2),
(1, 0),
(1, 1),
(1, 2),
(2, 0),
(2, 1),
(2, 2),
],
"passenger_destinations": [
(17, 17),
(17, 18),
(17, 19),
(18, 17),
(18, 18),
(18, 19),
(19, 17),
(19, 18),
(19, 19),
],
}
PREDICTABLE15 = {
"size": 15,
"delivery_limit": 100,
"concurrent_passengers": 1,
"timeout": 2000,
"passenger_creation_probability": 0.06,
"random_walls": True,
"passenger_locations": [
(0, 0),
(0, 1),
(0, 2),
(1, 0),
(1, 1),
(1, 2),
(2, 0),
(2, 1),
(2, 2),
],
"passenger_destinations": [
(12, 12),
(12, 13),
(12, 14),
(13, 12),
(13, 13),
(13, 14),
(14, 12),
(14, 13),
(14, 14),
],
}
PREDICTABLE10 = {
"size": 10,
"delivery_limit": 100,
"concurrent_passengers": 1,
"timeout": 2000,
"passenger_creation_probability": 0.08,
"random_walls": True,
"passenger_locations": [(0, 0), (0, 1), (1, 0), (1, 1)],
"passenger_destinations": [(8, 8), (8, 9), (9, 8), (9, 9)],
}
PREDICTABLE5 = {
"size": 5,
"delivery_limit": 100,
"wall_locations": MISSING_EDGES,
"concurrent_passengers": 1,
"timeout": 1000,
"passenger_creation_probability": 0.12,
"random_walls": False,
"passenger_locations": [(0, 0), (0, 1), (1, 0), (1, 1)],
"passenger_destinations": [(3, 3), (3, 4), (4, 3), (4, 4)],
}
FUEL = {
"size": 20,
"delivery_limit": 100,
"concurrent_passengers": 5,
"timeout": MAX_EPISODE_LENGTH,
"passenger_creation_probability": 0.05,
"random_walls": True,
"fuel_use": 1,
}
|
[
"[email protected]"
] | |
87418f41882ec35f3c52caae603025eb269fc11b
|
31473c88fbfb0dbd436468b7535787cdc91ce472
|
/ItemFastAPI/env/Lib/site-packages/poetry/core/__init__.py
|
a06869c8b84d72266573e3eaa9e694e31582d97a
|
[] |
no_license
|
WaleedAlromaema/ItemFastAPI
|
9fef19e4f85193f6f17d18c0c393148f4fe510c6
|
9aa4779dc7d45c38bdd738d0d311d35f9042cd4c
|
refs/heads/master
| 2023-07-10T14:19:28.077497 | 2021-08-01T14:08:24 | 2021-08-01T14:08:24 | 391,641,951 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 305 |
py
|
import sys
try:
from pathlib import Path
except ImportError:
# noinspection PyUnresolvedReferences
from pathlib2 import Path
__version__ = "1.0.3"
__vendor_site__ = (Path(__file__).parent / "_vendor").as_posix()
if __vendor_site__ not in sys.path:
sys.path.insert(0, __vendor_site__)
|
[
"WalRoma@DESKTOP-TJNDUI1"
] |
WalRoma@DESKTOP-TJNDUI1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.