blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14202107145a13a0fa75ccb3e8d7190f116eab9f | bd52b885516589cfb5b3bf79cbf684e435d04141 | /presentation/imgsrc/images.py | 6ebac28eed22a77cf6e5188829da9e6d9f538bae | [
"CC-BY-4.0"
]
| permissive | gertingold/feynman-intro | 6ce0aaca3d57858588977ef3f7f74d8ac1734a92 | d5132df951f67ec989f18dc014940e4a47924fd6 | refs/heads/master | 2023-07-20T02:20:28.676554 | 2019-08-25T20:55:17 | 2019-08-25T20:55:17 | 200,374,019 | 6 | 0 | NOASSERTION | 2023-07-06T21:42:06 | 2019-08-03T12:38:56 | TeX | UTF-8 | Python | false | false | 1,750 | py | from pyx import canvas, color, deco, path, style, text, unit
# Geometry of the drawing (canvas units): the box is boxwidth wide, height tall.
boxwidth = 3
height = 4
qi = 0.2*boxwidth  # NOTE(review): unused below -- presumably an initial position q_i
qf = 0.7*boxwidth  # position q_f inside the box; both line families derive from it
linecolor1 = color.rgb(0.8, 0, 0)  # red family: lines at qf + 2nL
linecolor2 = color.rgb(0, 0, 0.8)  # blue family: lines at -qf + (2n+2)L
text.set(engine=text.LatexEngine)
text.preamble(r'''\usepackage[sfdefault,scaled=.85]{FiraSans}
\usepackage{newtxsf}''')
unit.set(vscale=1.2, wscale=1.3, xscale=1.5)
c = canvas.canvas()
# Shade the central box [0, boxwidth] x [0, height].
c.fill(path.rect(0, 0, boxwidth, height), [color.rgb(0.92, 1, 0.92)])
# Grey vertical lines at every multiple of boxwidth (box walls and copies).
for n in range(-1, 4):
    c.stroke(path.line(n*boxwidth, 0, n*boxwidth, height),
             [style.linewidth.THick, color.grey(0.4)])
poslinestyle = (style.linestyle.dashed, style.linewidth.Thick)
# Red dashed verticals at qf + 2n*boxwidth, each with a tick mark above the box.
for n in range(-1, 2):
    q = qf + 2*n*boxwidth
    c.stroke(path.line(q, 0, q, height), [*poslinestyle, linecolor1])
    c.stroke(path.line(q, height+1.1, q, height+1.5), [style.linewidth.thick])
# Blue dashed verticals at -qf + (2n+2)*boxwidth, tick marks slightly lower.
for n in range(-1, 2):
    q = -qf + (2*n+2)*boxwidth
    c.stroke(path.line(q, 0, q, height), [*poslinestyle, linecolor2])
    c.stroke(path.line(q, height+0.1, q, height+0.5), [style.linewidth.thick])
# Double-headed arrows labelling the 2L spacing of each family of lines.
for n in range(0, 2):
    c.stroke(path.line(-qf+2*n*boxwidth, height+0.3,
                       -qf+2*(n+1)*boxwidth, height+0.3),
             [style.linewidth.thick, deco.barrow, deco.earrow])
    c.text(-qf+(1+2*n)*boxwidth, height+0.4, '$2L$',
           [text.halign.center])
    c.stroke(path.line(qf-2*(n-1)*boxwidth, height+1.3,
                       qf-2*n*boxwidth, height+1.3),
             [style.linewidth.thick, deco.barrow, deco.earrow])
    c.text(qf-(2*n-1)*boxwidth, height+1.4, '$2L$',
           [text.halign.center])
c.text(qf, -0.5, r'$q_\text{f}$', [text.halign.center])
c.text(2*boxwidth-qf, -0.5, r'$2L-q_\text{f}$', [text.halign.center])
c.writePDFfile()
| [
"[email protected]"
]
| |
79b8ba4f847d6784965310d7c4a26ead27dfdde5 | b3b93c7db04c60c42fc4db58f1731a33a14fd621 | /django_fund/projects/2_django/likes_books/main/main/settings.py | 0e04921db7e6c64cb1705ca2774675c4af46bc66 | []
| no_license | scottcdudley/python_stack | 97d6d90179c26cc59cf926c5dcf45a6e99a918e0 | ac3c6fdedc19711a65d8467b65c96a3938e9c2a1 | refs/heads/master | 2020-03-23T20:31:22.410044 | 2018-07-23T17:24:15 | 2018-07-23T17:24:15 | 142,043,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,110 | py | """
Django settings for main project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'y)0dkf#5@$i%4509b^dua*m3l81h4z47e22*dx%av0hqa$4^&a'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'apps.first_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'main.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
bc3f8deb23057022860cae89ea139e479b4f287f | ece92e753aa944e38c3b8a94ddee8d87856a2696 | /p018.py | a6a9b9c788e3dafe8e5a32811cd817ab83e163e2 | []
| no_license | hugowxyz/proj-euler-python | c0ef6ba224e27f101a23a0b4e139aeb138adbd5c | b6f98dbb5f508372f1c43c4a2722825f46171dca | refs/heads/master | 2023-02-06T02:49:06.305509 | 2020-12-19T16:22:11 | 2020-12-19T16:22:11 | 312,874,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | triangle = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""
array = [_.split(" ") for _ in triangle.split("\n")]  # triangle as rows of digit strings
mem = dict()  # NOTE(review): unused -- leftover from an abandoned memoisation attempt
ans_max = 0   # best root-to-bottom path sum found so far; updated by recursion()
def recursion(x, y, ans):
    """Depth-first walk of the triangle in `array`, updating the global
    ans_max with the best root-to-bottom path sum.

    :param x: column index within row y (0 <= x <= y for a triangle).
    :param y: row index.
    :param ans: path sum accumulated above this cell.
    """
    global ans_max
    ans += int(array[y][x])
    if y == len(array) - 1:
        # Bottom row reached: record the total if it is a new best.
        if ans > ans_max:
            ans_max = ans
        return
    # The original guarded these calls with `y < len(array)` and
    # `x < len(array[y])`, both always true for a triangle after the
    # bottom-row check above; the dead `ans = 0` before return is dropped too.
    recursion(x, y + 1, ans)
    recursion(x + 1, y + 1, ans)
def main():
    # Explore every path from the apex; recursion() updates the global ans_max.
    recursion(0, 0, 0)
    print(ans_max)

main()
| [
"[email protected]"
]
| |
54c97e297d324e1c04ce62adc25c5a40c1064c83 | 5067e146db7de78f61e7f09bd07b1d0a982270d6 | /python/lists.py | 9f49ccc3dc0ce5d2793bbfd8dc216f1a37a90b41 | []
| no_license | AliNisarAhmed/exercism-solutions | 024e0c5a3af09ebd03b6f9003af2c165d88dcc16 | d59fa32eba2b34326a8de00799bbe16e9309c662 | refs/heads/master | 2023-05-27T00:07:49.083346 | 2023-05-16T05:12:34 | 2023-05-16T05:12:34 | 205,302,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | """Functions for tracking poker hands and assorted card tasks.
Python list documentation: https://docs.python.org/3/tutorial/datastructures.html
"""
def get_rounds(number):
    """Create a list containing the current and next two round numbers.

    :param number: int - current round number.
    :return: list - current round and the two that follow.
    """
    return [number + offset for offset in range(3)]
def concatenate_rounds(rounds_1, rounds_2):
    """Concatenate two lists of round numbers.

    Returns a new list; unlike the previous `list.extend` implementation,
    neither argument is mutated (callers' lists stay intact).

    :param rounds_1: list - first rounds played.
    :param rounds_2: list - second set of rounds played.
    :return: list - all rounds played.
    """
    return rounds_1 + rounds_2
def list_contains_round(rounds, number):
    """Check if the list of rounds contains the specified number.

    :param rounds: list - rounds played.
    :param number: int - round number.
    :return: bool - was the round played?
    """
    return any(played == number for played in rounds)
def card_average(hand):
    """Calculate and return the average card value from the list.

    :param hand: list - cards in hand.
    :return: float - average value of the cards in the hand.
    """
    total = 0
    for card in hand:
        total += card
    return total / len(hand)
def approx_average_is_average(hand):
    """Return True if either approximate average equals the true average.

    The two approximations are the mean of the first and last cards, and
    the value of the 'middle' card.

    :param hand: list - cards in hand.
    :return: bool - does one of the approximate averages equal the true one?
    """
    true_average = sum(hand) / len(hand)
    first_last_average = (hand[0] + hand[-1]) / 2
    middle_card = hand[len(hand) // 2]
    return true_average in (first_last_average, middle_card)
def average_even_is_average_odd(hand):
    """Return True if the even-indexed and odd-indexed cards share an average.

    :param hand: list - cards in hand.
    :return: bool - are even and odd averages equal?
    """
    evens = hand[::2]
    odds = hand[1::2]
    return sum(evens) / len(evens) == sum(odds) / len(odds)
def maybe_double_last(hand):
    """Multiply a Jack (value 11) in the last index position by 2.

    Returns a new list when a Jack is doubled; the input hand is never
    mutated (the previous implementation modified the caller's list in
    place and raised IndexError on an empty hand).

    :param hand: list - cards in hand.
    :return: list - hand with a trailing Jack (if present) doubled.
    """
    if hand and hand[-1] == 11:
        return hand[:-1] + [22]
    return hand
| [
"[email protected]"
]
| |
6bf08a5eeb7496d01194cd95a6a6b984dc689343 | 129afd7e69b9105c1a80e1b51d73c2cd6a582025 | /uniquewords.py | 608c31e0a5f742fc028ced615d40e264c86d878a | []
| no_license | sailskisurf23/sidepeices | 736d977e7194843827e8cf6b0a9402fbe63ea580 | 582d40b761d7c6e215a3b27e6fb1665fce492948 | refs/heads/master | 2021-09-25T10:09:40.864109 | 2018-10-20T19:02:01 | 2018-10-20T19:02:01 | 101,957,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #userinput = "enter comma-seperated list of words: "
#userinput = "enter comma-seperated list of words: "
userinput = 'hello, there, how, are, you, hello, you'
# Deduplicate the comma-separated, whitespace-trimmed words. Note that set
# iteration order is arbitrary, so the printed word order is nondeterministic.
set1 = set([x.strip() for x in userinput.split(',')])
print(", ".join(set1))
"[email protected]"
]
| |
8b672339668cec234c5434af9ba27be18894617a | ae74a7d1ef50f27cad108915d2879e01217934d4 | /01-QT/06-Dialog/InputDialog.py | 81afd8203dd0c98684bd8887c15158b2f83ec1b3 | []
| no_license | dyyzqz1015/python_learn | 0f9a585b782018559d8cb8a35f7b0bd1fc21a8ff | 62ee8c49e1e551d9d6bb881c553e8602f3a2c054 | refs/heads/master | 2021-06-10T18:50:36.761950 | 2019-12-11T08:40:14 | 2019-12-11T08:40:14 | 135,088,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : 01-QT
# @Time : 2018/5/26 17:12
# @Author : QinZai
# @File : InputDialog.py
# @Software: PyCharm
from PyQt5.QtWidgets import (QWidget, QPushButton, QLineEdit,
QInputDialog, QApplication)
import sys
class Example(QWidget):
    """Demo window: a button that opens a QInputDialog and a line edit
    that displays whatever text the user entered."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # Button that pops up the input dialog.
        self.btn = QPushButton('Dialog', self)
        self.btn.move(20, 20)
        self.btn.clicked.connect(self.showDialog)
        # Line edit that receives the dialog's result.
        self.le = QLineEdit(self)
        self.le.move(130, 22)
        self.setGeometry(300, 300, 290, 150)
        self.setWindowTitle('Input dialog')
        self.show()

    def showDialog(self):
        # getText returns (text, ok); ok is False when the user cancelled.
        text, ok = QInputDialog.getText(self, 'Input Dialog',
                                        'Enter your name:')
        if ok:
            self.le.setText(str(text))
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | [
"[email protected]"
]
| |
7caaba5f6d3bc82752e4d751b5c5e178037ab7f7 | 74951991a9e1dbe92d4999da9060409a9492bdc3 | /palindrome-number/palindrome-number.py | 3155e839eb464780b9d419ca27c1b6a61a2bf6d4 | []
| no_license | drpuig/Leetcode-1 | fd800ee2f13c7ce03fa57c8a1d10b3aa6976d7c0 | 4ee104f3069c380e1756dd65f6ff6004554e6c0e | refs/heads/main | 2023-07-15T08:57:32.971194 | 2021-08-21T08:29:24 | 2021-08-21T08:29:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | class Solution:
def isPalindrome(self, num: int) -> bool:
if num < 0: return False
r, x = 0, num
while x > 0:
r = r * 10 + x % 10
x //= 10
return r == num
| [
"[email protected]"
]
| |
1ff6570048b201ffe5b06a087dde354ba9152ec6 | 8b46615f2c9148a45bd6969f91070baa759ff6af | /books/admin.py | 91e75c32d724807d78cf2518ff26e3a7e023f0f6 | [
"MIT"
]
| permissive | jamesbond007dj/blog-api2 | 59d52b2138bc04e365ad06a00aae94dc98f82946 | b4e1170f448c27528c48c955fde5998304e239d9 | refs/heads/master | 2022-05-31T19:41:05.867332 | 2020-01-24T01:37:52 | 2020-01-24T01:37:52 | 235,566,438 | 1 | 0 | MIT | 2022-05-25T03:56:24 | 2020-01-22T12:13:59 | Python | UTF-8 | Python | false | false | 87 | py | from django.contrib import admin
from .models import Books
admin.site.register(Books)
| [
"[email protected]"
]
| |
74eecca1ef7048bcade095ea953f638485e935bf | 997aac631c5d3442077c755b26cc948a3860dd1a | /vertex_cover_approximation/2_lognapprox.py | 0fb9366e265059aa4fe719fbe72c1f0148ed1528 | []
| no_license | arkadiuss/algorithms_for_computationally_hard_problems | 7657fdbba621a3f21fe9a3f856a2cc12e5b0222e | b15bb03cb3f2a32298177423b60a4fa3370d12bb | refs/heads/master | 2022-11-11T19:53:37.746769 | 2020-07-02T22:43:16 | 2020-07-02T22:43:16 | 249,685,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | import sys
from dimacs import *
from itertools import combinations
def remove_edge(G, S):
    """Return a copy of adjacency-set graph G with every vertex in S isolated.

    All edges incident to a vertex in S are removed; G itself is not modified.
    """
    stripped = [neighbours - S for neighbours in G]
    for vertex in S:
        stripped[vertex] = set()
    return stripped
def find_one(G):
    """Return the single neighbour of the first degree-1 vertex (indices >= 1),
    or None when no such vertex exists."""
    return next(
        (next(iter(G[v])) for v in range(1, len(G)) if len(G[v]) == 1),
        None,
    )
def find_two(G):
    """Return the index of the first vertex (>= 1) with exactly two
    neighbours, or None when no such vertex exists."""
    for vertex, neighbours in enumerate(G):
        if vertex >= 1 and len(neighbours) == 2:
            return vertex
    return None
def poly(G):
    """Cover the low-degree remainder of G (vertices of degree <= 2).

    Repeatedly picks the neighbour of a degree-1 vertex (always optimal for
    a path end), falling back to any degree-2 vertex, isolates it, and
    returns the set of chosen vertices. The dead computation
    `c = len([1 for i in G if len(i) > 1])` from the original is removed.
    """
    R = set()
    Gc = G.copy()
    while True:
        # Prefer the neighbour of a pendant vertex; otherwise any degree-2 one.
        o = find_one(Gc)
        if o is None:
            o = find_two(Gc)
        if o is None:
            break
        Gc = remove_edge(Gc, {o})
        R.add(o)
    return R
def vertex_cover(G):
    """Approximate vertex cover: greedily take the highest-degree vertex while
    any vertex has degree > 2, then finish the degree-<=2 remainder with poly().

    NOTE(review): presumably the log(n)-factor greedy approximation (file is
    named 2_lognapprox) -- confirm the intended guarantee.
    """
    S = set()
    while True:
        # Find the vertex of maximum degree among vertices of degree > 2.
        c = -1
        for i in range(len(G)):
            if len(G[i]) > 2 and (c == -1 or len(G[i]) > len(G[c])):
                c = i
        if c == -1:
            # Only degree-<=2 vertices remain; cover that part with poly().
            Sp = poly(G)
            return S | Sp
        G = remove_edge(G, {c})
        S = S | {c}
        if len(S) % 100 == 0:
            print(S)  # progress trace left in by the author (debug output)
def solve_vertex_cover(G):
    """Entry-point wrapper around vertex_cover()."""
    return vertex_cover(G)
# --- script entry: read a DIMACS graph file, cover it, write the solution ---
if len(sys.argv) < 2:
    print("Specify graph")
    exit()
graph_file = sys.argv[1]
G = loadGraph("{0}".format(graph_file))  # loadGraph/saveSolution come from dimacs
S = solve_vertex_cover(G)
saveSolution("{0}.sol".format(graph_file), S)
| [
"[email protected]"
]
| |
a6b04f1468e584f07faee05a9f0038e74e17f645 | e527efa21057bdab7aff6a6b8c753171e75c6bfe | /quetzalcoatl/settings/celery/prod.py | b940474cf5d70d73314d43a75ecee8d4c48b1ffa | [
"WTFPL"
]
| permissive | dem4ply/quetzalcoatl | cb49d6cbf0260ebdb127f6d95d39d299684291c6 | f9f72dc72f0da4f264c33128dc01b79f9fda5f2a | refs/heads/master | 2022-01-10T14:37:08.985767 | 2020-03-14T07:47:27 | 2020-03-14T07:47:27 | 235,905,684 | 0 | 0 | WTFPL | 2021-09-22T18:28:28 | 2020-01-23T23:19:15 | Python | UTF-8 | Python | false | false | 1,131 | py | import os
# from kombu import Exchange, Queue
# from celery.schedules import crontab
# from datetime import timedelta
# Indirect environment lookup: QUETZALCOATL__RABBITMQ__KEY__URL names ANOTHER
# environment variable that holds the actual broker URL. Raises KeyError at
# import time if either variable is missing.
url_key = os.environ[ 'QUETZALCOATL__RABBITMQ__KEY__URL' ]
celery_url = os.environ[ url_key ]
# Broker and result backend share the same RabbitMQ URL.
BROKER_URL = celery_url
RESULT_BACKEND = celery_url
CELERY_RESULT_BACKEND = celery_url
'''
task_annotations = {
    '*': {
        'rate_limit': '5/s'
    }
}
'''
# beat_schedule = 'djcelery.schedulers.DatabaseScheduler'
# TASK_QUEUES = (
#     Queue( 'default', Exchange( 'task', 'topic' ), routing_key='default' ),
#     Queue(
#         'debug', Exchange( 'task_debug', 'topic' ), routing_key='*.debug.*' ),
# )
#
# TASK_DEFAULT_QUEUE = 'default'
# TASK_DEFAULT_EXCHANGE = "tasks"
# TASK_DEFAULT_EXCHANGE_TYPE = "topic"
# TASK_DEFAULT_ROUTING_KEY = "task.default"
#
# TASK_ROUTES = {
#     'default': {
#         'binding_key': 'task.#',
#     },
#     'reader_moe.tasks.debug_task': {
#         'queue': 'debug',
#         'binding_key': 'task.debug.*',
#         'exchange': 'task_debug'
#     }
# }
#
# beat_schedule = { }
RESULT_SERIALIZER = 'json'
TASK_SERIALIZER = 'json'
# Tasks are queued for real in production (never executed eagerly inline).
CELERY_ALWAYS_EAGER = False
"[email protected]"
]
| |
1f8a19985062cc813d27a33bf128600b4da304ef | b5caf842bec794b95c6d8cd2c44437a91943cf67 | /GameClasses/Player.py | 9ad8d39c59a415fb9ad9089dc368b02fbb689937 | []
| no_license | Holthuizen/Final-Project | ade5c50d8f3a1dc935f76eac557577e1f0dfec42 | 150b5b0a1f3efb42c6ca78733ac8033610f90730 | refs/heads/main | 2023-02-13T04:19:09.786406 | 2021-01-25T17:13:21 | 2021-01-25T17:13:21 | 331,402,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | import random
#player
class Player:
def __init__(self,id,is_player):
self.canmove = False;
self.id = id;
self.hand = []
self.is_player = is_player
def pick_cards(self,deck):
#return [(8,0),(8,1),(8,2)]
for i in range(3):
random.shuffle(deck)
card = deck.pop()
self.hand.append(card)
def calc_score(self):
if self.hand == []:
return False
if self.hand[0][0] == self.hand[1][0]:
if self.hand[0][0] == self.hand[2][0]:
return 30.5
score = [0,0,0,0]
for card in self.hand:
value, color = card[0],card[1]
if value <= 9:
score[color] += card[0]+1 #correct for index
if value > 9 and value < 13:
score[color] += 10
if value == 0:
score[color] += 11
return max(score)
| [
"[email protected]"
]
| |
77f7b6ff72973d6db987794ba6c47a95707cc074 | 16a6a0f6088cedea0433743610cf2b13fde3fe46 | /sortedLLtoBST.py | 079447d0812bd01ad52e9e5dec5089ec9bbde1b3 | []
| no_license | VineeshaKasam/Leetcode | e05b57f3d6215f076e6343851b30af539a45b7b5 | 2b70b0a514e51b092daf4aecd86a932cd8d8dabe | refs/heads/master | 2020-03-22T20:14:32.703751 | 2018-11-14T06:53:26 | 2018-11-14T06:53:26 | 140,584,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | '''
Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of
every node never differ by more than 1.
'''
# Definition for singly-linked list.
class ListNode(object):
    # Singly linked list node: holds a value and a pointer to the next node.
    def __init__(self, x):
        self.val = x
        self.next = None
# Definition for a binary tree node.
class TreeNode(object):
    # Binary tree node: value plus left/right child pointers.
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def sortedListToBST(self, head):
        """Convert a sorted singly linked list into a height-balanced BST.

        Finds the middle node with slow/fast pointers, makes it the root,
        and recursively builds the subtrees from the two halves. The input
        list is consumed: links are severed at each midpoint.

        :param head: ListNode | None - head of an ascending sorted list.
        :return: TreeNode | None - root of a height-balanced BST.
        """
        if not head:
            return
        if not head.next:
            return TreeNode(head.val)
        # fast starts two ahead so that slow stops just BEFORE the middle
        # node, letting us cut the list right there.
        slow, fast = head, head.next.next
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        temp = slow.next   # the middle node -> becomes this subtree's root
        slow.next = None   # sever the left half from the middle
        root = TreeNode(temp.val)
        root.left = self.sortedListToBST(head)
        root.right = self.sortedListToBST(temp.next)
        return root
"[email protected]"
]
| |
adb83d2082bc73049549cba58dc5fc6e9f3f0997 | 29afb48dd73bfd988864218aa5bf9e0acf219d0f | /src/sandBox/analysisFunctionDevelopment/SpikeCountCode.py | 55d8adcd4b7d37e65b257bf09c223e73c23bb0f6 | []
| no_license | alx5246/srnnetTraining | b18623ae2b2bc80476dc273af615ba7d784443d4 | 416a0f7f30ac55b094120abef370c7c4c166fdc8 | refs/heads/master | 2022-11-04T14:42:50.082172 | 2016-11-19T20:07:02 | 2016-11-19T20:07:02 | 59,118,134 | 1 | 1 | null | 2022-10-12T10:10:49 | 2016-05-18T13:29:42 | Python | UTF-8 | Python | false | false | 4,842 | py | # S. Pickard
# June 2016
# (Using python 3.4.4, and brian2 2.0rc)
#
# DESCRIPTION
# Function/method that produces an output vector of averaged spike counts across neurons per time step
# This analysis was inspired by Lazar et al.(2009) Figure 5.b
# Goal is to find how many spike occur in each sub time interval
import numpy as np
def spike_countCode(spikeTime, start, stop, dt):
    """Count spikes in each sub-interval of [start, stop].

    The run is split into bins of width ``dt``. Each bin covers the
    half-open interval (edge, edge + dt], except that a spike exactly at
    ``start`` is counted in the first bin (matching the original loop's
    special case for the first sample).

    This replaces the original hand-rolled scanning loop, which dropped the
    final bin's count when the spike train ended inside it, emitted copious
    debug prints, and passed a float ``num`` to np.linspace (an error on
    modern numpy).

    :param spikeTime: 1-D array-like of spike times, in seconds.
    :param start: float - start time of the run, in seconds.
    :param stop: float - end time of the run, in seconds.
    :param dt: float - sub-interval width, in seconds.
    :return: list[int] - spike count per bin, round((stop-start)/dt) entries.
    """
    spikes = np.asarray(spikeTime, dtype=float)
    duration = stop - start
    n_bins = int(round(duration / dt))
    edges = np.linspace(0, duration, n_bins + 1)
    # searchsorted(..., side='left') maps a time in (edges[j], edges[j+1]]
    # to j + 1, so subtracting 1 yields its bin index; clipping pins a spike
    # at exactly `start` into bin 0 (and guards against round-off at `stop`).
    bins = np.searchsorted(edges, spikes - start, side='left') - 1
    bins = np.clip(bins, 0, n_bins - 1)
    return np.bincount(bins, minlength=n_bins).astype(int).tolist()
| [
"[email protected]"
]
| |
a9aae5af5a7cacba668bf8b9da5cef2adec167b3 | a29c7e363026111276e94b96d39b1b4ab48dbca8 | /sdk/test/test_authorisation_request_response.py | d58f0334a7f10dfe20737e184735fd187ad09325 | [
"MIT"
]
| permissive | matteo-kalogirou/yapily-sdk-python | a56bf6f9b1b308efda38f081f6237ebd8c8f8ad5 | f10d2d14383f551eeb59aa893d328ffa5080da22 | refs/heads/master | 2022-12-16T22:24:18.026765 | 2020-09-18T13:59:26 | 2020-09-18T13:59:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 0.0.242
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import yapily
from yapily.models.authorisation_request_response import AuthorisationRequestResponse # noqa: E501
from yapily.rest import ApiException
class TestAuthorisationRequestResponse(unittest.TestCase):
    """AuthorisationRequestResponse unit test stubs (generated scaffold)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build an AuthorisationRequestResponse fixture.

        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = yapily.models.authorisation_request_response.AuthorisationRequestResponse()  # noqa: E501
        if include_optional :
            return AuthorisationRequestResponse(
                id = '0',
                user_uuid = '0',
                application_user_id = '0',
                reference_id = '0',
                institution_id = '0',
                status = 'AWAITING_AUTHORIZATION',
                created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                transaction_from = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                transaction_to = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                expires_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
                time_to_expire_in_millis = 56,
                time_to_expire = '0',
                feature_scope = [
                    'INITIATE_PRE_AUTHORISATION'
                ],
                authorisation_url = '0',
                consent_token = '0',
                qr_code_url = '0'
            )
        else :
            return AuthorisationRequestResponse(
            )

    def testAuthorisationRequestResponse(self):
        """Test AuthorisationRequestResponse"""
        # Smoke test: construction with and without optional fields must not raise.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
71a19a2d5851014c3bdebeb7b4bfc8a4ffcfaabe | 4dca1f7e50ae154a7288cd44c2edfb81063f9eea | /model/utility.py | 66d6008d8035c46920e505f010996326049560f8 | []
| no_license | Cloudxtreme/management_bind9 | 3c789a0ed6d9ef15da32afaccec6a877c093a7a8 | 82fbc9daaf9ca5ed97997cc9f87099e7a9e9d830 | refs/heads/master | 2021-05-27T04:05:41.237871 | 2014-03-14T12:35:26 | 2014-03-14T12:35:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,230 | py | #!/usr/bin/python
#-*-coding:utf-8-*-
#- utility Bind Class
#- AdminServer / System Management Server
#- Copyright (C) 2014 GoldraK & Interhack
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>
# WebSite: http://adminserver.org/
# Email: [email protected]
# Facebook: https://www.facebook.com/pages/Admin-Server/795147837179555?fref=ts
# Twitter: https://twitter.com/4dminserver
import sys, os
sys.path.append('modules/management_bind9/model')
from generate import generate
class utility(object):
    # NOTE(review): Python 2 code (raw_input). Every SQL statement below is
    # built by string concatenation from user/DB input -- vulnerable to SQL
    # injection. Prefer parameterized queries: cursor.execute(sql, params).

    @staticmethod
    def edit_domain(translate, output, id_domain, conectionBrain, log):
        """Interactively edit one DNS zone row, regenerate its files, log it."""
        cursor = conectionBrain.cursor()
        cursor.execute("SELECT * FROM dns WHERE id = '" + id_domain + "'")
        for info in cursor:
            _ = translate
            # Prompt per field; empty input keeps the stored value (shown in []).
            domain_name = raw_input('Domain Name [' + info[1] + ']: ')
            email = raw_input('Email Contact [' + info[2] + ']: ')
            ip_server = raw_input('Ip Server [' + info[3] + ']: ')
            ns_primary = raw_input('NS Primary [' + info[4] + ']: ')
            ns_secundary = raw_input('NS Secundary [' + info[5] + ']: ')
            if str(info[6]) == '1':
                valor = 'y'
            else:
                valor = 'n'
            server_mail = raw_input('Server Mail [' + str(valor) + ']: ')
            type_zone = raw_input('Type Zone [' + info[9] + ']: ')
            ip_transfer = raw_input('Ip transfer [' + info[10] + ']: ')
            domain_key = ''   # NOTE(review): unused
            spf = ip_server   # NOTE(review): unused
            control = False   # becomes True when at least one field changed
            valores = ''      # accumulated "col = 'val', " SET fragments
            if domain_name != "":
                control = True
                valores += "domain = '" + str(domain_name) + "', "
            if email != "":
                control = True
                valores += "email = '" + str(email) + "', "
            if ip_server != "":
                control = True
                valores += "ipserver = '" + str(ip_server) + "', "
            if ns_primary != "":
                control = True
                valores += "NS_primary = '" + str(ns_primary) + "', "
            if ns_secundary != "":
                control = True
                valores += "NS_secundary = '" + str(ns_secundary) + "', "
            if server_mail !="":
                control = True
                if server_mail == 'y':
                    e_server = '1'
                else:
                    e_server = '0'
                valores += "email_server = '" + str(e_server) + "', "
            if type_zone !="":
                control = True
                valores += "type_zone = '" + str(type_zone) + "', "
            if ip_transfer != "":
                control = True
                valores += "ip_transfer = '" + str(ip_transfer) + "', "
            if control == True:
                # Append a sentinel 'F' and split on ", F" to drop the
                # trailing comma from the accumulated SET clause.
                final = valores + 'F'
                cursor.execute("UPDATE dns SET " + final.split(', F')[0] + " WHERE id = '" + str(info[0]) + "'")
                conectionBrain.commit()
                generate.all(str(info[0]), conectionBrain)
                log.write(_('Edit Domain ') + str(domain_name))

    @staticmethod
    def delete_domain(translate, output, id_domain, conectionBrain, log):
        """Disable (status=0) or delete a zone, remove its file, regen config."""
        _ = translate
        sentencia = raw_input('disable[0]/delete[1]: [0] ')
        while sentencia != '0' and sentencia != '1':
            output.error(_('Option not valid'))
            sentencia = raw_input('disable[0]/delete[1]: [0] ')
        cursor = conectionBrain.cursor()
        cursor.execute("SELECT domain FROM dns WHERE id = '" + str(id_domain) + "'")
        for info in cursor:
            domain = info[0]
        if sentencia == '0':
            cursor.execute("UPDATE dns SET status = '0' WHERE id = '" + id_domain + "'")
        else:
            cursor.execute("DELETE FROM dns WHERE id = '" + id_domain + "'")
        conectionBrain.commit()
        # SECURITY NOTE(review): `domain` is interpolated into a shell command;
        # sanitize it or use subprocess with a list argv instead of os.system.
        os.system('rm -f /etc/bind/pri.' + domain)
        generate.all('all', conectionBrain)
        log.write(_('Delete Domain ') + str(domain))

    @staticmethod
    def activate_domain(translate, output, id_domain, conectionBrain, log):
        """Re-enable a zone (status=1) and regenerate its configuration."""
        _ = translate
        cursor = conectionBrain.cursor()
        cursor.execute("UPDATE dns SET status = '1' WHERE id = '" + str(id_domain) + "'")
        conectionBrain.commit()
        generate.all(id_domain, conectionBrain)
        log.write(_('Activate domain'))
"[email protected]"
]
| |
8ab0a708d2affc01c58ab81049ab025d4170ea28 | b2eeda79302d04e1391ffb40fdd83de53f6b54ef | /app/core/tests/test_admin.py | d6be5a1dee56e35f6c79c2c5dd801dd406eb4d04 | [
"MIT"
]
| permissive | Memo2704/recipe-app-api | 7c7286140ab17d5cf83d93216ffb74de9a3b19c2 | 91b441065547991a8027cbe1496aaf5795d56a49 | refs/heads/main | 2023-05-04T08:19:07.306969 | 2021-05-24T16:56:11 | 2021-05-24T16:56:11 | 334,730,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Tests that the custom user model is usable from the Django admin."""

    def setUp(self):
        # Client logged in as a superuser, plus a regular user to look up.
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='[email protected]',
            password='password123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='[email protected]',
            password='password123',
            name='test'
        )

    def test_user_listed(self):
        """Test that users are listed on user page"""
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        url = reverse('admin:core_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)

    def test_create_user_page(self):
        """Test that the create user page works"""
        url = reverse('admin:core_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
| [
"[email protected]"
]
| |
14ee1cfd9991911f379fd0efca9e20b82d78db54 | 65e3bd4e1f155402c1a00198dacde4ee8668dd51 | /networks/cvaegan_conv.py | 1978466ff79211f8132c104fba720a598dcd453f | []
| no_license | shijy07/deep-learning-project | b3bc1c80d0f1f967f7155e468e105d4836146599 | 27f1ce6deaa3ed0ac2ac869bdde6a8f05dffa0df | refs/heads/master | 2020-04-09T06:10:21.064728 | 2016-12-15T01:19:17 | 2016-12-15T01:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,646 | py | import numpy as np
import tensorflow as tf
# from ops import *
slim = tf.contrib.slim
class Encoder:
def __init__(self, hidden_dim = 100):
self.hidden_dim = hidden_dim
def __call__(self, x, y, z_dim, batch_size=100, x_width=28):
"""
The probabilistic encoder which computes the mean and the log
of variance of z drawn from the Gaussian distribution q(z|images).
Inputs:
images: A batch of images.
z_dim: The dimension of hidden variable z.
Outputs:
A batch of means and log of the variances of z, each corresponding
to a single image in images.
"""
#self.hidden_dims = [self.hidden_dim] * 2
net = tf.reshape(x, [batch_size, x_width, x_width, -1])
net = slim.conv2d(net, 20, [5,5], stride=1)
net = slim.max_pool2d(net, [2,2], stride=2)
net = slim.conv2d(net, 50, [5,5], stride=1)
net = slim.max_pool2d(net, [2,2], stride=2)
net = tf.reshape(net, [batch_size, -1])
net = tf.concat(1, (net,y))
net = slim.fully_connected(net, 500)
#for i in xrange(len(self.hidden_dims)):
#net = slim.fully_connected(net, self.hidden_dims[i], scope='enc_fc{}'.format(i), activation_fn=tf.nn.softplus)
z_mean = slim.fully_connected(net, z_dim, scope='z_mean', activation_fn=None)
z_log_sigma_sq = slim.fully_connected(net, z_dim, scope='z_log_sigma', activation_fn=None)
"""
h0 = tf.nn.softplus(linear(x, self.hidden_dim, scope = 'en_fc0'))
h1 = tf.nn.softplus(linear(h0, self.hidden_dim, scope = 'en_fc1'))
z_mean = linear(h1, z_dim, scope = 'z_mean')
z_log_sigma_sq = linear(h1, z_dim, scope = 'z_log_sigma')
"""
return (z_mean, z_log_sigma_sq)
class Decoder:
    """Convolutional decoder p(x | z) for the CVAE-GAN."""

    def __init__(self, hidden_dim=100):
        # Width of an alternative fully connected stack (currently unused).
        self.hidden_dim = hidden_dim

    def __call__(self, z, x_dim, batch_size=100, reuse=None):
        """Build the decoder graph.

        Args:
            z: latent codes, shape (batch_size, z_dim).
            x_dim: number of pixels in one flattened output image.
            batch_size: number of samples per batch.
            reuse: forwarded to every layer so variables can be shared
                between multiple decoder instantiations.

        Returns:
            x_mean: Bernoulli means of p(x|z), shape (batch_size, x_dim).
        """
        h = z
        h = slim.fully_connected(h, 500, reuse=reuse, scope='dec_fc0')
        h = slim.fully_connected(h, 7 * 7 * 50, reuse=reuse, scope='dec_fc1')
        h = tf.reshape(h, [batch_size, 7, 7, 50])
        # Upsample with nearest neighbour, then refine with transposed convs.
        h = tf.image.resize_nearest_neighbor(h, [14, 14])
        h = slim.conv2d_transpose(h, 20, [5, 5], stride=1, reuse=reuse, scope='dec_conv0')
        h = tf.image.resize_nearest_neighbor(h, [28, 28])
        h = slim.conv2d_transpose(h, 1, [5, 5], stride=1, reuse=reuse, scope='dec_conv1')
        h = tf.reshape(h, [batch_size, -1])
        h = slim.fully_connected(h, 512, reuse=reuse, scope='dec_fc2')
        x_mean = slim.fully_connected(h, x_dim, scope='x_mean', activation_fn=tf.nn.sigmoid, reuse=reuse)
        return x_mean
class Discriminator:
    """Convolutional real/fake discriminator for the CVAE-GAN."""

    def __init__(self, hidden_dim=100):
        # Width of an alternative fully connected stack (currently unused).
        self.hidden_dim = hidden_dim

    def __call__(self, x, batch_size=100, reuse=None, x_width=28):
        """Build the discriminator graph.

        Args:
            x: flattened images, shape (batch_size, x_width * x_width).
            batch_size: number of images per batch.
            reuse: forwarded to every layer for variable sharing.
            x_width: side length of the square input images.

        Returns:
            p: per-image probability of being real, shape (batch_size, 1).
        """
        h = tf.reshape(x, [batch_size, x_width, x_width, -1])
        h = slim.conv2d(h, 20, [5, 5], stride=1, reuse=reuse, scope='dis_conv0')
        h = slim.max_pool2d(h, [2, 2], stride=2)
        h = slim.conv2d(h, 50, [5, 5], stride=1, reuse=reuse, scope='dis_conv1')
        h = slim.max_pool2d(h, [2, 2], stride=2)
        h = slim.conv2d(h, 100, [5, 5], stride=1, reuse=reuse, scope='dis_conv2')
        h = tf.reshape(h, [batch_size, -1])
        h = slim.fully_connected(h, 500, reuse=reuse, scope='dis_fc0')
        p = slim.fully_connected(h, 1, scope='p', activation_fn=tf.nn.sigmoid, reuse=reuse)
        return p
| [
"[email protected]"
]
| |
54320cc144accbbc19a2366c523173264961565a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02922/s040859855.py | c93462013926489db291dd42664757224e2579ba | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import sys
import math
from collections import deque
def input():
    """Read one line from stdin with trailing whitespace stripped.

    Deliberately shadows the builtin: ``sys.stdin.readline`` is faster for
    competitive-programming style bulk input.
    """
    line = sys.stdin.readline()
    return line.rstrip()
def ceil_div(numerator, denominator):
    """Integer ceiling division; exact for arbitrarily large ints."""
    return -(-numerator // denominator)


def main():
    """Read A and B from stdin and print ceil((B - 1) / (A - 1)).

    Uses pure integer arithmetic instead of ``math.ceil`` on a float
    division, which silently loses precision once B - 1 exceeds 2**53.
    """
    A, B = map(int, input().split())
    a = ceil_div(B - 1, A - 1)
    print(a)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
85bb81e5e17872fc46fd3611bd88d10b73e77db0 | 297d426d5519c669b210e82d4aff479a51949e52 | /routines/flat_frames.py | 9cb1d822913380b9855f37540a8e5a5c7d5178d6 | []
| no_license | PulkitMalhotra15/Lunar-Eclipse-Analysis | 4722d6779bad00f107a553dccd4335051dde570b | d07f6b36eec79fea1f10e3129d8c6b72bc669521 | refs/heads/master | 2020-08-21T22:27:14.922373 | 2019-10-19T21:09:11 | 2019-10-19T21:09:11 | 216,261,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 31 11:23:59 2018
@author: sid
"""
import numpy as np
import astropy.io.fits as fit
import glob
# Directory that holds the raw flat-field frames for each filter.
wdir = '/home/atom/2018_07_27 TLE Jaisalmer/2018_07_28 TLE Jaisalmer/dark frames/'
# One file list per filter: blue, infrared, H-alpha, H-beta.
lists = [[],[],[],[]]
lists[0] += glob.glob(wdir+'flat_bl*.fit')
lists[1] += glob.glob(wdir+'flat_IR*.fit')
lists[2] += glob.glob(wdir+'flat_HA*.fit')
lists[3] += glob.glob(wdir+'flat_HB*.fit')
# Filter tags used to name the combined output files.
flats=['bl','IR','HA','HB']
for i in range(len(lists)):
    # Accumulator sized to the sensor frame (rows, cols) used here.
    flat = np.zeros((1335,2003))
    for j in range(len(lists[i])):
        data=fit.open(lists[i][j])
        hdr=data[0].header
        img=data[0].data
        # Frames are summed, not averaged.
        # NOTE(review): if a normalized master flat is expected downstream,
        # divide by len(lists[i]) -- confirm.
        flat += img
    # Write the combined frame, reusing the header of the last input file.
    # NOTE(review): 'hdr' is undefined if a filter matched no files.
    fit.writeto('/home/atom/2018_07_27 TLE Jaisalmer/Analysis/images/dark_and_flat/'
                +'flat_'+flats[i]+'.fit',flat,header=hdr)
| [
"[email protected]"
]
| |
41aab887f5b4c35a78397323e316aa412cbfc975 | da934e0010380fdc6894063540f61b0ebc2c9ded | /vendor/lockfile/lockfile/linklockfile.py | f8aeaefcfc16578a51a1d2fb4c86a762e01c4047 | [
"Apache-2.0",
"MIT"
]
| permissive | bopopescu/cc-2 | ed4f1dfe3c98f476ff619058d99855a16272d36b | 37444fb16b36743c439b0d6c3cac2347e0cc0a94 | refs/heads/master | 2022-11-23T03:57:12.255817 | 2014-10-02T06:10:46 | 2014-10-02T06:10:46 | 282,512,589 | 0 | 0 | Apache-2.0 | 2020-07-25T19:36:05 | 2020-07-25T19:36:05 | null | UTF-8 | Python | false | false | 2,419 | py | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
    """Lock access to a file using the atomic property of link(2).

    A zero-length "unique" file is created, then hard-linked to the shared
    lock file name; link(2) fails atomically when the target already
    exists, which makes this safe across processes.

    >>> lock = LinkLockFile('somefile')
    >>> lock = LinkLockFile('somefile', threaded=False)
    """

    def acquire(self, timeout=None):
        """Acquire the lock, polling until it is free.

        timeout semantics:
          * None -- wait indefinitely.
          * <= 0 -- raise AlreadyLocked once the (immediate) deadline passes.
          * > 0  -- poll until the deadline, then raise LockTimeout.
        """
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)

        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a hard link to it.  (A stray Python-2-only
            # debug ``print`` statement was removed here: it broke the
            # module on Python 3 and spammed stdout in library code.)
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Someone else holds the lock; honour the timeout.
                    if timeout is not None and time.time() > end_time:
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout
                        else:
                            raise AlreadyLocked
                    # Poll at timeout/10 seconds, falling back to 0.1 s when
                    # the timeout is None or zero (same values as the old
                    # ``cond and a or b`` idiom produced).
                    time.sleep(timeout / 10 if timeout else 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        """Release the lock; raises if this instance does not hold it."""
        if not self.is_locked():
            raise NotLocked
        elif not os.path.exists(self.unique_name):
            raise NotMyLock
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)

    def is_locked(self):
        """True if anyone (including us) currently holds the lock."""
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        """True if this instance is the current lock holder."""
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)

    def break_lock(self):
        """Forcibly remove the lock file regardless of who owns it."""
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
"[email protected]"
]
| |
7eb9e41beacc32274f19363e57b7522cb3378335 | a59d1faced9fe7348ca7143d2a8643e0ebad2132 | /pyvisdk/do/invalid_profile_reference_host.py | 8adf31269e62041bb94e93f6a596abce09a3a869 | [
"MIT"
]
| permissive | Infinidat/pyvisdk | c55d0e363131a8f35d2b0e6faa3294c191dba964 | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | refs/heads/master | 2023-05-27T08:19:12.439645 | 2014-07-20T11:49:16 | 2014-07-20T11:49:16 | 4,072,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
# Module-level logger (not referenced by the factory function below).
log = logging.getLogger(__name__)
def InvalidProfileReferenceHost(vim, *args, **kwargs):
    '''Build an InvalidProfileReferenceHost fault object.

    This fault is thrown when a valid host is not associated with a profile
    in the Virtual Center inventory -- either because no host is associated
    with the profile or because the associated host is incompatible with
    the profile.

    :param vim: live vim connection; its suds factory creates the object.
    :param args: positional values for the required properties, in the
        order listed in ``required`` below.
    :param kwargs: required/optional properties supplied by name.
    :raises IndexError: if fewer than the 7 required values are supplied.
    :raises InvalidArgumentError: if an unknown keyword is supplied.
    '''
    obj = vim.client.factory.create('{urn:vim25}InvalidProfileReferenceHost')

    # Validate that every required property was supplied (positionally or by
    # keyword).  The old message claimed "at least 8" although only 7
    # properties are required, and it reported len(args) while ignoring
    # kwargs in the count.
    supplied = len(args) + len(kwargs)
    if supplied < 7:
        raise IndexError('Expected at least 7 arguments got: %d' % supplied)

    required = ['host', 'profile', 'reason', 'dynamicProperty', 'dynamicType',
                'faultCause', 'faultMessage']
    optional = []

    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| [
"[email protected]"
]
| |
1bcb62c73a7864c830cfa531b0bf446869c0fc9e | 867cf8bec4365efd0d26729220dc84181792a35b | /snickerdoodle/lib/facebook/chat.py | 5656ab353f1a0579433c0411a8f1c944a368068e | []
| no_license | zach-taylor/snickerdoodle | 40ff50b908b54e867741a5c74889be2be1f24c07 | 03ae87cffa5e18c8d0100a3d7ac249ab3582cdea | refs/heads/master | 2016-09-11T08:10:12.200233 | 2014-05-01T23:11:17 | 2014-05-01T23:11:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | import sys
import logging
import threading
import sleekxmpp
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
# Logging verbosity for the XMPP client.  Any standard ``logging`` level
# works: CRITICAL, ERROR, WARNING, INFO, DEBUG or NOTSET.
LOG_LEVEL = logging.WARNING

# Facebook's public XMPP chat endpoint as a (host, port) tuple.
FACEBOOK_SERVER = ('chat.facebook.com', 5222)
def invite_friends(access_token, api_key, sender, mesg, receivers):
    """Broadcast the same Facebook Chat message to every receiver.

    Requires an access token and an application API key, plus the sender
    id, the message body and a list of receiver ids.  One fire-and-forget
    thread is started per receiver.
    """
    common_args = (access_token, api_key, sender, mesg)
    for receiver in receivers:
        worker = threading.Thread(target=send_message,
                                  args=common_args + (receiver,))
        # Fire-and-forget: the threads are never joined.
        worker.start()
def send_message(access_token, api_key, sender, mesg, receiver):
    """Deliver one chat message from ``sender`` to ``receiver`` over XMPP.

    Facebook access token and application API key are attached to the
    client's SASL credentials before connecting.
    """
    logging.basicConfig(level=LOG_LEVEL,
                        format='%(levelname)-8s %(message)s')

    client = FacebookChat(sender, receiver, mesg)
    client.credentials['api_key'] = api_key
    client.credentials['access_token'] = access_token

    if client.connect(FACEBOOK_SERVER):
        client.process(block=True)
class FacebookChat(sleekxmpp.ClientXMPP):
    """Minimal one-shot XMPP client: log in, send one chat message, quit."""

    def __init__(self, sender, receiver, mesg):
        sleekxmpp.ClientXMPP.__init__(self,
                                      '-{0}@chat.facebook.com'.format(sender),
                                      None,
                                      sasl_mech='X-FACEBOOK-PLATFORM')
        self.mesg = mesg
        self.receiver = '-{0}@chat.facebook.com'.format(receiver)

        # Service discovery, data forms, pubsub and ping plugins.
        for plugin in ('xep_0030', 'xep_0004', 'xep_0060', 'xep_0199'):
            self.register_plugin(plugin)

        self.add_event_handler("session_start", self.start)

    def start(self, event):
        """Session-start callback: announce presence, deliver, disconnect."""
        self.send_presence()
        self.send_message(mto=self.receiver,
                          mbody=self.mesg,
                          mtype='chat')
        self.disconnect(wait=True)
| [
"[email protected]"
]
| |
799bce63cfda70f9ffcd943ab5ca1d08e854a9e0 | 3d59cb4aba11ba06d466de5b3294e26b5c2c06df | /lib/battle/atomic_tasks.py | 02fe5cd5cce235e11fa87c9e3206e2ce23092698 | []
| no_license | tsx1453/FGO_AutoScript | 1a38881ab148cd0ff878a6374cb2d44e9b7da8d9 | a469142dd2a30a5aa86fd8a0296e560e828cf246 | refs/heads/master | 2023-04-03T14:01:16.447575 | 2021-04-15T09:13:45 | 2021-04-15T09:13:45 | 332,457,565 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,689 | py | import random
import time
import numpy
import lib.battle.global_status as global_status
import lib.battle.interfaces as interfaces
from lib.base import cv_util, logger, resources, ui_util
def wait_for(template, step=3, threshold=0.9, max_count=5, extra_resources=None):
    """Poll fresh screenshots until ``template`` matches.

    Returns early if any template in ``extra_resources`` matches instead.
    Sleeps ``step`` seconds between captures and gives up (with a log
    entry) once more than ``max_count`` polls have elapsed.
    """
    extras = [] if extra_resources is None else extra_resources
    attempts = 0
    while not ui_util.get_new_capture_and_match(template, threshold=threshold, remove_after_match=True):
        for extra in extras:
            if ui_util.get_new_capture_and_match(extra, threshold=threshold, remove_after_match=True):
                return
        time.sleep(step)
        attempts = attempts + 1
        if attempts > max_count:
            logger.log(
                "wait for {} has run for {} times, break out".format(logger.resource_path_to_name(template), attempts))
            break
class SimpleMatchAndClickTask(interfaces.Task):
    """Task that fires whenever ``template`` is visible and simply clicks it."""

    def __init__(self, template):
        super().__init__()
        # Image resource this task looks for and clicks.
        self.template = template

    def accept(self, capture):
        """Accept whenever the template appears in the capture."""
        return cv_util.has_match(self.template, capture)

    def on_execute(self, capture):
        """Click the matched template and drop the cached match."""
        ui_util.match_and_click(self.template, delete_after_match=True)
class SelectLastRunTask(SimpleMatchAndClickTask):
    """Click the quest entry flagged as the most recently run one."""

    def __init__(self):
        super().__init__(resources.battle_last_run_flag)
class RandomSelectFriendTask(interfaces.Task):
    """On the support-selection screen, pick the default friend entry."""

    def accept(self, capture):
        """Only relevant outside battle, when the friend list title shows."""
        if global_status.is_in_battle:
            return False
        return cv_util.has_match(resources.battle_friend_select_title, capture)

    def on_execute(self, capture):
        """Click a plain (non-special) support list entry."""
        ui_util.match_and_click(resources.battle_friend_select_normal_item)
class ClickStartBattleTask(SimpleMatchAndClickTask):
    """Click the quest start button and reset all per-battle global state."""

    def __init__(self):
        super().__init__(resources.battle_start_button)

    def execute(self, capture):
        super().execute(capture)
        logger.log(
            "click start battle task ,before reset status ,skill history is {}".format(global_status.skill_history))
        # Reset per-battle bookkeeping for the new run.  Rebinding
        # skill_history to a fresh dict already empties it, so the old
        # redundant ``skill_history.clear()`` call right after it was
        # dropped.  skill_position_cache is kept across battles (its reset
        # line was deliberately commented out upstream).
        global_status.is_in_battle = True
        global_status.turn_num = 1
        global_status.skill_history = {}
        global_status.hougu_status = [False, False, False]
        global_status.loop_number = global_status.loop_number + 1
        logger.log(
            "click start battle task ,after reset status ,skill history is {}".format(global_status.skill_history))
        logger.log_with_echo("start new battle {}".format(global_status.loop_number))
class BattleUpdateGlobalStatusTask(interfaces.Task):
    """Keep global battle state (in-battle flag, current turn) in sync."""

    def accept(self, capture):
        """Active whenever the attack button is visible (loose threshold)."""
        return cv_util.has_match(resources.battle_attack_button, capture, match_threshold=0.7)

    def on_execute(self, capture):
        global_status.is_in_battle = True
        turn_markers = [
            ([resources.battle_turn_1_of_3, resources.battle_turn_1_of_2, resources.battle_turn_1_of_1], 1),
            ([resources.battle_turn_2_of_3, resources.battle_turn_2_of_2], 2),
            ([resources.battle_turn_3_of_3], 3),
        ]
        # The first matching marker decides the turn; stop looking after it.
        for marker_group, turn_value in turn_markers:
            for marker in marker_group:
                if cv_util.has_match(marker, capture):
                    previous = global_status.turn_num
                    if previous != turn_value:
                        global_status.turn_num = turn_value
                        logger.log_with_echo("update turn num from {} to {}".format(previous, turn_value))
                    return
class BattleClickSkillTask(interfaces.Task):
    """Click one or more servant skills located by an image template.

    ``skill_index`` holds 1-based slots (1..3) of the skills to press in the
    row matched by ``template``.  Usage is tracked per template in
    ``global_status.skill_history`` so each skill fires at most once per
    battle; the matched screen position is memoised in
    ``global_status.skill_position_cache``.
    """

    def __init__(self, template, skill_index, turn_num=-1, accept_extra=None):
        super().__init__()
        self.template = template          # image locating the skill row
        self.skill_index = skill_index    # 1-based skill slots to press
        self.turn_num = turn_num          # restrict to this turn; -1 = any
        self.accept_extra = accept_extra  # optional extra predicate(capture)

    def accept(self, capture):
        """Accept only in battle, optionally gated on turn and extra check."""
        ac = global_status.is_in_battle
        if self.turn_num != -1:
            logger.log("skill check turn, target is {} and current is {}".format(self.turn_num,
                                                                                 global_status.turn_num))
            ac = ac and (self.turn_num == global_status.turn_num)
        if self.accept_extra is not None:
            ac = ac and self.accept_extra(capture)
        logger.log("skill task accept result is {}".format(ac))
        return ac

    def on_execute(self, capture):
        # Skip entirely if every requested skill was already used.
        skill_used = global_status.skill_history.get(self.template, {})
        logger.log("skill history for {} is {}".format(logger.resource_path_to_name(self.template), skill_used))
        can_use_skill = False
        for skill in self.skill_index:
            if not skill_used.get(skill, False):
                can_use_skill = True
                break
        if not can_use_skill:
            return
        wait_for(resources.battle_attack_button)
        # Resolve the on-screen position of the skill row (cached per template).
        skill_position = global_status.skill_position_cache.get(self.template, ())
        if len(skill_position) == 0:
            logger.log("not find cached skill position, start match in capture")
            local_count = 0
            while local_count < 5:
                local_count = local_count + 1
                capture = ui_util.get_new_capture()
                max_val, tl_loc, br_loc = cv_util.match_template(self.template, capture)
                if max_val > 0.6:
                    left, top, right, bottom = tl_loc[0], tl_loc[1], br_loc[0], br_loc[1]
                    skill_position = (left, top, right, bottom)
                    global_status.skill_position_cache[self.template] = skill_position
                    logger.log("find skill template ,position is {}".format(skill_position))
                    break
                else:
                    wait_for(self.template)
        if len(skill_position) == 0:
            logger.log("not find skill position, return")
            return
        # The row is split into three equal-width skill buttons; click the
        # centre of each requested slot with a small random jitter.
        left, top, right, bottom = skill_position[0], skill_position[1], skill_position[2], skill_position[3]
        width = right - left
        item_width = width / 3
        center_y = (top + bottom) / 2
        for skill in self.skill_index:
            if not skill_used.get(skill, False):
                wait_for(resources.battle_attack_button)
                x = left + item_width * (skill - 1) + item_width / 2
                random_x = random.Random().randrange(start=-10, stop=10)
                random_y = random.Random().randrange(start=-10, stop=10)
                logger.log_with_echo(
                    "click skill {}, original position is ({} ,{}), random_delta = ({}, {})".format(
                        logger.resource_path_to_name(self.template), x, center_y, random_x, random_y))
                ui_util.click(x + random_x, center_y + random_y)
                # Re-read the history before marking, then persist the use.
                skill_used = global_status.skill_history.get(self.template, {})
                skill_used[skill] = True
                global_status.skill_history[self.template] = skill_used
                time.sleep(random.randrange(2, 3))
class BattleClickAttackTask(SimpleMatchAndClickTask):
    """Press the attack button, then pause so the card screen can appear."""

    def __init__(self):
        super().__init__(resources.battle_attack_button)

    def execute(self, capture):
        super().execute(capture)
        # Give the command-card selection screen time to slide in.
        time.sleep(random.randrange(2, 5))
class BattleSelectActionCardTask(interfaces.Task):
    """Pick command cards once the card-selection screen is shown.

    ``special_templates`` maps a turn number to an ordered list of card
    templates to prefer on that turn (-1 acts as a fallback for any turn).
    Without special templates, a charged noble phantasm (hougu) card is
    clicked first, then one card by colour priority (Buster > Arts > Quick).
    """

    def __init__(self, special_templates=None):
        super().__init__()
        if special_templates is None:
            special_templates = {}
        self.special_templates = special_templates

    def accept(self, capture):
        # The speed-setting button only appears on the card-selection screen.
        return cv_util.has_match(resources.battle_speed_set_button, capture)

    def on_execute(self, capture):
        if len(self.special_templates) > 0:
            # Prefer the per-turn card list; fall back to the -1 entry.
            target_card = self.special_templates.get(global_status.turn_num, [])
            if len(target_card) == 0:
                target_card = self.special_templates.get(-1, [])
            if len(target_card) > 0:
                for card in target_card:
                    # Retry each card match at most twice.
                    local_count = 0
                    while local_count < 2:
                        local_count = local_count + 1
                        if ui_util.match_and_click(card, delete_after_match=True):
                            time.sleep(random.randrange(1, 3))
                            logger.log_with_echo("selected card {}".format(logger.resource_path_to_name(card)))
                            break
        else:
            for index in range(3):
                if global_status.hougu_status[index]:
                    # Coordinates obtained the same way as the NP check:
                    # measured by hand on the target device.
                    top, bottom = 170, 300
                    item_width = 100
                    left = 96 * (index + 1) + 100 * index
                    right = left + item_width
                    x = random.randrange(left, right)
                    y = random.randrange(top, bottom)
                    ui_util.click(x, y)
                    global_status.hougu_status[index] = False
                    time.sleep(random.randrange(0, 2))
                    logger.log_with_echo("select hougo card at {}".format(index))
                    break
            # Fall through to colour-priority card selection.
            card_queue = [resources.action_buster, resources.action_arts, resources.action_quick]
            for card in card_queue:
                if ui_util.match_and_click(card, delete_after_match=True):
                    logger.log_with_echo("selected card {}".format(logger.resource_path_to_name(card)))
                    time.sleep(random.randrange(1, 3))
                    break
class BattleSettleJiBanTask(SimpleMatchAndClickTask):
    """Dismiss the bond-points (jiban) result screen after a battle."""

    def __init__(self):
        super().__init__(resources.battle_jiban_title)

    def execute(self, capture):
        super().execute(capture)
        # Wait for the "tap to continue" hint before handing control back.
        wait_for(resources.battle_click_ui_tip)
class BattleSettleExpTask(SimpleMatchAndClickTask):
    """Dismiss the master-EXP result screen after a battle."""

    def __init__(self):
        super().__init__(resources.battle_exp_title)

    def execute(self, capture):
        super().execute(capture)
        # Wait for the "tap to continue" hint before handing control back.
        wait_for(resources.battle_click_ui_tip)
class BattleSettleConfirmTask(SimpleMatchAndClickTask):
    """Confirm the final result screen and mark the battle as finished."""

    def __init__(self):
        super().__init__(resources.battle_finish_next_step_button)

    def on_execute(self, capture):
        super().on_execute(capture)
        global_status.is_in_battle = False
        logger.log_with_echo("battle {} finish".format(global_status.loop_number))
        # Persist per-battle statistics (turns taken, apples eaten).
        logger.save_battle_info(global_status.turn_num, global_status.apple_eat_count)
class BattleQuickStartTask(SimpleMatchAndClickTask):
    """Click the continue button to immediately repeat the quest."""

    def __init__(self):
        super().__init__(resources.battle_go_on_button)
class EatAppleTask(interfaces.Task):
    """Consume a silver apple to refill AP when the recovery dialog shows."""

    def accept(self, capture):
        return not global_status.is_in_battle and cv_util.has_match(resources.item_sliver_apple, capture)

    def on_execute(self, capture):
        ui_util.match_and_click(resources.item_sliver_apple)
        wait_for(resources.apple_eat_dialog_confirm_button, step=1)
        ui_util.match_and_click(resources.apple_eat_dialog_confirm_button)
        # Track consumption for the end-of-run statistics.
        global_status.apple_eat_count = global_status.apple_eat_count + 1
        logger.log_with_echo("eat apple, total is {}".format(global_status.apple_eat_count))
        time.sleep(random.randrange(1, 3))
class UsedSkillClickDialogTask(interfaces.Task):
    """Close the "skill already used" confirmation dialog mid-battle."""

    def accept(self, capture_path):
        return global_status.is_in_battle and cv_util.has_match(resources.skill_used_click_dialog_title, capture_path)

    def on_execute(self, capture):
        ui_util.match_and_click(resources.cancel_button)
        # Block until the battle screen is interactive again.
        wait_for(resources.battle_attack_button, step=1)
class ServantClickedStatusDialogTask(interfaces.Task):
    """Close the servant status dialog opened by an accidental tap."""

    def accept(self, capture_path):
        return global_status.is_in_battle and cv_util.has_match(resources.servant_selected_ststus_title, capture_path)

    def on_execute(self, capture):
        ui_util.match_and_click(resources.close_button)
        # Block until the battle screen is interactive again.
        wait_for(resources.battle_attack_button, step=1)
class ServantCountOverflowDialogTask(interfaces.Task):
    """Stop automation when the 'inventory full / release space' dialog shows."""

    def accept(self, capture_path):
        return cv_util.has_match(resources.need_release_space_buttons, capture_path)

    def on_execute(self, capture):
        logger.log_with_echo("need clear space, auto battle count is {}".format(global_status.loop_number))
        # Bubble up so the main loop terminates instead of looping forever.
        raise StopBattleException()
class NpCheckTask(interfaces.Task):
    """Detect which of the three servants has a fully charged noble phantasm.

    Results are written into ``global_status.hougu_status`` (one bool per
    servant slot).
    """

    def accept(self, capture):
        return global_status.is_in_battle and cv_util.has_match(resources.battle_attack_button, capture)

    def on_execute(self, capture):
        img = cv_util.read_img(capture)
        has_ready = False
        for i in range(3):
            # Always evaluate check() for every slot.  The old
            # ``has_ready = has_ready or self.check(img, i)`` short-circuited
            # once one slot was ready, leaving the later slots' status stale.
            is_ready = self.check(img, i)
            has_ready = has_ready or is_ready
        # If the first pass found nothing, take a fresh screenshot and
        # check all three slots once more.
        if not has_ready:
            img = cv_util.read_img(ui_util.get_new_capture())
            for i in range(3):
                self.check(img, i)
        logger.log_with_echo("check hougo result is {}".format(global_status.hougu_status))

    # Detection approach: crop the NP gauge strip and compare its mean pixel
    # value against a threshold -- an unfilled gauge stays dark (near 0)
    # while a full gauge brightens (towards 255).
    def check(self, capture, index):
        # Threshold found empirically; works in practice.
        threshold = 40
        top = 518
        bottom = 524
        np_bar_width = 86
        left = 96 * (index + 1) + 100 * index
        right = left + np_bar_width
        crop = capture[top:bottom, left:right]
        result = numpy.mean(crop) > threshold
        logger.log("check np at {}, result is {}".format(index, result))
        global_status.hougu_status[index] = result
        return result
class StopBattleException(Exception):
    """Raised to abort the automation loop (e.g. servant inventory is full)."""
    pass
| [
"[email protected]"
]
| |
5501cf2b8e723b983e33f4eb719ece9c00156a65 | 87bdadf1219617b55ca257d617c5258772236e47 | /main.py | e960cee270ee89cd7d902eae0d6ca5e077433dd9 | []
| no_license | haixia-luo/Kaldi-master | 8a268824b16b80a2c83f3979f9b2baba2a057399 | 3716282d4ce716b4a51c45574edd2b3c2bcec393 | refs/heads/master | 2020-12-07T11:05:24.505516 | 2018-09-27T17:08:49 | 2018-09-27T17:08:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,839 | py | import os
import h5py
import librosa
import numpy as np
from extractors import VoiceActivityDetector
from extractors import KaldiMFCCFeatureExtraction
from extractors import KaldiXVectorExtraction
from utils import Utils
from utils import write_wav
from plda.classifier import Classifier
def wav_to_vad(wav_file, vad_file, sr=8000):
    """Run voice-activity detection on ``wav_file`` and write only the
    detected speech samples to ``vad_file`` at sample rate ``sr``."""
    samples, rate = librosa.load(wav_file, sr=sr)
    detector = VoiceActivityDetector()
    write_wav(vad_file, detector.get_speech(samples), rate)
def get_mfcc_from_file(file_name, mfcc_dir=None, config_name="vbdiar.yml"):
    """Extract Kaldi MFCC features for a single audio file.

    Args:
        file_name: path to the input audio file.
        mfcc_dir: optional directory; when given, it is created if missing
            and the features are also dumped to ``<mfcc_dir>/<basename>.h5``
            under the ``mfcc`` dataset.
        config_name: YAML configuration file with an ``MFCC`` section.

    Returns:
        The MFCC feature matrix produced by the Kaldi extractor.

    Raises:
        ValueError: if the configured MFCC config file does not exist.
    """
    if mfcc_dir is not None:
        if not os.path.isdir(mfcc_dir):
            os.mkdir(mfcc_dir)
    config = Utils.read_config(config_name)
    config_mfcc = config['MFCC']
    config_path = os.path.abspath(config_mfcc['config_path'])
    if not os.path.isfile(config_path):
        raise ValueError('Path to MFCC configuration `{}` not found.'.format(config_path))
    features_extractor = KaldiMFCCFeatureExtraction(
        config_path=config_path, apply_cmvn_sliding=config_mfcc['apply_cmvn_sliding'],
        norm_vars=config_mfcc['norm_vars'], center=config_mfcc['center'], cmn_window=config_mfcc['cmn_window'])
    # (A dead ``pp = os.path.abspath(file_name)`` assignment was removed, and
    # the local previously named ``tempfile`` -- shadowing the stdlib module
    # name -- was renamed.)
    temp_path, features = features_extractor.audio2features(os.path.abspath(file_name))
    os.remove(temp_path)
    if mfcc_dir is not None:
        hf = h5py.File(os.path.join(mfcc_dir, "{}.{}".format(file_name.split('/')[-1], "h5")), 'w')
        hf.create_dataset('mfcc', data=features)
        hf.close()
    return features
def get_embeddings_from_features(features, config_name="vbdiar.yml"):
    """Compute a Kaldi x-vector embedding for one MFCC feature matrix."""
    config = Utils.read_config(config_name)
    extractor_cfg = config['EmbeddingExtractor']
    embedding_extractor = KaldiXVectorExtraction(
        nnet=os.path.abspath(extractor_cfg['nnet']), use_gpu=extractor_cfg['use_gpu'],
        min_chunk_size=extractor_cfg['min_chunk_size'],
        chunk_size=extractor_cfg['chunk_size'],
        cache_capacity=extractor_cfg['cache_capacity'])
    # The extractor expects a dict of utterance-id -> features.
    feature_map = {1: features}
    emb_file, feature_file, embeddings = embedding_extractor.features2embeddings(feature_map)
    # Clean up the temporary files the extractor leaves behind.
    os.remove(emb_file.name)
    os.remove(feature_file.name)
    return embeddings['1']
def get_mfcc(file_path):
    """Slice an audio file into 3-second windows with 50% overlap and
    return a list of 23-coefficient MFCC matrices (time x coeff), one per
    window; the final, possibly partial window is zero-padded to 3 s."""
    signal, rate = librosa.load(file_path, sr=16000)
    window = rate * 3
    hop = int(rate * 3 / 2)
    features = []
    while signal.shape[0] >= window:
        chunk = signal[:window]
        features.append(librosa.feature.mfcc(y=chunk, sr=rate, n_mfcc=23).T)
        signal = signal[hop:]
    padded = np.zeros(window, dtype=np.float32)
    padded[:signal.shape[0]] = signal
    features.append(librosa.feature.mfcc(y=padded, sr=rate, n_mfcc=23).T)
    return features
def compute_eer(probabilities, target, n=10000):
    """Scan ``n`` thresholds between the min and max score and return the
    operating point where false-rejection and false-acceptance rates are
    closest (the equal error rate).

    Args:
        probabilities: 1-D array of scores.
        target: 1-D array of labels; 0 marks spoof, non-zero marks human.
        n: number of threshold candidates to evaluate.

    Returns:
        ((n_eer, eer), frr, far, thresholds) where ``n_eer`` indexes the
        EER threshold in ``thresholds`` and ``frr``/``far`` hold the full
        rate curves over all candidates.
    """
    thresholds = np.linspace(probabilities.min(), probabilities.max(), n)
    human_score = probabilities[np.logical_not(target == 0)]
    spoof_score = probabilities[target == 0]
    n_human = human_score.size
    n_spoof = spoof_score.size
    frr = np.empty(n)
    far = np.empty(n)
    n_eer = n // 2
    eer = 1.0
    min_gap = np.inf
    for idx, threshold in enumerate(thresholds):
        frr[idx] = np.count_nonzero(human_score >= threshold) / n_human
        far[idx] = np.count_nonzero(spoof_score < threshold) / n_spoof
        gap = np.abs(far[idx] - frr[idx])
        if gap < min_gap:
            # Track the threshold with the smallest |FAR - FRR| gap.
            min_gap = gap
            n_eer = idx
            eer = (far[idx] + frr[idx]) / 2
    return (n_eer, eer), frr, far, thresholds
if __name__ == '__main__':
    # The triple-quoted block below is disabled code that extracted x-vectors
    # for the dev set and cached them under embs/dev as HDF5 files.
    """
    files = [line.rstrip('\n') for line in open("ASVspoof2017_dev.trl.txt")]
    for file in files:
        file=file.split(' ')[0]
        mfccs = get_mfcc(os.path.join("../ASVspoof2017_dev",file))
        emb=[]
        for mfcc in mfccs:
            emb.append(get_embeddings_from_features(mfcc, config_name="vbdiar.yml"))
        xv=np.array(emb)
        xv=np.mean(xv,axis=0)
        hf = h5py.File(os.path.join("embs/dev","{}.{}".format(file,"h5")))
        hf.create_dataset('xv',data=xv)
        hf.close()
        print(file)
    """
    classifier=Classifier()
    # Load cached training x-vectors; label 0 = spoof, 1 = genuine.
    train_data=[]
    train_label=[]
    files = [line.rstrip('\n') for line in open("ASVspoof2017_train.trn.txt")]
    for file in files:
        # Protocol line layout: <file> <label> <M-field> <S-field> ...
        file_sp=file.split(' ')
        file_name=file_sp[0]
        file_label=file_sp[1]
        file_M=file_sp[2]
        file_S=file_sp[3]
        hf=h5py.File(os.path.join("embs/train","{}.{}".format(file_name,"h5")),'r')
        xv=np.array(hf.get("xv"))
        hf.close()
        train_data.append(xv)
        if(file_label=="spoof"):
            train_label.append(0)
        if (file_label == "genuine"):
            train_label.append(1)
    train_label=np.array(train_label)
    train_data=np.array(train_data)
    # Load cached dev-set x-vectors the same way for evaluation.
    test_data=[]
    test_label=[]
    files = [line.rstrip('\n') for line in open("ASVspoof2017_dev.trl.txt")]
    for file in files:
        file_sp = file.split(' ')
        file_name = file_sp[0]
        file_label = file_sp[1]
        file_M = file_sp[2]
        file_S = file_sp[3]
        hf = h5py.File(os.path.join("embs/dev", "{}.{}".format(file_name, "h5")), 'r')
        xv = np.array(hf.get("xv"))
        hf.close()
        test_data.append(xv)
        if (file_label == "spoof"):
            test_label.append(0)
        if (file_label == "genuine"):
            test_label.append(1)
    test_label = np.array(test_label)
    test_data = np.array(test_data)
    # Fit the PLDA classifier and report accuracy plus equal error rate.
    classifier.fit_model(train_data,train_label,n_principal_components=90)
    predictions, log_p_predictions = classifier.predict(test_data)
    (n_eer, eer), frr, far, proba_space=compute_eer(log_p_predictions,test_label)
    print('Accuracy: {}'.format((test_label == predictions).mean()))
    print('Eer {}'.format(eer))
    pass
| [
"[email protected]"
]
| |
498b161763e04089ca2bc69b627c2c265422a62b | e23b28fc3ed196866a04af4e790c1c16b1b5183e | /django/portfolio/apps/portfolio_app/urls.py | 73a949e99ec0e9b82a53e892a13c8fb1443a2aa5 | []
| no_license | diazmc/Python | 6f47e7fcfb8c263eb154d59a5a9b3866e2c9d6a8 | 89e3d54eeb2b0ed7dc7af24103ace6fb6e45d65e | refs/heads/master | 2021-01-20T01:18:23.954877 | 2017-08-24T10:39:19 | 2017-08-24T10:39:19 | 101,283,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.conf.urls import url
from . import views
# Routes: the portfolio landing page and the testimonials page.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^testimonials$', views.testimonial)
]
| [
"[email protected]"
]
| |
06759d38b9ed7938096cb3cc98d884003725ef74 | 9febc42ff885c49d657fbc5c5f03de231b080431 | /pymoo/cpp/hypervolume/setup.py | 331cd97de2d43c5d77ee8a1cf29f5370f4992628 | [
"MIT"
]
| permissive | yashvesikar/pymoo | 06a6bec9dc55610d150c77700d7c93ea63ed7643 | 8ce725671d95df580654568fa9bc0e53268aff5d | refs/heads/master | 2020-03-18T22:35:59.638447 | 2018-06-20T17:18:06 | 2018-06-20T17:18:06 | 135,353,905 | 0 | 0 | MIT | 2018-05-29T21:16:52 | 2018-05-29T21:16:52 | null | UTF-8 | Python | false | false | 2,743 | py | import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
    """A sources-less setuptools Extension whose build is delegated to CMake.

    ``sourcedir`` points at the directory containing the CMakeLists.txt.
    """

    def __init__(self, name, sourcedir=''):
        Extension.__init__(self, name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext command that configures and builds CMakeExtension targets."""

    def run(self):
        # CMake must be on PATH before any extension is attempted.
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError("CMake must be installed to build the following extensions: " +
                               ", ".join(e.name for e in self.extensions))

        if platform.system() == "Windows":
            cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")

        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        # Place the built library where setuptools expects the extension.
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]

        cfg = 'Debug' if self.debug else 'Release'
        build_args = ['--config', cfg]

        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2**32:
                # 64-bit Python: request a 64-bit MSVC build.
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j2']

        env = os.environ.copy()
        # Embed the package version into the compiled module via CXXFLAGS.
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
                                                              self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name='hypervolume',
version='0.0.1',
author='Yash Vesikar',
author_email='yashvesikar at gmail.com',
description='A C++/Python implementation of hypervolume',
long_description="""n>3 dimensional hypervolume implementation using pybind. Hypervolume implementation adapted from:
https://ls11-www.cs.tu-dortmund.de/people/beume/publications/hoy.cpp""",
ext_modules=[CMakeExtension('hypervolume')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
)
| [
"[email protected]"
]
| |
c451c1854b8bfd9dc2aa1c81ff03ee27356279ce | 7822e004b9697e451a9345589a411133ca12d74e | /scripts/createGradientImage.py | 54fb723a814d6d173509a46a8a6458d07aa24bec | []
| no_license | tomwright01/SLOAntsRegistration | 0e6335feff3f97e59728fdca0f174165df582f4a | 5ff0eb100d40604feae62500c5b8e6cd07c00017 | refs/heads/master | 2021-01-04T14:14:11.212043 | 2014-12-09T20:39:12 | 2014-12-09T20:39:12 | 26,826,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import subprocess
import logging
import os
def main(dims,outputName,inputName,Sigma,antsPath):
"""Use the ANTs ImageMath to create a gradient image"""
imPath = os.path.join(antsPath,'ImageMath')
cmd = '{0} {1} {2} Grad {3} {4}'.format(imPath,dims,outputName,inputName,Sigma)
logging.info('Creating Gradient Image with command:')
logging.info('=======================')
logging.info(cmd)
logging.info('=======================')
try:
subprocess.check_call(cmd,shell=True,executable='/bin/bash')
return True
except subprocess.CalledProcessError:
return False
| [
"[email protected]"
]
| |
f6a87a9dedd704b40464a5040ddb2d851e703ba9 | a9b31181ad6f695a2809018167a52a6d9847c0df | /Chap05-funcoes-frutiferas/compara.py | 2de8e1459dc0075b09a80469e8aaee81d6d62fa9 | []
| no_license | frclasso/Aprendendo_computacao_com_Python | 21cdecdebcdbafad35a48d8425d06e4ec2ba1259 | 40276f396c90d25b301e15e855942a607efd895b | refs/heads/master | 2020-03-12T17:38:04.886153 | 2018-10-11T14:17:13 | 2018-10-11T14:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | #!/usr/bin/env python3
def compara(x, y):
if x > y:
return 1
elif x == y:
return 0
else:
return -1
print(compara(1,2))
print(compara(3,2))
print(compara(3,3)) | [
"[email protected]"
]
| |
8266ad2518e69a29b2cf165e690045bfa65ee2c4 | 343ab5dde66ab0314acdc339c08973e62283631c | /accounts/migrations/0006_auto_20200523_1655.py | f17986d7b6f890f687623a26572c9ee9aa04b13f | []
| no_license | pdolawat654/Hospital_Management | 45e400a9376aa3302ca500d20910859429417c5b | e9ee6fe9c6f6a659f69f0a70aaef875c73a8baf7 | refs/heads/master | 2022-10-18T19:08:37.741706 | 2020-06-12T12:35:42 | 2020-06-12T12:35:42 | 271,786,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.0.5 on 2020-05-23 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20200523_1655'),
]
operations = [
migrations.AlterField(
model_name='doctor',
name='verify',
field=models.CharField(default=0, max_length=1),
),
]
| [
"[email protected]"
]
| |
1d7fcddad197b9c1e5b50b8573b0b569e645370a | 35a1593fbd15c8ef1a20971055774a1cdcd41bce | /test/test_rpc_fork.py | 5e2432f60183e5c5213ef1772931d6b7939ae669 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | sahils2607/pytorch | 884a2da2a613b525522a1615c8c5ecef013e4fb1 | 16454095e09eab2e737d99ad569cd44bb7910f03 | refs/heads/master | 2020-08-07T15:13:53.319183 | 2019-10-07T21:22:42 | 2019-10-07T21:24:34 | 213,499,886 | 1 | 0 | NOASSERTION | 2019-10-07T22:39:43 | 2019-10-07T22:39:43 | null | UTF-8 | Python | false | false | 421 | py | #!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
from rpc_test import RpcTest
from common_distributed import MultiProcessTestCase
from common_utils import run_tests
class RpcTestWithFork(MultiProcessTestCase, RpcTest):
def setUp(self):
super(RpcTestWithFork, self).setUp()
self._fork_processes()
if __name__ == '__main__':
run_tests()
| [
"[email protected]"
]
| |
47e730aa8714ba1f0019d356f40e5f2765ec0b97 | 1ea0e2b4f064ba0de45a73c527ee89a36771e8fc | /tests/network/test_thing.py | a2add8475e00ce4592fce819a3454561158e1a66 | [
"BSD-2-Clause"
]
| permissive | atlassian/sentry | 6775e59c317f20f96982e91c2b3c88c02ecbb56b | b937615079d7b24dc225a83b99b1b65da932fc66 | refs/heads/master | 2023-08-27T15:45:47.699173 | 2017-09-18T22:14:55 | 2017-09-18T22:14:55 | 103,999,066 | 1 | 5 | BSD-3-Clause | 2023-04-01T07:49:37 | 2017-09-18T22:38:18 | Python | UTF-8 | Python | false | false | 250 | py | from __future__ import absolute_import
from sentry.constants import INTEGRATION_ID_TO_PLATFORM_DATA
from sentry.testutils import TestCase
class ThingTest(TestCase):
def test_thing(self):
assert len(INTEGRATION_ID_TO_PLATFORM_DATA) > 0
| [
"[email protected]"
]
| |
ad3c565c18ba4ee178c358eeda393a0b5f706bbd | 7df5cd1ba6296913a8bcb0650573dd28cfa66e93 | /emtennis/events/migrations/0004_auto_20190812_1555.py | 00cd2eaad9b997a82079bc6b667fda45f4007d07 | []
| no_license | Seiph59/EM_Tennis_P13 | 95da990962cfd290ec109ec0c77a0541b83d5927 | 889054b749d5f4403cd1282eb3972bacdc64d9e3 | refs/heads/master | 2023-04-28T01:16:33.496653 | 2021-06-07T19:23:22 | 2021-06-07T19:23:22 | 196,996,991 | 0 | 0 | null | 2023-04-21T20:36:31 | 2019-07-15T12:44:42 | JavaScript | UTF-8 | Python | false | false | 378 | py | # Generated by Django 2.2.3 on 2019-08-12 13:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0003_registration_registered'),
]
operations = [
migrations.RenameField(
model_name='registration',
old_name='amount_paid',
new_name='amount',
),
]
| [
"[email protected]"
]
| |
e6b0e6837166020928a9bfbdf5bc302fa4f86ad8 | 7dfa21d74dae975082c6d5deaa01248bac1dcc26 | /.circleci/cimodel/data/pytorch_build_data.py | 09476a970b40045f3d53a7de2f01f11f71d683ae | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
]
| permissive | mruberry/pytorch | 88cf536ed58d20a409c1e5119be4ec04ec960082 | 19f73180cfb39eb67110d2a1d541975a49211453 | refs/heads/master | 2022-02-03T16:25:31.070089 | 2019-04-22T17:52:28 | 2019-04-22T17:58:15 | 130,132,886 | 4 | 1 | NOASSERTION | 2020-01-16T16:51:39 | 2018-04-18T23:24:38 | C++ | UTF-8 | Python | false | false | 3,956 | py | #!/usr/bin/env python3
from cimodel.lib.conf_tree import ConfigNode, X
CONFIG_TREE_DATA = [
("trusty", [
(None, [
X("2.7.9"),
X("2.7"),
X("3.5"),
X("nightly"),
]),
("gcc", [
("4.8", [X("3.6")]),
("5.4", [("3.6", [X(False), X(True)])]),
("7", [X("3.6")]),
]),
]),
("xenial", [
("clang", [
("5", [X("3.6")]),
]),
("cuda", [
("8", [X("3.6")]),
("9", [
# Note there are magic strings here
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L21
# and
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L143
# and
# https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
X("2.7"),
X("3.6"),
]),
("9.2", [X("3.6")]),
("10", [X("3.6")]),
]),
("android", [
("r19c", [X("3.6")]),
]),
]),
]
def get_major_pyver(dotted_version):
parts = dotted_version.split(".")
return "py" + parts[0]
class TreeConfigNode(ConfigNode):
def __init__(self, parent, node_name, subtree):
super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
self.subtree = subtree
self.init2(node_name)
def modify_label(self, label):
return label
def init2(self, node_name):
pass
def get_children(self):
return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]
class TopLevelNode(TreeConfigNode):
def __init__(self, node_name, subtree):
super(TopLevelNode, self).__init__(None, node_name, subtree)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return DistroConfigNode
class DistroConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["distro_name"] = node_name
def child_constructor(self):
distro = self.find_prop("distro_name")
next_nodes = {
"trusty": TrustyCompilerConfigNode,
"xenial": XenialCompilerConfigNode,
}
return next_nodes[distro]
class TrustyCompilerConfigNode(TreeConfigNode):
def modify_label(self, label):
return label or "<unspecified>"
def init2(self, node_name):
self.props["compiler_name"] = node_name
def child_constructor(self):
return TrustyCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
class TrustyCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
class PyVerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["pyver"] = node_name
self.props["abbreviated_pyver"] = get_major_pyver(node_name)
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XlaConfigNode
class XlaConfigNode(TreeConfigNode):
def modify_label(self, label):
return "XLA=" + str(label)
def init2(self, node_name):
self.props["is_xla"] = node_name
class XenialCompilerConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_name"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return XenialCompilerVersionConfigNode
class XenialCompilerVersionConfigNode(TreeConfigNode):
def init2(self, node_name):
self.props["compiler_version"] = node_name
# noinspection PyMethodMayBeStatic
def child_constructor(self):
return PyVerConfigNode
| [
"[email protected]"
]
| |
15383f8452b8b4d90bb772acf97713ec41c5f3f8 | 4eedda8375df447499348cce1279861c911fc50e | /SConstruct | 0886f2e08662bead22ad1be592e2445a59e92d09 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
]
| permissive | DS1SQM/OPKR086_test4 | ee488a8fdd040ea761e746d758f003acf07cccc5 | db60403de9fd5a80580b75fd852aea538d9fd83a | refs/heads/main | 2023-06-16T19:55:11.583400 | 2021-07-14T05:27:41 | 2021-07-14T05:27:41 | 386,856,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,247 | import os
import shutil
import subprocess
import sys
import sysconfig
import platform
import numpy as np
TICI = os.path.isfile('/TICI')
Decider('MD5-timestamp')
AddOption('--test',
action='store_true',
help='build test files')
AddOption('--setup',
action='store_true',
help='build setup and installer files')
AddOption('--kaitai',
action='store_true',
help='Regenerate kaitai struct parsers')
AddOption('--asan',
action='store_true',
help='turn on ASAN')
AddOption('--ubsan',
action='store_true',
help='turn on UBSan')
AddOption('--clazy',
action='store_true',
help='build with clazy')
AddOption('--compile_db',
action='store_true',
help='build clang compilation database')
AddOption('--mpc-generate',
action='store_true',
help='regenerates the mpc sources')
AddOption('--snpe',
action='store_true',
help='use SNPE on PC')
AddOption('--external-sconscript',
action='store',
metavar='FILE',
dest='external_sconscript',
help='add an external SConscript to the build')
real_arch = arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
if platform.system() == "Darwin":
arch = "Darwin"
if arch == "aarch64" and TICI:
arch = "larch64"
USE_WEBCAM = os.getenv("USE_WEBCAM") is not None
lenv = {
"PATH": os.environ['PATH'],
}
if arch == "aarch64" or arch == "larch64":
lenv["LD_LIBRARY_PATH"] = '/data/data/com.termux/files/usr/lib'
if arch == "aarch64":
# android
lenv["ANDROID_DATA"] = os.environ['ANDROID_DATA']
lenv["ANDROID_ROOT"] = os.environ['ANDROID_ROOT']
cpppath = [
"#phonelibs/opencl/include",
]
libpath = [
"/usr/local/lib",
"/usr/lib",
"/system/vendor/lib64",
"/system/comma/usr/lib",
"#phonelibs/nanovg",
]
if arch == "larch64":
libpath += [
"#phonelibs/snpe/larch64",
"#phonelibs/libyuv/larch64/lib",
"/usr/lib/aarch64-linux-gnu"
]
cpppath += [
"#selfdrive/camerad/include",
]
cflags = ["-DQCOM2", "-mcpu=cortex-a57"]
cxxflags = ["-DQCOM2", "-mcpu=cortex-a57"]
rpath = ["/usr/local/lib"]
else:
libpath += [
"#phonelibs/snpe/aarch64",
"#phonelibs/libyuv/lib",
"/system/vendor/lib64"
]
cflags = ["-DQCOM", "-D_USING_LIBCXX", "-mcpu=cortex-a57"]
cxxflags = ["-DQCOM", "-D_USING_LIBCXX", "-mcpu=cortex-a57"]
rpath = []
else:
cflags = []
cxxflags = []
cpppath = []
if arch == "Darwin":
yuv_dir = "mac" if real_arch != "arm64" else "mac_arm64"
libpath = [
f"#phonelibs/libyuv/{yuv_dir}/lib",
"/usr/local/lib",
"/opt/homebrew/lib",
"/usr/local/opt/openssl/lib",
"/opt/homebrew/opt/openssl/lib",
"/System/Library/Frameworks/OpenGL.framework/Libraries",
]
cflags += ["-DGL_SILENCE_DEPRECATION"]
cxxflags += ["-DGL_SILENCE_DEPRECATION"]
cpppath += [
"/opt/homebrew/include",
"/usr/local/opt/openssl/include",
"/opt/homebrew/opt/openssl/include"
]
else:
libpath = [
"#phonelibs/snpe/x86_64-linux-clang",
"#phonelibs/libyuv/x64/lib",
"#phonelibs/mapbox-gl-native-qt/x86_64",
"#cereal",
"#selfdrive/common",
"/usr/lib",
"/usr/local/lib",
]
rpath = [
"phonelibs/snpe/x86_64-linux-clang",
"cereal",
"selfdrive/common"
]
# allows shared libraries to work globally
rpath = [os.path.join(os.getcwd(), x) for x in rpath]
if GetOption('asan'):
ccflags = ["-fsanitize=address", "-fno-omit-frame-pointer"]
ldflags = ["-fsanitize=address"]
elif GetOption('ubsan'):
ccflags = ["-fsanitize=undefined"]
ldflags = ["-fsanitize=undefined"]
else:
ccflags = []
ldflags = []
# no --as-needed on mac linker
if arch != "Darwin":
ldflags += ["-Wl,--as-needed"]
# change pythonpath to this
lenv["PYTHONPATH"] = Dir("#").path
env = Environment(
ENV=lenv,
CCFLAGS=[
"-g",
"-fPIC",
"-O2",
"-Wunused",
"-Werror",
"-Wno-unknown-warning-option",
"-Wno-deprecated-register",
"-Wno-register",
"-Wno-inconsistent-missing-override",
"-Wno-c99-designator",
"-Wno-reorder-init-list",
] + cflags + ccflags,
CPPPATH=cpppath + [
"#",
"#phonelibs/catch2/include",
"#phonelibs/bzip2",
"#phonelibs/libyuv/include",
"#phonelibs/openmax/include",
"#phonelibs/json11",
"#phonelibs/curl/include",
"#phonelibs/libgralloc/include",
"#phonelibs/android_frameworks_native/include",
"#phonelibs/android_hardware_libhardware/include",
"#phonelibs/android_system_core/include",
"#phonelibs/linux/include",
"#phonelibs/snpe/include",
"#phonelibs/mapbox-gl-native-qt/include",
"#phonelibs/nanovg",
"#phonelibs/qrcode",
"#phonelibs",
"#cereal",
"#opendbc/can",
],
CC='clang',
CXX='clang++',
LINKFLAGS=ldflags,
RPATH=rpath,
CFLAGS=["-std=gnu11"] + cflags,
CXXFLAGS=["-std=c++1z"] + cxxflags,
LIBPATH=libpath + [
"#cereal",
"#phonelibs",
"#opendbc/can",
"#selfdrive/boardd",
"#selfdrive/common",
],
CYTHONCFILESUFFIX=".cpp",
COMPILATIONDB_USE_ABSPATH=True,
tools=["default", "cython", "compilation_db"],
)
if GetOption('compile_db'):
env.CompilationDatabase('compile_commands.json')
# Setup cache dir
cache_dir = '/data/scons_cache' if TICI else '/tmp/scons_cache'
CacheDir(cache_dir)
Clean(["."], cache_dir)
node_interval = 5
node_count = 0
def progress_function(node):
global node_count
node_count += node_interval
sys.stderr.write("progress: %d\n" % node_count)
if os.environ.get('SCONS_PROGRESS'):
Progress(progress_function, interval=node_interval)
SHARED = False
def abspath(x):
if arch == 'aarch64':
pth = os.path.join("/data/pythonpath", x[0].path)
env.Depends(pth, x)
return File(pth)
else:
# rpath works elsewhere
return x[0].path.rsplit("/", 1)[1][:-3]
# Cython build enviroment
py_include = sysconfig.get_paths()['include']
envCython = env.Clone()
envCython["CPPPATH"] += [py_include, np.get_include()]
envCython["CCFLAGS"] += ["-Wno-#warnings", "-Wno-deprecated-declarations"]
envCython["LIBS"] = []
if arch == "Darwin":
envCython["LINKFLAGS"] = ["-bundle", "-undefined", "dynamic_lookup"]
elif arch == "aarch64":
envCython["LINKFLAGS"] = ["-shared"]
envCython["LIBS"] = [os.path.basename(py_include)]
else:
envCython["LINKFLAGS"] = ["-pthread", "-shared"]
Export('envCython')
# Qt build environment
qt_env = env.Clone()
qt_modules = ["Widgets", "Gui", "Core", "Network", "Concurrent", "Multimedia", "Quick", "Qml", "QuickWidgets", "Location", "Positioning"]
if arch != "aarch64":
qt_modules += ["DBus"]
qt_libs = []
if arch == "Darwin":
if real_arch == "arm64":
qt_env['QTDIR'] = "/opt/homebrew/opt/qt@5"
else:
qt_env['QTDIR'] = "/usr/local/opt/qt@5"
qt_dirs = [
os.path.join(qt_env['QTDIR'], "include"),
]
qt_dirs += [f"{qt_env['QTDIR']}/include/Qt{m}" for m in qt_modules]
qt_env["LINKFLAGS"] += ["-F" + os.path.join(qt_env['QTDIR'], "lib")]
qt_env["FRAMEWORKS"] += [f"Qt{m}" for m in qt_modules] + ["OpenGL"]
elif arch == "aarch64":
qt_env['QTDIR'] = "/system/comma/usr"
qt_dirs = [
f"/system/comma/usr/include/qt",
]
qt_dirs += [f"/system/comma/usr/include/qt/Qt{m}" for m in qt_modules]
qt_libs = [f"Qt5{m}" for m in qt_modules]
qt_libs += ['EGL', 'GLESv3', 'c++_shared']
else:
qt_env['QTDIR'] = "/usr"
qt_dirs = [
f"/usr/include/{real_arch}-linux-gnu/qt5",
f"/usr/include/{real_arch}-linux-gnu/qt5/QtGui/5.12.8/QtGui",
]
qt_dirs += [f"/usr/include/{real_arch}-linux-gnu/qt5/Qt{m}" for m in qt_modules]
qt_libs = [f"Qt5{m}" for m in qt_modules]
if arch == "larch64":
qt_libs += ["GLESv2", "wayland-client"]
elif arch != "Darwin":
qt_libs += ["GL"]
qt_env.Tool('qt')
qt_env['CPPPATH'] += qt_dirs + ["#selfdrive/ui/qt/"]
qt_flags = [
"-D_REENTRANT",
"-DQT_NO_DEBUG",
"-DQT_WIDGETS_LIB",
"-DQT_GUI_LIB",
"-DQT_QUICK_LIB",
"-DQT_QUICKWIDGETS_LIB",
"-DQT_QML_LIB",
"-DQT_CORE_LIB",
"-DQT_MESSAGELOGCONTEXT",
]
qt_env['CXXFLAGS'] += qt_flags
qt_env['LIBPATH'] += ['#selfdrive/ui']
qt_env['LIBS'] = qt_libs
if GetOption("clazy"):
checks = [
"level0",
"level1",
"no-range-loop",
"no-non-pod-global-static",
]
qt_env['CXX'] = 'clazy'
qt_env['ENV']['CLAZY_IGNORE_DIRS'] = qt_dirs[0]
qt_env['ENV']['CLAZY_CHECKS'] = ','.join(checks)
Export('env', 'qt_env', 'arch', 'real_arch', 'SHARED', 'USE_WEBCAM')
# cereal and messaging are shared with the system
SConscript(['cereal/SConscript'])
if SHARED:
cereal = abspath([File('cereal/libcereal_shared.so')])
messaging = abspath([File('cereal/libmessaging_shared.so')])
else:
cereal = [File('#cereal/libcereal.a')]
messaging = [File('#cereal/libmessaging.a')]
visionipc = [File('#cereal/libvisionipc.a')]
Export('cereal', 'messaging')
SConscript(['selfdrive/common/SConscript'])
Import('_common', '_gpucommon', '_gpu_libs')
if SHARED:
common, gpucommon = abspath(common), abspath(gpucommon)
else:
common = [_common, 'json11']
gpucommon = [_gpucommon] + _gpu_libs
Export('common', 'gpucommon', 'visionipc')
# Build rednose library and ekf models
rednose_config = {
'generated_folder': '#selfdrive/locationd/models/generated',
'to_build': {
'live': ('#selfdrive/locationd/models/live_kf.py', True, ['live_kf_constants.h']),
'car': ('#selfdrive/locationd/models/car_kf.py', True, []),
},
}
if arch != "aarch64":
rednose_config['to_build'].update({
'gnss': ('#selfdrive/locationd/models/gnss_kf.py', True, []),
'loc_4': ('#selfdrive/locationd/models/loc_kf.py', True, []),
'pos_computer_4': ('#rednose/helpers/lst_sq_computer.py', False, []),
'pos_computer_5': ('#rednose/helpers/lst_sq_computer.py', False, []),
'feature_handler_5': ('#rednose/helpers/feature_handler.py', False, []),
'lane': ('#xx/pipeline/lib/ekf/lane_kf.py', True, []),
})
Export('rednose_config')
SConscript(['rednose/SConscript'])
# Build openpilot
SConscript(['cereal/SConscript'])
SConscript(['panda/board/SConscript'])
SConscript(['opendbc/can/SConscript'])
SConscript(['phonelibs/SConscript'])
SConscript(['common/SConscript'])
SConscript(['common/kalman/SConscript'])
SConscript(['common/transformations/SConscript'])
SConscript(['selfdrive/camerad/SConscript'])
SConscript(['selfdrive/modeld/SConscript'])
SConscript(['selfdrive/controls/lib/cluster/SConscript'])
SConscript(['selfdrive/controls/lib/lateral_mpc/SConscript'])
SConscript(['selfdrive/controls/lib/longitudinal_mpc/SConscript'])
SConscript(['selfdrive/controls/lib/longitudinal_mpc_model/SConscript'])
SConscript(['selfdrive/boardd/SConscript'])
SConscript(['selfdrive/proclogd/SConscript'])
SConscript(['selfdrive/clocksd/SConscript'])
SConscript(['selfdrive/loggerd/SConscript'])
SConscript(['selfdrive/locationd/SConscript'])
SConscript(['selfdrive/sensord/SConscript'])
SConscript(['selfdrive/ui/SConscript'])
if arch != "Darwin":
SConscript(['selfdrive/logcatd/SConscript'])
external_sconscript = GetOption('external_sconscript')
if external_sconscript:
SConscript([external_sconscript])
| [
""
]
| ||
586c1ef6d174572c3418ab7a6b3d55977d2db9c7 | fa7d56df0d8819fb0a6aa8fccae2e552459122e8 | /data_analysis/概率分布_验证型数据分析.py | e63a747487ff7bc5ab86018122c964c705227e0f | []
| no_license | yelangweiwei/blogApp | 356aca72159db1ee9aff64e47b3dc0995f5f0325 | d1afa6de7260597975074aea604878a0a68e0278 | refs/heads/master | 2021-06-19T16:47:10.286136 | 2021-02-26T07:48:20 | 2021-02-26T07:48:20 | 176,499,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | import numpy as py
import pandas as pd
from scipy.stats.stats import ttest_1samp,ttest_ind
from statsmodels.stats.weightstats import ztest
def check_z():
#假设检验:
#构造平均值是175,标准差是5,样本量是100,
x = py.random.normal(175,5,100).round(1)
# print(x)
#使用Z检验pval
z,pavel =ztest(x,value=175)
#直接返回pavel,用pvael判断零假设是都成立
print(pavel)
def check_t():
x = py.random.normal(175,5,100).round(1)
t,pval =ttest_1samp(x,popmean=175)
print(pval)
def check_tt():
x1 = py.random.normal(175,5,100).round(1)
x2 = py.random.normal(175,5,100).round(1)
t,pval =ttest_ind(x1,x2)
print(pval)
def check_datasets_by_iris():
iris = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',header =None)
iris.columns = ['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm','Species']
sample_iris_data = iris.sample(10)
print(sample_iris_data)
# data_mean = py.mean(iris['SepalLengthCm'])
data_mean = py.mean(iris['SepalLengthCm'])
print('data_mean:',data_mean)
# # z检验
# z, pval = ztest(sample_iris_data['SepalLengthCm'],value=7)
# print(pval)
#
# #t
# t,pval = ttest_1samp(sample_iris_data['SepalLengthCm'],popmean=7)
# print(pval)
#tt检验
iris1 = iris[iris.Species =='Iris-setosa']
iris2 = iris[iris.Species=='Iris-virginica']
t,pval = ttest_ind(iris1['SepalLengthCm'],iris2['SepalLengthCm'])
iris1_mean = py.mean(iris1['SepalLengthCm'])
print(iris1_mean)
iris2_mean = py.mean(iris2['SepalLengthCm'])
print(iris2_mean)
print('pval',pval)
if __name__ == '__main__':
check_datasets_by_iris() | [
"[email protected]"
]
| |
3472469d1a6567b5c42751cad45681f14a096b86 | b9bc60cca34c6b4f8a750af6062f357f18dfcae2 | /tensorflow/contrib/ndlstm/python/lstm2d.py | 3907046ddad48c43fe12f40301240acae3703489 | [
"Apache-2.0"
]
| permissive | lidenghui1110/tensorflow-0.12.0-fpga | 7c96753aafab5fe79d5d0c500a0bae1251a3d21b | f536d3d0b91f7f07f8e4a3978d362cd21bad832c | refs/heads/master | 2022-11-20T11:42:11.461490 | 2017-07-28T09:28:37 | 2017-07-28T09:28:37 | 98,633,565 | 3 | 2 | Apache-2.0 | 2022-11-15T05:22:07 | 2017-07-28T09:29:01 | C++ | UTF-8 | Python | false | false | 5,639 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A small library of functions dealing with LSTMs applied to images.
Tensors in this library generally have the shape (num_images, height, width,
depth).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.ndlstm.python import lstm1d
def _shape(tensor):
"""Get the shape of a tensor as an int list."""
return tensor.get_shape().as_list()
def images_to_sequence(tensor):
"""Convert a batch of images into a batch of sequences.
Args:
tensor: a (num_images, height, width, depth) tensor
Returns:
(width, num_images*height, depth) sequence tensor
"""
num_image_batches, height, width, depth = _shape(tensor)
transposed = tf.transpose(tensor, [2, 0, 1, 3])
return tf.reshape(transposed, [width, num_image_batches * height, depth])
def sequence_to_images(tensor, num_image_batches):
"""Convert a batch of sequences into a batch of images.
Args:
tensor: (num_steps, num_batches, depth) sequence tensor
num_image_batches: the number of image batches
Returns:
(num_images, height, width, depth) tensor
"""
width, num_batches, depth = _shape(tensor)
height = num_batches // num_image_batches
reshaped = tf.reshape(tensor, [width, num_image_batches, height, depth])
return tf.transpose(reshaped, [1, 2, 0, 3])
def horizontal_lstm(images, num_filters_out, scope=None):
"""Run an LSTM bidirectionally over all the rows of each image.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output depth
scope: optional scope name
Returns:
(num_images, height, width, num_filters_out) tensor, where
num_steps is width and new num_batches is num_image_batches * height
"""
with tf.variable_scope(scope, "HorizontalLstm", [images]):
batch_size, _, _, _ = _shape(images)
sequence = images_to_sequence(images)
with tf.variable_scope("lr"):
hidden_sequence_lr = lstm1d.ndlstm_base(sequence, num_filters_out // 2)
with tf.variable_scope("rl"):
hidden_sequence_rl = (
lstm1d.ndlstm_base(sequence,
num_filters_out - num_filters_out // 2,
reverse=1))
output_sequence = tf.concat(2, [hidden_sequence_lr, hidden_sequence_rl])
output = sequence_to_images(output_sequence, batch_size)
return output
def separable_lstm(images, num_filters_out, nhidden=None, scope=None):
"""Run bidirectional LSTMs first horizontally then vertically.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
nhidden: hidden layer depth
scope: optional scope name
Returns:
(num_images, height, width, num_filters_out) tensor
"""
with tf.variable_scope(scope, "SeparableLstm", [images]):
if nhidden is None:
nhidden = num_filters_out
hidden = horizontal_lstm(images, nhidden)
with tf.variable_scope("vertical"):
transposed = tf.transpose(hidden, [0, 2, 1, 3])
output_transposed = horizontal_lstm(transposed, num_filters_out)
output = tf.transpose(output_transposed, [0, 2, 1, 3])
return output
def reduce_to_sequence(images, num_filters_out, scope=None):
"""Reduce an image to a sequence by scanning an LSTM vertically.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
scope: optional scope name
Returns:
A (width, num_images, num_filters_out) sequence.
"""
with tf.variable_scope(scope, "ReduceToSequence", [images]):
batch_size, height, width, depth = _shape(images)
transposed = tf.transpose(images, [1, 0, 2, 3])
reshaped = tf.reshape(transposed, [height, batch_size * width, depth])
reduced = lstm1d.sequence_to_final(reshaped, num_filters_out)
output = tf.reshape(reduced, [batch_size, width, num_filters_out])
return output
def reduce_to_final(images, num_filters_out, nhidden=None, scope=None):
"""Reduce an image to a final state by running two LSTMs.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
nhidden: hidden layer depth (defaults to num_filters_out)
scope: optional scope name
Returns:
A (num_images, num_filters_out) batch.
"""
with tf.variable_scope(scope, "ReduceToFinal", [images]):
nhidden = nhidden or num_filters_out
batch_size, height, width, depth = _shape(images)
transposed = tf.transpose(images, [1, 0, 2, 3])
reshaped = tf.reshape(transposed, [height, batch_size * width, depth])
with tf.variable_scope("reduce1"):
reduced = lstm1d.sequence_to_final(reshaped, nhidden)
transposed_hidden = tf.reshape(reduced, [batch_size, width, nhidden])
hidden = tf.transpose(transposed_hidden, [1, 0, 2])
with tf.variable_scope("reduce2"):
output = lstm1d.sequence_to_final(hidden, num_filters_out)
return output
| [
"[email protected]"
]
| |
71dafe2db4bc761973d6704dc92903b815a5d803 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/channel/channel_metadata/tests/test__parse_video_quality_mode.py | 1d27462067dbc38950831d7cb97ceae62bdabb9d | [
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 594 | py | import vampytest
from ..preinstanced import VideoQualityMode
from ..fields import parse_video_quality_mode
def test__parse_video_quality_mode():
"""
Tests whether ``parse_video_quality_mode`` works as intended.
"""
for input_data, expected_output in (
({}, VideoQualityMode.auto),
({'video_quality_mode': VideoQualityMode.auto.value}, VideoQualityMode.auto),
({'video_quality_mode': VideoQualityMode.full.value}, VideoQualityMode.full),
):
output = parse_video_quality_mode(input_data)
vampytest.assert_eq(output, expected_output)
| [
"[email protected]"
]
| |
9d2646bc3e31926b3990a17fa22fc0cf04de9f50 | f667c8f2f5585e1cd505d0b2a486e4550c12b0a6 | /venv/src/handler/SeafileHandler.py | c22ecfb62e6a11b8e1f8f7ddd95b334653fb4ac3 | []
| no_license | coderlfy/seafileapi | 5f5ed5651a68d20cfa196b5d613432c67b39d3ce | 8cbfaea99147a2c34f2fb031bc2647c661e06a5f | refs/heads/master | 2020-03-21T07:07:14.384814 | 2018-07-07T10:02:31 | 2018-07-07T10:02:31 | 138,261,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | #-*- coding: utf-8 -*-
import tornado.web
import entity.Result
import bll.Seafile
import HandlerBase
class AddFile(tornado.web.RequestHandler):
def post(self):
filename = HandlerBase.HandlerBase.getparam(self, 'filename')
if filename.success:
self.write(bll.Seafile.Seafile.add(filename.value))
else:
self.write(HandlerBase.HandlerBase.getjson(filename))
class GetFile(tornado.web.RequestHandler):
def get(self):
fileid = HandlerBase.HandlerBase.getparam(self, 'fileid')
if fileid.success:
self.write(bll.Seafile.Seafile.get(fileid.value))
else:
self.write(HandlerBase.HandlerBase.getjson(fileid))
class UploadLink(tornado.web.RequestHandler):
def get(self):
token = HandlerBase.HandlerBase.getparam(self, 'token')
if token.success:
self.write(bll.Seafile.Seafile.getuploadlink(token.value))
else:
self.write(HandlerBase.HandlerBase.getjson(token))
class DeleteFile(tornado.web.RequestHandler):
def post(self):
fileid = HandlerBase.HandlerBase.getparam(self, 'fileid')
if fileid.success:
self.write(bll.Seafile.Seafile.delete(fileid.value))
else:
self.write(HandlerBase.HandlerBase.getjson(fileid))
# 只保留n个月的缓存
class ClearFile(tornado.web.RequestHandler):
def post(self):
self.write('ClearFile!') | [
"[email protected]"
]
| |
794b89eb787626fad6961b510e3b80ab272f873e | be5ed8f7efb9cb0856ad1246e26991a47e683916 | /np-learn/xiaoyu.py | 880398b097b3d0a0987fb492435b8a23ecea6c7d | []
| no_license | qbmzc/numpy-pandas | c74dedead9e680b5e12cd71d4bfb4d5cff5a9faf | 114bd4ddb8388878a5c246d20aaddc9aa2991842 | refs/heads/master | 2021-08-10T16:59:35.418229 | 2021-06-28T14:34:08 | 2021-06-28T14:34:08 | 193,911,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | import pandas as pd
import os
# Directory containing the source Excel workbooks
excel_path = 'D://temp/xlsx/'
# Directory where the converted CSV files are written
csv_path = 'D://temp/csv/'
# Convert every Excel workbook in the source folder to a CSV file.
def excel_to_csv(src_dir=None, dst_dir=None):
    """Convert each Excel file in ``src_dir`` to a CSV file in ``dst_dir``.

    Both arguments default (at call time) to the module-level
    ``excel_path`` / ``csv_path``, so the original zero-argument call
    keeps working unchanged.
    """
    src_dir = excel_path if src_dir is None else src_dir
    dst_dir = csv_path if dst_dir is None else dst_dir
    for file in os.listdir(src_dir):
        # Skip sub-directories.  The original tested the bare name against
        # the current working directory (os.path.isdir(file)), which never
        # matched entries inside src_dir; join with the directory instead.
        if os.path.isdir(os.path.join(src_dir, file)):
            continue
        print('开始转换:' + file)
        # Keep only the base name; the extension is replaced by '.csv'.
        (file_name, extension) = os.path.splitext(file)
        # Read the workbook, using its first column as the index.
        data = pd.read_excel(src_dir + file, index_col=0)
        # Write the CSV.  Note the encoding really is gbk -- the original
        # comment claimed utf-8, which did not match the code.
        data.to_csv(dst_dir + file_name + '.csv', encoding='gbk')
        print('转换结束')
# 主函数
if __name__ == '__main__':
excel_to_csv() | [
"[email protected]"
]
| |
effc3b116d13b73680bc6ee89503bc15bb072e99 | b570f953a932642481051b4f0762de504f25eaeb | /inttest_md.py | 5f12ded508386aee50cbe6dbab0267013b1b809d | []
| no_license | matot830/hands-on-3 | bfd212fbfa11fa34900a1534bcdefe74138e75b6 | a4c5e5b7c44e358a1459ab904782829cc4438cd7 | refs/heads/main | 2022-12-27T08:58:17.879066 | 2020-10-05T15:05:20 | 2020-10-05T15:05:20 | 301,438,501 | 0 | 0 | null | 2020-10-05T15:05:22 | 2020-10-05T14:35:10 | Python | UTF-8 | Python | false | false | 151 | py | import md
import os
import sys
# Integration check for md.run_md(): remove any stale trajectory file,
# run the MD driver, then fail (exit code 1) if no new file was produced.
if os.path.exists('cu.traj'):
    os.unlink('cu.traj')
md.run_md()
# run_md() is expected to write 'cu.traj'; a missing file means failure.
if not os.path.exists('cu.traj'):
    sys.exit(1)
| [
"[email protected]"
]
| |
6a70ffe6deffc058b41ac39a1960e5593ee32b95 | 4c53e5174f7a71a00d94c7f91efbbd5cfd7df385 | /aw_names.py | 69c725c3575f91892efe7da83b7f556043f55363 | []
| no_license | peileppe/Apocalypse | 86206804f120032e0dba4bb225b4beba59f604b1 | 04714e44ab688cba73cb25f14af2b6f4038b9d8f | refs/heads/master | 2022-02-07T02:33:20.493973 | 2019-06-24T12:26:25 | 2019-06-24T12:26:25 | 114,765,037 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,068 | py | #!/usr/bin/python
# namelist.py
# ===================================================================
import random
names=['Abel', 'Abeline', 'Absinthe', 'Ace', 'Admiz', 'Akountan', 'Alabaster', 'Ambass', 'Amber', 'Ape', 'Applejack', 'Arizona', 'Arrow', 'Avenger', 'Azure', 'Baaba', 'Baiv', 'Banjo', 'Barbarian', 'Bazooka', 'Beans', 'Beastie', 'BeBop', 'Beep', 'Beezer', 'Bingo', 'Biscuit', 'Bish', 'Bizo', 'Blackjack', 'Blackstone', 'Blacktop', 'Blade', 'Blaze', 'Blitz', 'Blood', 'Blooper', 'Blowout', 'Blowtorch', 'Bluenote', 'Blur', 'Boffo', 'Bolt', 'Bomber', 'Bomzhell', 'Bones', 'Bong', 'Boogie', 'Boots', 'Boox', 'Brain', 'Breaker', 'Breezy', 'Brig', 'Brimstone', 'Briny', 'Brit', 'Broker', 'Bronco', 'Broom', 'Bruizer', 'Buckhorn', 'Bucktail', 'Buffalo', 'Bukaneer', 'Bulldog', 'Bulldozer', 'Bumper', 'Bunker', 'Bupp', 'Burgundy', 'Butcher', 'Butterfly', 'Butterwing', 'Buzz', 'Buzzard', 'Caboodle', 'Cactus', 'Calico', 'Calypso', 'Camel', 'Cameroon', 'Cannibal', 'Cap', 'Capricorn', 'Carve', 'Chameleon', 'Chancellor', 'Chantilly', 'Charn', 'Chestnut', 'Chevy', 'Cheyenne', 'Chiffon', 'Chigger', 'ChiliDog', 'Chime', 'Chopper', 'Chowder', 'Chute', 'Cinnamon', 'Clam', 'Clarion', 'Claw', 'Cleaver', 'Clipper', 'Clopper', 'Coach', 'Cobalt', 'Cobra', 'Cockpit', 'Colt', 'Comp', 'Coo-coo', 'Coosh', 'Core', 'Corkscrew', 'Cornbread', 'Corncob', 'Cottonfoot', 'Crableg', 'Crackers', 'Crimson', 'Crispy', 'Crooner', 'Crow', 'Crumbs', 'Crush', 'Crusher', 'Cupcake', 'Cutter', 'Cyclone', 'Cymbal', 'Dago', 'Dakota', 'Dambi', 'Damson', 'Dancer', 'Darkwing', 'Dazz', 'Demon', 'Devil', 'Dewdrop', 'Dez.Doc', 'Diamondback', 'Digger', 'Diggity', 'Ditto', 'Ditty', 'Diy', 'Dock', 'Doggie', 'Dong', 'DooDah', 'Doug', 'Downbeat', 'Drag', 'Dragon', 'Dremmer', 'Driller', 'Drummer', 'Duck', 'Dugout', 'Duke', 'Dumper', 'Durango', 'Dusky', 'Dustwind', 'Dutch', 'Ember', 'Emerald', 'Emperor', 'Fatso', 'Fin', 'Fingers', 'Fireball', 'Firebolt', 'Fist', 'Fixer', 'Fizz', 'Fleece', 'Flook', 'Foto', 'Frog', 'Fumbles', 'Fungo', 'Furf', 'Gabe', 'Gash', 'Giddyup', 'Glaze', 'Glider', 'Glory', 'Gog', 'Gong', 'Gonzo', 'Gooch', 
'Goose', 'Gossamer', 'Graywing', 'Greaser', 'Grip', 'Grip', 'Grizzly', 'Groot', 'Gumdrop', 'Gunboat', 'Gypsy', 'Hammer', 'Hangman', 'Hatch', 'Hatchback', 'Hawse', 'Hexmen', 'Hickory', 'Hi-fi', 'Hikup', 'Hippity', 'Hojo', 'Homestretch', 'Honeytree', 'Honk', 'Hoochee', 'Hoodoo', 'Hoofer', 'Hoop', 'HotRod', 'HubCap', 'Hummer', 'Hush', 'Hut-hut', 'Iceman', 'Iggy', 'Imoo', 'Inch', 'Indigo', 'IronMan', 'Jackal', 'Jade', 'Jangle', 'Jart', 'Java', 'Javi', 'Jawbreaker', 'Jayn', 'Jazz', 'Jeep', 'Jiffy', 'Jigger', 'Jigsaw', 'Jingle', 'Jinx', 'Jipper', 'Jitterbug', 'Jive', 'Jock', 'Jolt', 'Juju', 'JukeBox', 'Jumbiliah', 'Jumper', 'Jupiter', 'Kala', 'Keyph', 'Kickapoo', 'Kicker', 'Kickskirt', 'Kiki', 'Kimu', 'King', 'Kingmaker', 'Kite', 'Kiwi', 'Knuckleface', 'Knucklenose', 'Knuckles', 'K.O.', 'Koko', 'Krone', 'Lano', 'Lava', 'Lefty', 'Legs', 'Lightning', 'Limey', 'Line', 'LittleBit', 'LoBall', 'Lollipop', 'Loo', 'Lugnut', 'Lunchbox', 'Madiera', 'Magnificent', 'Mahogany', 'Man-Eater', 'Mara', 'Marigold', 'Masher', 'Mayday', 'Meatball', 'Meatloaf', 'Melody', 'Melody', 'Mex', 'Midnight', 'Misk', 'Mistral', 'Misty', 'Mog', 'Mojo', 'Mombo', 'Monsoon', 'Montana', 'Moonbeam', 'Moonglow', 'Mootzie', 'Muddy', 'Munchy', 'Mungo', 'name', 'Neel', 'Nibbles', 'Nighthorse', 'Noodles', 'Nuff', 'Octo', 'Oogie', 'Orchid', 'Paffy', 'Palamar', 'Panky', 'Peekaboo', 'Pickles', 'Pick-off', 'Pidge', 'Piff', 'Pikolo', 'Ping-Pong', 'Pirate', 'Pitchfork', 'Pojo', 'Poke', 'Poof', 'Popcorn', 'Pop-up', 'Potluck', 'Potsie', 'Poucho', 'Pretzel', 'Princess', 'Pucker', "Puddin'", 'Puff', 'Pumper', 'Quick', 'Racer', 'Raindrop', 'Raksha', 'Ram', 'Rambler', 'Rango', 'Rasco', 'Raven', 'Razzle', 'RibEye', 'Rigger', 'Rings', 'Ringside', 'Rink', 'Ripsaw', 'Rivet', 'Rizzy', 'Rocket', 'Roo', 'Rouge', 'Rub-a-Dub', 'Ruby', 'Rudder', 'Runway', 'Ruthel', 'Sable', 'Saddlebags', 'Sahara', 'Sapphire', 'Sarge', 'Sark', 'Sash', 'Sax', 'Scar', 'Scarlet', 'Schooner', 'Scooter', 'Screech', 'Scud', 'Scuff', 'Scull', 'Scuttlebutt', 
'Seaweed', 'Sess', 'Setter', 'Shadow', 'Shadrack', 'Shaker', 'Shalimar', 'Shark', 'Shazam', 'Shepherd', 'Shimmy', 'Shine', 'Shingle', 'Shiver', 'Shoke', 'Shoo', 'Shooter', 'Shortcake', 'Shot-Put', 'Shutout', 'Sidepockets', 'Silky', 'Silverado', 'Silverleaf', 'Sixpenny', 'Sizzle', 'Skadoo', 'Skat', 'Skater', 'Skib', 'Skid', 'Skiff', 'Skink', 'Skull', 'Skunk', 'Slade', 'Slappy', 'Slash', 'Sledge', 'Slewfoot', 'Slicer', 'Slick', 'Slider', 'Slingo', 'Sloo', 'Sloop', 'Slugger', 'Smack', 'Smash', 'Smasher', 'Smith', 'Smoke', 'Snail', 'Snake', 'Snapper', 'Snatch', 'Snatcher', 'Sneezer', 'Sniper', 'Snook', 'Snoot', 'Snort', 'Snot', 'Snout', 'Snow', 'Snow', 'Snowball', 'Solo', 'Sonar', 'Sonic', 'Soupy', 'Spade', 'Sparerib', 'Spars', 'Spike', 'Spinner', 'Spitter', 'Spoiler', 'Spokes', 'Spook', 'Spoons', 'Spree', 'Sprinkle', 'Spud', 'Spurlock', 'Sputz', 'Squint', 'Stabber', 'Steamboat', 'Sticks', 'Stinger', 'Stone', 'Strangler', 'Striker', 'Suitcase', 'Sunset', 'Surfboard', 'Swede', 'Swish', 'Switchblade', 'Swoop', 'Swoosh', 'Swoozie', 'Taco', 'Tailspin', 'Tangerine', 'Tango', 'Tank', 'Tanker', 'TapeDeck', 'Taps', 'Tarf', 'Tarpit', 'Tass', 'Tater', 'Tavin', 'Tawny', 'T-B', 'Teeshot', 'Tempo', 'Ten-Pin', 'Thunder', 'Ting', 'Tin-Pan', 'Toothpick', 'Torch', 'Torpedo', 'Touchdown', 'Trench', 'Trig', 'Trinity', 'Troon', 'Trotter', 'Trucker', 'Tunes', 'Turk', 'Tweedledee', 'Tweedledum', 'Violetta', 'Vista', 'Vulture', 'Warrior', 'Wart', 'Weasel', 'Weez', 'Weiss', 'Wharf', 'Wheels', 'Wheels', 'Whiskey', 'Whisper', 'Whistler', 'Whitehorn', 'Whizzer', 'Windjammer', 'Wings', 'Wishbone', 'X-Men', 'Yago', 'Yahoo', 'Yank', 'Yaz', 'Yellowhammer', 'Yellowstone', 'Yodel', 'Yogo', 'Yoho', 'Yorky', 'Yo-yo', 'Zang', 'Zap', 'Zart', 'Zazz', 'Zephyr', 'Ziggy', 'Zing', 'Zinger', 'Zip', 'Zoar', 'Zook', 'Zookie', 'Zoot', 'Zuzu']
def namePick():
    """Pick one entry from the module-level ``names`` list at random."""
    return names[random.randrange(len(names))]
def main():
    """Print a single randomly chosen name."""
    picked = namePick()
    print(picked)


if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
eaad56cb2da50e99711c5cc02e7709b1053a7a15 | 485535e03eb3a612c353cec9d88d06dbb2f659bf | /leetcode/insert_bst.py | 72d930a2333f439497854b29e2e7099c006e8f44 | []
| no_license | nkchangliu/puzzles | bef8260aa436102d8ccfde3061f66aea52cb12fb | 19bbf17ab823a7f35c5b6ea365d2d72f2e76c3b9 | refs/heads/master | 2021-08-08T21:56:27.789303 | 2018-11-01T23:20:49 | 2018-11-01T23:20:49 | 133,545,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | def insert_tree(root, target):
if not root:
root = TreeNode(target)
elif root.val > target:
root.left = insert_tree(root.left, target)
else:
root.right = insert_tree(root.right, target)
return root
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val, left=None, right=None):
        # Children default to None so leaf nodes need only a value.
        self.left = left
        self.right = right
        self.val = val
| [
"[email protected]"
]
| |
421c13e8963ffe8366e023f610bb3eea356c8f96 | 86cc944246f0e3085129a0d5bd9f3e66cfaf65f2 | /applications/accel_mpu_9250_esp32/python_udp_server/show_helper.py | 8d31a70d352f11e525389727c48a45a1b84b901d | []
| no_license | DaveTheTroll/arduino_JG | 59b6bd581816db19ef12189ba31e6bf6d6400d64 | 61b21b8785ba9a9a81bd680d17c31f475db65700 | refs/heads/master | 2020-03-22T07:47:22.987845 | 2020-01-28T13:42:31 | 2020-01-28T13:42:31 | 139,724,604 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | import threading
from mpl_toolkits.mplot3d import proj3d
import numpy as np
from accel_server import accel_server
import matplotlib.pyplot as plt
def orthogonal_proj(zfront, zback):
    """Build the 4x4 projection matrix installed as
    ``proj3d.persp_transformation`` below, replacing mpl's default
    perspective transform for the given front/back clip values."""
    depth = zfront - zback
    a = (zfront + zback) / depth
    b = -2 * (zfront * zback) / depth
    return np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, a, b],
        [0, 0, 0, zback],
    ])
def start_show(onReading):
s = accel_server(onReading)
thread = threading.Thread(target=lambda: s.run(), daemon=True)
thread.start()
proj3d.persp_transformation = orthogonal_proj
plt.show() | [
"[email protected]"
]
| |
e941d310365b9d5743b86e3c6434440d6a26f351 | 2eb6c4a1293094b63d7a9e204bdc1ba7578b7537 | /webium/tests/test_base_page.py | e6f1dd17a6dfad47a3c72a131e81992a2e6ed768 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
]
| permissive | WandyYing/vscode-remote-webium | a60124e1b5439de9bf2cbae161513e73ad21455a | c6291cc6f72cbc9b323221a0ef4bbba3134f7f60 | refs/heads/master | 2020-12-14T23:51:21.499703 | 2020-02-04T11:04:13 | 2020-02-04T11:04:13 | 234,916,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | import pytest
from unittest import TestCase
from webium.base_page import BasePage
from webium.errors import WebiumException
class PageWithoutUrl(BasePage):
    """Page object that deliberately defines no URL; the test below expects
    opening it to raise WebiumException."""
    pass
class TestNoUrlValidation(TestCase):
def test_no_url_validation(self):
page = PageWithoutUrl()
with pytest.raises(WebiumException):
page.open() | [
"[email protected]"
]
| |
ec72ba24df443da46b514225c8fff2567fd507f9 | 6167f6fe448775c3b08aafcfc69b57dd98309548 | /Python_R/machinery.py | e886c336350b4eb8b569ddfbd71df57e71af83c6 | []
| no_license | quinnie-nguyen/Productivity-Changes-of-German-Manufacturing-Firms-and-Its-Components | 390e876b488c94634833bb37d517307ff67cefd2 | 04b63474e2d08794038b398febda38132396f523 | refs/heads/master | 2022-11-20T12:41:00.103261 | 2020-07-23T23:00:32 | 2020-07-23T23:00:32 | 282,074,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,454 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 16:51:36 2020
@author: DELL
"""
'''
################################## CHEMICALS #################################
chemicals_raw = preda.get_data_chem()
chem =preda.clean_datechem(chemicals_raw)
old_chem, young_chem, newborn_chem = preda.get_f_wrt_age(chem)
newborn_chem = preda.get_data_newborn(newborn_chem)
'''dataframe of chemicals for the whole period'''
total_chem = pd.concat([old_chem,young_chem,newborn_chem], axis = 0, ignore_index = True)
'''drop some firms that have unreasonable entry, firms with multi-industry, and
data for the whole cooperation around the world'''
total_chem = total_chem.drop(total_chem.index[total_chem['ID'].isin([8,21,51,54,156])]).reset_index()
total_chem = total_chem.drop('index', axis = 1)
total_chem.columns = ['ID', 'name',
'fa_18', 'fa_17', 'fa_16', 'fa_15', 'fa_14', 'fa_13', 'fa_12', 'fa_11', 'fa_10',
'em_18', 'em_17', 'em_16', 'em_15', 'em_14','em_13', 'em_12', 'em_11', 'em_10',
'ec_18', 'ec_17', 'ec_16', 'ec_15', 'ec_14', 'ec_13', 'ec_12', 'ec_11', 'ec_10',
'mc_18', 'mc_17', 'mc_16','mc_15', 'mc_14', 'mc_13','mc_12', 'mc_11', 'mc_10',
's_18', 's_17', 's_16', 's_15', 's_14','s_13','s_12', 's_11', 's_10',
'year_cor']
total_chem, total_chem_edit = preda.deflate_data(total_chem, index)
total_chem_edit.to_csv('chem.csv', sep='|')
chem_malm = preda.Malmquist_data(total_chem_edit, 198)
chem_malm.loc[(chem_malm['year']==2012) & (chem_malm['firm'].isin([335])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
chem_malm.loc[(chem_malm['year']==2013) & (chem_malm['firm'].isin([151,299,335])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
chem_malm.loc[(chem_malm['year']==2014) & (chem_malm['firm'].isin([151,299])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
chem_malm.to_csv('chem_malm.csv', sep = '|')
# read results
eff_chem = pd.read_csv('efficiency_chemical.csv').drop('Unnamed: 0', axis =1)
#distribution of efficiency score
preda.hist_eff_score(eff_chem)
eff_chem_dmu = preda.eff_dmu(eff_chem)
#change in efficiency level of newborns
plt.subplots(figsize =(12,12))
plt.ylim(0, 1.2)
plt.plot(eff_chem_dmu.loc[eff_chem_dmu['year'] >=2013, 'year'],
eff_chem_dmu.loc[eff_chem_dmu['firm_9'] != 0,'firm_9'],
color='skyblue', linewidth=3, marker='o', markerfacecolor='blue', markersize=10)
plt.plot(eff_chem_dmu.loc[eff_chem_dmu['year'] >=2015, 'year'],
eff_chem_dmu.loc[eff_chem_dmu['firm_221'] != 0,'firm_221'],
color='olive', linewidth=1, marker='o', markerfacecolor='yellowgreen', markersize=6)
plt.title('Efficient newborn firms in chemical sector', color='blue', fontsize=20)
plt.ylabel('efficiency score', color='blue', fontsize=14)
################################## ELECTRONICS #################################
elec = preda.get_data_elec()
elec = preda.clean_dateelec(elec)
old_elec, young_elec, newborn_elec = preda.get_f_wrt_age(elec)
newborn_elec = preda.get_data_newborn(newborn_elec)
'''dataframe of electronic for the whole period'''
total_elec = pd.concat([old_elec,young_elec,newborn_elec], axis =0, ignore_index = True)
'''drop some firms that have unreasonable entry, firms with multi-industry, and
data for the whole cooperation around the world'''
total_elec = total_elec.drop(total_elec.index[total_elec['ID'].isin([6,11,23,423,98,85])]).reset_index()
total_elec = total_elec.drop('index', axis = 1)
total_elec.columns = ['ID', 'name',
'fa_18', 'fa_17', 'fa_16', 'fa_15', 'fa_14', 'fa_13', 'fa_12', 'fa_11', 'fa_10',
'em_18', 'em_17', 'em_16', 'em_15', 'em_14','em_13', 'em_12', 'em_11', 'em_10',
'ec_18', 'ec_17', 'ec_16', 'ec_15', 'ec_14', 'ec_13', 'ec_12', 'ec_11', 'ec_10',
'mc_18', 'mc_17', 'mc_16','mc_15', 'mc_14', 'mc_13','mc_12', 'mc_11', 'mc_10',
's_18', 's_17', 's_16', 's_15', 's_14','s_13','s_12', 's_11', 's_10',
'year_cor']
total_elec, total_elec_edit = preda.deflate_data(total_elec,index)
total_elec_edit.to_csv('elec.csv', sep ='|')
elec_malm = preda.Malmquist_data(total_elec_edit,237)
elec_malm.loc[(elec_malm['year']==2011) & (elec_malm['firm'].isin([1010])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
elec_malm.loc[(elec_malm['year']==2012) & (elec_malm['firm'].isin([1010])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
elec_malm.loc[(elec_malm['year']==2013) & (elec_malm['firm'].isin([38,65,1010])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
elec_malm.loc[(elec_malm['year']==2014) & (elec_malm['firm'].isin([38,65])), ['fixed_assets','employees',
'emp_cost','mat_cost','sales']] = np.nan
elec_malm.to_csv('elec_malm.csv', sep='|')
eff_elec = pd.read_csv('efficiency_electronic.csv').drop('Unnamed: 0', axis =1)
preda.hist_eff_score(eff_elec)
eff_elec_dmu = preda.eff_dmu(eff_elec)
#change in efficiency level of newborns
plt.subplots(figsize =(12,12))
plt.ylim(0, 1.2)
plt.plot(eff_elec_dmu.loc[eff_elec_dmu['year'] >=2012, 'year'],
eff_elec_dmu.loc[eff_elec_dmu['firm_54'] != 0,'firm_54'],
color='skyblue', linewidth=3, marker='o', markerfacecolor='blue', markersize=10)
plt.plot(eff_elec_dmu.loc[eff_elec_dmu['year'] >=2012, 'year'],
eff_elec_dmu.loc[eff_elec_dmu['firm_111'] != 0,'firm_111'],
color='olive', linewidth=1, marker='o', markerfacecolor='yellowgreen', markersize=6)
plt.plot(eff_elec_dmu.loc[eff_elec_dmu['year'] >=2011, 'year'],
eff_elec_dmu.loc[eff_elec_dmu['firm_533'] != 0,'firm_533'],
color='purple', linewidth=1, marker='o', markerfacecolor='violet', markersize=6)
plt.plot(eff_elec_dmu.loc[eff_elec_dmu['year'] >=2015, 'year'],
eff_elec_dmu.loc[eff_elec_dmu['firm_643'] != 0,'firm_643'],
color='darkred', linewidth=1, marker='o', markerfacecolor='red', markersize=6)
plt.title('Efficient newborn firms in electronic sector', color='blue', fontsize=20)
plt.ylabel('efficiency score', color='blue', fontsize=14)
'''
################################## MACHINERY ##################################
import pandas as pd
import numpy as np
import Preprocessing_data as preda
import matplotlib.pyplot as plt
#import DEA
# ---------- Machinery: load raw data and build the 2010-2018 panel ----------
index = preda.get_index()
mac = preda.get_raw_data('Machinery_raw.xls')
mac = preda.clean_datemac(mac)
# Split firms into age cohorts (old / young / newborn).
old_mac, young_mac, newborn_mac = preda.get_f_wrt_age(mac)
newborn_mac = preda.get_data_newborn(newborn_mac)
'''dataframe of machinery for the whole period'''
total_mac = pd.concat([old_mac,young_mac,newborn_mac], axis =0, ignore_index = True)
'''drop some firms that have unreasonable entry, firms with multi-industry, and
data for the whole cooperation around the world'''
total_mac = total_mac.drop(total_mac.index[total_mac['ID'].isin([271])]).reset_index()
total_mac = total_mac.drop('index', axis = 1)
# Rename columns: fa = fixed assets, em = employees, ec = employee cost,
# mc = material cost, s = sales; the suffix is the year (2018 .. 2010).
total_mac.columns = ['ID', 'name',
                    'fa_18', 'fa_17', 'fa_16', 'fa_15', 'fa_14', 'fa_13', 'fa_12', 'fa_11', 'fa_10',
                    'em_18', 'em_17', 'em_16', 'em_15', 'em_14','em_13', 'em_12', 'em_11', 'em_10',
                    'ec_18', 'ec_17', 'ec_16', 'ec_15', 'ec_14', 'ec_13', 'ec_12', 'ec_11', 'ec_10',
                    'mc_18', 'mc_17', 'mc_16','mc_15', 'mc_14', 'mc_13','mc_12', 'mc_11', 'mc_10',
                    's_18', 's_17', 's_16', 's_15', 's_14','s_13','s_12', 's_11', 's_10',
                    'year_cor']
total_mac, total_mac_edit = preda.deflate_data(total_mac,index)
total_mac_edit.to_csv('mac.csv', sep='|')
# ---------- Build the Malmquist-index input panel (305 firms) ----------
mac_malm = preda.Malmquist_data(total_mac_edit, 305)
# if any input is null, then set others also null values
mac_malm.loc[(mac_malm['year']==2010) & (mac_malm['firm'].isin([547,716])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2011) & (mac_malm['firm'].isin([377,716])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2012) & (mac_malm['firm'].isin([377,274])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2013) & (mac_malm['firm'].isin([92,274])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2014) & (mac_malm['firm']==274), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
"""for robustness check"""
mac_malm.loc[(mac_malm['year']==2010) & (mac_malm['firm'].isin([274,716])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2011) & (mac_malm['firm'].isin([274])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2012) & (mac_malm['firm'].isin([274])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2013) & (mac_malm['firm'].isin([274])), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.loc[(mac_malm['year']==2014) & (mac_malm['firm']==274), ['fixed_assets','employees',
           'emp_cost','mat_cost','sales']] = np.nan
mac_malm.to_csv('mac_malm.csv', sep='|')
###extract only sale data for weighted average of the result
sales_mac = total_mac_edit.loc[:, ['ID', 's_10', 's_11', 's_12', 's_13', 's_14', 's_15',
                                   's_16', 's_17', 's_18']]
# Blank out the same firm/year sales that were removed from the inputs above.
sales_mac.loc[sales_mac['ID'] == 547, ['s_10']] = np.nan
sales_mac.loc[sales_mac['ID'] == 716, ['s_10', 's_11']] = np.nan
sales_mac.loc[sales_mac['ID'] == 377, ['s_11', 's_12']] = np.nan
sales_mac.loc[sales_mac['ID'] == 92, ['s_13']] = np.nan
sales_mac.loc[sales_mac['ID'] == 274, ['s_12', 's_13', 's_14']] = np.nan
# Normalise each year's sales so the column sums to 1 (sales weights).
for i in range(10,19):
    sales_mac.loc[:,f's_{i}'] = sales_mac.loc[:,f's_{i}']/np.nansum(sales_mac.loc[:,f's_{i}'])
sales_mac.to_csv('sales_mac.csv', sep = '|')
# read result
#eff_mac = pd.read_csv('efficiency_machinery.csv').drop('Unnamed: 0', axis =1)
eff_mac = pd.read_csv('eff_score_mac.csv').drop('Unnamed: 0', axis =1)
eff_mac.replace(0,np.nan, inplace=True)
preda.hist_eff_score(eff_mac)
eff_mac_dmu = preda.eff_dmu(eff_mac)
preda.eff_static(eff_mac_dmu,50,15,6, 'Machinery')
#change in efficiency level of newborns
plt.subplots(figsize =(12,12))
plt.ylim(0, 1.2)
plt.plot(eff_mac_dmu.loc[eff_mac_dmu['year'] >=2012, 'year'],
         eff_mac_dmu.loc[eff_mac_dmu['firm_67'] != 0,'firm_67'],
         color='skyblue', linewidth=1, marker='o', markerfacecolor='blue', markersize=6)
plt.plot(eff_mac_dmu.loc[eff_mac_dmu['year'] >=2013, 'year'],
         eff_mac_dmu.loc[eff_mac_dmu['firm_95'] != 0,'firm_95'],
         color='olive', linewidth=1, marker='o', markerfacecolor='yellowgreen', markersize=6)
plt.plot(eff_mac_dmu.loc[eff_mac_dmu['year'] >=2014, 'year'],
         eff_mac_dmu.loc[eff_mac_dmu['firm_292'] != 0,'firm_292'],
         color='purple', linewidth=1, marker='o', markerfacecolor='violet', markersize=6)
plt.plot(eff_mac_dmu.loc[eff_mac_dmu['year'] >=2013, 'year'],
         eff_mac_dmu.loc[eff_mac_dmu['firm_381'] != 0,'firm_381'],
         color='darkred', linewidth=1, marker='o', markerfacecolor='red', markersize=6)
plt.plot(eff_mac_dmu.loc[eff_mac_dmu['year'] >=2014, 'year'],
         eff_mac_dmu.loc[eff_mac_dmu['firm_554'] != 0,'firm_554'],
         color='black', linewidth=1, marker='o', markerfacecolor='silver', markersize=6)
plt.plot(eff_mac_dmu.loc[eff_mac_dmu['year'] >=2014, 'year'],
         eff_mac_dmu.loc[eff_mac_dmu['firm_1859'] != 0,'firm_1859'],
         color='forestgreen', linewidth=1, marker='o', markerfacecolor='limegreen', markersize=6)
plt.title('Efficient newborn firms in machinery sector', color='blue', fontsize=20)
plt.ylabel('efficiency score', color='blue', fontsize=14)
# ---------- Summary statistics and Excel report ----------
summary=preda.efficiency_sum_stats(eff_mac)
summary_by_age = preda.eff_stats_by_age_merged(eff_mac, 241, 41, 23)
from time import strftime
writer = pd.ExcelWriter(strftime('Report_machinery_geo %Y-%m-%d.xlsx'))
eff_mac_dmu.to_excel(writer, 'eff_firms')
summary.to_excel(writer, 'eff_summary_stats')
summary_by_age.to_excel(writer, 'eff_score_by_age')
#malmquist index
# how many firms grow over time? pc>1
df = preda.read_malmquist('malmquist_machinery.csv')
df_mac = preda.read_malmquist('malmquist_tovrs_mac.csv')
df_compare = preda.encode_change(df, 241, 41, 23)
growth_dmu = preda.growth_dmu(df_compare, 307)
efficiency_growth = preda.ec_dmu(df_compare)
source_pc_machinery = preda.source_pc_sector(df, 242, 42, 23)
comparison = preda.comparison(df, 242, 42, 23)
source_pd_sector = preda.source_pd_sector(source_pc_machinery, comparison, 242, 42, 23)
avg_change_total = preda.average_change(df,'total')
avg_change_total.to_excel(writer, 'overall_avg_change')
writer.save()
preda.visualize_change_by_group(avg_change_total, 'total')
# Split Malmquist results by age cohort (rows are stacked old/young/newborn).
df_old = df.iloc[0:242,]
df_young = df.iloc[242:284,]
df_newborn = df.iloc[284:307,]
avg_change_by_age = preda.avg_change_bygroup(df_old, df_young, df_newborn)
preda.visualize_change_by_group(avg_change_by_age, 'newborn')
preda.visualize_change_by_group(avg_change_by_age, 'old')
preda.visualize_change_by_group(avg_change_by_age, 'young')
preda.visualize_change_by_component(avg_change_by_age,'MI')
preda.visualize_change_by_component(avg_change_by_age,'EC')
preda.visualize_change_by_component(avg_change_by_age,'TC')
| [
"[email protected]"
]
| |
9fc8a582712f21900f21753e436eb4d0b17709d6 | c3115c59b35eb037cf45831ff3f3ce5ca139e376 | /tango_with_django_project/tango_with_django_project/settings.py | 9fcf7ab1cfab791df08f27bf00c704856782f72b | []
| no_license | GLunPy/rango | df2aa7d74157acb41cd3fce3445f6ac12f6b9be6 | 4699f219fa65143c01fae1aea7e71e5a64954e6a | refs/heads/master | 2021-01-02T22:58:29.318268 | 2015-08-02T15:46:56 | 2015-08-02T15:46:56 | 39,783,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | """
Django settings for tango_with_django_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '8^&stb0(-8_grgk7g+)66%j_be4@8pe6-7jxw4td8d_ji6-@i0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rango',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tango_with_django_project.urls'
WSGI_APPLICATION = 'tango_with_django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
    STATIC_PATH,
)
# Project-local template directory, registered in TEMPLATE_DIRS below.
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    TEMPLATE_PATH,
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"[email protected]"
]
| |
987b4b3cbc539214f6c1b21224e16479b498c98e | 4fda808f7a77ea4847b91ac98940b5b33010aa1d | /TestInvoice.py | 79ea82ca5d916ed0331a8439f1efc7167f0ce3e6 | []
| no_license | neastlan/Lab1Part3 | 77647b751d0ace1f1986f698fb5ac65e4fa282db | 019c39cebc22a5430cb891e80741c43696688b52 | refs/heads/master | 2023-03-11T19:44:08.585308 | 2021-03-02T06:22:59 | 2021-03-02T06:22:59 | 343,667,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import pytest
from Invoice import Invoice
@pytest.fixture()
def products():
products={'Pen' : {'qnt' : 10, 'unit_price' : 3.75, 'discount' : 5},
'Notebook': {'qnt' : 5, 'unit_price' : 7.5, 'discount': 10}}
return products
@pytest.fixture()
def invoice():
invoice = Invoice()
return invoice
def test_CanCalucateTotalImpurePrice(invoice, products):
invoice.totalImpurePrice(products)
assert invoice.totalImpurePrice(products) == 75
def test_CanCalucateTotalDiscount(invoice, products):
invoice.totalDiscount(products)
assert invoice.totalDiscount(products) == 5.62
def test_CanCalucateTotalPurePrice(invoice, products):
invoice.totalPurePrice(products)
assert invoice.totalPurePrice(products) == 69.38 | [
"[[email protected]]"
]
| |
0375e087942c2c2346f1f6076ecf3070bb98af45 | 3424fd15a6b4a079c24cb45c896277e3dd7cf0c8 | /plugins/modules/oneview_enclosure_group_facts.py | 15216f74b45c47563922cd7e99e1ea7af0c69bc1 | [
"Apache-2.0"
]
| permissive | SHANDCRUZ/test-codecov | e7544c4616e281ada4858f34897b7a1e59be7748 | f7aca851423641570fa86d3f8233235d14c71756 | refs/heads/main | 2023-03-24T01:43:18.048625 | 2021-03-23T12:39:51 | 2021-03-23T12:39:51 | 350,258,321 | 0 | 0 | Apache-2.0 | 2021-03-23T12:47:30 | 2021-03-22T08:07:33 | Python | UTF-8 | Python | false | false | 4,355 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_enclosure_group_facts
short_description: Retrieve facts about one or more of the OneView Enclosure Groups.
description:
- Retrieve facts about one or more of the Enclosure Groups from OneView.
version_added: "2.3.0"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author:
- "Gustavo Hennig (@GustavoHennig)"
- "Bruno Souza (@bsouza)"
options:
name:
description:
- Enclosure Group name.
required: false
type: str
options:
description:
- "List with options to gather additional facts about Enclosure Group.
Options allowed:
C(configuration_script) Gets the configuration script for an Enclosure Group."
required: false
type: list
extends_documentation_fragment:
- hpe.oneview.oneview
- hpe.oneview.oneview.params
- hpe.oneview.oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Enclosure Groups
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 2000
delegate_to: localhost
- debug: var=enclosure_groups
- name: Gather paginated, filtered and sorted facts about Enclosure Groups
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 2000
params:
start: 0
count: 3
sort: 'name:descending'
filter: 'status=OK'
scope_uris: '/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'
- debug: var=enclosure_groups
- name: Gather facts about an Enclosure Group by name with configuration script
oneview_enclosure_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 2000
name: "Test Enclosure Group Facts"
options:
- configuration_script
delegate_to: localhost
- debug: var=enclosure_groups
- debug: var=enclosure_group_script
'''
RETURN = '''
enclosure_groups:
description: Has all the OneView facts about the Enclosure Groups.
returned: Always, but can be null.
type: dict
enclosure_group_script:
description: The configuration script for an Enclosure Group.
returned: When requested, but can be null.
type: dict
'''
from ansible_collections.hpe.oneview.plugins.module_utils.oneview import OneViewModule
class EnclosureGroupFactsModule(OneViewModule):
    """Ansible facts module that reads Enclosure Group data from OneView."""

    argument_spec = dict(
        name=dict(required=False, type='str'),
        options=dict(required=False, type='list'),
        params=dict(required=False, type='dict')
    )

    def __init__(self):
        super().__init__(additional_arg_spec=self.argument_spec)
        # Bind the enclosure-groups endpoint; when a 'name' parameter is
        # supplied this also resolves self.current_resource.
        self.set_resource_object(self.oneview_client.enclosure_groups)

    def execute_module(self):
        facts = {}
        requested_name = self.module.params.get("name")
        if not requested_name:
            # No name given: list all groups, honouring the pagination /
            # filter parameters passed by the playbook.
            groups = self.resource_client.get_all(**self.facts_params)
        elif self.current_resource:
            groups = self.current_resource.data
            if "configuration_script" in self.options:
                facts["enclosure_group_script"] = self.current_resource.get_script()
        else:
            # A name was requested but no matching group exists.
            groups = []
        facts["enclosure_groups"] = groups
        return dict(changed=False, ansible_facts=facts)
def main():
    """Module entry point: build the facts module and run it."""
    module = EnclosureGroupFactsModule()
    module.run()


if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
881f1c7160691ba11837b3fefa17d2720ce659c9 | e6e8fe0854ef69faa6276c6ee5a24e5978156c56 | /idc/Test.py | 7588cc839ce946ffacd8df0b6c478c43600dadf8 | []
| no_license | prakadambi/masters-vm-2sem | 50f56269dcb56bb749a463e47cf45b3f001df18a | 8b10f49330f0175f075e46d13c7c671e86766dca | refs/heads/master | 2021-06-16T06:41:04.058957 | 2017-05-09T03:14:14 | 2017-05-09T03:14:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | #!/usr/bin/env python3
from serial import Serial
import bitarray
import time
import sys

print(sys.version)

print('\nDemonstrating M2M communication using RS232\n')

# Open the adapter with a 5-second read timeout, one stop bit and RTS/CTS
# hardware flow control; the baud rate is chosen interactively.
serialPort = Serial("/dev/ttyUSB0", timeout=5)
serialPort.baudrate = input("\nEnter Baud rate: ")
serialPort.stopbits = 1
serialPort.rtscts = True
if not serialPort.isOpen():
    serialPort.open()

outStr = ''
inStr = ''
serialPort.flushInput()
serialPort.flushOutput()

# Echo loop: send one line at a time; when the peer asserts CTS, read back
# up to ten bytes and show both directions. "-1" quits.
while True:
    outStr = input("\nEnter your msg and -1 to quit: ")
    if outStr == '-1':
        print('exiting')
        break
    outStr = outStr.encode()
    serialPort.setRTS(1)
    serialPort.write(outStr)
    ct = serialPort.getCTS()
    if serialPort.getCTS():
        inStr = serialPort.read(10)
        ba = bitarray.bitarray()
        ba.frombytes(inStr)
        print("\n")
        inStr = inStr.decode()
        print('Received :' + inStr)
        outStr = outStr.decode()
        print("Sent : " + outStr)

serialPort.close()
| [
"[email protected]"
]
| |
1dbbd38333e4bdfa695a265eab97dede7839959c | 893a2fea722b77148f1fb1cac066ce476f1afa0a | /codeforces/cf_beta_85/problem2.py | 8c260d049262d5c1c10a34300dcf43695e7bd3a3 | []
N = 10**5

# is_prime[i] == 1 iff i is prime; filled in once at import time by sieve().
is_prime = [1] * N
# 0 and 1 are composite by convention.
is_prime[0] = 0
is_prime[1] = 0


def sieve():
    """Sieve of Eratosthenes: clear is_prime[] for every composite in [0, N)."""
    i = 2
    while i * i <= N:
        if is_prime[i]:
            # Multiples below i*i were already crossed out by smaller primes,
            # so starting at i*i (instead of 2*i) gives the same result with
            # less work.
            j = i * i
            while j < N:
                is_prime[j] = 0
                j += i
        i += 1


sieve()


def nextPrime(n):
    """Return the smallest prime >= n (caller must keep n < N)."""
    i = n
    while not is_prime[i]:
        i += 1
    return i
# For each test case, find two primes k1 >= d+1 and k2 >= d+k1 and print
# their product (both divisors of the answer differ by at least d).
for _ in range(int(input())):
    d = int(input())
    first = nextPrime(d + 1)
    second = nextPrime(d + first)
    print(first * second)
"[email protected]"
]
| |
1ca84859582be255db54107b3def0b75db02deb7 | 892dee995ffb3c25ddd0d92d5b0b9777751102ea | /shared_storage.py | 61cb3f6cd76aa444142304331aca1afdb38d3868 | [
"MIT"
]
| permissive | goshawk22/muzero-general | 5a7fa73d3bdc12b5ceb97131b0ffb2c86afd3237 | 29ceb1a25c00d46456a87794de602451727a668e | refs/heads/master | 2021-11-29T08:12:57.929343 | 2021-08-11T09:03:38 | 2021-08-11T09:03:38 | 294,471,664 | 2 | 1 | MIT | 2021-01-01T14:42:03 | 2020-09-10T17:02:44 | Python | UTF-8 | Python | false | false | 1,147 | py | import copy
import os
import ray
import torch
@ray.remote
class SharedStorage:
    """Dedicated actor that stores the network weights and training info."""

    def __init__(self, checkpoint, config):
        self.config = config
        self.current_checkpoint = copy.deepcopy(checkpoint)

    def save_checkpoint(self, path=None):
        """Persist the checkpoint, defaulting to <results_path>/model.checkpoint."""
        if not path:
            path = os.path.join(self.config.results_path, "model.checkpoint")
        torch.save(self.current_checkpoint, path)

    def get_checkpoint(self):
        """Return a deep copy so callers cannot mutate the stored state."""
        return copy.deepcopy(self.current_checkpoint)

    def get_info(self, keys):
        """Fetch one entry (str key) or several (list of keys)."""
        if isinstance(keys, str):
            return self.current_checkpoint[keys]
        if isinstance(keys, list):
            return {key: self.current_checkpoint[key] for key in keys}
        raise TypeError

    def set_info(self, keys, values=None):
        """Set a single entry (str key + value) or merge a dict of entries."""
        if isinstance(keys, str) and values is not None:
            self.current_checkpoint[keys] = values
        elif isinstance(keys, dict):
            self.current_checkpoint.update(keys)
        else:
            raise TypeError
| [
"[email protected]"
]
| |
02870225cf065083ba4335fd8a97915249b45f48 | cf50ea39bfd5a7dee49f10c5889637131bb40c74 | /11-CHAPTER/3-multiple-inheritance.py | f44b08ef42b3cd67a92e4e03882b0df37fad6336 | []
class Employee:
    # First base class: listed first, so its `company` wins in the MRO.
    company = "Visa"
    eCode = 120


class Freelancer:
    # Second base class: contributes `level` and its mutator.
    company = "Fiverr"
    level = 0

    def upgradeLevel(self):
        """Raise the freelancer level by one (stored on the instance)."""
        self.level += 1


class Programmer(Employee, Freelancer):
    # Multiple inheritance: attribute lookup follows the MRO
    # Programmer -> Employee -> Freelancer.
    name = "Vrushabh"


p = Programmer()
p.upgradeLevel()
print(p.level)
print(p.company)
"[email protected]"
]
| |
6e44992389889c0388dc181964d4224150695804 | f28f73251faf98ca976425601e2ec65c02fe403c | /oop/objectfromfile_person.py | 78581e93167f7f4aa60291a6129411aa5d6bfa4a | []
class Person:
    """Simple record for a person parsed from the data file."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def printval(self):
        """Print both fields, one per line."""
        print("name:", self.name)
        print("age:", self.age)

    def __str__(self):
        # A person displays as just their name.
        return self.name
# Read "person" as comma-separated "name,age" lines and print each record.
f = open("person", 'r')
for line in f:
    fields = line.rstrip("\n").split(",")
    name = fields[0]
    age = fields[1]
    obj = Person(name, age)
    print(obj)
    obj.printval()
"[email protected]"
]
| |
8e15123ac1006ef3d53de1573baf06184dd75c95 | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/plat-mac/carbon/lists.py | 4fc2252e450153f6e01beee179948af9cb5f9698 | []
| no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,082 | py | # 2016.08.04 20:01:15 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/Lists.py
def FOUR_CHAR_CODE(x):
    # Identity helper: these generated bindings keep Mac OSType codes as
    # plain 4-character strings.
    return x

# List Manager notification messages (OSType codes).
listNotifyNothing = FOUR_CHAR_CODE('nada')
listNotifyClick = FOUR_CHAR_CODE('clik')
listNotifyDoubleClick = FOUR_CHAR_CODE('dblc')
listNotifyPreClick = FOUR_CHAR_CODE('pclk')

# Bit positions for list drawing/autoscroll behaviour.
lDrawingModeOffBit = 3
lDoVAutoscrollBit = 1
lDoHAutoscrollBit = 0
# Masks corresponding to the bit positions above (1 << bit).
lDrawingModeOff = 8
lDoVAutoscroll = 2
lDoHAutoscroll = 1

# Bit positions for selection behaviour flags.
lOnlyOneBit = 7
lExtendDragBit = 6
lNoDisjointBit = 5
lNoExtendBit = 4
lNoRectBit = 3
lUseSenseBit = 2
lNoNilHiliteBit = 1
# Selection masks; lOnlyOne is the sign bit of a signed byte, hence -128.
lOnlyOne = -128
lExtendDrag = 64
lNoDisjoint = 32
lNoExtend = 16
lNoRect = 8
lUseSense = 4
lNoNilHilite = 2

# Messages passed to a list definition procedure (LDEF).
lInitMsg = 0
lDrawMsg = 1
lHiliteMsg = 2
lCloseMsg = 3

# List definition procedure types.
kListDefProcPtr = 0
kListDefUserProcType = kListDefProcPtr
kListDefStandardTextType = 1
kListDefStandardIconType = 2
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-mac\carbon\lists.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 20:01:15 Střední Evropa (letní čas)
| [
"[email protected]"
]
| |
ae77f04a39360d46e9678ac92343a924b0ef173e | 8f9aa25d14293be5baab4f9563a17517ab70e9b3 | /AllMedia/models.py | 574432b279a6a6c5462f9c0cd8102321fafc86c6 | []
| no_license | MdAkdas/musicVibe | 0107dbd2385b98f6b5667aac4d437118d3cb5726 | f39f194785251067bd8cad5f77e07b306d62aa25 | refs/heads/master | 2022-11-27T14:05:16.384625 | 2020-07-02T01:01:00 | 2020-07-02T01:01:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
User = get_user_model()


class MediaFiles(models.Model):
    """A single media file uploaded by a user."""

    # Owning user; deleting the user cascades to their files.
    user_for_files = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=128)
    files = models.FileField(upload_to='media')

    class Meta:
        verbose_name = 'MediaFile'
        verbose_name_plural = 'MediaFiles'

    def __str__(self):
        return self.name
"[email protected]"
]
| |
fe3bd2b60fe879cd445a2e446ba1b5993ed6a05c | 18e3b17e11c11f67499975fad6b88237f8589ab6 | /CrApp/wsgi.py | e349cc5a158bc7adc41023d45a03f5149540a6fe | []
"""
WSGI config for CrApp project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before creating the WSGI handler
# (setdefault lets an externally-set value win).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CrApp.settings")

# Module-level callable that WSGI servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
"[email protected]"
]
| |
ea8118a80cd7998d5a78ae88a633e64187567637 | 56c85a5c2c7cbdfcd3137571f0dc79b05e3f25c9 | /agenda/urls.py | 3f9a4ad5ffee43e7119da9691ce7472112f2a43b | []
| no_license | UesleiJf/agenda | ce7fedf6b31515b16d0c195b592b7eafb2e08ad7 | e5436c7a0562adcabcce582621995e18fd42c1b9 | refs/heads/master | 2020-09-05T22:01:05.821951 | 2019-12-01T22:47:44 | 2019-12-01T22:47:44 | 220,226,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | """agenda URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from rest_framework import routers

from core.views import AgendaViewSet

# DRF router generating the standard list/detail routes for the viewset.
router = routers.DefaultRouter()
router.register(r'agenda', AgendaViewSet, base_name='Agenda')

urlpatterns = [
    path('admin/', admin.site.urls),
    # Bug fix: the router was registered but never wired into urlpatterns,
    # so /agenda/ always returned 404. Mount its generated routes here.
    path('', include(router.urls)),
]
| [
"[email protected]"
]
| |
bdf9962736759e44947c2131c36b186465f236ab | fdacb98c24a40fb385d10634b8c5cf4d020a702e | /SMPyBandits/Experiment/Seznec_asymptotic/main.py | 647e5f2923dc05f7cf90950b4b929b6e83ca8a54 | [
"MIT"
]
"""
author: Julien SEZNEC
Produce the experiment about the (potential) asymptotic optimality of RAW-UCB++
For the thesis manuscript.
"""
from SMPyBandits.Arms import RestedRottingGaussian, UnboundedGaussian as Gaussian
from SMPyBandits.Policies import GreedyOracle, RAWUCB, EFF_RAWUCB, EFF_RAWUCB_pp, EFF_RAWUCB_pp2, MOSSAnytime, UCB
from SMPyBandits.Environment.MAB_rotting import repetedRuns
import numpy as np
import datetime
import os
import logging
import sys

# Timestamp used to tag every output file of this run.
date = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
PARALLEL = -1  # Set positive int to indicate the number of core, -1 to use all the cores, and False to not parallelize
REPETITIONS = 1 if len(sys.argv) < 3 else int(sys.argv[2])  # Set the number of repetitions
HORIZON = T = 10**6  # Horizon T
sigma = 1  # Gaussian noise std
K = 2

### SET Policies
# Candidate policies; selected below by index via the first CLI argument.
policies = [
    [MOSSAnytime, {'alpha': 3}],  # 0
    [EFF_RAWUCB, {'alpha': 1.4, 'm': 1.01}],  # 1
    [EFF_RAWUCB_pp, {'beta': 0, 'm': 1.01}],  # 2
    [EFF_RAWUCB_pp, {'beta': 1, 'm': 1.01}],  # 3
    [EFF_RAWUCB_pp, {'beta': 2, 'm': 1.01}],  # 4
    [EFF_RAWUCB_pp, {'beta': 3, 'm': 1.01}],  # 5
    [UCB, {}],  # 6
    [EFF_RAWUCB_pp, {'beta': 2.5, 'm': 1.01}],  # 7
    [EFF_RAWUCB_pp, {'beta': 3.5, 'm': 1.01}],  # 8
    [EFF_RAWUCB_pp2, {'alpha': 1.3, 'm': 1.01}],  # 9
    [EFF_RAWUCB_pp2, {'alpha': 1.4, 'm': 1.01}],  # 10
    [EFF_RAWUCB_pp2, {'alpha': 1.5, 'm': 1.01}],  # 11
    [EFF_RAWUCB_pp2, {'alpha': 1.7, 'm': 1.01}],  # 12
]
policy_ind = 9 if len(sys.argv) == 1 else int(sys.argv[1])
policy = policies[policy_ind]
policy_name = str(policy[0](nbArms=2, **policy[1]))
policy_name_nospace = policy_name.replace(' ', '_')
regret_path = os.path.join('./data', 'REGRET_' + policy_name_nospace + '_' + date)
time_path = os.path.join('./data', 'TIME_' + policy_name_nospace + '_' + date)
os.makedirs('./data/logging/', exist_ok=True)
logging.basicConfig(filename=os.path.join('./data/logging', date + '.log'), level=logging.INFO,
                    format='%(asctime)s %(message)s')
logging.info("Policy : %s$" % (policy_name))

### SET L/2
# Gap values for the second arm; one game is played per value.
mus = [0.01, 1]

logging.info("CONFIG : CPU %s" % os.cpu_count())
logging.info("CONFIG : REPETITIONS %s" % REPETITIONS)
logging.info("CONFIG : HORIZON %s" % HORIZON)
logging.info("CONFIG : SIGMA %s" % sigma)

noisy_reward_res = []
regret_res = []
time_res = []
overpull_res = []
for m, mu in enumerate(mus):
    logging.info("GAME %s : $\mu = %s$" % (m, mu))
    print(mu)
    ### SET K arms
    # Two-armed Gaussian bandit: baseline arm at 0, second arm at mu.
    arms = [
        [Gaussian, {"mu": 0, "sigma": sigma}],
        [Gaussian, {"mu": mu, "sigma": sigma}]
    ]
    rew, noisy_rew, time, pulls, cumul_pulls = repetedRuns(policy, arms, rep=REPETITIONS, T=HORIZON, parallel=PARALLEL)
    # Oracle baseline: a single deterministic greedy-oracle run.
    oracle_rew, noisy_oracle_rew, oracle_time, oracle_pull, oracle_cumul_pulls = repetedRuns(
        [GreedyOracle, {}], arms, rep=1, T=HORIZON, oracle=True
    )
    regret = oracle_rew - rew
    regret_res.append(regret)
    # time_res.append(time)
    logging.info("EVENT : SAVING ... ")
    # Save after every game so partial results survive interruption.
    np.save(regret_path, np.array(regret_res))
    # np.save(time_path, np.array(time_res))
logging.info("EVENT : END ... ")
"[email protected]"
]
| |
a38de2894ee0a326653069ed1136de8ad8003727 | 5eb1c28a42fc1f6b8e16d11d6c724839f7a1ae90 | /problem_2/summary.py | 411fe344296d791c84e51c052081c21551bc5e10 | []
import numpy as np
import pandas as pd

# Sum two prediction CSVs element-wise and binarize: a label is 1 when the
# two models together produced a score >= 1 (i.e. at least one voted 1).
temp = pd.read_csv("test_output.csv")
columns = temp.columns
# Bug fix: np.float was a deprecated alias for the builtin float and was
# removed in NumPy 1.24; use float directly.
temp = temp.astype(float)
result_t = temp.to_numpy()

temp = pd.read_csv("output_LeakyReLU/test_output_100.csv")
result_t += temp.to_numpy()

result = result_t.copy()
print(result)
result[result_t >= 1] = 1
result[result_t < 1] = 0

result = pd.DataFrame(result, columns=columns)
result.to_csv("test_output_sum.csv", index=None)
"[email protected]"
]
| |
2dfb77cb4398fef7572ac2dc4986ea5a0c371b2d | cecd0e679c149f08968254f3196f8d4a6c88564a | /nomadgram/users/models.py | 69f191c2b6a5eb4761bf70761d9a033d8fafb72b | [
"MIT"
]
| permissive | cowkong/nomadgram | c9e09441761d74133d0f0355f5fa497956740cb2 | e3b1c3593f0efbb4e766ff6c2b4db51b78371cfa | refs/heads/master | 2021-05-08T10:14:02.165301 | 2018-02-10T03:08:02 | 2018-02-10T03:08:02 | 119,833,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
    """User model"""

    # DB-stored choice values mapped to human-readable labels.
    GENDER_CHOICES = (
        ('male', "Male"),
        ('female', 'Female'),
        ('not-specified', 'Not specified')
    )

    # First Name and Last Name do not cover name patterns
    # around the globe.
    profile_image = models.ImageField(null=True)
    name = models.CharField(_('Name of User'), blank=True, max_length=255)
    website = models.URLField(null=True)
    bio = models.TextField(null=True)
    phone = models.CharField(max_length=140, null=True)
    gender = models.CharField(max_length=80, choices=GENDER_CHOICES, null=True)
    # NOTE(review): ManyToManyField("self") is symmetrical by default, which
    # does not model directed follower/following links — confirm intended.
    followers = models.ManyToManyField("self")
    following = models.ManyToManyField("self")

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        # Reverse the users:detail route for this username.
        return reverse('users:detail', kwargs={'username': self.username})
"[email protected]"
]
| |
1431c6bd5242f083472d3f4131c2ace9a83705a5 | 15655eddf9ab65f17b11ef62fccd24f38d89f58e | /db_create.py | 22f9142ff0ef3ee33af09e6be51021cf6cd984f0 | []
| no_license | nenni/flask_tutorial | 87d8e0c4571ebde116a03cab6d4183893fab8d71 | 71cceee24374db67fa8097dd522768d57dcb3aed | refs/heads/master | 2020-12-31T05:24:52.451558 | 2016-04-27T21:51:23 | 2016-04-27T21:51:23 | 56,140,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #!/usr/bin/env python3
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path

# Create all tables known to SQLAlchemy metadata.
db.create_all()

# Put the database under sqlalchemy-migrate version control; on first run
# the migrate repository itself must be created as well.
if os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                        api.version(SQLALCHEMY_MIGRATE_REPO))
else:
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
| [
"[email protected]"
]
| |
36119431fd312a3e8902674067afbe6396c63da9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/219/29883/submittedfiles/testes.py | 4f4b4f48b93942c5a8eddaabeee18acfd3de9bd6 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # -*- coding: utf-8 -*-
# COMECE AQUI ABAIXO
# Reads four integers and prints the largest one.
a = int(input('Digite a:'))
b = int(input('Digite b:'))
c = int(input('Digite c:'))
d = int(input('Digite d:'))

# Bug fix: the original did not parse (missing ':' after the elif and an
# unterminated print call). Completed with the apparent intent: print the
# maximum of the four values.
if a >= b and a >= c and a >= d:
    print(a)
elif b >= a and b >= c and b >= d:
    print(b)
elif c >= a and c >= b and c >= d:
    print(c)
else:
    print(d)
"[email protected]"
]
| |
61f2b2619c96c01b5dda1b6c9aeb86457872c271 | 0ee8350bedb5c8ac575ee0b634fece214a06646a | /poezio/asyncio.py | 2b02a91ffd66a8b6a9a97f66ca7342e5dcce6026 | [
"Zlib",
"CC-BY-2.0"
]
"""
A module that monkey patches the standard asyncio module to add an
idle_call() method to the main loop. This method is used to execute a
callback whenever the loop is not busy handling anything else. This means
that it is a callback with lower priority than IO, timer, or even
call_soon() ones. These callback are called only once each.
"""

import asyncio
import functools
import collections
from asyncio import events

import slixmpp


def monkey_patch_asyncio_slixmpp():
    """Patch the running loop's class and slixmpp's event dispatch."""

    def idle_call(self, callback):
        # Only plain callables are supported; a coroutine would need a task.
        if asyncio.iscoroutinefunction(callback):
            raise TypeError("coroutines cannot be used with idle_call()")
        handle = events.Handle(callback, [], self)
        self._idle.append(handle)

    def my_run_once(self):
        # If idle work is pending, queue a no-op ready handle so the real
        # _run_once does not block waiting for IO; afterwards run exactly
        # one idle callback (each is called only once).
        if self._idle:
            self._ready.append(events.Handle(lambda: None, (), self))
        real_run_once(self)
        if self._idle:
            handle = self._idle.popleft()
            handle._run()

    # Patch the concrete loop class (shared deque of pending idle handles).
    cls = asyncio.get_event_loop().__class__
    cls._idle = collections.deque()
    cls.idle_call = idle_call
    # my_run_once closes over real_run_once, bound here before the swap.
    real_run_once = cls._run_once
    cls._run_once = my_run_once

    # Defer slixmpp stanza event dispatch until the loop is idle.
    spawn_event = slixmpp.xmlstream.XMLStream._spawn_event

    def patchy(self, xml):
        self.loop.idle_call(functools.partial(spawn_event, self, xml))

    slixmpp.xmlstream.XMLStream._spawn_event = patchy
| [
"[email protected]"
]
| |
40698577f3d6a9e12c305866465dd5726f9b04de | 184b86162199d0288023aef5f44e94bc70e09d24 | /brain_games/games/calc.py | 2479f6527e3ec539e948f0117e31d961a1b64a09 | []
| no_license | Everyday24/python-project-lvl1 | fb38ae876c39d05c62c5f3de9efae22d7801bdca | ccb2488a31ccda7fcc5818b9ee511fc265c829e8 | refs/heads/master | 2022-12-28T02:43:14.298689 | 2020-10-15T04:58:22 | 2020-10-15T04:58:22 | 281,569,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | """Игра 2. Калькулятор."""
import random
from brain_games.games import logic
def random_ex():
    """Draw two operands in [0, 100] and a random operator among -, +, *."""
    num_1 = random.randint(0, 100)
    num_2 = random.randint(0, 100)
    sign = random.choice('-+*')
    return num_1, num_2, sign


def generate_question_answer_pair_calc():
    """Build one arithmetic question and its expected answer.

    Returns:
        tuple[str, str]: the question, e.g. "3 + 4", and the correct
        answer as a string.
    """
    num_1, num_2, sign = random_ex()
    # One dispatch table instead of three near-identical if/elif branches.
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
    }
    question = f'{num_1} {sign} {num_2}'
    correct_answer = str(operations[sign](num_1, num_2))
    return question, correct_answer
def calc():
    """Run the calculator game: greet the player, announce the rule,
    then hand the question generator to the shared game loop."""
    logic.greeting()
    print('What is the result of the expression?\n')
    logic.run_game(generate_question_answer_pair_calc)
| [
"[email protected]"
]
| |
e9caaed5feffaa2f6965b63e3b19f250be2c80b8 | a311c9c1c3a96b6b96196daba0453fd3388a94ba | /cookbook/chapter8/01_changing_the_string_representation_of_instances.py | 42a21e282cdd2a9db809a7cf8ed46e1bdfa8942a | []
| no_license | foryou7242/smart_study | 2c2ab69f7444fe14ba677f5d646a6b156ad259a0 | d813e59c542f8de66e3479197f64dee6fb04e456 | refs/heads/master | 2020-03-27T17:25:16.225197 | 2018-11-19T06:32:07 | 2018-11-19T06:32:07 | 146,850,340 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 16:48:53 2018
@author: son
"""
class Car1:
    """Car variant whose __str__ uses printf-style formatting."""

    def __init__(self, model):
        self.model = model

    def __str__(self):
        return 'car model is %s' % (self.model)

    def __repr__(self):
        return 'car({0.model})'.format(self)
class Car:
    """Car variant whose __str__ uses str.format with attribute access."""

    def __init__(self, model):
        self.model = model

    def __str__(self):
        return 'car model is {0.model}'.format(self)

    def __repr__(self):
        return 'car({0.model})'.format(self)
# Demonstrate when Python uses __str__ (print) versus __repr__ (bare echo).
car = Car("santafe")
car1 = Car1("santafeDM")
print(car)
car
print(car1)

# File objects come with their own informative __repr__ as well.
f = open('test.dat')
print(f)
| [
"[email protected]"
]
| |
0f3d5dffd8873b5a26473fd63a63e53da38dddd6 | dd242374f1cffb3184b16d9ad24ef7f53c6255cc | /app/module/database/SQLiteDB.py | f6225f41400e4776e2db6cd7d2d1ee948e740787 | []
| no_license | zruibin/zruibin.cn_Comment | 03ac2e1e031f22c0b12b0f78e13e3193a64583b8 | a8af1804c3dfc23aad95971186a53c9659d88f62 | refs/heads/master | 2020-03-28T17:18:47.658487 | 2018-09-17T12:09:17 | 2018-09-17T12:09:17 | 148,776,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# SQLiteDB.py
#
# Created by ruibin.chow on 2017/07/31.
# Copyright (c) 2017年 ruibin.chow All rights reserved.
#
from Log import *
import sqlite3
def dict_factory(cursor, row):
    """sqlite3 row factory: return each row as a {column name: value} dict."""
    return dict((col[0], row[idx]) for idx, col in enumerate(cursor.description))


class SQLiteDBManager(object):
    """Singleton wrapper around a single sqlite3 connection.

    Bug fix: the original declared ``__slots__ = ("__conn", "__instance")``
    while also assigning the class attribute ``__instance = None``.  Private
    names in ``__slots__`` are mangled exactly like class-body identifiers,
    so creating the class raised ``ValueError: '_SQLiteDBManager__instance'
    in __slots__ conflicts with class variable``.  Only the per-instance
    connection belongs in ``__slots__``; the singleton reference is
    class-level state.
    """

    __slots__ = ("__conn",)

    # Class-level singleton reference (not an instance slot).
    __instance = None

    def __init__(self):
        """Constructor: the connection is opened later via setDBPath()."""
        pass

    @classmethod
    def shareInstanced(cls):
        """Return the process-wide singleton instance, creating it lazily."""
        if cls.__instance is None:
            cls.__instance = SQLiteDBManager()
        return cls.__instance

    def setDBPath(self, path):
        """Open (or replace) the connection to the SQLite database at *path*."""
        self.__conn = sqlite3.connect(path)
        # Make every query return dict rows instead of tuples.
        self.__conn.row_factory = dict_factory

    def executeDml(self, strsql):
        """Execute an INSERT/UPDATE/DELETE/DDL statement and commit."""
        self.__conn.execute(strsql)
        self.__conn.commit()

    def executeDmlWithArgs(self, strsql, args):
        """Execute a parameterized DML statement and commit."""
        self.__conn.execute(strsql, args)
        self.__conn.commit()

    def executeQuery(self, strsql):
        """Execute a SELECT and return all rows (as dicts)."""
        cursor = self.__conn.execute(strsql)
        self.__conn.commit()
        return cursor.fetchall()

    def executeQueryWithArgs(self, strsql, args):
        """Execute a parameterized SELECT and return all rows (as dicts)."""
        cursor = self.__conn.execute(strsql, args)
        self.__conn.commit()
        return cursor.fetchall()
if __name__ == '__main__':
pass
| [
"[email protected]"
]
| |
f3a32fed002a3701d7b450a5289be2d49cb13fe6 | d9c5d7705864c23a27ba809d915b8910922eda65 | /code/ex00003.py | 2557f14d29b928fbbf1e3bf74108001787b04404 | []
| no_license | xfree86/Python | 1519913e31ab715f44eb6393697bbfe6073d2127 | 79173597f6cbc04e0d98071c6cc298ef35342d8d | refs/heads/master | 2021-01-21T10:59:42.118667 | 2017-04-14T08:28:51 | 2017-04-14T08:28:51 | 83,509,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | # -*- coding: utf-8 -*-
# Usage of arithmetic and comparison operators (Python 2 print statements;
# bilingual English/Chinese output).
# NOTE: under Python 2, 30 / 6 and 1 / 4 are integer (floor) divisions.
print "I will now count my chickens:"
print u"我现在数我的鸡:"
print "Hens", 25 + 30 / 6
print "Roosters", 100 - 25 * 3 % 4
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
print "Is it true that 3 + 2 < 5 - 7?"
print u"3 + 2 < 5 - 7 这是真的吗?"
print 3 + 2 < 5 - 7
print "What is 3 + 2?", 3 + 2
print u"3 + 2 是什么?", 3 + 2
print "What is 5 - 7?", 5 - 7
print u"5 - 7 是什么?", 5 - 7
print "Oh, that's why it's False."
print u"噢,这就是为什么它是假的。"
print "How about some more."
print u"再来一些怎么样。"
print "Is it greater?", 5 > -2
print u"它更大吗?", 5 > -2
print "Is it greater or equal?", 5 >= -2
print u"它更大还是相等?", 5 >= -2
print "Is it less or equal?", 5 <= -2
print u"它更小还是相等?", 5 <= -2
"[email protected]"
]
| |
3c288b0c24feff89d6007028f01d23e3a6030c44 | fd3c30fe9afdf03fb2ff627fa19f6b7739075393 | /homeassistant/components/plugwise/__init__.py | f7986f915401c6c2dbc3df607fe1f60618f0245d | [
"Apache-2.0"
]
| permissive | uvjustin/home-assistant | 8b761a56c7d98c22395367ce83eb41cf45ccf11a | 223000a9fbd2a46539054ad93a9dd29333205415 | refs/heads/dev | 2023-08-31T04:01:57.353481 | 2020-09-23T15:57:06 | 2020-09-23T15:57:06 | 234,028,164 | 3 | 2 | Apache-2.0 | 2023-01-13T06:03:32 | 2020-01-15T07:56:05 | Python | UTF-8 | Python | false | false | 6,308 | py | """Plugwise platform for Home Assistant Core."""
import asyncio
from datetime import timedelta
import logging
from typing import Dict
from Plugwise_Smile.Smile import Smile
import async_timeout
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
COORDINATOR,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
UNDO_UPDATE_LISTENER,
)
# Empty schema: this integration is configured through config entries only.
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)

_LOGGER = logging.getLogger(__name__)

# Platforms forwarded at setup: only sensors when no single master
# thermostat is detected, otherwise the full platform list.
SENSOR_PLATFORMS = ["sensor"]
ALL_PLATFORMS = ["binary_sensor", "climate", "sensor", "switch"]


async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Plugwise platform."""
    # Nothing to do for YAML setup; all work happens in async_setup_entry.
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Plugwise Smiles from a config entry."""
    # Smile gateways use self-signed certificates, hence verify_ssl=False.
    websession = async_get_clientsession(hass, verify_ssl=False)

    api = Smile(
        host=entry.data[CONF_HOST],
        password=entry.data[CONF_PASSWORD],
        port=entry.data.get(CONF_PORT, DEFAULT_PORT),
        timeout=30,
        websession=websession,
    )

    try:
        connected = await api.connect()
        if not connected:
            _LOGGER.error("Unable to connect to Smile")
            raise ConfigEntryNotReady
    except Smile.InvalidAuthentication:
        # Bad credentials will not fix themselves: fail the entry outright.
        return False
        _LOGGER.error("Invalid Smile ID")
    except Smile.PlugwiseError as err:
        _LOGGER.error("Error while communicating to device")
        raise ConfigEntryNotReady from err
    except asyncio.TimeoutError as err:
        _LOGGER.error("Timeout while connecting to Smile")
        raise ConfigEntryNotReady from err

    # Poll interval: user option, falling back to a per-device-type default.
    update_interval = timedelta(
        seconds=entry.options.get(
            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL[api.smile_type]
        )
    )

    async def async_update_data():
        """Update data via API endpoint."""
        try:
            async with async_timeout.timeout(10):
                await api.full_update_device()
                return True
        except Smile.XMLDataMissingError as err:
            raise UpdateFailed("Smile update failed") from err

    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="Smile",
        update_method=async_update_data,
        update_interval=update_interval,
    )

    # First refresh must succeed before entities are created.
    await coordinator.async_refresh()

    if not coordinator.last_update_success:
        raise ConfigEntryNotReady

    api.get_all_devices()

    if entry.unique_id is None:
        # NOTE(review): firmware 1.8.0 apparently lacks a usable hostname as
        # unique id — confirm against the config flow.
        if api.smile_version[0] != "1.8.0":
            hass.config_entries.async_update_entry(entry, unique_id=api.smile_hostname)

    undo_listener = entry.add_update_listener(_update_listener)

    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
        "api": api,
        COORDINATOR: coordinator,
        UNDO_UPDATE_LISTENER: undo_listener,
    }

    # Register the gateway itself in the device registry.
    device_registry = await dr.async_get_registry(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers={(DOMAIN, api.gateway_id)},
        manufacturer="Plugwise",
        name=entry.title,
        model=f"Smile {api.smile_name}",
        sw_version=api.smile_version[0],
    )

    single_master_thermostat = api.single_master_thermostat()

    # Without a single master thermostat only sensor platforms are loaded.
    platforms = ALL_PLATFORMS
    if single_master_thermostat is None:
        platforms = SENSOR_PLATFORMS

    for component in platforms:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update."""
    coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
    # Apply the newly chosen polling interval to the running coordinator.
    coordinator.update_interval = timedelta(
        seconds=entry.options.get(CONF_SCAN_INTERVAL)
    )
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform concurrently; success requires all to unload.
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in ALL_PLATFORMS
            ]
        )
    )

    # Detach the options-update listener registered in async_setup_entry.
    hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()

    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)

    return unload_ok
class SmileGateway(CoordinatorEntity):
    """Represent Smile Gateway.

    Base entity for this integration: subclasses must implement
    _async_process_data() to translate coordinator data into entity state.
    """

    def __init__(self, api, coordinator, name, dev_id):
        """Initialise the gateway."""
        super().__init__(coordinator)

        self._api = api
        self._name = name
        self._dev_id = dev_id

        # Filled in by subclasses once the device data is known.
        self._unique_id = None
        self._model = None

        self._entity_name = self._name

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the entity, if any."""
        return self._name

    @property
    def device_info(self) -> Dict[str, any]:
        """Return the device information."""
        device_information = {
            "identifiers": {(DOMAIN, self._dev_id)},
            "name": self._entity_name,
            "manufacturer": "Plugwise",
        }

        if self._model is not None:
            # "zone_thermostat" -> "Zone Thermostat"
            device_information["model"] = self._model.replace("_", " ").title()

        # Everything except the gateway itself hangs off the gateway device.
        if self._dev_id != self._api.gateway_id:
            device_information["via_device"] = (DOMAIN, self._api.gateway_id)

        return device_information

    async def async_added_to_hass(self):
        """Subscribe to updates."""
        # Process once immediately, then on every coordinator refresh.
        self._async_process_data()
        self.async_on_remove(
            self.coordinator.async_add_listener(self._async_process_data)
        )

    @callback
    def _async_process_data(self):
        """Interpret and process API data."""
        raise NotImplementedError
| [
"[email protected]"
]
| |
ab512419d61466446b1eb72ac84831498db20e06 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_02_01/operations/express_route_circuits_operations.py | 3f54bb957557bb672da6c0a1511c65b6114abde7 | [
"MIT"
]
| permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 46,037 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitsOperations(object):
"""ExpressRouteCircuitsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-02-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Service-request pipeline client shared with the parent client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Constant API version sent with every request from this group.
        self.api_version = "2018-02-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
        """Issue the initial DELETE request; LRO polling is driven by the
        public ``delete`` wrapper."""
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced service-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            # Anything else is surfaced as a CloudError carrying the ARM
            # request id for support/diagnostics.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, circuit_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # DELETE has no body to deserialize; only the raw response
            # (when requested) is returned.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Default to ARM's standard long-running-operation polling unless the
        # caller opted out (False) or supplied a custom polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
    def get(
            self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of express route circuit.
        :type circuit_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ExpressRouteCircuit or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a circuit payload to deserialize.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuit', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
    def _create_or_update_initial(
            self, resource_group_name, circuit_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PUT request; LRO polling is driven by the
        public ``create_or_update`` wrapper."""
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Both 200 (updated) and 201 (created) return the circuit resource.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuit', response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteCircuit', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, circuit_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates an express route circuit.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :param parameters: Parameters supplied to the create or update express
         route circuit operation.
        :type parameters:
         ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns ExpressRouteCircuit or
         ClientRawResponse<ExpressRouteCircuit> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Deserialize the final PUT/GET body once polling completes.
            deserialized = self._deserialize('ExpressRouteCircuit', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Default to ARM's standard long-running-operation polling unless the
        # caller opted out (False) or supplied a custom polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
    def _update_tags_initial(
            self, resource_group_name, circuit_name, tags=None, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PATCH request; LRO polling is driven by the
        public ``update_tags`` wrapper."""
        # The loose ``tags`` dict is wrapped into the TagsObject model the
        # service expects.
        parameters = models.TagsObject(tags=tags)
        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'TagsObject')
        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuit', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def update_tags(
            self, resource_group_name, circuit_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
        """Updates an express route circuit tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :param tags: Resource tags.
        :type tags: dict[str, str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns ExpressRouteCircuit or
         ClientRawResponse<ExpressRouteCircuit> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            tags=tags,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Final body is the updated circuit resource.
            deserialized = self._deserialize('ExpressRouteCircuit', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Default to ARM's standard long-running-operation polling unless the
        # caller opted out (False) or supplied a custom polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}
    def _list_arp_table_initial(
            self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
        """POST to start fetching the ARP table; LRO polling is driven by
        the public ``list_arp_table`` wrapper."""
        # Construct URL
        url = self.list_arp_table.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # 202 means the operation is still running; only 200 carries a body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list_arp_table(
            self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
        """Gets the currently advertised ARP table associated with the express
        route circuit in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         ExpressRouteCircuitsArpTableListResult or
         ClientRawResponse<ExpressRouteCircuitsArpTableListResult> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsArpTableListResult]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsArpTableListResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._list_arp_table_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Default to ARM's standard long-running-operation polling unless the
        # caller opted out (False) or supplied a custom polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}
    def _list_routes_table_initial(
            self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
        """POST to start fetching the routes table; LRO polling is driven by
        the public ``list_routes_table`` wrapper."""
        # Construct URL
        url = self.list_routes_table.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # 202 means the operation is still running; only 200 carries a body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list_routes_table(
            self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
        """Gets the currently advertised routes table associated with the express
        route circuit in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         ExpressRouteCircuitsRoutesTableListResult or
         ClientRawResponse<ExpressRouteCircuitsRoutesTableListResult> if
         raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableListResult]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableListResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._list_routes_table_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Default to ARM's standard long-running-operation polling unless the
        # caller opted out (False) or supplied a custom polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}
    def _list_routes_table_summary_initial(
            self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
        """POST to start fetching the routes table summary; LRO polling is
        driven by the public ``list_routes_table_summary`` wrapper."""
        # Construct URL
        url = self.list_routes_table_summary.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # 202 means the operation is still running; only 200 carries a body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list_routes_table_summary(
            self, resource_group_name, circuit_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
        """Gets the currently advertised routes table summary associated with the
        express route circuit in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         ExpressRouteCircuitsRoutesTableSummaryListResult or
         ClientRawResponse<ExpressRouteCircuitsRoutesTableSummaryListResult> if
         raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._list_routes_table_summary_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Default to ARM's standard long-running-operation polling unless the
        # caller opted out (False) or supplied a custom polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}
    def get_stats(
            self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the stats from an express route circuit in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ExpressRouteCircuitStats or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitStats or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get_stats.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a stats payload to deserialize.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitStats', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'}
    def get_peering_stats(
            self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
        """Gets all stats from an express route circuit in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ExpressRouteCircuitStats or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitStats or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get_peering_stats.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Only a 200 carries a stats payload to deserialize.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitStats', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'}
def list(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets all the express route circuits in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ExpressRouteCircuit
    :rtype:
     ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPaged[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Auto-generated SDK operation. Pages are fetched lazily: the paged
    # object below calls this closure once per page.
    def internal_paging(next_link=None, raw=False):

        if not next_link:
            # First page: build the URL from the operation metadata.
            # Construct URL
            url = self.list.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            # Subsequent pages: the service returns a fully-formed URL.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Any non-200 status is surfaced as a CloudError carrying the
        # service request id for diagnostics.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        # NOTE(review): header_dict is passed through but never populated
        # here (generated-code idiom).
        header_dict = {}
        client_raw_response = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}
def list_all(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets all the express route circuits in a subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ExpressRouteCircuit
    :rtype:
     ~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuitPaged[~azure.mgmt.network.v2018_02_01.models.ExpressRouteCircuit]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Auto-generated SDK operation; identical to `list` above except the
    # URL is subscription-scoped (no resource group segment).
    def internal_paging(next_link=None, raw=False):

        if not next_link:
            # First page: build the URL from the operation metadata.
            # Construct URL
            url = self.list_all.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        else:
            # Subsequent pages use the service-provided next_link verbatim.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Per-request correlation id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Non-200 responses are raised as CloudError with the request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        # NOTE(review): header_dict is never populated (generated-code idiom).
        header_dict = {}
        client_raw_response = models.ExpressRouteCircuitPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}
| [
"[email protected]"
]
| |
07e5f0e71f423af3c4020570575fed9135c2cab2 | ece651284bb8e98abed1017fb99df84fc404b3fc | /GUI Based Projects (PyQt5)/Basics/CreatingWindowTitle.py | e8cfddf743f0a498e6089533035ba49da261ad00 | []
| no_license | pathakabhi24/Python-Projects | 9f9237c1cc431df1049c675ff429fd2c64a5f180 | 8115f9e91bb0011f16fa1bc1258779c63659aa36 | refs/heads/master | 2023-06-12T23:31:52.598868 | 2021-07-07T06:22:03 | 2021-07-07T06:22:03 | 293,288,542 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import sys
import PyQt5.QtWidgets as qw
def window():
    """Build and show a minimal PyQt5 main window with a title and a label."""
    # A QApplication must exist before any widget can be created.
    application = qw.QApplication([])

    main_window = qw.QMainWindow()
    # (x, y, width, height); (x, y) is the top-left corner on screen.
    main_window.setGeometry(200, 200, 200, 200)
    main_window.setWindowTitle("I am window title")

    # Simple text label placed inside the window.
    caption = qw.QLabel(main_window)
    caption.setText("This is a label")
    caption.move(50, 60)  # offset (x, y) from the window's top-left corner

    main_window.show()
    # Propagate Qt's exit code through sys.exit() for a clean shutdown.
    sys.exit(application.exec_())
if __name__ == "__main__":
    # Launch the GUI only when run as a script; importing this module should
    # not open a window as a side effect.
    window()
| [
"[email protected]"
]
| |
734ae0fd9511a0bbd0af6d038331aabd1d758efa | fc364e871cdd0132dbc91bea4ce95a26d2ac02ad | /plugins/operators/S3_Redshift.py | 73de4d891027a865e961111dae2ced9ac8c286aa | []
| no_license | NVME-git/NYC-TLC-Data-Engineering | 6912bcfbfa3df1567d66881ac75aca32a53816c4 | 022d1e25597540491d45974ca58f5a8f2109318e | refs/heads/master | 2022-03-06T01:56:36.393665 | 2019-11-09T08:15:36 | 2019-11-09T08:15:36 | 216,543,471 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,761 | py | from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class S3ToRedshiftOperator(BaseOperator):
    """
    S3ToRedshiftOperator controls the movement of csv trip data and json map data from AWS S3 to Redshift both manually
    and automatically using the environment execution date.
    """
    ui_color = '#5496eb'
    # Airflow templates s3_key, so Jinja macros ({{ ds }}, ...) may be used.
    template_fields = ("s3_key",)

    # Plain CSV COPY for a fixed source object.
    copy_sql = """
        COPY public.{sink}
        FROM '{source}'
        REGION 'us-east-1'
        ACCESS_KEY_ID '{id}'
        SECRET_ACCESS_KEY '{secret}'
        IGNOREHEADER 1
        delimiter ','
        IGNOREBLANKLINES
        REMOVEQUOTES
        EMPTYASNULL
        ;
    """

    # CSV COPY for monthly partitioned objects named '<source>_YYYY-MM.csv'.
    copy_sql_time = """
        COPY public.{sink}
        FROM '{source}_{year}-{month}.csv'
        REGION 'us-east-1'
        ACCESS_KEY_ID '{id}'
        SECRET_ACCESS_KEY '{secret}'
        IGNOREHEADER 1
        delimiter ','
        IGNOREBLANKLINES
        REMOVEQUOTES
        EMPTYASNULL
        ;
    """

    # JSON COPY driven by a JSONPaths manifest.
    copy_sql_JSON = """
        COPY public.{sink}
        FROM '{source}'
        REGION 'us-east-1'
        ACCESS_KEY_ID '{id}'
        SECRET_ACCESS_KEY '{secret}'
        FORMAT AS JSON '{jsonpath}'
    """

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 aws_credentials_id="",
                 table="",
                 s3_bucket="",
                 s3_key="",
                 jsonpath='',
                 *args, **kwargs):
        """
        :param redshift_conn_id: Airflow connection id of the Redshift cluster.
        :param aws_credentials_id: Airflow connection id holding AWS keys.
        :param table: destination table name in the `public` schema.
        :param s3_bucket: source S3 bucket name.
        :param s3_key: source S3 key (templated).
        :param jsonpath: S3 key of a JSONPaths file; empty string means the
            source is CSV, non-empty selects the JSON COPY variant.
        """
        super(S3ToRedshiftOperator, self).__init__(*args, **kwargs)
        self.table = table
        self.redshift_conn_id = redshift_conn_id
        self.aws_credentials_id = aws_credentials_id
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.jsonPath = jsonpath
        # NOTE(review): assumes the caller passed execution_date through
        # kwargs; None otherwise, which selects the non-dated COPY below.
        self.execution_date = kwargs.get('execution_date')

    def execute(self, context):
        """Truncate the destination table, then COPY the S3 object into it."""
        # Connect to AWS and Redshift
        self.log.info('StageToRedshiftOperator implementation')
        aws_hook = AwsHook(self.aws_credentials_id)
        credentials = aws_hook.get_credentials()
        redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)

        # Remove existing data in staging table
        self.log.info("Clearing data from destination Redshift table")
        redshift.run("TRUNCATE TABLE {}".format(self.table))

        # Copy new data from S3 to Redshift
        self.log.info("Copying data from S3 to Redshift")
        rendered_key = self.s3_key.format(**context)
        s3_path = "s3://{}/{}".format(self.s3_bucket, rendered_key)

        # BUG FIX: the original used `self.jsonPath is ''` (identity, not
        # equality) — implementation-dependent and a SyntaxWarning on 3.8+.
        if self.jsonPath == '':
            if self.execution_date:
                # Monthly partitioned CSV keyed by the execution date.
                formatted_sql = S3ToRedshiftOperator.copy_sql_time.format(
                    sink=self.table,
                    source=s3_path,
                    year=self.execution_date.strftime("%Y"),
                    month=self.execution_date.strftime("%m"),
                    id=credentials.access_key,
                    secret=credentials.secret_key
                )
            else:
                formatted_sql = S3ToRedshiftOperator.copy_sql.format(
                    sink=self.table,
                    source=s3_path,
                    id=credentials.access_key,
                    secret=credentials.secret_key
                )
        else:
            formatted_sql = S3ToRedshiftOperator.copy_sql_JSON.format(
                sink=self.table,
                source=s3_path,
                id=credentials.access_key,
                secret=credentials.secret_key,
                jsonpath="s3://{}/{}".format(self.s3_bucket, self.jsonPath)
            )

        redshift.run(formatted_sql)
| [
"[email protected]"
]
| |
688c1f6ae2c65d4effab4cbba727ba63a81f0fad | 1280eb47426c8760c042376c174047a0176bb886 | /CIV_completed_targets/complete_scripts/RXJ2154.1-4414.py | 2705f474f286d180608b415c2dd79d5b272b3c94 | []
| no_license | ryanlindley/Research | 33cb43acaf68fb2068fa7166fdab5d2a9ed307b4 | 066a65f67778a41301551dea1607d89a61d6d64c | refs/heads/master | 2021-01-04T02:21:17.734053 | 2020-07-31T04:51:52 | 2020-07-31T04:51:52 | 240,336,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,101 | py | #!/usr/bin/env python
# coding: utf-8

# Jupyter-exported reduction script for the C IV doublet toward
# RXJ2154.1-4414. The `# In[n]:` markers are notebook cell boundaries.

# In[1]:

import auto_fitting as auto
import glob
import os

auto.plt.rcParams['figure.figsize'] = [11, 8]

# In[2]:

# Build the target list by stripping the data-directory prefix.
filenames = glob.glob('/Users/ryanlindley/Research/CIV/*')
CIV_targets = []
for f in filenames:
    CIV_targets.append(f.replace('/Users/ryanlindley/Research/CIV/', ''))
#print(CIV_targets)

# In[24]:

# NOTE(review): target selection is positional — index 2 of an unsorted glob.
name = CIV_targets[2]
print(name)
print(len(CIV_targets))

# In[25]:

# Work inside the chosen target's directory so output files land there.
directory = '/Users/ryanlindley/Research/CIV/' + name
os.chdir(directory)
os.getcwd()

# In[26]:

# Atomic data for the C IV doublet members: rest wavelength, oscillator
# strength f, and gamma, as consumed by auto_fitting.
wl1, f1, gamma1, elem, state = (1548.187, 0.19, 0.00324, 'C', 'IV')
wl2, f2, gamma2, elem, state = (1550.772, 0.0952, 0.00325, 'C', 'IV')
path = '/Users/ryanlindley/Research/CIV/' + name + '/CIV.bin3.linespec'
# Columns: wavelength, flux, flux error, continuum estimate — TODO confirm
# against auto_fitting's reader.
wl, fl, fe, ctn = auto.np.loadtxt(path).transpose()
# Instrument line-spread function used to convolve the model profiles.
lsf = auto.np.loadtxt('/Users/ryanlindley/Research/CIV.old_setup/CIV.lsf')

# In[30]:

# Automatic continuum fit over all pixels; the commented lines are the
# manual-masking variant used when the automatic fit is unsatisfactory.
new_ctn = auto.find_continuum(name, wl, fl, fe, ctn, [True]*len(wl))
#ctn_flag = auto.mask_wl(wl, [1543, 1544.2, 1545.5, 1546.2, 1546.4, 1551]) # add custom ctn if needed
#manual_ctn = auto.find_continuum(name, wl, fl, fe, ctn, ctn_flag)

# In[31]:
def check_larger_continuum(name, wl, ctn):
    """Overlay the fitted continuum on the target's full spectrum as a
    visual sanity check over the 1542-1556 A window."""
    spec = auto.np.loadtxt('/Users/ryanlindley/Research/CIV/' + name + '/full.spec')
    auto.plt.plot(spec[:,0], spec[:,1])
    auto.plt.plot(wl, ctn)
    auto.plt.xlim(1542, 1556)
    auto.plt.ylim(0e-15, 2e-15)
    # BUG FIX: the original line was `auto.plt.show` (attribute access, a
    # no-op); the figure was never displayed when run as a script.
    auto.plt.show()
#check_larger_continuum(name, wl, manual_ctn)

# In[32]:

# Normalize the flux and its error by the fitted continuum.
#new_ctn = manual_ctn # only include when manual continuum us found
fn = fl / new_ctn
fne = fe / new_ctn

# In[33]:

# Optional chi diagnostics of the continuum fit (left disabled).
#chi, chi_mean = auto.find_chi(wl, fl, fe, new_ctn)
#auto.plot_chi_histogram(chi, chi_mean)
#flag_chi = auto.mask_wl(wl, [1546, 1546.7, 1548, 1551.2]) # mask out regions for better calculation of chi histogram
#new_chi, new_chi_mean = auto.find_chi(wl[flag_chi], fl[flag_chi], fe[flag_chi], new_ctn[flag_chi])
#auto.plot_chi_histogram(new_chi, new_chi_mean)

# In[37]:

# Initial guesses: one triple per absorption component (presumably
# logN, b, velocity — velocity derived from a guessed wavelength via
# auto.Wave2V; confirm against auto_fitting).
x0 = [13.5, 20.3, auto.Wave2V(1548.5, wl1)] + [13, 20.3, auto.Wave2V(1548, wl1)] + [13.5, 20.3, auto.Wave2V(1548.8, wl1)] + [13, 20.3, auto.Wave2V(1550.5, wl2)] #+ [13, 20.3, auto.Wave2V(1548.2, wl1)] + [13.2, 20.3, auto.Wave2V(1547.2, wl1)] #+ [13, 20.3, auto.Wave2V(1549.6, wl2)] #+ [13, 20.3, auto.Wave2V(1551.67, wl2)]
feat = [0, 0, 1, 2] #which features used to model 0 - both, 1 - strong, 2 - weak
# Least-squares fit of the model over the 1547-1553 A window.
p0, cov, a, b, c = auto.leastsq(auto.fitting, x0, full_output=1, args=(feat, wl, wl1, wl2, f1, f2, gamma1, gamma2, lsf, fn, fne, [1547, 1553]))
auto.plot_model(p0, feat, wl, fn, wl1, wl2, f1, f2, gamma1, gamma2, lsf, [1547, 1553]) #normally [1547, 1553]
print(p0)

# In[41]:

# Manual override of one fitted parameter before building the profiles.
# NOTE(review): overwrites the least-squares result for slot 10 — confirm.
p0[10] = 10
N1, N2 = auto.make_features(p0, feat, wl, wl1, wl2, lsf, gamma1, gamma2)
# Per-member apparent column-density errors from the normalized spectrum.
Ne1 = auto.nfle2Nev(fn, fne, f1, wl1)
Ne2 = auto.nfle2Nev(fn, fne, f2, wl2)
auto.plot_features(wl, wl1, wl2, N1, N2, Ne1, Ne2, [-300, 400])
print(p0)

# In[42]:

# Fold the fit residuals back into the model column-density profiles.
N1r, N2r = auto.add_residual(p0, N1, N2, feat, wl, wl1, wl2, f1, f2, gamma1, gamma2, lsf, fn)
auto.plot_features(wl, wl1, wl2, N1r, N2r, Ne1, Ne2, [-400, 400])

# In[45]:

# Mask contaminated velocity ranges, then combine both doublet members into
# one binned column-density profile.
strong_flag = auto.mask_v(wl, wl1,[80, 200]) # add masking for any regions not to be used in combined data
weak_flag = auto.mask_v(wl, wl2, [-100, 0])
v1, v2, Nv1, Nv2, Nve1, Nve2 = auto.remove_regions(strong_flag, weak_flag, wl, wl1, wl2, N1r, N2r, Ne1, Ne2)
v_bins, Nv_bins, Nve_bins = auto.make_bins(v1, v2, Nv1, Nv2, Nve1, Nve2)
v_final, Nv_final, Nve_final = auto.final_data(v_bins, Nv_bins, Nve_bins)
auto.plot_final_data(wl, wl1, wl2, N1r, N2r, v_final, Nv_final, Nve_final, name, [-400, 400])

# In[46]:

# Persist the combined profile and region definitions into the target's
# directory (cwd was changed earlier in the script).
data = auto.np.c_[v_final, Nv_final, Nve_final]
CIV_regions = [[-75, 125]]
cont_regions = [[-75, 0], [-80, 200]]
auto.np.savetxt('CIV.data', data)
auto.np.savetxt('CIV.regions', CIV_regions, fmt='%1.3i')
# NOTE(review): 'contamintion' is a typo kept as-is — downstream tooling may
# read this exact filename.
auto.np.savetxt('contamintion.regions', cont_regions, fmt='%1.3i')
auto.save_final_data_plot(wl, wl1, wl2, N1r, N2r, v_final, Nv_final, Nve_final, name, [-400, 400])
auto.save_final_continuum_data(wl, fl, new_ctn, name)

# In[ ]:

#remember to save file

# In[ ]:

# In[ ]:
| [
"[email protected]"
]
| |
52fd37577fe3d760e5a1895cd15f5e333db1388f | 47d1c840f9cb8576614e41470348aac388e9bb51 | /config.py | 26a522bd1ce1c11cf408a1049863060a378634df | [
"Apache-2.0"
]
| permissive | Nitinguptadu/Yugen-ai-Heroku | 389d95dc0f97b8b44e6789d5388c1365e545b954 | 7d135fc2625c198c36a11ff4f5cfa565cbc8086e | refs/heads/main | 2022-12-29T03:21:03.127328 | 2020-10-17T19:07:11 | 2020-10-17T19:07:11 | 304,949,836 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from os import environ as env
# Runtime configuration sourced from environment variables.
import multiprocessing

PORT = int(env.get("PORT", 8000))
DEBUG_MODE = int(env.get("DEBUG_MODE", 1))

# Gunicorn config: bind all interfaces on PORT, with the usual
# (2 * cores + 1) worker heuristic and two threads per core.
bind = ":%d" % PORT
workers = 1 + multiprocessing.cpu_count() * 2
threads = multiprocessing.cpu_count() * 2
| [
"[email protected]"
]
| |
14a1849af30c0bf50e531e04171634750a80e48e | 37147a1530ee74b347a907777b9e0de29dea0b2c | /offline.py | c145bd9b901b901c6775f0ae7063e0645ed41081 | []
| no_license | brenolf/gmdl-scripts | 5f391cadfb5a263d7a4e9a7fccd563c230ab858d | 70c01f16b4dac46ac357844cc6d4816cf8c0e7c7 | refs/heads/master | 2021-03-27T15:03:03.817955 | 2017-11-19T18:12:58 | 2017-11-19T18:12:58 | 95,069,379 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,873 | py | from os import listdir, environ
from os.path import join, realpath
import argparse
from multiprocessing.dummy import Pool
from itertools import repeat, product
import numpy as np
from aux.GetFold import get_fold
from aux.Metrics import macro, micro
import pandas as pd
import sys
# Command-line interface: dataset location, fold/grid-search configuration,
# and include/exclude filters for both datasets and classifier modules.
parser = argparse.ArgumentParser(description='Runs k-fold CV and grid search')

parser.add_argument('--path', dest='path', type=str, required=True,
    help='the path to the datasets folds')

parser.add_argument('--exclude-sets', dest='exclude_sets', nargs='+', default=[], help='the list of sets that should not be ran')

parser.add_argument('--only-sets', dest='only_sets', nargs='+', default=[], help='the list of sets that should be ran (prioritized)')

parser.add_argument('--sets', dest='sets', action='store_true',
    help='only prints the sets')

parser.add_argument('--k', dest='k', type=int, default=5,
    help='the number of folds')

parser.add_argument('--measure', dest='measure', type=str, default='macro-f',
    help='the index of the returned score measure to be used to decide the winner model')

parser.add_argument('--exclude', dest='exclude', nargs='+', default=[],
    help='the list of methods that should not be ran')

parser.add_argument('--only', dest='only', nargs='+', default=[],
    help='the list of methods that should be ran only (prioritized)')

parser.add_argument('--pool', dest='pool', type=int, default=5,
    help='the number of threads to have')

args = parser.parse_args()

# Dataset selection: everything under --path minus --exclude-sets;
# --only-sets, when given, takes precedence over both.
sets = set(listdir(args.path)) - set(args.exclude_sets)
sets = args.only_sets if len(args.only_sets) > 0 else sets
sets = list(sets)

# Classifier discovery: every module in ./offline_classifiers/ except
# bytecode and package files, filtered the same way as the datasets.
classifiers_files = filter(lambda x: \
    x.find('.pyc') == -1 and x.find('__init__') == -1, \
    listdir('./offline_classifiers/') \
)
classifiers_files = map(lambda x: x.split('.')[0], classifiers_files)
classifiers_files = set(classifiers_files) - set(args.exclude)
classifiers_files = args.only if len(args.only) > 0 else classifiers_files
# Fully-qualified module names for __import__ further below.
classifiers = map(lambda x: 'offline_classifiers.' + x, list(classifiers_files))
def set_importance(name):
    """Sort key for datasets: total number of cells (rows * columns) in the CSV."""
    frame = pd.read_csv(join(args.path, name))
    n_rows, n_cols = frame.shape
    return n_rows * n_cols
# Smaller datasets sort first so quick results come back early.
sets = sorted(sets, key=set_importance)

# --sets is a dry run that only lists the datasets in processing order.
if args.sets:
    print sets
    exit(0)

# Import every selected classifier module; each exposes a Classifier class.
modules = map(
    lambda x: __import__(x, fromlist=['Classifier']),
    classifiers
)
def classify(data):
    """Worker task: run one (model, classifier) combination and pair the
    model with whatever the classifier wrapper returns for it."""
    model, clf, training, validation, labels = data
    outcome = clf.run(model, training, validation, labels)
    return (model, outcome)
def grid_search(data):
    """Evaluate every candidate model on one (training, validation) split,
    in parallel, and return the list of (model, confusion matrix) pairs."""
    search_data, models = data
    training, validation, test, labels = search_data

    workers = Pool(args.pool)
    # One task per model; `instance` is the module-level classifier wrapper.
    tasks = product(models, [instance], [training], [validation], [labels])
    results = workers.map(classify, tasks)
    workers.close()
    workers.join()

    return results
def compute_outer_fold(data):
    """Run the inner grid search for one outer fold and score the winner.

    `data` is `(search_data, models)` where `search_data` holds this fold's
    inner (training, validation, test, labels) splits. The best model by
    `--measure` is retrained on training + validation of the last inner
    split and its test confusion matrix is returned.
    """
    search_data, models = data
    confusion_matrices = {}
    models_results = {}
    classes = None
    # Fan the inner splits out to worker threads.
    pool = Pool(args.pool)
    data = product(search_data, [models])
    results = pool.map(grid_search, data)
    pool.close()
    pool.join()
    # Flatten the per-split result lists into one list of
    # (model, confusion_matrix) pairs.
    results = reduce(lambda x, y: x + y, results)
    for result in results:
        model, confusion_matrix = result
        if classes is None:
            # Lock the column order to the first matrix so the sums align.
            classes = confusion_matrix.columns.tolist()
        confusion_matrix = confusion_matrix.reindex_axis(classes, axis=1)
        key = str(model)
        # Accumulate the matrices of the same model across inner splits.
        if key in confusion_matrices:
            confusion_matrices[key] += confusion_matrix
        else:
            confusion_matrices[key] = confusion_matrix
        models_results[key] = {'model': model}
    for model in confusion_matrices:
        # BUG FIX: the original indexed `confusion_matrices[key]` here —
        # `key` is the stale variable from the previous loop, so every
        # candidate was scored on the last model's matrix.
        p_macro, r_macro, f_macro = macro(confusion_matrices[model])
        p_micro, r_micro, f_micro = micro(confusion_matrices[model])
        models_results[model]['macro-f'] = f_macro
        models_results[model]['micro-f'] = f_micro
    # Pick the model with the best aggregated score on --measure.
    best = None
    for model in confusion_matrices:
        if best is None:
            best = model
        else:
            best_score = models_results[best][args.measure]
            current_score = models_results[model][args.measure]
            if current_score > best_score:
                best = model
    # Retrain the winner on training + validation of the last inner split
    # and evaluate once on its held-out test set.
    training, validation, test, labels = search_data[-1]
    X_training, y_training = training
    X_validation, y_validation = validation
    X_training = pd.concat([X_training, X_validation])
    y_training = pd.concat([y_training, y_validation])
    training = (X_training, y_training)
    return instance.run(models_results[best]['model'], training, test, labels)
# Header line so saved output records the binary path and exact CLI call.
print '# ' + environ['GMDL_PATH'] + ' ' + ' '.join(sys.argv)

for s in sets:
    print '{}:'.format(s)

    for module in modules:
        # Fresh fold generator and classifier wrapper per (set, method) pair.
        current_set = get_fold(args.path, s, args.k)
        instance = module.Classifier()

        print ' {}:'.format(instance)

        models = instance.models()

        # Materialize the k*k inner splits, then regroup them into k outer
        # folds of k inner splits each.
        search_data = []

        for _ in xrange(args.k ** 2):
            search_data.append(next(current_set))

        search_data = [ \
            search_data[(i * args.k) : (i * args.k + args.k)] \
            for i in xrange(args.k) \
        ]

        # One worker per outer fold; each returns the winning model's test
        # confusion matrix for that fold.
        pool = Pool(args.pool)
        data = product(search_data, [models])
        results = pool.map(compute_outer_fold, data)
        pool.close()
        pool.join()

        # Print each fold's matrix and accumulate a grand-total matrix.
        final_cm = None
        cm_counter = 1

        for cm in results:
            print ' fold-{}: |'.format(str(cm_counter))
            print ' ' + cm.to_string().replace('\n', '\n ')
            cm_counter += 1

            if final_cm is None:
                final_cm = cm
            else:
                # Align columns to the first fold's order before summing.
                final_cm += cm.reindex_axis(final_cm.columns.tolist(), axis=1)

        # Aggregate macro/micro precision, recall and F1 over all folds.
        p_macro, r_macro, f_macro = macro(final_cm)
        p_micro, r_micro, f_micro = micro(final_cm)

        print ' macro:'
        print ' - precision: {}'.format(p_macro)
        print ' - recall: {}'.format(r_macro)
        print ' - f1: {}'.format(f_macro)
        print ' micro:'
        print ' - precision: {}'.format(p_micro)
        print ' - recall: {}'.format(r_micro)
        print ' - f1: {}'.format(f_micro)
| [
"[email protected]"
]
| |
eee2ebee536d3981e904ba7a1acd3f627e127252 | d20a55eb8eeb9ae69f00ee221ea2c11a2b3fd21f | /blog/forms.py | 7102121e4b984246a64cd2fce49d32fbc40c95af | []
| no_license | ahmedt1997/mysite | fa31af20f48e2cd34d47b990890118e5e829eadc | 9df34c4a1316ca0cf2ceb00715f21f48c33ee324 | refs/heads/master | 2023-08-07T00:18:05.866405 | 2021-10-04T03:49:28 | 2021-10-04T03:49:28 | 413,265,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | from django import forms
from blog.models import Comment
class EmailPostForm(forms.Form):
    # Form for e-mailing a post to someone: sender name/address, recipient
    # address, and an optional personal note.
    name = forms.CharField(max_length=25)
    email = forms.EmailField()
    to = forms.EmailField()
    # Optional free-text message rendered as a <textarea>.
    comments = forms.CharField(required=False,
                               widget=forms.Textarea)
class CommentForm(forms.ModelForm):
    # ModelForm bound to Comment, exposing only the user-supplied fields;
    # the remaining Comment fields are presumably set by the view — verify.
    class Meta:
        model = Comment
        fields = ('name', 'email', 'body')
| [
"[email protected]"
]
| |
7172aff4a743ab0828967ea16773a8f18ff85c98 | 390fdd1e185fe08a49ce2b9ce6da5475c8cf872d | /ss_admin/lib/verification_code.py | 905008bf5e02d9dd458b6304a3c811a00a48e2df | [
"Apache-2.0"
]
| permissive | 19years/shadowsocks-admin | 427bc7a6bede7d1c34c88d60d425e45099dae4d4 | c565518e13ebdeb7821cba62685ca80a380f3815 | refs/heads/master | 2021-06-17T04:39:37.756853 | 2017-06-06T02:25:45 | 2017-06-06T02:25:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,569 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# author arrti
# ref: https://segmentfault.com/a/1190000002978886
import random, os
from PIL import Image, ImageDraw, ImageFont
# Directory containing this module; used below to locate the bundled font.
current_path = os.path.split(os.path.realpath(__file__))[0]
class VerificationCode(object):
    """Generates a CAPTCHA image of random GB2312 (simplified Chinese)
    characters drawn with slight rotation and random noise lines.

    NOTE(review): relies on Python 2 string semantics
    (`str.decode('hex')`); the default font SIMSUN.TTC must sit next to
    this module.
    """

    def __init__(self, font_color=(0, 0, 0),
                 font_size = 20,
                 font_path = current_path + '/SIMSUN.TTC',
                 size = (100, 40),
                 bg_color = (255, 255, 255, 255)):
        # Rendering parameters plus the RGBA canvas the code is drawn onto.
        self.font_color = font_color
        self.font_size = font_size
        self.font_path = font_path
        self.font = ImageFont.truetype(self.font_path, self.font_size)
        self.size = size
        self.bg_color = bg_color
        self.image = Image.new('RGBA', self.size, self.bg_color)

    def __random_gb2312(self):
        # Pick a random two-byte GB2312 code point, skipping the unassigned
        # tail 0xD7FA-0xD7FE of the common-character range.
        empty = range(0xD7FA, 0xD7FF)
        val = 0xB0A1
        while 1:
            head = random.randint(0xB0, 0xD7) # common-character block
            body = random.randint(0xA1, 0xFE)
            val = (head << 8) | body
            if val not in empty:
                break
        str = "%x" % val  # NOTE: shadows the builtin `str` (kept as-is)
        # Python 2 only: hex string -> raw bytes -> unicode via gb2312.
        return str.decode('hex').decode('gb2312', 'ignore')

    def __rotate(self):
        # expand=0 (default) crops whatever rotates outside the canvas.
        img1 = self.image.rotate(random.randint(-5, 5), expand=0)
        # Composite over white so the cropped corners stay opaque.
        img = Image.new('RGBA', img1.size, (255,) * 4)
        self.image = Image.composite(img1, img, img1)

    def __random_rgb(self):
        # Random noise-line color.
        return (random.randint(0, 255),
                random.randint(0, 255),
                random.randint(0, 255))

    def __random_point(self):
        # Random coordinate anywhere on the canvas.
        (width, height) = self.size
        return (random.randint(0, width), random.randint(0, height))

    def __random_line(self, num):
        # Draw `num` random noise lines over the rendered text.
        draw = ImageDraw.Draw(self.image)
        for i in range(0, num):
            draw.line([self.__random_point(), self.__random_point()], self.__random_rgb())
        del draw

    def __draw_text(self, pos, txt, fill):
        # Render one character at `pos` with the configured font.
        draw = ImageDraw.Draw(self.image)
        draw.text(pos, txt, font=self.font, fill=fill)
        del draw

    def generate(self, num = 4):
        """Render `num` random characters and return (text, PIL image)."""
        gap = 5
        start = 0
        random_str = ''
        for i in range(0, num):
            char = self.__random_gb2312()
            random_str += char
            # Advance by font width plus a random jitter per character.
            x = start + self.font_size * i + random.randint(0, gap) + gap * i
            self.__draw_text((x, random.randint(5, 10)), char, (0, 0, 0))
        self.__rotate()
        self.__random_line(8)
        return random_str, self.image
"[email protected]"
]
| |
7ec7550591ee36f5a1a0f233433ed7580c4809bf | 805c42f8016a8af18f4780a90c4755e6a9e45eed | /coworkings/models.py | 4959f9af5496b787b2b7a7ee1084f0c3e5d6f45c | []
| no_license | Stuffing-code/coworking | b799e457d52aee19073c21f38eee2dac62e0b145 | 9db3f666fe9a354012b8fdb622b1b267da6e217c | refs/heads/main | 2023-03-27T22:04:38.857980 | 2021-03-19T16:01:45 | 2021-03-19T16:01:45 | 347,388,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | from django.db import models
from datetime import *
# Create your models here.
class NumberOffice(models.Model):
    # A bookable office, identified by its (string) number.
    number_office = models.CharField(max_length=40)

    def __str__(self):
        return self.number_office

    def __iter__(self):
        # NOTE(review): this iterates the characters of the office-number
        # string — confirm that is intentional.
        return iter(self.number_office)
class Reservation(models.Model):
    # One booking of an office for a time interval. unique_for_date ties the
    # office to the date part of datetime_from (Django uniqueness check).
    number_office = models.ForeignKey(
        NumberOffice,
        on_delete=models.CASCADE,
        unique_for_date="datetime_from",
    )
    datetime_from = models.DateTimeField()
    datetime_to = models.DateTimeField()

    def __str__(self):
        date_create = f'From {self.datetime_from.isoformat()} to {self.datetime_to.isoformat()}.'
        return date_create

    def __iter__(self):
        # NOTE(review): iterating a ForeignKey field looks like a leftover
        # copied from NumberOffice.__iter__ — verify before relying on it.
        return iter(self.number_office) | [
"[email protected]"
]
| |
29de0570a9ad8bb2ad28f83511776fa32289b625 | 46753973350f11ccbdfb68acada19035374df626 | /MasterPythonUdemy/18-Tkinter_DesktopApp/18.8-EjercicioCalculadora.py | d7703dfe58ac053914f3529453db7827e6118bc1 | []
| no_license | atehortua1907/Python | c35c67a2052c177fbf64debc344ecef2766e75c4 | a0dc53e233276991b9066fdd444d45e5fe38cb8f | refs/heads/master | 2021-06-09T15:46:42.463043 | 2021-05-25T19:37:11 | 2021-05-25T19:37:11 | 186,865,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | """
CALCULADORA:
-Dos campos de texto
-4 botones para las operaciones
-Mostrar el resultado en una alerta
"""
from tkinter import *
from tkinter import messagebox as MessageBox
def operaciones(operacion):
    """Read both entry fields, apply the requested arithmetic operation and
    show the result in an information dialog.

    :param operacion: one of "suma", "resta", "multiplicacion", "division".
    """
    # Entry widgets hold text; raises ValueError on non-integer input.
    numero1 = int(num1.get())
    numero2 = int(num2.get())
    result = 0
    if operacion == "suma":
        result = numero1 + numero2
    if operacion == "resta":
        result = numero1 - numero2
    if operacion == "multiplicacion":
        result = numero1 * numero2
    if operacion == "division":
        # NOTE(review): ZeroDivisionError if the second number is 0.
        result = numero1 / numero2
    message = f"""
    La {operacion} entre el numero {numero1}
    y el {numero2} es {result}
    """
    MessageBox.showinfo("Resultado", message)
# Main window.
ventana = Tk()
ventana.title("Formularios en Tkinter | Ejercicio Calculadora")

# Header label spanning the top of the grid.
encabezado = Label(ventana, text="Calculadora")
encabezado.config(
    fg="white",
    bg="darkgray",
    font=("Open Sans", 18),
    padx = 10,
    pady = 10
)
encabezado.grid(row=0, column=0, columnspan=12, sticky=N)

# Label for the first number field.
label = Label(ventana, text="Número 1:")
label.grid(row=1, column=0, sticky=W, padx=5, pady=5)

# Text entry (read by operaciones via num1.get()).
num1 = Entry(ventana)
num1.grid(row=1, column=1, sticky=W, padx=5, pady=5)
num1.config(justify="right", state="normal")

# Label for the second number field.
label = Label(ventana, text="Número 2:")
label.grid(row=2, column=0, sticky=W, padx=5, pady=5)

# Text entry (read by operaciones via num2.get()).
num2 = Entry(ventana)
num2.grid(row=2, column=1, sticky=W, padx=5, pady=5)
num2.config(justify="left", state="normal")

# One button per operation; each calls operaciones with its keyword.
Button(ventana, text="Sumar", command=lambda: operaciones("suma")).grid(row=3, column=0, sticky=W)
Button(ventana, text="Restar", command=lambda: operaciones("resta")).grid(row=3, column=1, sticky=W)
Button(ventana, text="Multiplicar", command=lambda: operaciones("multiplicacion")).grid(row=3, column=2, sticky=W)
Button(ventana, text="Dividir", command=lambda: operaciones("division")).grid(row=3, column=3, sticky=W)

# Blocking Tk event loop.
ventana.mainloop() | [
"[email protected]"
]
| |
cbc55fbc3fbf24e5cfc0b0aa7040b77823bd1510 | 845317cb77172ae8499e1debb7958a96bd47ad6e | /gui/multi_XY_plots.py | dd4c861802dcfb7bb3e4157c872cf2851fc09d5a | []
| no_license | nileshshah/optrode | c3e62b26f4a1f52436657f6af24ecda0a472a264 | be085216e501ccfde39df4d42cc4660dd433945e | refs/heads/master | 2021-01-02T22:39:38.773998 | 2015-07-25T03:28:36 | 2015-07-25T03:28:36 | 39,671,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,492 | py | #!/usr/bin/env python
import os, sys, csv
from itertools import izip_longest
import numpy as np
from PySide import QtGui, QtCore
import pyqtgraph as pg
from qtgui_util import QColorButton
class Plot_Checkbox(QtGui.QWidget):
    """Row widget pairing a color button with a checkbox for one data series.

    Checking the box adds the wrapped XY_Plot_Data's curve to its plot in
    the currently selected color; unchecking removes it.
    """
    def __init__(self,plot_data):
        super(Plot_Checkbox, self).__init__()
        self.plot_data = plot_data
        self.label = self.plot_data.label
        self.checkbox = QtGui.QCheckBox('%s' %str(self.label),self)
        self.color_button = QColorButton(self)
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(self.color_button)
        hbox.addWidget(self.checkbox)
        self.setLayout(hbox)
        # Keep the curve in sync with user interaction.
        self.checkbox.stateChanged.connect(self.set_plot)
        self.color_button.colorChanged.connect(self.change_color)

    def set_plot(self):
        # Show or hide the series depending on the checkbox state.
        if self.checkbox.isChecked():
            self.plot_data.add_plot(self.color_button.color())
        else:
            self.plot_data.remove_plot()

    def change_color(self):
        # Forward a color-button change to the series.
        self.plot_data.change_color(self.color_button.color())

    def setColor(self,color):
        # Programmatically set the color button (Qt-style method name).
        self.color_button.setColor(color)
class Plot_Checkboxes_Column(QtGui.QWidget):
    """Vertical stack of Plot_Checkbox rows with an optional column title.

    `initial_colors` is an iterator (Python 2 `.next()` is used) supplying
    each newly added row's starting color.
    """
    def __init__(self, initial_colors, col_label = None):
        super(Plot_Checkboxes_Column,self).__init__()
        self.initial_colors = initial_colors
        self.vbox = QtGui.QVBoxLayout()
        if col_label:
            # Large centered header above the checkbox rows.
            self.col_label = QtGui.QLabel(col_label)
            self.col_label.setAlignment(QtCore.Qt.AlignCenter)
            font = self.col_label.font()
            font.setPointSize(20)
            self.col_label.setFont(font)
            self.vbox.addWidget(self.col_label)

    def add_row(self, plot_data):
        # Assign the next color in the sequence and append the row widget.
        #plot_data.checkbox = Plot_Checkbox(plot_data)
        plot_data.checkbox.setColor(self.initial_colors.next())
        self.vbox.addWidget(plot_data.checkbox)

    def finalize_layout(self):
        # Call once after the last add_row; installs the assembled layout.
        self.setLayout(self.vbox)
class Plot_Checkboxes_2D(QtGui.QWidget):
    """Horizontal container laying several checkbox columns side by side."""
    def __init__(self,checkbox_columns):
        super(Plot_Checkboxes_2D,self).__init__()
        hbox = QtGui.QHBoxLayout()
        for checkbox_column in checkbox_columns:
            hbox.addWidget(checkbox_column)
        self.setLayout(hbox)
class XY_Plot_Data(object):
    """One named data series attached to a parent plot widget.

    get_xy() is a stub here (returns None); callers such as
    XY_Line_Plots.update_plots expect it to return (x, y) arrays, so
    concrete series classes must override it.
    """
    def __init__(self, parent_plot, label = '', name = ''):
        self.mplot = parent_plot    # parent plot widget
        self.plot = None            # curve item from mplot.plot() once shown
        self.label = label          # checkbox caption
        self.name = name            # column name used in CSV snapshots
        self.checkbox = Plot_Checkbox(self)
        self.is_active = True
        self.active=False
        self.x_window = 5.          # displayed history span

    def add_plot(self, color='red'):
        # Create the curve on the parent plot and apply the pen color.
        self.plot = self.mplot.plot()
        self.plot.setPen(color)

    def remove_plot(self):
        self.mplot.removeItem(self.plot)
        self.plot = None

    def change_color(self, color):
        # No-op when the series is currently hidden.
        if self.plot:
            self.plot.setPen(color)

    def change_x_window(self, x):
        #if self.is_active:
        self.x_window = x

    def get_xy(self):
        # Override in subclasses; must return (x, y) for update_plots().
        pass
class XY_Line_Plots(pg.PlotWidget):
    """Scrolling multi-series line plot with a shared x (time) window.

    Series are XY_Plot_Data objects; only those whose checkbox created a
    curve (``plot`` is set) are refreshed and included in snapshots.
    """
    def __init__(self,parent=None,xlabel=('Time','s')):
        #super(XY_Line_Plots, self).__init__(parent)
        pg.PlotWidget.__init__(self,parent=parent)
        self.xydatas = []
        self.x_window = 5.0 # seconds
        self._active = False
        self.setLabel('bottom', xlabel[0], units=xlabel[1])
        self.xlabel = xlabel
        self.disableAutoRange()
        # The x span is controlled by the window; users pan/zoom y only.
        self.setMouseEnabled(x=False,y=True)

    def update_plots(self):
        # Refresh every visible series with its latest data; no-op unless
        # the widget has been enabled via set_active(True).
        if self._active:
            #self.setXRange(-self.x_window,0)
            for xydata in self.xydatas:
                if xydata.plot:
                    x,y = xydata.get_xy()
                    #print min(x),max(x)
                    #xydata.plot.setData(x,y,clipToView=True)
                    xydata.plot.setData(x,y)

    def change_x_window(self, x):
        # Show the last `x` units of data and propagate the new window to
        # every registered series.
        #if self.active:
        self.x_window = x
        self.setXRange(-self.x_window,0)
        for xydata in self.xydatas:
            xydata.change_x_window(x)

    # Earlier toggle-style implementation, kept for reference:
    #def set_active(self):
    #    if self.active:
    #        self.active = False
    #    else:
    #        self.active = True
    #    for xydata in self.xydatas:
    #        xydata.active = self.active
    #

    def set_active(self,which):
        # Enable/disable live updates for this plot and all of its series.
        self._active = which
        for xydata in self.xydatas:
            xydata.active = self._active

    def add_data(self, xy_plot_data):
        # Register a series and return it for call-site chaining.
        self.xydatas.append(xy_plot_data)
        return self.xydatas[-1]

    def save_snapshot(self,filename):
        # Dump the currently visible series to CSV: an x column plus a y
        # column per series, right-padded by izip_longest.
        data_fields = []
        data = []
        for xydata in self.xydatas:
            if xydata.plot:
                data_fields.append('%s %s' %(self.xlabel[0],xydata.name))
                data_fields.append(xydata.name)
                x,y = xydata.get_xy()
                data.append(x)
                data.append(y)
        column_data = izip_longest(*data)
        # NOTE(review): 'wb' + csv.writer is the Python 2 idiom; the file is
        # not closed via try/finally, so it leaks if a write raises.
        snapshot_file = open(filename,'wb')
        csv_writer = csv.writer(snapshot_file)
        csv_writer.writerow(data_fields)
        csv_writer.writerows(column_data)
        snapshot_file.close()
class X_Window_Control(QtGui.QWidget):
    """Slider controlling the shared x-window (history span) of many plots.

    The slider value is in milliseconds (1 ms .. 10 s); the label and the
    registered plots receive the value converted to seconds.
    """
    def __init__(self, default_x_window=1.0):
        super(X_Window_Control, self).__init__()
        self.plots = []
        self.default_x_window = default_x_window
        self.x_window_slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        self.x_window_slider_label = QtGui.QLabel(self)
        self.x_window_slider.setFocusPolicy(QtCore.Qt.NoFocus)
        # Millisecond range: 1 ms to 10 s.
        self.x_window_slider.setRange(1,1000*10)
        self.x_window_slider.setSingleStep(1)
        self.x_window_slider.valueChanged.connect(self.change_x_window)
        self.x_window_slider.setValue(self.default_x_window*1000)
        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.x_window_slider)
        layout.addWidget(self.x_window_slider_label)
        self.setLayout(layout)
        # Initialize the label and any already-registered plots.
        self.change_x_window(default_x_window*1000)

    def add_plots(self,plots):
        # Register more plots and immediately sync them to the default span.
        self.plots.extend(plots)
        self.change_x_window(self.default_x_window*1000)

    def change_x_window(self, value):
        # Slider callback: `value` is in ms; display and forward seconds.
        x = value/1000.
        self.x_window_slider_label.setText('%.3f s' %x)
        for plot in self.plots:
            plot.change_x_window(x)
"[email protected]"
]
| |
16338fb6e5f473ae3a9162902ef91ac38c0ce3dc | 090487c2d57a368830bea1e0996b263153fceb2f | /crypto_crawler/sql_utils.py | 98a6c57f88d71e3ac741c60cecbd8fb690f6579b | [
"MIT"
]
| permissive | stargrep/crypto-crawler | a48f8e9389b81aa590560e4cb4601fcd69740a45 | a567ff6c1eb6e5e6b332fe781558c81558656d12 | refs/heads/master | 2021-06-22T22:05:33.725805 | 2021-05-16T12:34:15 | 2021-05-16T12:34:15 | 216,241,176 | 5 | 3 | MIT | 2020-06-20T05:04:46 | 2019-10-19T17:02:59 | Python | UTF-8 | Python | false | false | 1,222 | py | from sqlite3 import Connection, connect
from typing import Any
def get_connection(db_name: str) -> Connection:
    """Open and return a SQLite connection to *db_name*.

    Raises sqlite3.Error if the database cannot be opened. The previous
    version caught RuntimeError, which sqlite3.connect never raises, and
    implicitly returned None on failure -- crashing callers later with a
    confusing AttributeError on ``conn.cursor()``.
    """
    try:
        return connect(db_name)  # e.g. 'test.db' or ':memory:'
    except Error as e:
        # Log for the module's print-based diagnostics, then re-raise so
        # the caller sees the real failure instead of a None connection.
        print("I am unable to connect to the database" + str(e))
        raise
def read(statement: str, db_name: str) -> list:
    """Execute a SELECT *statement* against *db_name* and return all rows.

    Returns an empty list when the query fails (the error is printed),
    matching the declared return type. The original annotation was the
    literal ``[]`` -- an empty-list *instance*, not a type -- and the
    error path silently returned None.
    """
    conn = get_connection(db_name)
    try:
        cursor = conn.cursor()
        cursor.execute(statement)
        results = cursor.fetchall()
        cursor.close()
        return results
    except Exception as error:
        # Best-effort style shared by the module: log and keep going.
        print(error)
        return []
    finally:
        # Always release the connection, success or failure.
        conn.close()
def write(statement: str, db_name: str, *data: Any) -> None:
    """Execute one parameterised write *statement* against *db_name* and commit.

    *data* supplies the values bound to the ``?`` placeholders in the
    statement. Errors are printed rather than raised (best-effort, like
    the rest of this module); the connection is always closed in finally.
    """
    conn = get_connection(db_name)
    try:
        cursor = conn.cursor()
        cursor.execute(statement, data)
        conn.commit()
        cursor.close()
    except Exception as error:
        print(error)
    finally:
        conn.close()
def write_many(statement: str, db_name: str, data_list: list) -> None:
    """Execute *statement* once per parameter tuple in *data_list* and commit.

    data_list -- sequence of parameter tuples, one per execution of the
    statement (passed straight to ``cursor.executemany``). Errors are
    printed, not raised, mirroring write(); the connection always closes.
    """
    conn = get_connection(db_name)
    try:
        cursor = conn.cursor()
        cursor.executemany(statement, data_list)
        conn.commit()
        cursor.close()
    except Exception as error:
        print(error)
    finally:
        conn.close()
| [
"[email protected]"
]
| |
0a0504b0bc50786ad6319cc72a59f6bd7ed5d613 | 8f7c595f2b9d075a89417760b7fbf9abb1fecb72 | /try_enricher.py | e94463b5038fad8d0620db03b893bab816739527 | [
"MIT"
]
| permissive | MainakMaitra/trading-utils | 555ed240a20b26d4876f1490fc8a2d9273231fc5 | 3e73091b4d3432e74c385a9677b7f7ca4192c67f | refs/heads/main | 2023-07-04T09:19:40.122188 | 2021-08-08T09:01:37 | 2021-08-08T09:01:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | import json
import matplotlib.pyplot as plt
import pandas as pd
from common.analyst import fetch_data_from_cache
plt.ioff()
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
ticker = "AAPL"
# weekly_options.set_index('Symbol', inplace=True)
# cboe_options = pd.read_csv(f"data/cboesymboldirequityindex.csv")
# print(has_options('AAPL'))
# data, ticker_df = fetch_data_on_demand(ticker)
data = fetch_data_from_cache(ticker, is_etf=False)
key_values = list([(k, data[k]) for k in data.keys() if "month_" in k])
for kv in key_values:
print(kv)
# weekly_ticker_candles = convert_to_weekly(df)
#
# for wp in [4, 8]:
# df[["max_weekly_{}".format(wp), "max_weekly_{}_at".format(wp)]] = max_weekly(
# weekly_ticker_candles, week_until=wp
# )
# print(df)
| [
"[email protected]"
]
| |
be3e729594f07cb038176734a04d4256d967d617 | 3f147cb29b64ae5670813094301d80401fd28204 | /requirements/editor.py | d5dcc6f8fa9eced3f2dc94848846f56b7c506334 | [
"MIT"
]
| permissive | venkatesh-sivaraman/fireroad-server | 0542380fc9854ebf7e79a1571ec6b3add8010ecf | 3930bd2bc75b38458efd0912b33ae5b3578e8713 | refs/heads/master | 2023-07-05T20:49:33.805794 | 2023-05-07T00:33:04 | 2023-05-07T00:33:04 | 115,356,031 | 14 | 6 | MIT | 2023-07-02T23:36:03 | 2017-12-25T18:21:53 | Python | UTF-8 | Python | false | false | 16,073 | py | from django.shortcuts import render, redirect, reverse
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.contrib.admin.views.decorators import staff_member_required
from .models import *
from django.contrib.auth import login, authenticate, logout
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.utils.html import escape
from common.decorators import logged_in_or_basicauth
import json
import os
import requests
from courseupdater.views import *
import re
from progress import RequirementsProgress
from catalog.models import Course, Attribute, HASSAttribute, GIRAttribute, CommunicationAttribute
import logging
from reqlist import *
from views import REQUIREMENTS_EXT
from django.http import Http404
from .diff import build_diff
NEW_DOC_ID = "new_doc"
NEW_DOC_NAME = "new requirements list"
KNOWN_DEPARTMENTS = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "14", "15", "16", "17", "18", "20", "21", "21A", "21W", "CMS", "21G", "21H", "21L", "21M", "WGS", "22", "24", "CC", "CSB", "EC", "EM", "ES", "HST", "IDS", "MAS", "SCM", "STS", "SWE", "SP"]
def atoi(text):
    """Convert *text* to an int when it is purely digits; otherwise return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Sort key for human ("natural") ordering: digit runs compare numerically.

    Example: ``alist.sort(key=natural_keys)`` places "2" before "10".
    (See http://nedbatchelder.com/blog/200712/human_sorting.html.)
    """
    pieces = re.split(r'(\d+)', text)
    return [int(piece) if piece.isdigit() else piece for piece in pieces]
def build_sidebar_info(request):
    """Assemble the template context for the requirements-list sidebar.

    Buckets every RequirementsList into majors, minors, and other (lists
    matching neither substring filter). Each entry carries the list id with
    the file extension stripped plus short/long display titles; each bucket
    is sorted in human ("natural") order of the short title. Also reports
    whether the requesting user is staff.
    """
    discovered = set()  # list_ids already placed in the majors/minors buckets
    majors = []
    for reqlist in RequirementsList.objects.filter(list_id__contains="major"):
        discovered.add(reqlist.list_id)
        majors.append({'id': reqlist.list_id.replace(REQUIREMENTS_EXT, ""), 'short': reqlist.medium_title, 'long': reqlist.title_no_degree})
    majors.sort(key=lambda x: natural_keys(x['short']))
    minors = []
    for reqlist in RequirementsList.objects.filter(list_id__contains="minor"):
        discovered.add(reqlist.list_id)
        minors.append({'id': reqlist.list_id.replace(REQUIREMENTS_EXT, ""), 'short': reqlist.medium_title, 'long': reqlist.title_no_degree})
    minors.sort(key=lambda x: natural_keys(x['short']))
    other = []
    for reqlist in RequirementsList.objects.all():
        # Skip anything already shown as a major or minor.
        if reqlist.list_id in discovered: continue
        other.append({'id': reqlist.list_id.replace(REQUIREMENTS_EXT, ""), 'short': reqlist.medium_title, 'long': reqlist.title_no_degree})
    other.sort(key=lambda x: natural_keys(x['short']))
    return {'majors': majors, 'minors': minors, 'other': other, 'is_staff': is_staff(request)}
# Create your views here.
def index(request):
    """Render the requirements-editor landing page with the sidebar context."""
    params = build_sidebar_info(request)
    params['active_id'] = 'index'
    params['exists'] = False
    return render(request, "requirements/index.html", params)
def save_change_request(form, type, list_id="", committed=False):
    """Persist a requirements-list change request from a validated form.

    For edit requests, snapshots the target list's current contents (when
    the list exists) so a diff can be reconstructed at review time.
    ``type`` shadows the builtin but is kept for caller compatibility.
    """
    data = form.cleaned_data
    original_contents = None
    if type == REQUEST_TYPE_EDIT and len(list_id) > 0:
        try:
            req = RequirementsList.objects.get(list_id=list_id + REQUIREMENTS_EXT)
            original_contents = req.contents
        except ObjectDoesNotExist:
            # No such list yet -- there is simply no original to snapshot.
            # (Was a bare except, which also hid unrelated errors.)
            pass
    # objects.create() already saves the row; the extra save() the original
    # issued was a redundant second UPDATE.
    EditRequest.objects.create(email_address=data["email_address"],
                               reason=data["reason"],
                               list_id=list_id,
                               type=type,
                               original_contents=original_contents,
                               contents=data["contents"],
                               committed=committed)
def is_staff(request):
    """Return whether the request carries an authenticated staff user."""
    user = request.user
    if user is None:
        return False
    return user.is_staff and user.is_authenticated()
def populate_initial_text(request, params, edit_req):
    """Seed the editor textarea, optionally copying an earlier edit request.

    Defaults to *edit_req*'s contents; when the query string carries
    ``?like=<pk>``, the contents of that EditRequest are used instead.
    """
    params['initial_text'] = edit_req.contents
    if 'like' in request.GET:
        # Get the edit request for initial text population
        try:
            edit_req_id = int(request.GET['like'])
            edit_req = EditRequest.objects.get(pk=edit_req_id)
            params['initial_text'] = edit_req.contents
        except (ValueError, ObjectDoesNotExist):
            # Bad or stale ?like= value: keep the default text. (Was a
            # bare except, which also hid unrelated errors.)
            pass
def create(request):
    """Handle creation of a new requirements list.

    POST: validate the form and record a change request; staff submissions
    are committed immediately and routed to the review queue. GET (or an
    invalid POST): render the editor pre-filled with a skeleton document.
    """
    if request.method == 'POST':
        form = EditForm(request.POST)
        # (Removed a leftover debug print of form.errors.)
        if form.is_valid():
            # Staff edits bypass the approval step and commit directly.
            should_commit = is_staff(request)
            save_change_request(form, REQUEST_TYPE_CREATE, list_id=form.cleaned_data['new_list_id'], committed=should_commit)
            if should_commit:
                return redirect('review_all')
            return redirect('submit_success')
    params = build_sidebar_info(request)
    # Unsaved placeholder object used only to render the editor template.
    req = RequirementsList(list_id=NEW_DOC_ID,
                           short_title=NEW_DOC_NAME,
                           medium_title=NEW_DOC_NAME,
                           title=NEW_DOC_NAME)
    req.contents = "X#,#X Major#,#Title No Degree#,#Title With Degree\nDescription\n\nsection_1\nDescription of section 1\n...\n\n%% Variable declarations\n\nsection_1, \"Section 1\" := ..."
    form = EditForm()
    params['form'] = form
    params['req'] = req
    params['active_id'] = params['req'].list_id
    params['action'] = REQUEST_TYPE_CREATE
    params['exists'] = False
    populate_initial_text(request, params, req)
    return render(request, "requirements/edit.html", params)
def edit(request, list_id):
    """Edit an existing requirements list.

    POST: validate and record an edit request (committed immediately for
    staff). GET (or invalid POST): render the editor pre-filled with the
    list's current contents. ``objects.get`` raises DoesNotExist when
    *list_id* is unknown.
    """
    req = RequirementsList.objects.get(list_id=list_id + REQUIREMENTS_EXT)
    if request.method == 'POST':
        form = EditForm(request.POST)
        if form.is_valid():
            # Staff edits bypass the approval queue.
            should_commit = is_staff(request)
            save_change_request(form, REQUEST_TYPE_EDIT, list_id=list_id, committed=should_commit)
            if should_commit:
                return redirect('review_all')
            return redirect('submit_success')
    form = EditForm()
    params = build_sidebar_info(request)
    params['active_id'] = list_id
    params['req'] = req
    params['action'] = REQUEST_TYPE_EDIT
    params['form'] = form
    params['exists'] = True
    populate_initial_text(request, params, req)
    return render(request, "requirements/edit.html", params)
def success(request):
    """Render the confirmation page shown after a change request is submitted."""
    params = build_sidebar_info(request)
    return render(request, "requirements/success.html", params)
# Rendering
@csrf_exempt
def preview(request):
    """Takes as POST body the contents of a requirements list, and returns HTML
    to display the requirements list preview."""
    if request.method != 'POST':
        return HttpResponseBadRequest("Must use POST")
    req_contents = request.body.decode('utf-8')
    # Parsing needs a saved model instance; it is scratch data and is always
    # removed again in the finally block (the original duplicated delete()
    # on both paths and used a bare except).
    req_list = RequirementsList.objects.create()
    try:
        req_list.parse(req_contents, full=True)
        html = build_presentation_items(req_list)
    except Exception:
        # Malformed user input: report it instead of leaking a 500.
        return HttpResponse("<p>An error occurred while generating the preview. Please double-check your syntax!</p>")
    finally:
        req_list.delete()
    return HttpResponse(html)
def show_in_row(requirement):
    """Returns whether the given requirement should be displayed in a single row."""
    # Shallow requirements (no nesting) always fit on one row.
    if requirement.minimum_nest_depth() < 1:
        return True
    # A leaf with no child requirements renders as its own row.
    if not requirement.requirements.exists():
        return True
    # NOTE(review): r.requirement appears to mark a child that is a plain
    # course reference -- confirm against the model. If no child has one,
    # the children must be broken out rather than tiled in a row.
    if not any(r.requirement is not None for r in requirement.requirements.all()):
        return False
    # Titled children get their own rows so their titles are visible.
    if any(r.title is not None and len(r.title) > 0 for r in requirement.requirements.all()):
        return False
    return True
def make_row(requirement):
    """Returns HTML for displaying the given requirement in a row of course tiles."""
    html = u"<div class=\"course-list\"><div class=\"course-list-inner\">"
    # A group renders each child as a tile; a leaf renders itself as one tile.
    if requirement.requirements.exists():
        reqs = requirement.requirements.all()
    else:
        reqs = [requirement]
    for req in reqs:
        html += "<div class=\"course-tile-outer\">"
        desc = req.short_description()
        tile_classes = "card hoverable white-text course-tile"
        # Colour tiles by department prefix (text before the first dot);
        # unrecognised prefixes fall back to the neutral "none" styling.
        dept = desc[:desc.find(".")] if "." in desc else "none"
        if dept not in KNOWN_DEPARTMENTS:
            dept = "none"
        html += "<div class=\"{} course-{}\">".format(tile_classes, dept)
        try:
            # When the description matches a catalog subject, also show its title.
            course = Course.public_courses().get(subject_id=desc)
            html += "<span class=\"course-id\">" + desc + "</span>"
            html += "<br/>"
            html += "<span class=\"course-title\">" + course.title + "</span>"
        except ObjectDoesNotExist:
            html += "<span class=\"course-id\">" + desc + "</span>"
        html += "</div></div>"
    html += "</div></div>"
    return html
def presentation_items(requirement, level, always_show_title=False):
"""Generates JSON presentation items for the given requirement at the given
level."""
items = []
desc = requirement.threshold_description()
if requirement.title is not None and len(requirement.title) > 0:
tag = "h4"
if level == 0: tag = "h2"
elif level <= 2: tag = "h3"
title_text = requirement.title
if len(desc) > 0 and requirement.connection_type != CONNECTION_TYPE_ALL and not requirement.is_plain_string:
title_text += " (" + desc + ")"
items.append(u"<{} class=\"req-title\">{}</{}>".format(tag, title_text, tag))
elif len(desc) > 0 and (requirement.connection_type != CONNECTION_TYPE_ALL or always_show_title) and not requirement.is_plain_string:
items.append(u"<h4 class=\"req-title\">{}:</h4>".format(desc[0].upper() + desc[1:]))
if requirement.description is not None and len(requirement.description) > 0:
items.append(u"<p class=\"req\">{}</p>".format(requirement.description.replace("\n\n", "<br/><br/>")))
if level == 0 and requirement.title is None and len(desc) > 0 and not (requirement.connection_type != CONNECTION_TYPE_ALL or always_show_title):
items.append(u"<h4 class=\"req-title\">{}:</h4>".format(desc[0].upper() + desc[1:]))
if show_in_row(requirement):
# Show all the child requirements in a single row
items.append(make_row(requirement))
elif requirement.requirements.exists():
# Show each child requirement as a separate row
show_titles = any(r.connection_type == CONNECTION_TYPE_ALL and r.requirements.exists() and r.requirements.count() > 0 for r in requirement.requirements.all())
for req in requirement.requirements.all():
items += presentation_items(req, level + 1, show_titles)
return items
def build_presentation_items(list):
    """Builds HTML for the given requirements list.

    NOTE: the parameter name shadows the builtin ``list``; kept for API
    compatibility with existing callers.
    """
    if not list.requirements.exists():
        return ""
    ret = []
    # Shallow lists render directly; deeper ones get an explicit
    # title/description header followed by each top-level requirement.
    if list.maximum_nest_depth() <= 1:
        ret = presentation_items(list, 0)
    else:
        if list.title is not None and len(list.title) > 0:
            ret.append(u"<h1 class=\"req-title\">{}</h1>".format(list.title))
        if list.description is not None and len(list.description) > 0:
            ret.append(u"<p class=\"req\">{}</p>".format(list.description.replace("\n\n", "<br/><br/>")))
        for top_req in list.requirements.all():
            rows = presentation_items(top_req, 0)
            ret += rows
    return "\n".join(ret)
# Review and commit (admin only)
@staff_member_required
def review(request, edit_req):
"""
Allow admin users to review a particular edit request, commit it to the list
of changes to be added or revise and resubmit.
"""
try:
edit_request = EditRequest.objects.get(pk=int(edit_req))
except ObjectDoesNotExist:
raise Http404
except ValueError:
return redirect(reverse('requirements_index'))
params = build_sidebar_info(request)
params['edit_req'] = edit_request
params['action'] = "Review"
req_list = RequirementsList.objects.create()
try:
req_list.parse(edit_request.contents, full=False)
params['medium_title'] = req_list.medium_title
req_list.delete()
except:
req_list.delete()
params['medium_title'] = '<could not parse>'
if edit_request.type == REQUEST_TYPE_EDIT:
try:
req_list = RequirementsList.objects.get(list_id=edit_request.list_id + REQUIREMENTS_EXT)
params['diff'] = build_diff(req_list.contents, edit_request.contents)
except ObjectDoesNotExist:
params['diff'] = 'The edit request refers to a non-existent requirements list.'
else:
params['diff'] = '\n'.join(['<p class="diff-line">' + escape(line) + '</p>' for line in edit_request.contents.split('\n')])
return render(request, 'requirements/review.html', params)
def count_conflicts(reqs_to_deploy):
    """Counts the number of committed changes that would override a previous pending deployment.

    A conflict is a list_id appearing both in *reqs_to_deploy* and in any
    edit request attached to a not-yet-executed deployment; each list_id is
    counted once. (Removed two leftover debug print() calls.)
    """
    conflicts = set()
    list_ids = set(req.list_id for req in reqs_to_deploy.all())
    for deployment in Deployment.objects.filter(date_executed=None):
        for other_req in deployment.edit_requests.all():
            if other_req.list_id in list_ids:
                conflicts.add(other_req.list_id)
    return len(conflicts)
@staff_member_required
def review_all(request):
"""Displays all available edit requests and allows the user to commit them."""
if request.method == 'POST':
form = DeployForm(request.POST)
if form.is_valid():
deployment = Deployment.objects.create(author=form.cleaned_data['email_address'], summary=form.cleaned_data['summary'])
for edit_req in EditRequest.objects.filter(committed=True, resolved=False):
# Resolve this edit request, and show when it was deployed
edit_req.deployment = deployment
edit_req.committed = True
edit_req.resolved = True
edit_req.save()
deployment.save()
# Go back and re-render the same page
params = build_sidebar_info(request)
form = DeployForm()
params['form'] = form
params['active_id'] = 'review_all'
params['num_to_deploy'] = EditRequest.objects.filter(committed=True, resolved=False).count()
params['committed'] = EditRequest.objects.filter(committed=True, resolved=False).order_by('pk')
params['pending'] = EditRequest.objects.filter(committed=False, resolved=False).order_by('pk')
params['deployments'] = Deployment.objects.filter(date_executed=None).count()
params['conflicts'] = count_conflicts(params['committed'])
return render(request, 'requirements/review_all.html', params)
@staff_member_required
def commit(request, edit_req):
    """
    "Commits" the given edit request by setting its 'committed' flag to True.

    edit_req -- primary key from the URL; unknown pk -> 404, non-numeric
    pk -> silent redirect back to the review queue.
    """
    try:
        edit_request = EditRequest.objects.get(pk=int(edit_req))
    except ObjectDoesNotExist:
        raise Http404
    except ValueError:
        # Non-numeric primary key in the URL: bounce back to the queue.
        return redirect(reverse('review_all'))
    edit_request.committed = True
    edit_request.save()
    return redirect(reverse('review_all'))
@staff_member_required
def uncommit(request, edit_req):
    """
    Removes the committed flag from the given edit request.

    Mirror image of commit(); unknown pk -> 404, non-numeric pk ->
    redirect back to the review queue.
    """
    try:
        edit_request = EditRequest.objects.get(pk=int(edit_req))
    except ObjectDoesNotExist:
        raise Http404
    except ValueError:
        # Non-numeric primary key in the URL: bounce back to the queue.
        return redirect(reverse('review_all'))
    edit_request.committed = False
    edit_request.save()
    return redirect(reverse('review_all'))
@staff_member_required
def ignore_edit(request, edit_req):
    # Intentionally leaves the edit request untouched and returns to the
    # review queue. NOTE(review): looks like an unfinished stub (the
    # edit_req argument is unused) -- confirm intended behaviour.
    return redirect(reverse('review_all'))
@staff_member_required
def resolve(request, edit_req):
    """
    Marks the given edit request as resolved.

    Unknown pk -> 404, non-numeric pk -> redirect back to the review queue.
    """
    try:
        edit_request = EditRequest.objects.get(pk=int(edit_req))
    except ObjectDoesNotExist:
        raise Http404
    except ValueError:
        # Non-numeric primary key in the URL: bounce back to the queue.
        return redirect(reverse('review_all'))
    # Resolved requests leave the pending queue; committed is also cleared
    # so the request is not counted toward the next deployment.
    edit_request.committed = False
    edit_request.resolved = True
    edit_request.save()
    return redirect(reverse('review_all'))
| [
"[email protected]"
]
| |
43a08ccc7487f6dc08dd3dcc5dc285c918aaac1d | 27c1e46f8efdaa0009645d2a02d2822e32493469 | /ReportingTool.py | 80c459180736aed755fdd47ce507ec4c49b869df | [
"MIT"
]
| permissive | Nsharma96/FullStack | c593a0d7572f071149ec502d5eac1c87d22696c4 | 17d9a6b549e543ca0a5f3b5e0fd799d91b54a74b | refs/heads/master | 2020-04-24T20:28:09.556801 | 2019-03-13T14:59:39 | 2019-03-13T14:59:39 | 172,244,847 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | #!/usr/bin/env python2
import psycopg2
import pandas as pd
pd.set_option("display.colheader_justify", "center")
dbname = "news"
def execute_q(q):
    """Run query *q* against the configured news database and return all rows.

    The connection is now always closed: in the original, ``db.close()``
    sat after ``return`` and never executed, leaking one PostgreSQL
    connection per call.
    """
    db = psycopg2.connect(database=dbname)
    try:
        c = db.cursor()
        c.execute(q)
        return c.fetchall()
    finally:
        db.close()
# The most popular three articles of all time.
result1 = execute_q("""select articles.title, artiView.num from articles ,
(select path , count(path)
as num from log where status
like '%200%' group by path order by num desc limit 4)
as artiView
where '/article/' || articles.slug = artiView.path
order by artiView.num desc;""")
# The most popular article authors of all time.
result2 = execute_q("""select authors.name,authorViewSums.authorView
from authors,authorViewSums
where authors.id=authorViewSums.author""")
# Days on which more than 1% of requests lead to errors.
result3 = execute_q("""select d as Day,m as Month,y as Year,
(err*1.0/total_Requests)*100 as Error
from error_Matrix
where (err*1.0/total_Requests)*100>1;""")
print("\nThe most popular three articles of all time.\n")
res = pd.DataFrame(data=result1, columns=['Article', 'Views'])
print(res)
print("\n")
print("The most popular article authors of all time.\n")
res = pd.DataFrame(data=result2, columns=['Author', 'Views'])
print(res)
print("\n")
print("Days on which more than 1 percent of requests lead to errors.\n")
res = pd.DataFrame(data=result3, columns=['Day', 'Month', 'Year', 'ERROR%'])
res = res.astype(int)
print(res)
print("\n")
| [
"[email protected]"
]
| |
ed177d6ef08407058c4f31e04e9a1b606d58512a | 5c0c393ee727470f5a0570e3508be6b5bd0a6d96 | /myapp/views.py | 7a1b43474540a9c24cd611e3fa54233d5f7769d5 | []
| no_license | lynch829/omp-gynae-brachy | 53b8f0431ec0e725422f9f817c89e183d3d1eefa | 52823a732ba52d62b8e388042931ad64ba39965b | refs/heads/master | 2020-08-11T15:15:25.550527 | 2017-07-17T07:34:15 | 2017-07-17T07:34:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,445 | py | from django.shortcuts import redirect, render
from .forms import PatientForm, DVHDumpForm, PatientNameForm
from .pyTG43.pyTG43 import *
from .oncprost_utilities.oncprost_utilities import *
from django.conf import settings
import dicom
import os
from scipy.interpolate import interp1d
import json
def index(request):
"""
Render home page, which just has a search
box for Patient ID's in it.
"""
if request.method == "POST":
if 'search_by_id' in request.POST:
id_form = PatientForm(request.POST) #create form
if id_form.is_valid():
post = id_form.save(commit=False)
patient_ID = post
patient_ID.patient_ID = patient_ID.patient_ID.upper() #convert ID to uppercase
return redirect('view_patient',patient_ID=patient_ID)
elif 'search_by_name' in request.POST:
name_form = PatientNameForm(request.POST)
if name_form.is_valid():
post = name_form.save(commit=False)
patient_name = post
patient_name = "%"+patient_name.patient_name.upper().replace(' ','%')+"%" #convert ID to uppercase
return redirect('view_ids',patient_name=patient_name)
else:
id_form = PatientForm() #remove default ID in production
name_form = PatientNameForm() #remove default ID in production initial={'patient_name': 'white'}
return render(request, 'myapp/index.html', {'id_form': id_form,
'name_form':name_form})
def parse_patient_name(name_raw):
    """Format a caret-separated raw patient name for display.

    The first component keeps its original casing; every later component
    is title-cased. Components are joined with spaces and any trailing
    whitespace (from empty trailing components) is stripped.
    """
    parts = name_raw.split('^')
    formatted = parts[:1] + [part.title() for part in parts[1:]]
    return ' '.join(formatted).rstrip()
def view_patient(request, patient_ID):
"""
Produces view for list of cases associated with patient ID
"""
available_cases = get_patient_studies(patient_ID)
if available_cases == []: # If I couldn't find any cases, through an error page
return render(request, 'myapp/error_cases_not_found.html',{})
else:
patient_name = parse_patient_name(get_patient_name(patient_ID))
return render(request, 'myapp/view_patient.html', {'patient_ID':patient_ID,
'patient_name':patient_name,
'cases':sorted(available_cases)})
def view_ids(request, patient_name):
"""
Produces view for list of cases associated with patient ID
"""
patient_IDs, patient_names = get_patient_IDs_and_names(patient_name)
# patient_names = get_patient_names(patient_name)
patient_names = [parse_patient_name(x) for x in patient_names]
list_of_results = []
for i in range(len(patient_IDs)):
temp_dict = {}
temp_dict['patient_id'] = patient_IDs[i]
temp_dict['patient_name'] = patient_names[i]
list_of_results.append(temp_dict)
if patient_IDs == []: # If I couldn't find any cases, through an error page
return render(request, 'myapp/error_patient_not_found.html',{})
else:
return render(request, 'myapp/view_ids.html', {'results':list_of_results})
def view_case(request, patient_ID, case_label):
"""
Lists plans available for that case
"""
available_plans, raw_plan_names = get_plans_from_study(patient_ID, case_label)
patient_name = parse_patient_name(get_patient_name(patient_ID))
return render(request, 'myapp/view_case.html', {'patient_ID':patient_ID,
'patient_name':patient_name,
'case_label':case_label,
'plans':available_plans})
def view_plan(request, patient_ID, case_label, plan_name):
"""
Open up some basic details of the plan + give user option to
make protocol or run dose check
"""
try:
#my_plan = fetch_plan(patient_ID, case_label, plan_name)
patient_ID = patient_ID.split('/')[0]
patient_name = parse_patient_name(get_patient_name(patient_ID))
# import ipdb; ipdb.set_trace()
return render(request, 'myapp/view_plan.html', {'patient_ID':patient_ID,
'case_label':case_label,
'patient_name':patient_name,
'plan_name':plan_name,
})
except:
return render(request, 'myapp/error_parse_plan.html',{'patient_ID':patient_ID,
'case_label':case_label})
def view_protocol(request, patient_ID, case_label, plan_name):
"""
Create basic HTML with protocol details. Have to do
stupid reformatting of the data before passing it to
template because Jinja2 + Django = annoying limits on
accessing lists indexes.
"""
try:
plan_number = plan_name.split(' ')[1]
my_plan, my_POIs = fetch_plan(patient_ID, case_label, plan_number)
patient_name = parse_patient_name(get_patient_name(patient_ID))
my_plan['patient_name'] = patient_name
my_plan['plan_name'] = plan_name
insert_list = []
for i in range(len(my_plan['channels'])):
temp_dict = {}
# temp_dict['channel_name'] = my_plan.channel_names[1+i]
temp_dict['channel_number'] = my_plan['channels'][i]['channel_number']
temp_dict['channel_time_total'] = round( my_plan['channels'][i]['channel_time_total'],1)
temp_dict['reference_length'] = my_plan['channels'][i]['reference_length']
temp_dict['step_size'] = my_plan['channels'][i]['step_size']
dwells = []
for j in range(len(my_plan['sources']['dwell_times'])):
dwell_dict = {}
if my_plan['sources']['channel_number'][j] == my_plan['channels'][i]['channel_number']:
dwell_dict['dwell_time'] = round(my_plan['sources']['dwell_times'][j],1)
dwell_dict['dwell_position'] = my_plan['sources']['dwell_positions'][j]
dwells.append(dwell_dict)
temp_dict['dwells'] = dwells
insert_list.append(temp_dict)
# my_plan.prescription = "{0:.2f}".format(my_plan.prescription)
# import ipdb; ipdb.set_trace()
return render(request, 'myapp/view_protocol.html', {'plan':my_plan,
'case_label':case_label,
'patient_name':patient_name,
'plan_data':insert_list,
})
except:
return render(request, 'myapp/error_parse_plan.html',{'patient_ID':patient_ID,
'case_label':case_label})
def dose_check(request, patient_ID, case_label, plan_name):
"""
Perform dose check and render results page
"""
try:
plan_number = plan_name.split(' ')[1]
plan_data_dict, my_POIs = fetch_plan(patient_ID, case_label, plan_number)
plan_data_dict['POIs'] = []
plan_data_dict['POI_names'] = []
op_poi_doses = []
for k in range(len(my_POIs)):
plan_data_dict['POIs'].append([0.1*float(x) for x in my_POIs[k][1].split(':')])
plan_data_dict['POI_names'].append(my_POIs[k][0])
op_poi_doses.append(my_POIs[k][2])
my_source_train = []
for i in range(len(plan_data_dict['sources']['coordinates'])):
my_source_train.append(SourcePosition(x=plan_data_dict['sources']['coordinates'][i][0],
y=plan_data_dict['sources']['coordinates'][i][1],
z=plan_data_dict['sources']['coordinates'][i][2],
dwell_time=plan_data_dict['sources']['dwell_times'][i],
apparent_activity=10,
Sk=40820,
dose_rate_constant=1.109,
L=0.36,
t_half=73.83))
#reformat results into a list of dictionaries due to limitations in Jinja2
insert_list = []
idx = 0
for point in plan_data_dict['POIs']:
temp_dict = {}
temp_dict['x_coord'] = round(point[0],2)
temp_dict['y_coord'] = round(point[1],2)
temp_dict['z_coord'] = round(point[2],2)
temp_dict['poi_name'] = plan_data_dict['POI_names'][idx]
my_dose = calculate_dose(my_source_train, point) #perform dose calculation
temp_dict['pyTG43_dose'] = round(my_dose,2)
temp_dict['OP_dose'] = round(op_poi_doses[idx],2)
temp_dict['perc_difference'] = round(100*((op_poi_doses[idx]/my_dose)-1),2)
insert_list.append(temp_dict)
idx += 1
context_data = {'patient_ID':patient_ID,
'case_label':case_label,
'plan_name':plan_name,
'calc_data':insert_list}
return render(request, 'myapp/dose_check.html', context_data)
except:
return render(request, 'myapp/error_parse_plan.html',{'patient_ID':patient_ID,
'case_label':case_label})
def dvh_dump(request):
    """Accept a pasted DVH text dump, analyse it, and show the results.

    GET renders the paste form; POST parses the dump via analyse_dvh and
    renders per-ROI statistics against the configured tolerances. A parse
    failure shows a dedicated error page instead of a traceback.
    """
    if request.method == "POST":
        form = DVHDumpForm(request.POST)
        if form.is_valid():
            try:
                dvh_data, tolerances_json = analyse_dvh(form.data['dump'])
            except Exception:
                # User-supplied dumps are free text; any parse failure gets
                # the friendly error page. (Was a bare except, which also
                # swallowed SystemExit/KeyboardInterrupt.)
                return render(request, 'myapp/error_dvh_parse_fail.html',{})
            return render(request, 'myapp/view_dvh.html', {'dvh_data':dvh_data,'tolerances':tolerances_json})
    else:
        form = DVHDumpForm()
    return render(request, 'myapp/dvh_dump.html', {'form': form})
def analyse_dvh(input_data):
with open(r'myapp/static/myapp/tolerances.json') as data_file:
tolerances_json = json.load(data_file)
raw_ROIs = input_data.split('ROI')
prescription = 7.1
ROI_dict = {}
for ROI in raw_ROIs[2:]:
temp_dict = {}
temp_dict['bin'] = []
temp_dict['dose'] = []
temp_dict['volume'] = []
for i in range(len(ROI.split('\n')[4:-3])):
temp_dict['bin'].append(int(ROI.split('\n')[4:-3][i].split('\t')[0]))
temp_dict['dose'].append(float(ROI.split('\n')[4:-3][i].split('\t')[1]))
temp_dict['volume'].append(float(ROI.split('\n')[4:-3][i].split('\t')[2]))
temp_dict['volume_cc'] = round(float(temp_dict['volume'][0]),2)
ROI_dict[ROI.split('***')[0].split('\n')[0][2:].split('\r')[0].replace('-','')] = temp_dict
for ROI in ROI_dict:
# import ipdb; ipdb.set_trace()
f = interp1d(ROI_dict[ROI]['volume'],ROI_dict[ROI]['dose'])
try:
ROI_dict[ROI]['D2cc'] = {}
ROI_dict[ROI]['D2cc']['value'] = round(float(f(2)),2)
ROI_dict[ROI]['D2cc']['result'] = ''
ROI_dict[ROI]['D2cc']['result'] = ('fail', 'pass')[eval('ROI_dict[ROI][\'D2cc\'][\'value\']'+tolerances_json[ROI]['D2cc']['math']+'float(tolerances_json[ROI][\'D2cc\'][\'value\'])')]
except:
pass
try:
ROI_dict[ROI]['D90'] = {}
ROI_dict[ROI]['D90']['value'] = round(float(f(0.9*ROI_dict[ROI]['volume_cc'])),2)
ROI_dict[ROI]['D90']['result'] = ''
ROI_dict[ROI]['D90']['result'] = ('fail', 'pass')[eval('ROI_dict[ROI][\'D90\'][\'value\']'+tolerances_json[ROI]['D90']['math']+'float(tolerances_json[ROI][\'D90\'][\'value\'])')]
except:
pass
f = interp1d(ROI_dict[ROI]['dose'],ROI_dict[ROI]['volume'])
try:
ROI_dict[ROI]['V100'] = {}
ROI_dict[ROI]['V100']['value'] = round(float(100*(f(prescription)/ROI_dict[ROI]['volume_cc'])),2)
ROI_dict[ROI]['V100']['result'] = ''
ROI_dict[ROI]['V100']['result'] = ('fail', 'pass')[eval('ROI_dict[ROI][\'V100\'][\'value\']'+tolerances_json[ROI]['V100']['math']+'float(tolerances_json[ROI][\'V100\'][\'value\'])')]
except:
pass
try:
ROI_dict[ROI]['V67'] = {}
ROI_dict[ROI]['V67']['value'] = round(float(100*(f(0.67*prescription)/ROI_dict[ROI]['volume_cc'])),2)
ROI_dict[ROI]['V67']['result'] = ''
ROI_dict[ROI]['V67']['result'] = ('fail', 'pass')[eval('ROI_dict[ROI][\'V67\'][\'value\']'+tolerances_json[ROI]['V67']['math']+'float(tolerances_json[ROI][\'V67\'][\'value\'])')]
except:
pass
ROI_dict['patient_name'] = raw_ROIs[0].split('\r')[0].split('Patient: ')[1]
ROI_dict['patient_ID'] = raw_ROIs[0].split('\r')[1].split('Patient Id: ')[1]
ROI_dict['case_label'] = raw_ROIs[0].split('\r')[2].split('Case: ')[1]
ROI_dict['plan_name'] = raw_ROIs[0].split('\r')[3].split('Plan: ')[1]
return ROI_dict, tolerances_json
| [
"[email protected]"
]
| |
4455b3a1b142bedf192ae2f451c4ff35db376820 | 176c59cf09d42c66d4101eca52beb9c3ea7362a1 | /pyramid_authsanity/tests/test_includeme.py | ad635c82cfa576e918a2c5fbe5ec15a7f88c8027 | [
"ISC"
]
| permissive | stevepiercy/pyramid_authsanity | 146d90abcf7622e1d509eb069bfbbf80ed61acc8 | daf7188a8ab1a8bd215d9e1e1cb6682e87fa8ac7 | refs/heads/master | 2021-07-16T08:33:46.683994 | 2016-01-10T05:48:32 | 2016-01-10T05:48:32 | 51,718,108 | 0 | 0 | null | 2016-02-14T22:53:13 | 2016-02-14T22:53:13 | null | UTF-8 | Python | false | false | 3,043 | py | import pytest
from pyramid.authorization import ACLAuthorizationPolicy
import pyramid.testing
from zope.interface import (
Interface,
implementedBy,
providedBy,
)
from zope.interface.verify import (
verifyClass,
verifyObject
)
from pyramid_services import IServiceClassifier
from pyramid_authsanity.interfaces import (
IAuthSourceService,
)
class TestAuthServicePolicyIntegration(object):
    """Integration tests: including ``pyramid_authsanity`` must register an
    ``AuthServicePolicy`` and (when a source is configured) an
    ``IAuthSourceService`` factory matching the deployment settings."""

    @pytest.fixture(autouse=True)
    def pyramid_config(self, request):
        # NOTE(review): IDebugLogger is imported but never used here.
        from pyramid.interfaces import IDebugLogger
        self.config = pyramid.testing.setUp()
        self.config.set_authorization_policy(ACLAuthorizationPolicy())

        def finish():
            # Tear down the testing registry after every test.
            del self.config
            pyramid.testing.tearDown()

        request.addfinalizer(finish)

    def _makeOne(self, settings):
        # Apply the given deployment settings, then include the package under test.
        self.config.registry.settings.update(settings)
        self.config.include('pyramid_authsanity')

    def test_include_me(self):
        from pyramid_authsanity.policy import AuthServicePolicy
        self._makeOne({})
        self.config.commit()
        introspector = self.config.registry.introspector
        auth_policy = introspector.get('authentication policy', None)
        assert isinstance(auth_policy['policy'], AuthServicePolicy)
        # With no source configured, no IAuthSourceService factory is registered.
        with pytest.raises(ValueError):
            find_service_factory(self.config, IAuthSourceService)

    def test_include_me_cookie_no_secret(self):
        # The cookie source requires a secret; including without one must fail.
        settings = {'authsanity.source': 'cookie'}
        with pytest.raises(RuntimeError):
            self._makeOne(settings)

    def test_include_me_cookie_with_secret(self):
        from pyramid_authsanity.policy import AuthServicePolicy
        settings = {'authsanity.source': 'cookie', 'authsanity.secret': 'sekrit'}
        self._makeOne(settings)
        self.config.commit()
        introspector = self.config.registry.introspector
        auth_policy = introspector.get('authentication policy', None)
        assert isinstance(auth_policy['policy'], AuthServicePolicy)
        assert verifyClass(IAuthSourceService, find_service_factory(self.config, IAuthSourceService))

    def test_include_me_session(self):
        from pyramid_authsanity.policy import AuthServicePolicy
        settings = {'authsanity.source': 'session'}
        self._makeOne(settings)
        self.config.commit()
        introspector = self.config.registry.introspector
        auth_policy = introspector.get('authentication policy', None)
        assert isinstance(auth_policy['policy'], AuthServicePolicy)
        assert verifyClass(IAuthSourceService, find_service_factory(self.config, IAuthSourceService))
def find_service_factory(config, iface=Interface):
    """Return the pyramid_services factory registered for *iface*.

    Looks the factory up in the registry's adapter registry under the
    ``IServiceClassifier`` discriminator with a ``None`` context.

    Raises ValueError when no matching service factory is registered.
    """
    discriminators = (IServiceClassifier, providedBy(None))
    factory = config.registry.adapters.lookup(discriminators, iface, name='')
    if factory is None:
        raise ValueError('could not find registered service')
    return factory
| [
"[email protected]"
]
| |
88ca9452a4509137e40048ebdbbc850643a241a0 | 04f09d432a28fa50f3b13f5f46bf4b9b62c01e2f | /html-png_code.py | f01c308213943a840d27cc0d717a3587ebecb18e | [
"MIT"
]
| permissive | sketch2code-mit/html-png | adfad5884dac50154356ec4b87213f635e4e6f6d | 5808c3a4a1b4784e59dcf4427867a3fe501f29d6 | refs/heads/main | 2023-04-06T09:58:01.806598 | 2021-04-12T00:36:02 | 2021-04-12T00:36:02 | 357,011,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | # this is the coding for generating html screenshot
# written by Doodle2Code team for MIT 6.962 Applied Machine Learning
import time
from selenium import webdriver
import os
c = 0
n = 0
j = 0
i = 0
for c in range(3):
for n in range(10):
for j in range(10):
for i in range(10):
# name of the file in format of 0000.html
fn = str(c)+str(n)+str(j)+str(i)+".html"
path = '/Users/Username/Desktop/filename/'
tmpurl = 'file://{path}/{mapfile}'.format(
path=path, mapfile=fn) # find the address of the html
driver = webdriver.Chrome()
driver.maximize_window()
# maximum waiting time for opening the html
driver.implicitly_wait(6)
driver.get(tmpurl) # open html
time.sleep(1)
# print("done')
driver.get_screenshot_as_file(
str(c)+str(n)+src(j)+str(i)+".png") # rename the file
| [
"[email protected]"
]
| |
0ab91b8fc7a8f722176caa772e62d86f3f98bab8 | ecbf6a7c04b068a4f5606bbab46b974e53bd28d8 | /src/replace_localparam.py | debb116037d08c9f96c454b86e2bebe117057dc9 | [
"MIT"
]
| permissive | jamesjiang52/V2SV | 970357be757ba068111645fd6964e8672a72f69f | 4b6109d16482131785b9dfec13fd66452078ae17 | refs/heads/main | 2022-12-30T06:47:27.814305 | 2020-10-18T00:07:39 | 2020-10-18T00:07:39 | 300,928,285 | 1 | 2 | MIT | 2020-10-18T00:06:56 | 2020-10-03T16:41:38 | Python | UTF-8 | Python | false | false | 4,354 | py | def __remove_extra_declarations(module_string, replaced_wires, debug=False):
    # Keep the module header (up to the ");" closing the port list) verbatim
    # and process only the body statements.
    buffer = module_string[:module_string.index(");") + 2] + "\n"
    body_string = module_string[module_string.index(");") + 2:]
    statements = body_string.split(";")
    # remove the previous declarations of any new enums
    for statement in statements:
        words = statement.split()
        if not words:
            continue
        if words[0] in ["reg", "wire", "logic"]:
            if ":" in words[1]:
                # wire is an array
                signals = statement[statement.index("]") + 1:].split()
            else:
                signals = words[1:]
            # strip trailing commas from the individual signal names
            signals = [signal[:-1] if signal[-1] == "," else signal for signal in signals]
            signals_remaining = signals[:]
            for signal in signals:
                if signal in replaced_wires:
                    signals_remaining.remove(signal)
            if signals_remaining == signals:
                # none of these signals were changed to enums
                buffer += "{};\n".format(" ".join(words))
            elif signals_remaining == []:
                # all signals are declared as new enums now, so don't write anything
                if debug:
                    print("Removed:\n{}\n\n".format(" ".join(words)))
            else:
                # some (but not all) signals moved to enums: re-emit the rest
                new_statement = "logic " # might as well do this
                if ":" in words[1]:
                    # wire is an array
                    new_statement += words[1] + " "
                for signal in signals_remaining:
                    new_statement += signal + ", "
                # remove trailing comma from last wire
                if new_statement[-2] == ",":
                    new_statement = new_statement[:-2]
                buffer += "{};\n".format(new_statement)
                if debug:
                    print("Replaced:\n{}\nwith\n{}\n\n".format(" ".join(words), new_statement))
        else:
            # don't care
            buffer += "{};\n".format(" ".join(words))
    # remove trailing semicolon from endmodule
    if buffer[-2] == ";":
        buffer = buffer[:-2] + "\n"
    return buffer
def replace_localparam(module_string, debug=False):
    """Rewrite Verilog ``localparam`` lists as SystemVerilog ``enum``s.

    Every wire that is assigned one of the localparam names is re-declared
    with the new enum type; the rewritten module text is returned.
    """
    # Keep the module header (up to the ");" closing the port list) verbatim.
    buffer = module_string[:module_string.index(");") + 2] + "\n"
    body_string = module_string[module_string.index(");") + 2:]
    statements = body_string.split(";")
    replaced_wires = []
    for statement in statements:
        words = statement.split()
        if not words:
            continue
        if words[0] == "localparam":
            new_statement = "enum int unsigned {\n"
            params = []
            pair_strings = "".join(words[1:]).split(",")
            # get all localparam names
            for pair_string in pair_strings:
                param = pair_string.split("=")[0]
                new_statement += param + ",\n"
                params.append(param)
            # remove trailing comma from last param
            if new_statement[-2] == ",":
                new_statement = new_statement[:-2] + "\n} "
            # need to search for wires that are being assigned to these localparams,
            # and declare these as the new enums
            for statement_i in statements:
                if "=" in statement_i or "<=" in statement_i:
                    # treat non-blocking (<=) like blocking assignments
                    statement_i = statement_i.replace("<=", "=")
                    words_i = statement_i.split()
                    if words_i[-1] in params:
                        # left-hand side name of the assignment
                        wire = statement_i[:statement_i.index("=")].split()[-1]
                        if wire not in replaced_wires:
                            new_statement += wire + ", "
                            replaced_wires.append(wire)
                else:
                    # don't care
                    pass
            # remove trailing comma from last wire
            if new_statement[-2] == ",":
                new_statement = new_statement[:-2]
            buffer += "{};\n".format(new_statement)
            if debug:
                print("Replaced:\n{}\nwith\n{}\n\n".format(" ".join(words), new_statement))
        else:
            # don't care at all about anything else
            buffer += "{};\n".format(" ".join(words))
    # drop the old declarations of the wires now declared via the enums
    buffer = __remove_extra_declarations(buffer, replaced_wires, debug=debug)
    return buffer
| [
"[email protected]"
]
| |
b0d4c96a5402e57985cbbb13d21d08abf5b648e9 | 13a4af1f8b7d1445c722bf74d886daf54525c7f1 | /test.py | 7e7f20b1ca5392da1f172978e6ef331997ecee0c | []
| no_license | JiaCheng-Lai/bird-sound | 8caf16fe1d5f7ea876080ffa29f84b1fd5c50236 | 0f8b4493cf42203678a41098fa61a8ff54498500 | refs/heads/main | 2023-01-31T00:59:39.660448 | 2020-12-15T09:49:51 | 2020-12-15T09:49:51 | 321,600,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | import random
from tqdm import tqdm
import time
import os
import struct
import matplotlib.pyplot as plt
import IPython.display as ipd
import pandas as pd
import numpy as np
import librosa
import librosa.display
from sklearn import manifold
import scipy, pylab
import scipy.io
from scipy.io import wavfile
from scipy.io.wavfile import write
from os.path import dirname, join as pjoin
import scipy.io as sio
import h5py
import scipy.io.wavfile as wav
from scipy import signal
import math
from scipy.fftpack import ifft2
# Input/output locations for this run.
my_path = "/home/jerry/japanesebirdsound_plot_train/"  # NOTE(review): appears unused below — confirm
path = '/media/iml/jerry/JapaneseBirdSound/'
# Index CSV: one row per audio clip (fold, slice_file_name, classID columns).
data = pd.read_csv("JapaneseBirdSound.csv")
# y,sr = librosa.load(path, sr=None, offset=5, duration=10)
x_train = []
x_test = []
# Labels 1..420, repeated once per training offset (10 offsets used below).
y_train = [*range(1,421)]*10
y_train = np.array(y_train)
y_test = [*range(1,421)]
y_test = np.array(y_test)
#training data
def _append_chroma_features(offset, destination):
    """Shared worker for the two generators below.

    For every clip listed in ``data``, load 5 s of audio starting at
    ``offset`` seconds, compute the time-averaged chroma-STFT feature
    vector (scaled by 1000) and append it to ``destination``.
    """
    for i in tqdm(range(len(data))):
        fold_no = str(data.iloc[i]["fold"])
        file = data.iloc[i]["slice_file_name"]
        filename = path + fold_no + "/" + file
        y, sr = librosa.load(filename, sr=None, offset=offset, duration=5)
        S = np.abs(librosa.stft(y, n_fft=512, hop_length=64, window='hann'))**2
        STFT = np.mean(librosa.feature.chroma_stft(S=S, sr=sr).T, axis=1)
        STFT *= 1000
        destination.append(STFT)


def generating_training_data(XX):
    """Append one feature vector per clip (offset ``XX`` seconds) to ``x_train``."""
    # FIX: the two generators previously duplicated this loop verbatim; they
    # now share _append_chroma_features.  The unused ``label`` lookup was dropped.
    _append_chroma_features(XX, x_train)


#testing data
def generating_testing_data(XX):
    """Append one feature vector per clip (offset ``XX`` seconds) to ``x_test``."""
    _append_chroma_features(XX, x_test)
# Extract training features at 10 start offsets (4 s .. 13 s) and a single
# test feature set at offset 18 s.
for i in range(4,14,1):
    generating_training_data(i)
generating_testing_data(18)
# generating_testing_data(21)
x_train = np.array(x_train)
x_test = np.array(x_test)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
#convert into 2D
# x_train_2d=np.reshape(x_train,(x_train.shape[0],x_train.shape[1]*x_train.shape[2]))
# x_test_2d=np.reshape(x_test,(x_test.shape[0],x_test.shape[1]*x_test.shape[2]))
# print(x_train_2d.shape,x_test_2d.shape)
# Persist the arrays both as .npy and as CSV.
np.save("x_train_japan_STFT",x_train,allow_pickle=True)
np.save("y_train_japan_STFT",y_train,allow_pickle=True)
np.save("x_test_japan_STFT",x_test,allow_pickle=True)
np.save("y_test_japan_STFT",y_test,allow_pickle=True)
train1 = pd.DataFrame(x_train)
train2 = pd.DataFrame(y_train)
train3 = pd.DataFrame(x_test)
train4 = pd.DataFrame(y_test)
train1.to_csv("x_train_japan_STFT.csv",index=False)
train2.to_csv("y_train_japan_STFT.csv",index=False)
train3.to_csv("x_test_japan_STFT.csv",index=False)
train4.to_csv("y_test_japan_STFT.csv",index=False)
| [
"[email protected]"
]
| |
f3e6b51c6adbdde53612d33cbfe7e40f64cd622f | 416e78d220f8ed3ebeca547a9f3619e1593ec5a4 | /common/do_excel.py | 4aead9c15b4499200caea02adec7330ecc2fd3a1 | []
| no_license | juanjuankeke/coco | ca33f00ef7c8475b6eb8b9d97d51e4d8503d6345 | cb88cf3dfb9426ccf6b4d0940e09a34ba7322496 | refs/heads/master | 2022-12-22T19:31:53.735388 | 2019-02-23T07:38:56 | 2019-02-23T07:38:56 | 172,161,232 | 0 | 0 | null | 2022-12-08T01:38:07 | 2019-02-23T02:20:50 | HTML | UTF-8 | Python | false | false | 1,871 | py | from openpyxl import load_workbook # 读写
#case_id 第一行 url 第三行 param 第5 method 6 Expected 7
from common import get_path
import json
class Cases:
    """Mutable container for a single test case read from the Excel sheet."""

    def __init__(self):
        # Every field starts empty; the sheet reader fills them in afterwards.
        for attr in ("case_id", "url", "param", "method",
                     "Expected", "title", "module"):
            setattr(self, attr, None)
class doexcel:
    """Thin openpyxl wrapper for reading test cases and writing results back."""

    # 1-based column layout of the case sheet, mapped onto Cases attributes.
    _COLUMNS = ("case_id", "module", "url", "title", "param", "method", "Expected")

    def __init__(self, file_name):
        self.file_name = file_name

    def read_data(self, sheet_name):
        """Read every case row (row 2 onwards) into a list of Cases objects."""
        self.sheet_name = sheet_name
        workbook = load_workbook(self.file_name)   # open the workbook
        worksheet = workbook[self.sheet_name]      # locate the sheet
        cases = []
        for row_idx in range(2, worksheet.max_row + 1):
            case = Cases()
            for col_idx, attr in enumerate(self._COLUMNS, start=1):
                setattr(case, attr, worksheet.cell(row=row_idx, column=col_idx).value)
            cases.append(case)
        return cases

    def write_back(self, sheet_name, row, result, testresult):
        """Write the actual response (col 8) and pass/fail verdict (col 9)."""
        self.sheet_name = sheet_name
        workbook = load_workbook(self.file_name)
        worksheet = workbook[self.sheet_name]
        worksheet.cell(row, 8).value = result
        worksheet.cell(row, 9).value = testresult
        workbook.save(self.file_name)
if __name__ == '__main__':
    # Manual smoke test: read the 'info' sheet and dump each case's module
    # field (a JSON string) as a parsed object.
    t = doexcel(get_path.cases_path)
    u = t.read_data('info')
    for i in u:
        print(i.module)
        # NOTE(review): rebinding ``t`` here shadows the doexcel instance above.
        t = json.loads(i.module)
        print(type(t))
| [
"[email protected]"
]
| |
849fb6140aab077665d1f82cf50fe9a7ef8225b2 | 0e2ec51b4c2dc3281fa95d784d65a6380d198857 | /src/visualization/uv_vectorwidget.py | 8297607c0969378c1b0e2a8a122adb96dc42cc18 | [
"MIT"
]
| permissive | CaiBirdHSA/tidal_melting | f7fbab542344cfca18016252f2f05a50bbc9ed1b | b71eec6aa502e1eb0570e9fc4a9d0170aa4dc24b | refs/heads/master | 2023-04-09T05:41:21.808241 | 2018-08-10T03:44:45 | 2018-08-10T03:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,252 | py |
from netCDF4 import Dataset
from numpy import *
from matplotlib.pyplot import *
from features.rotate_vector_roms import *
import cmocean
from ipywidgets import interact
# Make a circumpolar Antarctic plot of vertically averaged speed overlaid with
# velocity vectors, plus sea surface height, with an interactive widget to
# step through time.
# Input:
# file_path = path to ocean history/averages file (must contain lon_rho,
#             lat_rho, zeta, ubar and vbar)
# tstart = first timestep in file_path to load (0-indexed)
# tstop = last timestep in file_path to load (inclusive)
def uv_vectorwidget (file_path, tstart,tstop):
    """Interactive (ipywidgets) plot of vertically averaged speed with
    velocity vectors (top panel) and sea surface height (bottom panel),
    one slider step per loaded timestep.

    file_path -- ROMS history/averages file containing lon_rho, lat_rho,
                 zeta, ubar and vbar
    tstart/tstop -- first/last (inclusive) timestep indices to load
    """
    # Radius of the Earth in metres
    r = 6.371e6
    # Degrees to radians conversion factor
    deg2rad = pi/180
    # Side length of blocks to average vectors over (can't plot vector at every
    # single point or the plot will be way too crowded)
    block = 15
    print('read in the data')
    id = Dataset(file_path, 'r')
    lon = id.variables['lon_rho'][:,:]
    lat = id.variables['lat_rho'][:,:]
    zeta = id.variables['zeta'][tstart:tstop+1,:,:]
    # Vertically averaged u and v
    u = id.variables['ubar'][tstart:tstop+1,:,:]
    v = id.variables['vbar'][tstart:tstop+1,:,:]
    id.close()
    print('initialize and fill up the arrays')
    numt = size(u,0)
    numy = size(lon,0) #530
    numx = size(lon,1) #630
    u_rho = ma.empty([numt,numy,numx])
    v_rho = ma.empty([numt,numy,numx])
    speed = ma.empty([numt,numy,numx])
    # NOTE(review): a zero rotation angle is used everywhere — confirm this
    # is intended for this grid.
    angle = zeros(shape(lon))
    x = arange(numx)
    y = arange(numy)
    xmesh,ymesh = meshgrid(x,y)
    #print(numx,numy,x,y)
    # Average x, y and the rotated velocities over block x block intervals
    # Calculate number of blocks
    sizet = size(u,0)
    size0 = int(ceil(numy/float(block)))
    size1 = int(ceil(numx/float(block)))
    # Set up arrays for averaged fields
    x_block = ma.empty([size0, size1])
    y_block = ma.empty([size0, size1])
    u_block = ma.empty([sizet,size0, size1])
    v_block = ma.empty([sizet,size0, size1])
    # Set up arrays containing boundary indices
    posn0 = list(arange(0, numy, block))
    posn0.append(numy)
    posn1 = list(arange(0, numx, block))
    posn1.append(numx)
    for t in arange(numt):
        print("processing time step: ",t)
        # Rotate velocities to lat-lon space
        u_rho[t],v_rho[t] = rotate_vector_roms(u[t], v[t], angle)
        speed[t] = sqrt(square(u_rho[t]) + square(v_rho[t]))
        for j in arange(size0):
            for i in arange(size1):
                start0 = posn0[j]
                end0 = posn0[j+1]
                start1 = posn1[i]
                end1 = posn1[i+1]
                x_block[j,i] = mean(xmesh[start0:end0, start1:end1])
                y_block[j,i] = mean(ymesh[start0:end0, start1:end1])
                u_block[t,j,i] = mean(u_rho[t,start0:end0, start1:end1])
                v_block[t,j,i] = mean(v_rho[t,start0:end0, start1:end1])
    print("building the widget")
    def plot(tstep):
        # Make the plot: speed (cm/s) + quiver on top, SSH below.
        fig,(ax0,ax1) = subplots(2,figsize=(10,13))
        speedP = ax0.pcolormesh(xmesh,ymesh,speed[tstep]*100, vmin=0,vmax=30,cmap=cmocean.cm.speed)
        colorbar(speedP,ax=ax0)
        #cbar.ax.tick_params(labelsize=10)
        # Add vectors for each block
        quiverP = ax0.quiver(x_block, y_block, u_block[tstep], v_block[tstep],pivot="mid", color='black',units="width")
        quiverkey(quiverP, 0.8, 0.99, 0.2, r'$20 \frac{cm}{s}$', labelpos='E',
                  coordinates='figure')
        ax0.set_title('Vertically averaged velocity (cm/s)', fontsize=16)
        ax0.set_aspect('equal')
        ax0.axis('off')
        sshP = ax1.pcolormesh(zeta[tstep],vmin=-10,vmax=10,cmap=cm.bwr)
        colorbar(sshP,ax=ax1)
        ax1.set_title("Sea surface height [m]", fontsize=16)
        ax1.set_aspect("equal")
        ax1.axis("off")
        tight_layout()
        show()
    print('done')
    # One slider position per loaded timestep.
    interact(plot,tstep=(0,numt-1))
| [
"[email protected]"
]
| |
92612e52e20cfea9a63de031d8883e91b91af44a | a496f49aeb9f9463f76e156ab4cd316346d8b21b | /ips/util/process_variables.py | 2ffd73a76287f3d2a12a5001666db371213486d2 | []
| no_license | cadmiumcat/ips_services | cb9320f2d0831399d8a3ee471e0e1c913ca1ebcf | 274bdc04440a094d1e8a8f9d4d2d6224e6736276 | refs/heads/master | 2020-06-24T17:15:48.030598 | 2019-04-05T12:19:20 | 2019-04-05T12:19:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,257 | py | import random
from ips_common.logging import log
import ips_common_db.sql as db
import numpy as np
# for exec
import math
random.seed(123456)
count = 1
def modify_values(row, pvs, dataset):
    """
    Author : Thomas Mahoney
    Date : 27 / 03 / 2018
    Purpose : Applies the PV rules to the specified dataframe on a row by row basis.
    Parameters : row - the row of a dataframe passed to the function through the 'apply' statement called
                 pvs - a collection of pv names and statements to be applied to the dataframe's rows.
                 dataset - and identifier used in the executed pv statements.
    Returns : a modified row to be reinserted into the dataframe.
    Requirements : this function must be called through a pandas apply statement.
    Dependencies : NA
    """
    for pv in pvs:
        code = pv[1]
        try:
            # PV statements are arbitrary Python that references `row`/`dataset`.
            exec(code)
        except (ValueError, KeyError, TypeError, SyntaxError) as err:
            # FIX: the original had four copy-pasted handlers, each logging and
            # then raising a *new*, message-less exception of the same type,
            # discarding the original traceback.  Log once using the actual
            # exception type name and re-raise the original exception.
            log.error(f"{type(err).__name__} on PV: {pv[0]}, code: {code}")
            raise
    if dataset in ('survey', 'shift'):
        # Shift port group codes are capped at 10 characters for these datasets.
        row['SHIFT_PORT_GRP_PV'] = str(row['SHIFT_PORT_GRP_PV'])[:10]
    return row
def get_pvs():
    """
    Author       : Thomas Mahoney
    Date         : 27 / 03 / 2018
    Purpose      : Extracts the PV data from the process_variables table.
    Parameters   : NA (the connection is acquired internally; the original
                   docstring incorrectly documented a ``conn`` parameter).
    Returns      : a collection of (name, rule) pv rows, ordered by PROCVAR_ORDER
    Requirements : NA
    Dependencies : NA
    """
    engine = db.get_sql_connection()
    if engine is None:
        raise ConnectionError("Cannot get database connection")
    with engine.connect() as conn:
        sql = """SELECT
                PROCVAR_NAME,PROCVAR_RULE
            FROM
                SAS_PROCESS_VARIABLE
            ORDER BY
                PROCVAR_ORDER"""
        v = conn.engine.execute(sql)
        return v.fetchall()
def process(in_table_name, out_table_name, in_id, dataset):
    """
    Author : Thomas Mahoney
    Date : 27 / 03 / 2018
    Purpose : Runs the process variables step of the IPS calculation process.
    Parameters : in_table_name - the table where the data is coming from.
                 out_table_name - the destination table where the modified data will be sent.
                 in_id - the column id used in the output dataset (this is used when the data is merged into the main
                 table later.
                 dataset - an identifier for the dataset currently being processed.
    Returns : NA
    Requirements : NA
    Dependencies : NA
    """
    # Ensure the input table name is capitalised
    in_table_name = in_table_name.upper()
    # Extract the table's content into a local dataframe
    df_data = db.get_table_values(in_table_name)
    # Fill nan values
    df_data.fillna(value=np.NaN, inplace=True)
    # Get the process variable statements
    process_variables = get_pvs()
    if dataset == 'survey':
        # Survey rows must be processed in serial order.
        df_data = df_data.sort_values('SERIAL')
    # Apply process variables row by row
    df_data = df_data.apply(modify_values, axis=1, args=(process_variables, dataset))
    # Create a list to hold the PV column names
    updated_columns = []
    # Loop through the pv's
    for pv in process_variables:
        updated_columns.append(pv[0].upper())
    # Generate a column list from the in_id column and the pvs for the current run
    columns = [in_id] + updated_columns
    columns = [col.upper() for col in columns]
    # Create a new dataframe from the modified data using the columns specified
    df_out = df_data[columns]
    # for column in df_out:
    #     if df_out[column].dtype == np.int64:
    #         df_out[column] = df_out[column].astype(int)
    # Insert the dataframe to the output table
    db.insert_dataframe_into_table(out_table_name, df_out)
| [
"[email protected]"
]
| |
c5cabb06152a474b0fd33a7d8fdaee8b5d41f662 | 50ec14d4d3a461f4e8cdec500e387054c7702f37 | /equip/analysis/constraint/__init__.py | ff472fc22ea757afd022f7f0776971bedd6d65a7 | [
"Apache-2.0"
]
| permissive | ZeoVan/equip | e99377b2a3ef20e0468cbfe83be68135ca9fc732 | 470c168cf26d1d8340aa5ab37a5364d999a0b2f4 | refs/heads/master | 2021-06-11T04:40:01.780587 | 2016-12-14T20:09:27 | 2016-12-14T20:09:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # -*- coding: utf-8 -*-
"""
equip.analysis.constraint
~~~~~~~~~~~~~~~~~~~~~~~~~
Represent code constraints.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
from .container import Constraint
from .expr import Expr, \
Const, \
Ref, \
Comparator, \
Operator, \
Undef
| [
"[email protected]"
]
| |
7a3b447c4f4b7109582b88cc05c5fb8d9be6273c | 975155d627cb7beda576cafd2693514bc84e7ca8 | /astartool/number/_number.py | fd198edf03fca7a3fbcbbe2e7e920c4c73692cf1 | [
"Apache-2.0"
]
| permissive | fossabot/astartool | dc85796f0aabfea156c01140daeaf7f7d61c85bc | 5099254069235f0ca387a27157369d4fd8fefad0 | refs/heads/master | 2022-12-16T15:17:37.008072 | 2020-09-07T18:01:06 | 2020-09-07T18:01:06 | 293,597,649 | 0 | 0 | null | 2020-09-07T18:01:01 | 2020-09-07T18:01:00 | null | UTF-8 | Python | false | false | 2,899 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: 河北雪域网络科技有限公司 A.Star
# @contact: [email protected]
# @site: www.snowland.ltd
# @file: _number.py
# @time: 2019/5/29 11:59
# @Software: PyCharm
__author__ = 'A.Star'
import math
from random import randint

import numpy as np

from astartool.common import hex_allowed_string, BIT_EACH
def ishex(s: str):
    """Return True when every character of *s* is an allowed hex digit.

    The empty string vacuously qualifies, matching the original behaviour.
    """
    return all(ch in hex_allowed_string for ch in s)
def gcd(a: int, b: int):
    """
    Greatest common divisor of a and b (Euclidean algorithm).
    :param a: non-negative integer
    :param b: non-negative integer
    :return: gcd(a, b); gcd(a, 0) == a
    """
    # BUG FIX: the original looped `while b != 1: a, b = divmod(a, b)`,
    # which replaces (a, b) with (a // b, a % b) — not the Euclidean step
    # (b, a % b).  It returned wrong results (e.g. gcd(12, 8) -> 0,
    # gcd(a, 1) -> a) and raised ZeroDivisionError when b == 0.
    while b:
        a, b = b, a % b
    return a
def lcm(a: int, b: int):
    """
    Least common multiple of a and b.
    :param a: positive integer
    :param b: positive integer
    :return: lcm(a, b)
    """
    # FIX: use math.gcd directly — the module-local gcd() was historically
    # buggy, and dividing before multiplying keeps intermediates small.
    return a // math.gcd(a, b) * b
def get_primes(number):
    """
    Return all primes strictly less than *number*, in increasing order
    (sieve of Eratosthenes).
    :param number: exclusive upper bound
    :return: list of primes < number
    """
    is_candidate = [True] * number
    primes = []
    for candidate in range(2, number):
        if not is_candidate[candidate]:
            continue
        primes.append(candidate)
        # Cross off every multiple starting from candidate².
        for multiple in range(candidate * candidate, number, candidate):
            is_candidate[multiple] = False
    return primes
def prime_factorization(number: int, li_number=None):
    """
    Decompose *number* into its prime factors (with multiplicity).
    :param number: integer > 1
    :param li_number: optional pre-computed list of primes covering sqrt(number)
    :return: list of prime factors, e.g. 12 -> [2, 2, 3]
    """
    if li_number is None:
        li_number = get_primes(int(np.sqrt(number)) + 1)
    li = []
    for k in li_number:
        while not (number % k):
            li.append(k)
            # BUG FIX: was `number /= k`, silently turning number into a float.
            number //= k
        if number == 1:
            break
    # BUG FIX: the original dropped any prime factor larger than sqrt(number)
    # (e.g. prime_factorization(10) returned [2], losing the factor 5).
    if number > 1:
        li.append(number)
    return li
def is_prime(number: (str, int), itor=10):
    """
    Probabilistic primality test (Fermat test with *itor* random bases).
    Composite numbers are rejected with high probability, but Carmichael
    numbers can be mis-reported as prime — this is inherent to the method.
    :param number: integer (or something convertible to int)
    :param itor: number of random bases to try
    :return: True if probably prime, False if certainly composite
    """
    if not isinstance(number, int):
        number = int(number)
    # BUG FIX: the original crashed for number < 2 (randint(1, 0) raises
    # ValueError) and gave meaningless answers for 0 and negatives.
    if number < 2:
        return False
    if number < 4:
        # 2 and 3 are prime.
        return True
    for i in range(itor):
        a = randint(1, number - 1)
        if pow(a, number - 1, number) != 1:
            return False
    return True
def rotate_left(a, k, mod=32):
    """
    Rotate the *mod*-bit integer *a* left by *k* bits (k is reduced mod *mod*).
    :param a: integer to rotate
    :param k: shift amount
    :param mod: word width in bits
    :return: rotated value
    """
    shift = k % mod
    # Split a into the bits that wrap around (quotient) and the bits that
    # simply move up (remainder), then recombine.
    quotient, remainder = divmod(a, BIT_EACH[mod - shift])
    return remainder * BIT_EACH[shift] + quotient
def equals_zero_all(matrix, eps=1e-8):
    """
    True iff every element of *matrix* lies strictly inside (-eps, eps).
    :param matrix: numpy array (or broadcastable operand)
    :param eps: non-negative tolerance
    :return: numpy boolean scalar
    """
    assert eps >= 0, "eps 应该大于0"
    near_zero = (matrix > -eps) & (matrix < eps)
    return np.all(near_zero)
def equals_zero_any(matrix, eps=1e-8):
    """
    True when at least one element of *matrix* lies strictly in (-eps, eps).
    :param matrix: numpy array (or broadcastable operand)
    :param eps: non-negative tolerance
    :return: numpy boolean scalar
    """
    assert eps >= 0, "eps 应该大于0"
    # Two explicit comparisons, kept per the original's note ("不能化简").
    near_zero = (matrix > -eps) & (matrix < eps)
    return np.any(near_zero)
def equals_zero(matrix, eps=1e-8):
    """
    Elementwise test: True where the entry lies strictly in (-eps, eps).
    :param matrix: numpy array (or broadcastable operand)
    :param eps: non-negative tolerance
    :return: boolean array of the same shape as *matrix*
    """
    assert eps >= 0, "eps 应该大于0"
    # Two explicit comparisons, kept per the original's note ("不能化简").
    below = matrix < eps
    above = matrix > -eps
    return above & below
| [
"[email protected]"
]
| |
d63408c2d9adafeadf3ac5e64efccfc40b438cae | 025fa245d4cbffdaa422287ed2f31c4d0442ee28 | /menus/models.py | 27649dd15508def3b9d933b9bfa95ba0bc8eb771 | [
"MIT"
]
| permissive | elcolie/zero-to-deploy | 01f346ca50b8ccb271faef23934abe6a487baca6 | 6191a33ef55af7c550c0e529a4e373bfe40bc014 | refs/heads/master | 2022-02-08T23:22:17.008555 | 2018-06-15T19:39:06 | 2018-06-15T19:39:06 | 137,083,690 | 0 | 0 | MIT | 2022-01-21T19:35:33 | 2018-06-12T14:28:01 | Python | UTF-8 | Python | false | false | 653 | py | from django.db import models
from djchoices import DjangoChoices, ChoiceItem
from commons.models import AbstractTimestamp
class Menu(AbstractTimestamp):
    """A sellable menu item (food or drink) with image, price and take-home flag."""

    class BackType(DjangoChoices):
        # Closed set of menu categories.
        # FIX: the original wrote f"Food"/f"Drink" — f-strings with no
        # placeholders (ruff F541); plain literals yield identical values.
        food = ChoiceItem("Food")
        drink = ChoiceItem("Drink")

    menu_type = models.CharField(max_length=15, choices=BackType.choices, default=BackType.food)
    name = models.CharField(max_length=20)
    image = models.ImageField(default='sr.png', upload_to='menus')
    take_home = models.BooleanField(default=False)
    price = models.DecimalField(max_digits=6, decimal_places=2)

    def __str__(self):
        return f"{self.name} {self.price}"
| [
"[email protected]"
]
| |
240a7289d113f8698a15f0a955c1a8ee2f5aec27 | 6e786e8f4c229aeb0822c9cdffccf109d7a32d41 | /CodeChef/CARPTUN.py | f8ef8e7280275fddc7f1f8aa48f53533cb8dd129 | []
| no_license | rajatdiptabiswas/competitive-programming | 337da9c86e0cfae7ed1b36fec9b6e225167edfcc | ed036e91b9516d8a29d1322db159d68462178d94 | refs/heads/master | 2021-06-10T08:59:40.296322 | 2021-04-17T06:51:18 | 2021-04-17T06:51:18 | 138,393,143 | 0 | 1 | null | 2019-10-26T19:53:33 | 2018-06-23T10:43:17 | Python | UTF-8 | Python | false | false | 378 | py | #!/usr/bin/env python3
def main():
    """CodeChef CARPTUN: for each test case print the total toll delay."""
    num_cases = int(input())
    for _ in range(num_cases):
        num_tolls = int(input())  # consumed but the split below fixes the count
        toll_times = [int(tok) for tok in input().split()]
        cars, distance, velocity = map(int, input().split())
        if cars >= 2:
            # The slowest booth is the bottleneck and the first car passes
            # free, so the delay is max(toll) * (cars - 1).  At cars == 2 this
            # is just max(toll), matching the original's separate branch.
            print("{:.8f}".format(max(toll_times) * (cars - 1)))


if __name__ == '__main__':
    main()
"[email protected]"
]
| |
047f0f6a9df8c5388a7a773194f4e8b8a08c80a4 | 3f8dc5d3bbe5faa9507c3941db4da446fb3b0c5c | /day1/day1-07 operators.py | ed7b46019aadc455e2c90d8617cb8e96d35e6dd1 | [
"MIT"
]
| permissive | hajin-kim/2020-HighSchool-Python-Tutoring | 91c626593e097dd703f4e84da6a910192f8eefda | 352025a954bff37d21cc3d59e7d5e0f0269a1f17 | refs/heads/main | 2023-03-12T04:22:57.287110 | 2021-03-01T18:27:28 | 2021-03-01T18:27:28 | 325,773,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | a = int( input() ) * -1
print(a)
b = int( input() )
print(b * -1)
| [
"[email protected]"
]
| |
0d8a1b2b108cc0e64cc844704ad0d367dc981df0 | df256ccf9821df359bfc0a6aba66dbf0639f0d09 | /bin/pip | f39dadc90dc8cecdec615b77118e2fbc1495f0e1 | []
| no_license | samiaellin06/Khawoon | 053381e757a09adc9981405f53ff8c4ca5bbc178 | 0d8f16a957726b8c7db22edc787b60c6895ebfe6 | refs/heads/master | 2022-12-08T11:55:43.207109 | 2020-08-27T11:54:25 | 2020-08-27T11:54:25 | 290,758,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | #!/home/kruf/PycharmProjects/khawoon/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim (see the
    # EASY-INSTALL-ENTRY-SCRIPT header above) — normally not edited by hand.
    # Strip any '-script.py(w)'/'.exe' suffix added by the Windows launcher
    # so pip sees its canonical program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Run pip's 'pip' console_scripts entry point and exit with its status.
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"[email protected]"
]
| ||
ff48930557120a3d44fe17e1cef02df767b2ea71 | f47ac27c95b3d75314a5acf15c93ea4dd737d5e6 | /android/performance_esweek_rebuttal/parse_timevalues.py | e38d38345da02bca4426c60bb20b51526a4b466a | []
| no_license | nitthilan/ml_tutorials | 10ecc5e3f54b6fa44abf949e80b8a2a1fa63f0fb | a24266882a0754ce5071f7a7cabd0694aae311aa | refs/heads/master | 2021-05-10T20:18:56.931046 | 2020-03-21T09:57:48 | 2020-03-21T09:57:48 | 118,182,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | filename = "performance.txt"
with open(filename) as f:
    content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
for line in content:
    words = line.split()
    # "count=50" lines: field 5 is assumed to be "<4-char prefix><number>";
    # strip the prefix, divide by 1000 and print the square.
    # TODO(review): confirm the field layout against an actual performance.txt.
    if(len(words) and words[0]=="count=50"):
        print((float(words[5][4:])/1000)**2)
    # "Graph:" lines: print the 5th path component minus its last 8 chars when
    # its final "_"-separated field equals "2.0" (presumably a version tag).
    if(len(words) and words[0]=="Graph:" and words[1].split("/")[4][:-8].split("_")[-1] == "2.0"):
        print(words[1].split("/")[4][:-8])
# 537.79929025
# 518.53665796
# 2625.07646025
# 2630.30508225
# 8114.87482276
# 8252.17812225
# 1319.64366361
# 1261.18737424
# 4776.86940201
# 4686.53883889
| [
"[email protected]"
]
| |
100854a6d18277d4c0dd905456dcf69e64b42394 | e96e9990ba26757b834eeff95e8bee9b720b72aa | /django/test007/blog/models.py | fa736c53142137dcf246805e9ccfa1fbf92b1a0a | []
| no_license | cuiyanan89/Python | 475a0a2778d7be5b9f6aa87ba35c21569080e056 | f742684474730e3b032aabd0151d584167c3ed02 | refs/heads/master | 2016-09-06T05:31:48.956411 | 2013-09-06T08:13:09 | 2013-09-06T08:13:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.db import models
# Create your models here.
class New(models.Model):
    """A news/blog entry: title, body text and an uploaded image."""
    new_title = models.CharField(max_length=30)
    new_content = models.TextField(max_length=3000)
    # Uploaded files are stored under ./images.
    new_img = models.FileField(upload_to="./images")

    # NOTE(review): __unicode__ is the Python-2 Django idiom; Python 3 Django
    # uses __str__ instead — confirm which interpreter this project targets.
    def __unicode__(self):
        return self.new_title
| [
"root@yanan-Rev-1-0.(none)"
]
| root@yanan-Rev-1-0.(none) |
db6f2f09a0d8daefdad6eb9f4f41bd89a4ac860e | e7ca19f01903016680ab7b72debc66bafeb1eaac | /frames_orig.py | f398a70f8250512b5b571162de0653df9718e8cd | []
| no_license | MedSun/count_video | 528816f92bf12c5415dd599480eae146e31bcaff | d8aca296ba427e50f95d1d5af4b68a0b615a43dd | refs/heads/master | 2022-11-30T09:09:13.829684 | 2020-08-13T16:17:29 | 2020-08-13T16:17:29 | 287,324,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import json
import os
import cv2
import requests
from app import ROOT_DIR
def frames_from_video(res):
    """Grab the first frame of a video, upload it, and return its remote path.

    res -- JSON string with keys "video_name" and "video_file".
    Returns a JSON string {"pic": <remote path>} on success, or json.dumps("")
    (with a console message) when the frame could not be read.
    """
    response = json.loads(res)
    video_name = response["video_name"]
    video_file = response["video_file"]
    video_cap = cv2.VideoCapture(video_file)
    try:
        success, image = video_cap.read()
    finally:
        # BUG FIX: release the capture handle (the original leaked it).
        video_cap.release()
    if success:
        path = os.path.join(ROOT_DIR, 'frames_orig_images/' + video_name + '.jpg')
        cv2.imwrite(path, image)
        # BUG FIX: close the frame file after the upload — the original left
        # the file handle open for the life of the process.
        with open(path, "rb") as frame_file:
            file = {'file': frame_file}
            response = requests.post("http://localhost:4000/api/upload-file", files=file)
        return json.dumps({"pic": response.json()["path"]})
    else:
        print("Ошибка при создании опорного кадра для ролика " + video_name)
        return json.dumps("")
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.