blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-283) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-41) | license_type (stringclasses 2 values) | repo_name (stringlengths 7-96) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 12.7k-662M ⌀) | star_events_count (int64 0-35.5k) | fork_events_count (int64 0-20.6k) | gha_license_id (stringclasses 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 43 values) | src_encoding (stringclasses 9 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 7-5.88M) | extension (stringclasses 30 values) | content (stringlengths 7-5.88M) | authors (sequencelengths 1-1) | author (stringlengths 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f229805f9cf2c5e9ccf9db6b269e61c6ee22a9e6 | 739e91039c05943352a3fc07e768641f74097482 | /BaekJoon_Online/Math/No_10569.py | 81399b6ffd373638043c6a8784cd45439e70058c | [] | no_license | doublejy715/Problem-Solve | 651182079ded1a9da3478dd30a4c4507894de85e | 57d5a672a48103769c8cc022cb7132d988624600 | refs/heads/master | 2023-07-13T09:51:23.145427 | 2021-08-31T04:35:04 | 2021-08-31T04:35:04 | 234,250,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | T = int(input())
for _ in range(T):
V,E = map(int,input().split())
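    # Euler's formula for convex polyhedra: V - E + F = 2, so the number of faces is F = 2 - V + E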
print(2-V+E) | [
"[email protected]"
] | |
b59e2feb7ab32d6a99987df4967c61e8d778901a | ebdd865324a7e3bd2e3a4de357483a3056a093e4 | /web/django_server/scripts/init_db.py | 07502590b9ebeb5b488180db88c9abccbed45220 | [] | no_license | hanaui-call/contacts-v2 | 73f4d9fb68d0936b441b14f4339273806b4731ed | a14649cb45d41d91a9a2a20e5047e7fd572e2e30 | refs/heads/main | 2023-02-16T06:52:38.876648 | 2020-12-30T00:21:15 | 2020-12-30T03:05:41 | 325,415,016 | 0 | 0 | null | 2021-01-08T16:32:24 | 2020-12-30T00:20:20 | CSS | UTF-8 | Python | false | false | 1,161 | py | from django_server.models import Member
from django.contrib.auth.models import User
from django_server.const import SexEnum
import datetime
def run(*args):
members = [
{
'name': '홍길동',
'email': '[email protected]',
'password': '12345',
'birth': datetime.date(1997, 10, 19),
'sex': SexEnum.MALE.value,
'phone': '010-1234-5678',
'tags': ['가족모임리더', '소명센터'],
},
{
'name': '김영희',
'email': '[email protected]',
'password': '09876',
'birth': datetime.date(1982, 2, 3),
'sex': SexEnum.FEMALE.value,
'phone': '010-6789-1234',
'tags': [],
}
]
for m in members:
        user = User.objects.create_user(username=m['email'], password=m['password'], email=m['email'])  # create_user hashes the password; plain create() would store it in clear text
member = Member.objects.create(
user=user,
name=m['name'],
phone=m['phone'],
birth=m['birth'],
sex=m['sex'],
tags=m['tags'],
is_active=True,
)
print(member)
| [
"[email protected]"
] | |
cb0467cf7981a70c05b02c4b293a20023ed38dcf | f036605fdf878e9c5bd44dad0f72f078d3b9ed25 | /main.py | 625c40050e2c2d99f87f967c7b14eab467019979 | [] | no_license | hiriorima/audio-collecter | 760b1892e64b67850759b9b772beea6c53f16c55 | 80d2b23e68a55ddb584e11135d8ff372d7a0a34c | refs/heads/master | 2020-04-05T15:37:46.780798 | 2018-11-10T12:03:27 | 2018-11-10T12:03:27 | 156,977,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | import pyaudio
import sys
import pylab
import numpy
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
# recording duration in seconds
RECORD_SECONDS = 2
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
frames_per_buffer = chunk)
print("* recording")
frames = []  # captured audio chunks; "frames" avoids shadowing the built-in all()
# number of reads = sample_rate / frames_per_chunk * seconds
for i in range(0, int(RATE / chunk * RECORD_SECONDS)):
    data = stream.read(chunk)
    frames.append(data)
print("* done recording")
stream.close()
p.terminate()
# join the captured chunks (stream.read returns bytes in Python 3, hence b'')
data = b''.join(frames)
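# A minimal sketch of the WAV export that WAVE_OUTPUT_FILENAME implies, using the
# stdlib wave module (assumed intent; the original script never writes the file):
import wave
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(pyaudio.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()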
result = numpy.frombuffer(data,dtype="int16") / float(2**15)
pylab.plot(result)
pylab.ylim([-1,1])
pylab.show()
| [
"[email protected]"
] | |
72095e6fd93a46905b087fc69f644ed46ff8da49 | c762ab8c1c25ffa97229a62ff43a33543093f963 | /venv/lib/python3.7/site-packages/cvxpy/tests/test_lin_ops.py | 17e3218df682a1ee8db6508e8333ac3f078755cc | [
"Apache-2.0"
] | permissive | nahyunkwon/multi-ttach | e68948d66541e85b764216efc54a82f6fc9ac044 | 971d0d93cc39f295deb23ea71146647f6db50ebc | refs/heads/master | 2023-08-09T18:25:31.658950 | 2023-07-24T17:46:04 | 2023-07-24T17:46:04 | 297,783,964 | 0 | 1 | Apache-2.0 | 2021-04-07T07:46:24 | 2020-09-22T22:08:53 | G-code | UTF-8 | Python | false | false | 5,197 | py | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.lin_ops.lin_utils import (create_var, create_param, sum_expr,
sum_entries, get_expr_vars, neg_expr,
create_const, create_eq, create_leq)
from cvxpy.lin_ops.lin_op import (VARIABLE, PARAM, SCALAR_CONST, NEG,
DENSE_CONST, SPARSE_CONST, SUM_ENTRIES)
import numpy as np
import scipy.sparse as sp
from cvxpy.tests.base_test import BaseTest
import sys
PY2 = sys.version_info < (3, 0)
class test_lin_ops(BaseTest):
""" Unit tests for the lin_ops module. """
def test_variables(self):
"""Test creating a variable.
"""
var = create_var((5, 4), var_id=1)
self.assertEqual(var.shape, (5, 4))
self.assertEqual(var.data, 1)
self.assertEqual(len(var.args), 0)
self.assertEqual(var.type, VARIABLE)
def test_param(self):
"""Test creating a parameter.
"""
var = create_param((5, 4))
self.assertEqual(var.shape, (5, 4))
self.assertEqual(len(var.args), 0)
self.assertEqual(var.type, PARAM)
def test_constant(self):
"""Test creating a constant.
"""
# Scalar constant.
shape = (1, 1)
mat = create_const(1.0, shape)
self.assertEqual(mat.shape, shape)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, SCALAR_CONST)
assert mat.data == 1.0
# Dense matrix constant.
shape = (5, 4)
mat = create_const(np.ones(shape), shape)
self.assertEqual(mat.shape, shape)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, DENSE_CONST)
assert (mat.data == np.ones(shape)).all()
# Sparse matrix constant.
shape = (5, 5)
mat = create_const(sp.eye(5), shape, sparse=True)
self.assertEqual(mat.shape, shape)
self.assertEqual(len(mat.args), 0)
self.assertEqual(mat.type, SPARSE_CONST)
assert (mat.data.todense() == sp.eye(5).todense()).all()
def test_add_expr(self):
"""Test adding lin expr.
"""
shape = (5, 4)
x = create_var(shape)
y = create_var(shape)
# Expanding dict.
add_expr = sum_expr([x, y])
self.assertEqual(add_expr.shape, shape)
assert len(add_expr.args) == 2
def test_get_vars(self):
"""Test getting vars from an expression.
"""
shape = (5, 4)
x = create_var(shape)
y = create_var(shape)
A = create_const(np.ones(shape), shape)
# Expanding dict.
add_expr = sum_expr([x, y, A])
vars_ = get_expr_vars(add_expr)
ref = [(x.data, shape), (y.data, shape)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_neg_expr(self):
"""Test negating an expression.
"""
shape = (5, 4)
var = create_var(shape)
expr = neg_expr(var)
assert len(expr.args) == 1
self.assertEqual(expr.shape, shape)
self.assertEqual(expr.type, NEG)
def test_eq_constr(self):
"""Test creating an equality constraint.
"""
shape = (5, 5)
x = create_var(shape)
y = create_var(shape)
lh_expr = sum_expr([x, y])
value = np.ones(shape)
rh_expr = create_const(value, shape)
constr = create_eq(lh_expr, rh_expr)
self.assertEqual(constr.shape, shape)
vars_ = get_expr_vars(constr.expr)
ref = [(x.data, shape), (y.data, shape)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_leq_constr(self):
"""Test creating a less than or equal constraint.
"""
shape = (5, 5)
x = create_var(shape)
y = create_var(shape)
lh_expr = sum_expr([x, y])
value = np.ones(shape)
rh_expr = create_const(value, shape)
constr = create_leq(lh_expr, rh_expr)
self.assertEqual(constr.shape, shape)
vars_ = get_expr_vars(constr.expr)
ref = [(x.data, shape), (y.data, shape)]
if PY2:
self.assertItemsEqual(vars_, ref)
else:
self.assertCountEqual(vars_, ref)
def test_sum(self):
"""Test sum entries op.
"""
shape = (5, 5)
x = create_var(shape)
expr = sum_entries(x, (1, 1))
self.assertEqual(expr.shape, (1, 1))
self.assertEqual(len(expr.args), 1)
self.assertEqual(expr.type, SUM_ENTRIES)
| [
"[email protected]"
] | |
8b0bbf53df939cdde5cf9d36134a502b02f7ccc9 | 21ff9739497feef293fdc438ecdb9946884abb3f | /Linear algebra/beta estimate by QRdecomp and QRsolve.py | 4a84a07a365a1f7559a7ce7dd8fcb177fbe888f8 | [] | no_license | mshasan/Python-Computing | 877c8bcaaf1bdb9bd030945c4aec09ae1a96b269 | dc4f6031becba29f601089677ac5e438c0014da8 | refs/heads/master | 2021-01-20T03:53:09.358852 | 2017-04-28T15:33:47 | 2017-04-28T15:33:47 | 89,597,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #-------------------------------------------------------------------------------------------------------------------
####################################################################################################################
"""some libraries to perform our desired calculations"""
import math # used to perform mathematical calculation
import random # used to generate random number
import scipy # for decomposition and matrix
import numpy # for matrix operation
from scipy.linalg import * # for linear algebra
from math import sqrt # to perform square root
from pprint import pprint # to get better output interface
####################################################################################################################
#-------------------------------------------------------------------------------------------------------------------
"""Defining the given quadratic curve to simulate random data
random.gauss(0.0,5.0)' generate Normal(0,25)'"""
def infunc(x):
return(x*x + 3.0*x + 4.0 + random.gauss(0.0,5.0))
#-------------------------------------------------------------------------------------------------------------------
"""the function 'RandomData will be used ' to simulate random data, it takes
n=# of simulation and s=random seed,"""
def RandomData(n,s):
random.seed(s) # set seed
one = range(n) # a variable to store 1
x = range(n) # variable to store x
x_sq = range(n) # variable to store x square
y = range(n) # variable to store y
for i in range(n): # initializing a for loop to simulate data
one[i] = 1 # vector of one
x[i] = random.uniform(-20.0, 20.0) # vector of x
x_sq[i] = x[i]**2 # vector of x square
y[i] = infunc(x[i]) # vector of y
random.seed(s) # stopping seed
return (y,one,x,x_sq) # returns different variables required for design matrix
#-------------------------------------------------------------------------------------------------------------------
"""applying 'RandomData' function to simulate data"""
n = 100 # number of simulated data
s = 897894321 # random seed
y1,one,x,x_sq = RandomData(n,s)
####################################################################################################################
# Problem - l-------------------------------------------------------------------------------------------------------
####################################################################################################################
print "-----------------------------Problem-l------------------------------------------------------------------"
"""using QRdecomp and QRsolve to find beta_hat by using matrix 'A' from problem - a"""
Q,R = QRdecomp(A) # QRdecomp function to get QR decomposition
QRsolve(Q, R, Y) # applying QRsolve function
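# optional cross-check with numpy's built-in least squares (same beta_hat expected):
# print numpy.linalg.lstsq(A, Y)[0]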
#------------------------------------------------------------------------------------------------------------------
####################################################################################################################
| [
"[email protected]"
] | |
7eb3286fb99fbbdb30d332b99846691d24f337d6 | f507ddbb7b07a9e9acec144120bbf7d1a48747a6 | /책/이진탐색/me부품찾기.py | af9e7dbf94b8a4828bfbe330f3bc8c3d661209df | [] | no_license | Insookim0702/python_Algorithm | 9f3c95b61f07bce46a5dc82c613a23d5aec6d5cd | 56ac719330ce03e2764c372cd37373e1e27a0614 | refs/heads/master | 2023-01-08T04:41:02.935269 | 2020-10-28T08:55:17 | 2020-10-28T08:55:17 | 298,799,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | # 15:53~ #16:00
n = 5
arr = [8, 3, 7, 9, 2]
request = [5, 7, 9]
def binary_search(start, end, target):
if start > end:
return None
mid = (start + end) // 2
if arr[mid] == target:
return mid
elif arr[mid] > target:
return binary_search(start, mid - 1, target)
else:
return binary_search(mid + 1, end, target)
request.sort()
arr.sort()
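# arr must be sorted for binary_search to be valid; sorting request only orders the printed answers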
for target in request:
search = binary_search(0, len(arr) - 1, target)
if search == None:
print('no', end=' ')
else:
print('yes', end=' ')
| [
"[email protected]"
] | |
e5aa57a5343b0198c4e1e63d15dafb59d41583a5 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /catboost/spark/catboost4j-spark/core/src/test/python/config.py | cd3c8b838d1d6354736665b42fbcdda4ac15a0b9 | [
"Apache-2.0"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 58 | py |
CATBOOST_TEST_DATA_DIR = '../../../../../../pytest/data'
| [
"[email protected]"
] | |
2fbb577a3736eb77df3b1f053a2b458f58cc1e6c | 9cc1b58d0319308da98187d071295b2fabf1f080 | /0727/a0728_台灣高鐵_爬蟲_05.py | d34d89a8c031566971cc6b77d581391a658c394d | [
"MIT"
] | permissive | Arwen0905/Python_Test | 60d1dee383c9cf27df6b93cfde7884c91092229c | c75357e4354a684a9fae41f751dae60d4cf0716c | refs/heads/master | 2023-01-13T13:14:55.355898 | 2020-10-31T18:52:07 | 2020-10-31T18:52:07 | 265,150,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | import requests
import pandas as pd
from bs4 import BeautifulSoup as soup
url = "https://www.twse.com.tw/exchangeReport/STOCK_DAY?response=json&date=20190101&stockNo=2633&_=1595925201119"
# url = "https://rate.bot.com.tw/xrt/all/2020-01-06"
# def convertDate(date):
# str1 = str(date)
#     yearst = str1[:3] # extract the ROC (Minguo) year
#     ryear = str(int(yearst)+1911) # convert to the Gregorian year
# findata = ryear + str1[4:6] + str1[7:9]
# return findata
# ans = lambda x:int(x)+1911
# print(ans("109"))
re = requests.get(url)
re.encoding = "utf-8"
rs = soup(re.text,"html.parser")
# rq = rs.select()
print(rs)
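# the endpoint returns JSON, so parsing it directly is simpler than BeautifulSoup;
# a sketch (assuming the usual TWSE payload keys): rows = re.json().get("data", [])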
| [
"[email protected]"
] | |
b7b085e55826b6bf4cb38342d4c8fdf379526942 | b7da8b6ded5b59938d20a915c43e310c7324041d | /app/config/urls.py | 9c9cfcb8b012a781a89d22ee314f74b6aeed6c38 | [] | no_license | miguel250/miguelpz-core | 1afb20b9f5386b246f94a9c163415a25e4fbb24c | a4dc01fbc8ec74382d36b21b66262a94eb8438f0 | refs/heads/master | 2021-03-12T23:33:55.312067 | 2012-07-02T05:40:54 | 2012-07-02T05:40:54 | 4,618,243 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from app.api import views as views_api
from app.core import views as views_core
prints = {
views_core.main : '/',
views_api.main : '/api',
} | [
"[email protected]"
] | |
4a1a3fb6fa603b30f35c6503a4fbb2bcdd9db378 | 2f48beec577a5ec1357e3ed703bbb08ea45bec44 | /01_jumptopy/chap03/116_tax_ver2.py | 1889daa82b7b45e289014e642fea8f7f42d206a1 | [] | no_license | kimgwanghoon/openbigdata | 2a09edcce96f927692656e051d9c37499007edb7 | 274c3e8cf0b0562facfb496743b879fef6164c73 | refs/heads/master | 2021-09-11T15:25:13.973451 | 2018-04-09T07:52:40 | 2018-04-09T07:52:40 | null | 0 | 0 | null | null | null | null | UHC | Python | false | false | 421 | py | # coding: cp949
print("\n-----택시 안내 가이드 프로그램 ver1-----")
#money=2000
print("가지고 계신 금액을 입력하세요 : ",end='')
money =int(input())
print("\n현재 가지고 계신 금액은 "+str(money)+"원 입니다")
if money>=3000:
print("택시를 타고 갈 수 있습니다")
else:
print("돈이 부족하니 걸어가야 합니다.")
print("프로그램을 종료합니다")
| [
"[email protected]"
] | |
7a433b27298f388c3634d5d6bc9acade8ea7a957 | 9d6debb136268baeed974370d043c17e1d476ba2 | /27-10-2020/s1.py | e9673c7f9e554a91ef966c4e4579c5dc6fc6431d | [] | no_license | Prasannarajmallipudi/Django_Batch8 | 55a8d1460c75a1564ff0c02f00d6eacfb0150fa0 | 914d106ed1873793642428839b776be9e6ad5589 | refs/heads/main | 2023-01-03T05:09:12.207833 | 2020-10-28T05:13:43 | 2020-10-28T05:13:43 | 307,913,360 | 0 | 0 | null | 2020-10-28T05:13:03 | 2020-10-28T05:13:03 | null | UTF-8 | Python | false | false | 593 | py | '''class New:
def __init__(self,name,designation):
self.name = name
self.designation = designation
obj = New('Prasanna Raj','Multi Skill Tranier')
class Abc:
def __newfun__(self):
print("Welcome New Function")
ob = Abc()
ob.__newfun__()
'''
# Multiple Inheritances
class P1:
def G1(self):
print("Parent1 Method")
class P2:
def G2(self):
print("Parent2 Method")
class C(P1,P2):
def G3(self):
print("Child Method")
obj = C()
obj.G1()
obj.G2()
obj.G3()
| [
"[email protected]"
] | |
197c293cdb4e2329c9dc3f518eee7110737180ea | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /xrR8zHmdTvEWqdr5g_12.py | 74311c6d3a51d72cede937f4cc9e28786e2be6af | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | """
Given the shortest side of a 30 60 90 triangle you have to find out the other
2 sides, (return the longest side, medium-length side).
### Examples
returnsides(1) ➞ (2, 1.73)
returnsides(2) ➞ (4, 3.46)
returnsides(3) ➞ (6, 5.2)
### Notes
* 30 60 90 triangles always follow this rule, let's say the shortest side length is x units, the hypotenuse would be 2x units and the other side would be x * root3 units.
* In the **Tests** , the decimal is rounded to 2 places.
* Return the values as a tuple.
"""
import math
def returnsides(length):
tup = (2*length,round(length * math.sqrt(3),2))
return tup
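# quick check of the documented examples when run as a script:
if __name__ == "__main__":
    print(returnsides(1))  # (2, 1.73)
    print(returnsides(3))  # (6, 5.2)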
| [
"[email protected]"
] | |
3c3e54dc504faee77326ce820cc0425e79b31ef0 | 9b290a914350b2ba0b386b0427d29a29268f3c9a | /controllers/mainh.py | bd4a176be851aec091813c183a38ac55e5ce5102 | [] | no_license | bhaskarb/Donor-database | 2a069ffecc14ec49a73631d8dd02ae471568e0ea | 2fd8f3410ba66629426175a6750b4d050ac87e48 | refs/heads/master | 2021-01-19T04:55:34.888246 | 2012-02-08T00:36:52 | 2012-02-08T00:36:52 | 3,382,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
__author__ = 'Bhaskar Bharath'
#import re
import urllib2
import os
from models.models import *
from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext.webapp import template
class MainPage(webapp.RequestHandler):
def get(self):
#logging.debug("In main Page")
path = os.path.join(os.path.dirname(__file__), '..', 'views', 'main.html')
projects=Project.all()
if projects.count():
options=["Donor", "Recipient", "Steward"]
else:
options=["Steward"]
template_values = {
'projects' : projects,
'options' : options,
'logout' : users.create_logout_url('/'),
}
self.response.out.write(template.render(path, template_values))
| [
"[email protected]"
] | |
cd1ab1c2f86ee030b100d3545064382f648bb42a | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/1-Python-Basics/25-dictionary-keys_20200413193453.py | 2c7728e6e1aa45c4887103c0fb3fa3c9ef41a1c6 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | # Dictionary key has to be immutable
# do not create an array as a key
# for example [32]: true --- this does not work
# 2 same keys
game_archive = {
'weapons': [1,2,3],
'greeting': "Good day",
'is_Magic': True
}
print(game_archive['weapons'][1])  # assumed intent: index into the list value (prints 2); 'a'[1] raises IndexError
| [
"[email protected]"
] | |
1096e2be2a1a93f744e62127a8fb95b52a2e5b07 | 1a6c35729dcd7bf9d7661f1260f0a7b94a527ff3 | /ParameterManager/get_from_db.py | 31bd3eee45ba89d97365e720c2ccc1c388977fa2 | [] | no_license | lentiann/production-manager | f433870da7e1990055002cfc48b26bab336650ee | d70cf7674c50cc82b61a475ed42276db496a57be | refs/heads/master | 2020-09-01T16:50:03.367073 | 2019-12-02T08:44:19 | 2019-12-02T08:44:19 | 219,008,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,596 | py | from django.shortcuts import render
from django.http import JsonResponse
from opcua import Client
from .models import Database, Node, Value
from .models import Node, Value
from datetime import datetime
import time
from .update_db import get_from_plc
from django.db.models import Q
from django.contrib.auth.decorators import login_required, user_passes_test
import json
def walk_db(node, data):
if not data:
data.append({"id": node.node_id, 'parent': '#', 'important': node.important, 'text': node.name})
a = node.node_set.all()
for node in a:
# If it is not an endpoint create a nested list and call walk_(node) again to get node's children
if node.node_set.all():
data.append({"id": node.node_id, 'parent': node.parent.node_id, 'important': node.important, 'text': node.name})
# Call walk_db again for the child
walk_db(node, data)
# If it is an endpoint add nodes to the list -------
else:
data.append({"id": node.node_id, 'parent': node.parent.node_id, 'important': node.important,
'text': node.name, "state": {"checked": node.important}})
return data
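# each dict produced by walk_db follows the flat jsTree-style format the front end
# appears to consume: {"id": ..., "parent": "#" or parent id, "text": ..., "state": {...}}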
# @user_passes_test(lambda u: u.is_superuser)
def get_database(request):
dbs = Database.objects.filter(updating=True)
    data = []
databases = Database.objects.all()
dbs = Database.objects.filter(next=True)
if dbs:
curr_db = dbs[0]
else:
curr_db = Database.objects.get(pk=1)
all_nodes = Node.objects.filter(database=curr_db, first=True)
if all_nodes:
parent_node = all_nodes[0]
data = walk_db(parent_node, data)
else:
time.sleep(10)
        return get_database(request)  # retry and return the eventual response
check = False
for db in databases:
if db.id == curr_db.id + 1:
db.next = True
db.save()
check = True
else:
db.next = False
db.save()
if not check:
first_db = Database.objects.get(pk=1)
first_db.next = True
first_db.save()
return JsonResponse(data, safe=False)
# @user_passes_test(lambda u: u.is_superuser)
def get_importants(request):
data_list = []
nodes = Node.objects.filter(important=True)
for node in nodes:
if node.value_set:
last_value = node.value_set.all().order_by('-id')
if last_value:
last_value = last_value[0]
value = last_value.str_data
if not value:
value = last_value.float_data
data_list.append((node, last_value.date, value))
if request.is_ajax():
dict_ = {}
for parameter in data_list:
dict_[parameter[0].pk] = [parameter[1], str(parameter[2])]
return JsonResponse(dict_)
return render(request, 'show_importants.html', {"parameters": data_list})
# @user_passes_test(lambda u: u.is_superuser)
def get_history(request):
id = int(request.POST['parameter'])
parameter = Node.objects.get(id=id)
values = parameter.value_set.all()
data_list = []
for value in values:
data_fields = []
all_fields = [value.float_data, value.str_data, value.detail]
for field in all_fields:
if field:
data_fields.append(field)
data_list.append((value.date.strftime("%Y-%m-%d %H:%M:%S"), data_fields))
data_list.sort()
dict_ = {'data': data_list}
return JsonResponse(dict_)
main_dict = {'Inputs': {}, 'Memory': {}, 'Outputs': {}}
def iomem_db(node):
global main_dict
try:
display_name = str(node.get_display_name())
start = display_name.index('Text:') + 5
node_name = display_name[start:-1]
except:
node_name = node
if node_name == "Inputs":
v = node.get_children()
for node in v:
display_name = str(node.get_display_name())
start = display_name.index('Text:') + 5
node_name = display_name[start:-1]
main_dict['Inputs'][node_name] = node.get_value()
elif node_name == "Outputs":
v = node.get_children()
for node in v:
display_name = str(node.get_display_name())
start = display_name.index('Text:') + 5
node_name = display_name[start:-1]
main_dict['Outputs'][node_name] = node.get_value()
elif node_name == "Memory":
v = node.get_children()
for node in v:
display_name = str(node.get_display_name())
start = display_name.index('Text:') + 5
node_name = display_name[start:-1]
main_dict['Memory'][node_name] = node.get_value()
return main_dict
# @login_required()
def get_iomem(request):
global main_dict
inputs = Node.objects.filter(name="Inputs")
outputs = Node.objects.filter(name="Outputs")
memories = Node.objects.filter(name="Memory")
if inputs and outputs and memories:
inputs = Node.objects.filter(parent=inputs[0])
for input in inputs:
input_name = input.name
input_value = input.value_set.all().order_by('-id')
i_value = input_value[0].str_data
if not i_value:
i_value = input_value[0].float_data
            if not i_value:  # fall back to detail when float_data is also empty
i_value = input_value[0].detail
main_dict['Inputs'][input_name] = str(i_value)
outputs = Node.objects.filter(parent=outputs[0])
for output in outputs:
output_name = output.name
output_value = output.value_set.all().order_by('-id')
o_value = output_value[0].str_data
if not o_value:
o_value = output_value[0].float_data
            if not o_value:
o_value = output_value[0].detail
main_dict['Outputs'][output_name] = str(o_value)
memories = Node.objects.filter(parent=memories[0])
for memory in memories:
memory_name = memory.name
memory_value = memory.value_set.all().order_by('-id')
m_value = memory_value[0].str_data
if not m_value:
m_value = memory_value[0].float_data
            if not m_value:
m_value = memory_value[0].detail
main_dict["Memory"][memory_name] = str(m_value)
return render(request, 'iomem.html', {'data': main_dict})
# Walk through all databases
# node = client.get_objects_node()
# return render(request, 'iomem.html', {'data': iomem(node)})
# # Walk through all databases
# node = client.get_objects_node()
# main_dict_ = iomem(node)
# client.disconnect()
# return render(request, 'iomem.html', {'data': main_dict_})
# @login_required()
def get_certain_data(request):
client = Client("opc.tcp://192.168.160.215:4840")
client.connect()
# ns = request.POST['ns']
s = request.POST['s']
# node = client.get_node("ns="+ns+';'+'s=\"'+s+'\"').get_variables()
vars = {}
for i in range(1, 6):
node = client.get_node("ns=" + str(i) + ';' + 's=\"' + s + '\"').get_variables()
for v in node:
display_name = str(v.get_display_name())
start = display_name.index('Text:') + 5
node_name = display_name[start:-1]
vars[node_name] = v.get_value()
client.disconnect()
return render(request, 'certain-data.html', {'data': vars, 's': s})
# @login_required()
def certain_data_form(request):
return render(request, "certain-data-form.html", {})
# @login_required()
def filter_by_time(request):
# start_date = datetime.strptime("2018-09-18 7:00:00", '%Y-%m-%d %H:%M:%S')
# end_date = datetime.strptime("2018-09-24 12:56:00", '%Y-%m-%d %H:%M:%S')
start_date = request.GET['startDate']
end_date = request.GET['endDate']
hour = start_date.split(' ')
end_h = end_date.split(' ')
start_date = datetime.strptime(hour[0], '%Y-%m-%d')
end_date = datetime.strptime(end_h[0], '%Y-%m-%d')
start = datetime.strptime(hour[1], '%H:%M')
end = datetime.strptime(end_h[1], '%H:%M')
start_hour = start.strftime('%H')
end_hour = end.strftime('%H')
start_min = start.strftime('%M')
end_min = end.strftime('%M')
parameter_list = []
parameters = Value.objects.filter(Q(date__range=[start_date, end_date], date__hour__gte=start_hour,
date__hour__lte=end_hour), Q(date__minute__gte=start_min) | Q(date__minute__lte=end_min))
if parameters:
for parameter in parameters:
if parameter.node.important:
date = parameter.date.strftime("%Y-%m-%d %H:%M:%S")
value = parameter.str_data
if not value:
value = parameter.float_data
                if not value:
value = parameter.detail
parameter_list.append(({'parameterName' : parameter.node.name, 'parameterValue' :value, 'parameterDate': date}))
    return JsonResponse(parameter_list, safe=False)
def filter_by_value(request):
value = "False"
number = 0
    results = {}  # avoid shadowing the built-in dict
    parameters = Value.objects.filter(Q(str_data=value) | Q(float_data=number), node__important=True)
    for parameter in parameters:
        last_value = parameter.node.value_set.all().order_by('-id')
        if last_value:
            last_value = last_value[0]
            value = last_value.str_data
            if not value:
                value = last_value.float_data
            results[parameter.node.name] = value
    return JsonResponse(results)
| [
"[email protected]"
] | |
2fbaf648ba5bae81e14b36d711e2c3c32e9d6a84 | 782add83942f817364080f9dc8df298d94d53bc4 | /circuits/http/server/routing/router.py | 65ae15bc1d1d1da807d7bd994aed546a1cf20311 | [] | no_license | spaceone/circuits.http | 058ea3538600a9f82465dac85196da839d5d23c5 | ad204b161ee45899ed4672384e4236d3ef3a9374 | refs/heads/master | 2021-11-22T17:50:07.660090 | 2021-10-07T23:31:24 | 2021-10-07T23:33:10 | 34,882,701 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from httoop import FORBIDDEN, NOT_FOUND
from circuits import BaseComponent
from circuits.http.events import request, response
from circuits.http.utils import httphandler
class Router(BaseComponent):
@httphandler('routing_success')
def _on_routing_success(self, evt, result):
if not evt.stopped:
client = evt.args[0]
channels = [c.channel for c in (client.domain, client.resource) if c is not None]
if client.events.request:
return
client.events.request = self.fire(request(client), *channels).event
@httphandler('request_success')
def _on_request_success(self, evt, result):
if not evt.stopped and not getattr(evt, '_request_success_foobar', None):
evt._request_success_foobar = True # FIXME: it seems this handler is called twice (for each channel?)
client = evt.args[0]
self.fire(response(client), client.server.channel)
@httphandler('routing', priority=-0.12)
def _on_resource_routing(self, client):
if client.resource is None:
if client.request.method in ('PUT', 'POST'):
raise FORBIDDEN()
raise NOT_FOUND(client.request.uri.path)
| [
"[email protected]"
] | |
369118c3bb63fadbf9c81dbf0990bdd9080dcb3a | bba684fcdf354c5cbb0cde6c2ab7e26270056436 | /links/views.py | ac0b98ee5a2818cd546a7aa1f414c00b94948859 | [] | no_license | bekaarcoder/amazon-price-tracker | d2017b63076f1e67c84d2f8bc1234988f1564919 | bcfe2e6bfad5e7e8627111c0bb8277ed306274ae | refs/heads/main | 2023-04-04T16:30:09.514951 | 2021-04-20T17:22:18 | 2021-04-20T17:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | from django.shortcuts import render, redirect
from .forms import AddLinkForm
from .models import Link
def home_view(request):
no_discounted = 0
error = None
form = AddLinkForm(request.POST or None)
if request.method == "POST":
try:
if form.is_valid():
form.save()
except AttributeError:
error = "Error Occured! Coudn't get the data for the url."
except:
error = "Something went wrong!"
form = AddLinkForm()
links = Link.objects.all().order_by("created")
items_no = links.count()
if items_no > 0:
discount_list = []
for item in links:
if item.old_price > item.current_price:
discount_list.append(item)
no_discounted = len(discount_list)
context = {
"links": links,
"items_no": items_no,
"no_discounted": no_discounted,
"form": form,
"error": error,
}
return render(request, "links/main.html", context)
def update_items(request):
links = Link.objects.all()
for link in links:
link.save()
return redirect("home")
def delete_item(request, id):
link = Link.objects.get(pk=id)
context = {"link": link}
if request.method == "POST":
link.delete()
return redirect("home")
return render(request, "links/delete.html", context)
| [
"[email protected]"
] | |
9d12cb3b60ff395ea9823f4727db08d662aec2d3 | 7c18e950ac82775020cd118abc01840907c6a57f | /accounts/views.py | 6fd48dfdd70dcc5c41b41dc6286caf67b65eda1e | [] | no_license | jayedeep/colortogreyscale | 104998e1c7a6e1df091a0176c7abfb305aa3aa6a | 5b854d00812c7a86c3e0608cbd71fe8ab49425e0 | refs/heads/master | 2022-12-28T16:05:59.304248 | 2020-10-01T02:43:17 | 2020-10-01T02:43:17 | 300,119,431 | 0 | 0 | null | 2020-10-01T02:42:34 | 2020-10-01T02:42:34 | null | UTF-8 | Python | false | false | 2,036 | py | from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages, auth
from django.contrib.auth.models import User
def signup(request):
if request.method == "POST":
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
confirm_password = request.POST['confirm_password']
if password == confirm_password:
if User.objects.filter(username=username).exists():
messages.error(request,"Username already exists")
return redirect('signup')
else:
if User.objects.filter(email=email).exists():
messages.error(request,"Email already exists")
return redirect('signup')
else:
user = User.objects.create_user(username=username, email=email, password=password)
user.save()
messages.success(request, "You're are registered!")
return redirect('login')
else:
messages.error(request, "Both passowrd should be match")
return redirect('signup')
else:
return render(request, 'accounts/signup.html')
def login(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
print(username)
print(password)
user= auth.authenticate(username=username, password=password)
if user:
auth.login(request, user)
messages.success(request, "You're logged in")
return redirect("upload")
else:
messages.error(request, "Invalied Username or Password")
return redirect("login")
else:
return render(request, 'accounts/login.html')
def logout(request):
if request.method == 'POST':
auth.logout(request)
messages.success(request, 'You are logged out')
return redirect('upload')
| [
"[email protected]"
] | |
b10f329072040843fe29da3ea7ff80d7ed7c4668 | 06bd8ff7e4b07b223639c9f462579b6e31bb9974 | /read_data.py | da24129235512e196fda353414027df19b282f7a | [] | no_license | atantczak/Basic_Finance | e259b17d1593325c612ca2a6867a04030482b277 | a59b53f3794888e5407aaab2e1273b23fcc51b20 | refs/heads/master | 2023-02-24T18:42:50.380161 | 2021-01-20T16:37:10 | 2021-01-20T16:37:10 | 274,242,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | # This is the SECOND 'Python Programming for Finance' episode from sentdex.
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import pandas_datareader.data as web
style.use('ggplot')
# This is simply a style choice. Feel free to look into others once you get going.
'''
Important to note: You need to write the file path as seen below. Let's say I have a folder titled "Stocks"; that's why
I tell the "filename" portion of the command to go to "Stocks/AAPL.csv" ... if I was already in the folder with the AAPL
data, I would simply write 'AAPL.csv'. Often times, however, you will keep data in separate folders.
'''
df = pd.read_csv('Stocks/AAPL.csv',parse_dates=True, index_col=0)
# Parse_dates ensures the format of all dates will be legible.
# index_col = 0 is telling it to treat the first column as the index, or 0th column.
# You can read from many different file formats such as: json, SQL, excel, etc.
df['Adj Close'].plot()
plt.show()
'''
I chose to only plot adjusted close here. Of course, you can plot the highs, volume, or whatever else you may want to
look at. This is where the awesome powers of customization come into play. When you get comfortable with this stuff, you
can really make all of these your own.
'''
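# A possible next step in that direction (assuming the CSV has a Volume column):
# df['Volume'].plot()
# plt.show()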
| [
"[email protected]"
] | |
b732e224201c0f33230cda944132b44b1a322524 | a652b1bcb5dbd5bbe57b1e26a1e80c45c42ed856 | /apptest.py | 90dbc8660f359a5ef64ad0b5ed1858c8cea365e7 | [] | no_license | kvenkatasai4/test | a9f151265455b3adf5ac1dbfb0e9faf1fcc2346d | 2ecf5e14ed7dc0fef9873205690194c130a69a4e | refs/heads/master | 2021-09-24T10:11:12.188871 | 2018-10-07T21:32:57 | 2018-10-07T21:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | class App:
a = 10
print(a)
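# note: print(a) runs once, when the class body executes at import time,
# not when App() instances are created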
| [
"[email protected]"
] | |
5b2f410ad7d3aee2f2ebdb956b213c8d3ceaddfa | b8cc42d12d19129cd79850c5c17ee55fca12ffa1 | /src/music/views.py | 04ac79a74b859c25dbe47029c420c579fbfff110 | [] | no_license | shashikantedx/django-music-project | eab7de3b1b4d71d3566d9954529e0ac1dec1b36f | c66411b14b64a24cef1622312afd3802f6706770 | refs/heads/master | 2021-07-13T08:05:22.388806 | 2020-09-02T12:44:10 | 2020-09-02T12:44:10 | 201,950,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,833 | py | from django.shortcuts import render, get_object_or_404
from django.contrib.auth import authenticate,login
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.db.models import Q
from .models import Album, Song, myplay
from .forms import User_Forms
# Create your views here.
#-----------------------------------------#
# view to define the home detail page........
def home_detail(request):
albums = Album.objects.all()
song_results = Song.objects.all()
query = request.GET.get("q")
if query:
albums = albums.filter(
Q(Album_title__icontains=query) |
Q(Artist__icontains=query)
).distinct()
song_results = song_results.filter(
Q(song_title__icontains=query)
).distinct()
return render(request, 'home-detail.html', {
'albums': albums,
'songs': song_results,
})
else:
return render(request, 'home-detail.html', {'albums': albums})
# view to define the signup page.............
def SignUp(request):
form = User_Forms(request.POST or None)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
hello='Welcome'
albums = Album.objects.all()
return render(request, 'after-login-home.html', {'albums': albums,'hello':hello})
context = {
"form": form,
}
return render(request, 'signup.html', context)
# view to define the detail page..........
def detail(request, album_id):
if not request.user.is_authenticated:
return render(request, 'login.html')
else:
user = request.user
album = get_object_or_404(Album, pk=album_id)
return render(request, 'detail.html', {'album': album, 'user': user})
# view to define the logout page for app.....
def logout_user(request):
logout(request)
form = User_Forms(request.POST or None)
context = {
"form": form,
}
return render(request, 'login.html', context)
# view to define the login page for app.......
def login_user(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
albums = Album.objects.all()
return render(request, 'after-login-home.html', {'albums': albums,})
else:
return render(request, 'login.html', {'error_message': 'Your account has been disabled'})
else:
return render(request, 'login.html', {'error_message': 'Invalid login'})
return render(request, 'login.html')
# view for the user's after-login page: provides the message functionality and the myplay music app detail
def after_login_home(request):
if not request.user.is_authenticated:
return render(request, 'login.html')
else:
albums = Album.objects.all()
song_results = Song.objects.all()
query = request.GET.get("q")
if query:
albums = albums.filter(
Q(Album_title__icontains=query) |
Q(Artist__icontains=query)
).distinct()
song_results = song_results.filter(
Q(song_title__icontains=query)
).distinct()
return render(request, 'after-login-home.html', {
'albums': albums,
'songs': song_results,
})
return render(request, 'after-login-home.html', {'albums': albums})
# view to define the add function to add the fav song to your play list....
def add(request,song_id):
    if not request.user.is_authenticated:  # property form, consistent with the other views above
return render(request, 'login.html')
else:
song = get_object_or_404(Song, pk=song_id)
        # persist the chosen song as a myplay row (a queryset itself cannot be saved)
        myplay.objects.create(mysong=song.pk)
albums = Album.objects.all()
song_results = Song.objects.all()
query = request.GET.get("q")
if query:
albums = albums.filter(
Q(Album_title__icontains=query) |
Q(Artist__icontains=query)
).distinct()
song_results = song_results.filter(
Q(song_title__icontains=query)
).distinct()
return render(request, 'after-login-home.html', {
'albums': albums,
'songs': song_results,
})
return render(request, 'after-login-home.html', {'albums': albums})
| [
"[email protected]"
] | |
657c41b1196bbd52be0069b9a51ee59954b7e73d | 02b877ecb1922cbca67973970ea0873a21e6528c | /Queries-CM-Diagrams/Mean-AG-EBminR-Queries/OB-dataset_mean_AG-EBminR.py | 8000567a034fc98b60949cfae62eda737986682d | [] | no_license | renlliang3/Gaia_research | 681132c660622ecfe0c304501c8870e0433af133 | 6303185c41d549ebc8bc7bce020a5cbec0f8159f | refs/heads/master | 2021-09-19T11:47:27.137611 | 2018-07-27T17:37:34 | 2018-07-27T17:37:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | import astropy.units as u
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.units import Quantity
from astropy.io.votable import parse
from astropy.table import Table
from matplotlib.colors import LogNorm
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
extra_data = np.genfromtxt("Data/Mean_extinction_excess_SourceID_500pc_AG-4.4_BminR-1.7.dat", names = True, dtype=None)
source_id = extra_data['source_id']
readresults = Table.read('Data/All-star-500pc-AG-4.4-BminR-1.7.fits',format='fits')
results = np.array(readresults)
nonmatches = np.array([])
j=0
for i in range(len(source_id)):
not_found = True
while not_found:
if source_id[i]!=results['source_id'][j]:
nonmatches = np.append(nonmatches,j)
j+=1
else:
not_found = False
j+=1
print(j)
for k in range(len(nonmatches)):
    results = np.delete(results, int(nonmatches[k]) - k)  # np.append stored the indices as floats; delete needs ints
j=0
nonmatches_check = np.array([])
for i in range(len(source_id)):
not_found = True
while not_found:
if source_id[i]!=results['source_id'][j]:
nonmatches_check = np.append(nonmatches_check,j)
j+=1
else:
not_found = False
j+=1
print(j)
print('Making OB-Dataset')
#Build a new array with only the OB-Sources.
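# Selection cut applied below (as read from the condition): extinction-corrected
# colour (BP-RP) - E(B-R) <= 0, and absolute magnitude
# M_G = G + 5*log10(parallax [mas]) - 10 - A_G on the blue side of M_G = 3*colour + 2.1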
first_indices = np.array([])
k=0
for i in range(len(results)):
if results['bp_rp'][i]-extra_data['EBminR'][i] <= 0.0 and (results['phot_g_mean_mag'][i] + 5 * np.log10(results['parallax'][i]) - 10 - extra_data['AG'][i]) <= (3*(results['bp_rp'][i]-extra_data['EBminR'][i]) + 2.1):
if k==2:
OB_sources = np.append(results[int(first_indices[0])],results[int(first_indices[1])])
k+=1
elif k>2:
OB_sources = np.append(OB_sources, results[i])
else:
first_indices = np.append(first_indices,i)
k+=1
t_OB = Table(OB_sources)
t_OB.write('OB_corrected_mean_AG_EBminR.fits', format='fits')
| [
"[email protected]"
] | |
96f88354793d8a9149846bf4e8fa205689bdf6b1 | 6dae400af965a87f209e5e72b18868927ae00053 | /rooms/admin.py | 2ffab64a5e43ca0a83a4d764f9e5bb31675f2f8e | [] | no_license | junha6316/airbnb-clone-using-Django | 341ab71d7723ad413379723fc36c7a35a4d6fbba | fbce59ef9092c5ffbe6799b8c590492a9f0d40bf | refs/heads/master | 2023-01-19T17:36:24.220832 | 2020-12-01T06:50:47 | 2020-12-01T06:50:47 | 292,712,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,862 | py | from django.contrib import admin
from django.utils.html import mark_safe
from . import models
@admin.register(models.Photo)
class PhotoAdmin(admin.ModelAdmin):
""" Photo Admin Definition"""
list_display = ("__str__", "get_thumbnail")
def get_thumbnail(self, obj):
return mark_safe(f"<img width = 50px src ={obj.file.url} />")
get_thumbnail.short_description = "Thumbnail"
# class PhotoInline(admin.TabularInline):
class PhotoInline(admin.StackedInline):
model = models.Photo
@admin.register(models.Room)
class RoomAdmin(admin.ModelAdmin):
""" Room Admin Definition"""
inlines = (PhotoInline,)
fieldsets = (
(
"Basic_Info",
{"fields": ("name", "description", "country", "city", "address", "price")},
),
("Times", {"fields": ("check_in", "check_out", "instant_book")}),
(
"Spaces",
{
"fields": (
"guests",
"bedrooms",
"baths",
),
},
),
(
"More About the Space",
{
"fields": (
"amenities",
"facilities",
"houseRules",
)
},
),
("Last Details", {"fields": ("host",)}),
)
list_display = (
"name",
"description",
"country",
"city",
"price",
"guests",
"bedrooms",
"baths",
"check_in",
"check_out",
"instant_book",
"countItems",
"Count_Photos",
"Rating_Reviews",
)
ordering = ("price",)
list_filter = (
"instant_book",
"host__superhost",
"host__gender",
"amenities",
"facilities",
"houseRules",
"country",
"city",
)
raw_id_fields = ("host",) # Item들을 아이디를 이용해 관리하게 한다.
search_fields = ("^city", "host__username")
filter_horizontal = ( # filter_horizontal
"amenities",
"facilities",
"houseRules",
)
def save_model(self, request, obj, form, change):
print(obj, change, form)
super().save_model(request, obj, form, change)
def countItems(self, obj): # self: admin class /obj : current row
return obj.amenities.count() # queryset
def Count_Photos(self, obj):
return obj.photos.count()
Count_Photos.short_description = "Photo Count"
@admin.register(models.RoomType, models.Amenity, models.Facility, models.HouseRule)
class ItemAdmin(admin.ModelAdmin):
""" Item Admin Definition """
list_display = ("name", "used_by")
    def used_by(self, obj): # ROOMS THAT HAVE THE ITEM
return obj.rooms.count()
# Register your models here.
| [
"[email protected]"
] | |
5d8aeed4c432b6a82a177870497c7821f93d0c08 | e16f5e6158a9b1e3f9206744996f94f682248725 | /assignment2/cs231n/classifiers/cnn.py | 13984995b0cc61bce2adf12eb7c49953d17b9a93 | [] | no_license | fighting-liu/cs231n | 435284acc0511f6f6213294a345fe70b924ef469 | 952534f9d132068f41f08f7968c5c0174fe4a698 | refs/heads/master | 2021-01-01T05:17:37.137220 | 2016-05-05T02:46:54 | 2016-05-05T02:46:54 | 57,176,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,905 | py | import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class ThreeLayerConvNet(object):
"""
A three-layer convolutional network with the following architecture:
conv - relu - 2x2 max pool - affine - relu - affine - softmax
The network operates on minibatches of data that have shape (N, C, H, W)
consisting of N images, each with height H and width W and with C input
channels.
"""
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
dtype=np.float32):
"""
Initialize a new network.
Inputs:
- input_dim: Tuple (C, H, W) giving size of input data
- num_filters: Number of filters to use in the convolutional layer
- filter_size: Size of filters to use in the convolutional layer
- hidden_dim: Number of units to use in the fully-connected hidden layer
- num_classes: Number of scores to produce from the final affine layer.
- weight_scale: Scalar giving standard deviation for random initialization
of weights.
- reg: Scalar giving L2 regularization strength
- dtype: numpy datatype to use for computation.
"""
self.params = {}
self.reg = reg
self.dtype = dtype
############################################################################
# TODO: Initialize weights and biases for the three-layer convolutional #
# network. Weights should be initialized from a Gaussian with standard #
# deviation equal to weight_scale; biases should be initialized to zero. #
# All weights and biases should be stored in the dictionary self.params. #
# Store weights and biases for the convolutional layer using the keys 'W1' #
# and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #
# hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #
# of the output affine layer. #
############################################################################
C, H, W = input_dim
W1 = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size)
b1 = np.zeros((num_filters))
        W2 = weight_scale * np.random.randn(num_filters * H * W // 4, hidden_dim)  # 2x2 pool halves H and W, so the flattened size is F * (H/2) * (W/2)
b2 = np.zeros((hidden_dim))
W3 = weight_scale * np.random.randn(hidden_dim, num_classes)
b3 = np.zeros((num_classes))
self.params['W1'] = W1
self.params['b1'] = b1
self.params['W2'] = W2
self.params['b2'] = b2
self.params['W3'] = W3
self.params['b3'] = b3
############################################################################
# END OF YOUR CODE #
############################################################################
for k, v in self.params.iteritems():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Evaluate loss and gradient for the three-layer convolutional network.
Input / output: Same API as TwoLayerNet in fc_net.py.
"""
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3']
# pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}
# pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer convolutional net, #
# computing the class scores for X and storing them in the scores #
# variable. #
############################################################################
out, cache_conv_relu_pool = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
out, cache_affine_relu = affine_relu_forward(out, W2, b2)
out, cache_affine = affine_forward(out, W3, b3)
scores = out
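    # shape flow for the default 2x2 pool (a sketch): X (N, C, H, W)
    # -> conv-relu-pool (N, F, H/2, W/2) -> affine-relu (N, hidden_dim)
    # -> affine (N, num_classes), stored in scores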
############################################################################
# END OF YOUR CODE #
############################################################################
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the three-layer convolutional net, #
# storing the loss and gradients in the loss and grads variables. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
############################################################################
data_loss, dscores = softmax_loss(scores, y)
reg_loss = 1.0 / 2 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))
loss = data_loss + reg_loss
dx, dW3, db3 = affine_backward(dscores, cache_affine)
dx, dW2, db2 = affine_relu_backward(dx, cache_affine_relu)
dx, dw1, db1 = conv_relu_pool_backward(dx, cache_conv_relu_pool)
grads['W3'] = dW3 + self.reg * W3
grads['W2'] = dW2 + self.reg * W2
grads['W1'] = dw1 + self.reg * W1
grads['b3'] = db3
grads['b2'] = db2
grads['b1'] = db1
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
| [
"[email protected]"
] | |
4f86eca5a3468acb69cf9475c5fe32630dff5d7d | a9c3db07c29a46baf4f88afe555564ed0d8dbf2e | /src/0653-two-sum-iv-input-is-a-bst/two-sum-iv-input-is-a-bst.py | 72232f21d6dd661901232bc99868e4c984563a8f | [] | no_license | HLNN/leetcode | 86d2f5b390be9edfceadd55f68d94c78bc8b7644 | 35010d67341e6038ae4ddffb4beba4a9dba05d2a | refs/heads/master | 2023-03-13T16:44:58.901326 | 2023-03-03T00:01:05 | 2023-03-03T00:01:05 | 165,402,662 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # Given the root of a binary search tree and an integer k, return true if there exist two elements in the BST such that their sum is equal to k, or false otherwise.
#
#
# Example 1:
#
#
# Input: root = [5,3,6,2,4,null,7], k = 9
# Output: true
#
#
# Example 2:
#
#
# Input: root = [5,3,6,2,4,null,7], k = 28
# Output: false
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 104].
# -104 <= Node.val <= 104
# root is guaranteed to be a valid binary search tree.
# -105 <= k <= 105
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def findTarget(self, root: Optional[TreeNode], k: int) -> bool:
q = deque([root])
seen = set()
while q:
cur = q.popleft()
if cur:
if k - cur.val in seen:
return True
seen.add(cur.val)
q.append(cur.left)
q.append(cur.right)
return False
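        # BFS over the tree plus a complement set: O(n) time and O(n) space;
        # note the BST ordering is not actually exploited by this approach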
| [
"[email protected]"
] | |
e0697c83f283140deb66bb3ec28117302a6ef0c3 | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.main/branches/sterch-del-proxy/bungeni/core/workflows/transitioncron.py | 21bf28399ed6dbf3bad899a54ce1e0ca84463de4 | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | # encoding: utf-8
#Automatically transition Questions
#The system needs to provide a settable time-frame beyond which “Admissible”
#questions that are still available for scheduling change status to “Deferred”
import sys
import datetime
import zope.lifecycleevent
import sqlalchemy.sql.expression as sql
from bungeni.alchemist import Session
import bungeni.models.domain as domain
import bungeni.models.schema as schema
import bungeni.core.audit as audit
#import bungeni.core.workflows.question as question_workflow
import bungeni.core.globalsettings as prefs
import bungeni.core.workflows.dbutils as dbutils
from ore.workflow.interfaces import IWorkflowInfo
##############################
# imports for main
from zope import component
from sqlalchemy import create_engine
import ore.workflow.workflow
from bungeni.alchemist.interfaces import IDatabaseEngine
import bungeni.core.interfaces
import bungeni.core.workflows.question
import bungeni.core.workflows.adapters
from bungeni import core as model
#import pdb
def _getQuestionsApprovedBefore(date, status):
"""
get all questions with the workflow status before date
"""
session = Session()
qfilter=sql.and_(
(domain.Question.approval_date < date ),
(domain.Question.status == status)
)
query = session.query(domain.Question).filter(qfilter)
return query.all()
def _deferAdmissibleQuestionsBefore(date):
"""
set all admissible Questions before this
date to defered
"""
status = u"admissible"
admissibleQuestions = _getQuestionsApprovedBefore(date, status)
for question in admissibleQuestions:
IWorkflowInfo(question).fireTransitionToward(u'deferred',
check_security=False)
def deferAdmissibleQuestions():
"""
get the timeframe and defer all questions
before the current date - timeframe
"""
timedelta = prefs.getDaysToDeferAdmissibleQuestions()
deferDate = datetime.date.today() - timedelta
_deferAdmissibleQuestionsBefore(deferDate)
def main(argv=None):
"""
run this as a cron job and execute all
time based transitions
"""
db = create_engine('postgres://localhost/bungeni', echo=False)
component.provideUtility( db, IDatabaseEngine, 'bungeni-db' )
model.metadata.bind = db
session = Session()
component.provideAdapter(
bungeni.core.workflows.states.WorkflowState,
(bungeni.core.interfaces.IBungeniContent,))
component.provideAdapter(
bungeni.core.workflows.question.QuestionWorkflowAdapter,
(domain.Question,))
component.provideAdapter(
bungeni.core.workflows.states.StateWorkflowInfo,
(domain.Question,))
component.provideHandler(
bungeni.core.workflows.question.workflowTransitionEventDispatcher)
# add autitor for time based transitions
#component.provideAdapter(
# (bungeni.core.interfaces.IAuditable, bungeni.core.interfaces.IQuestion, ),
# (domain.Question, ))
#component.provideAdapter( audit.objectModified,
#(domain.Question, bungeni.core.interfaces.IAuditable, ))
deferAdmissibleQuestions()
session.flush()
session.commit()
if __name__ == "__main__":
sys.exit(main())
| [
"ashok.hariharan@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | ashok.hariharan@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
dfb663789d60d50ac6cd1c56b4a8bf9a33a5d07b | abea9a09bf265a19bb947789e814b1c3e1f21e24 | /iepy/preprocess/ner/literal.py | f3fafbba5df518d09cc9c67d1495cb37ae209e35 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | hanjx16/iepy_zh | ea28289c807ae97e131e68dd7b67c3f884dc5ee7 | 98c9acf76410a085c0a77dfbe8837daf81c27a48 | refs/heads/master | 2022-11-06T18:34:24.533294 | 2018-07-16T08:06:11 | 2018-07-16T08:06:11 | 141,105,817 | 1 | 0 | BSD-3-Clause | 2022-11-03T13:08:05 | 2018-07-16T07:58:03 | Python | UTF-8 | Python | false | false | 3,791 | py | import codecs
from iepy.preprocess.ner.base import BaseNERRunner
class LiteralNER(object):
"""Trivial Named Entity Recognizer that tags exact matches.
"""
def __init__(self, labels, src_filenames):
"""The i-th label is used to tag the occurrences of the terms in the
i-th source file. If a term can have several labels, the last one in
the list is selected.
"""
assert len(labels) == len(src_filenames)
self.labels = labels
self.src_filenames = src_filenames
names = set()
names_map = {}
for label, filename in zip(labels, src_filenames):
f = codecs.open(filename, encoding="utf8")
namelist = f.read().strip().split('\n')
names.update(namelist)
for name in namelist:
names_map[name] = label
self.names = frozenset(names)
self.names_map = names_map
# compute prefix closure
prefixes = set()
for name in self.names:
sname = name.split()
prefixes.update([' '.join(sname[:i]) for i in range(1, len(sname) + 1)])
self.prefixes = frozenset(prefixes)
def tag(self, sent):
"""Tagger with output a la Stanford (no start/end markers).
"""
entities = self.entities(sent)
# dummy entity for nicer code:
entities.append(((len(sent), len(sent)), 'X'))
next_entity = entities.pop(0)
result = []
for i, t in enumerate(sent):
if i >= next_entity[0][1]:
# assert entities
next_entity = entities.pop(0)
if i < next_entity[0][0]:
result.append((t, 'O'))
elif i < next_entity[0][1]:
result.append((t, next_entity[1]))
return result
def entities(self, sent):
"""Return entities as a list of pairs ((offset, offset_end), label).
"""
result = []
i = 0
while i < len(sent):
j = i + 1
prev_segment = segment = ' '.join(sent[i:j])
while segment in self.prefixes and j <= len(sent):
j += 1
prev_segment = segment
segment = ' '.join(sent[i:j])
if prev_segment in self.names:
label = self.names_map[prev_segment]
result.append(((i, j - 1), label))
i = j - 1
else:
i += 1
return result
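# Usage sketch (hypothetical label and file names): with 'people.txt'
# containing the line "John" and 'places.txt' containing "New York",
#
#     ner = LiteralNER(['PERSON', 'LOCATION'], ['people.txt', 'places.txt'])
#     ner.tag('John lives in New York'.split())
#
# returns [('John', 'PERSON'), ('lives', 'O'), ('in', 'O'),
#          ('New', 'LOCATION'), ('York', 'LOCATION')].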
class LiteralNERRunner(BaseNERRunner):
def __init__(self, labels, src_filenames, override=False):
super(LiteralNERRunner, self).__init__(override=override)
self.lit_tagger = LiteralNER(labels, src_filenames)
def run_ner(self, doc):
entities = []
sent_offset = 0
for sent in doc.get_sentences():
sent_entities = self.lit_tagger.entities(sent)
for ((i, j), label) in sent_entities:
name = ' '.join(sent[i:j])
kind = label.lower() # XXX: should be in models.ENTITY_KINDS
entities.append(
self.build_occurrence(
key=name,
kind_name=kind,
alias=name,
offset=sent_offset + i,
offset_end=sent_offset + j)
)
sent_offset += len(sent)
return entities
def to_lower_normalizer(name):
"""Utility normalizer that converts a name to lowercase unless it is an
acronym. To be used as parameter of download_freebase_type().
"""
words = name.split()
result = []
for w in words:
if not w.isupper():
w = w.lower()
result.append(w)
return ' '.join(result)
| [
"[email protected]"
] | |
27c8f713e9ddf86a65a0a33babb885ac9b025b97 | 0a0deefcf08355576acae85b47b9807574ccb552 | /x.py | 39a3b9b94a9827733f36ba137ab2fc2db3f95ad6 | [] | no_license | val-labs/ll | 792a38bf48174498c54cb8e407a8b6f43de9724b | 2f3af6648fd3b9f6039ec1db9b143384ac614d14 | refs/heads/master | 2021-01-23T03:54:10.086043 | 2017-03-29T00:59:29 | 2017-03-29T00:59:29 | 86,135,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | #!/usr/bin/env python
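# Debug helper: reads the file named by argv[1], derives an output name by
# replacing its final character with 'o' (the output file is opened but never
# written), and prints one record per line, opening a '(' group when the
# indentation increases and emitting ')' closers when it drops back.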
def push(x,y): x.append(y) ; return x
def pop(x): return x.pop()
import sys
fni=sys.argv[1] ; fno=fni[:-1]+'o'
fi,fo=open(fni),open(fno,'w')
stack = [ 0 ]
prev_line = ''
N=-1
def count_spaces(line):
for n,c in enumerate(line):
if c!=' ':
return n
return -1
def do_line(last_line=False):
global c
c = curr = count_spaces(curr_line)
p = prev = count_spaces(prev_line)
if c == -1: c = 0
if p == -1: p = 0
if last_line: c = 0
if prev == -1:
print(".")
else:
line_str = prev_line
if c > p:
ch = '>'
line_str = '('+line_str
push(stack, c)
elif c == p:
ch = '='
line_str = '.'+line_str+close(c)
elif c < p:
ch = '<'
line_str = ':'+line_str+close(c)
pass
print("x", N, ch, c, curr, p, prev,
last_line, line_str)
def close(c):
ret = []
while stack[-1] > c:
pop(stack)
push(ret, ')')
pass
return ''.join(ret)
for N,line in enumerate(fi):
curr_line = line[:-1]
if N>0:
do_line()
prev_line = curr_line
pass
do_line(last_line=True)
| [
"[email protected]"
] | |
a5062b3d6841fb040c6222a02ba3122f057374e1 | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/cloud_domain_migration_query_result.py | c7fa752fad7e5795b19de2a2f86a934d61dca4d9 | [
"Apache-2.0"
] | permissive | cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | 2023-08-04T06:30:37.551358 | 2023-07-19T12:02:12 | 2023-07-19T12:02:12 | 134,367,879 | 24 | 20 | Apache-2.0 | 2023-08-31T04:37:28 | 2018-05-22T06:04:19 | Python | UTF-8 | Python | false | false | 4,050 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
import cohesity_management_sdk.models.common_job_info
import cohesity_management_sdk.models.universal_id
class CloudDomainMigrationQueryResult(object):
"""Implementation of the 'CloudDomainMigrationQueryResult' model.
CloudDomainMigrationQueryResult represents the info returned while querying
cloud domain migration.
Attributes:
app_job_uid_list (list of UniversalId): Specifies the list of the
application jobs discovered.
cloud_domain_id (long|int): Specifies the Id of a specific cloud domain
present inside the vault, that needs to be migrated. If not set,
all cloud domains found in the vault or under the
'domain_namespace' specified in CADConfig will be migrated.
common_job_info (CommonJobInfo): Specifies the common job info.
is_cad_mode (bool): Specifies if the migration mode is CAD or not.
is_migration_ready (bool): Specifies whether the protection
jobs/objects in the cloud domain are ready to be migrated. This is
set after required snap trees have been downloaded and CM tables
have been populated.
num_of_bytes_downloaded (long|int): Specifies the Number of bytes
downloaded by this job. The downloaded bytes are for reading
metadata object, data objects and index files.
"""
# Create a mapping from Model property names to API property names
_names = {
"app_job_uid_list":'appJobUidList',
"cloud_domain_id":'cloudDomainId',
"common_job_info":'commonJobInfo',
"is_cad_mode":'isCadMode',
"is_migration_ready":'isMigrationReady',
"num_of_bytes_downloaded":'numOfBytesDownloaded',
}
def __init__(self,
app_job_uid_list=None,
cloud_domain_id=None,
common_job_info=None,
is_cad_mode=None,
is_migration_ready=None,
num_of_bytes_downloaded=None,
):
"""Constructor for the CloudDomainMigrationQueryResult class"""
# Initialize members of the class
self.app_job_uid_list = app_job_uid_list
self.cloud_domain_id = cloud_domain_id
self.common_job_info = common_job_info
self.is_cad_mode = is_cad_mode
self.is_migration_ready = is_migration_ready
self.num_of_bytes_downloaded = num_of_bytes_downloaded
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
app_job_uid_list = None
if dictionary.get('appJobUidList') != None:
app_job_uid_list = list()
for structure in dictionary.get('appJobUidList'):
app_job_uid_list.append(cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(structure))
cloud_domain_id = dictionary.get('cloudDomainId')
common_job_info = cohesity_management_sdk.models.common_job_info.CommonJobInfo.from_dictionary(dictionary.get('commonJobInfo')) if dictionary.get('commonJobInfo') else None
is_cad_mode = dictionary.get('isCadMode')
is_migration_ready = dictionary.get('isMigrationReady')
num_of_bytes_downloaded = dictionary.get('numOfBytesDownloaded')
# Return an object of this model
return cls(
app_job_uid_list,
cloud_domain_id,
common_job_info,
is_cad_mode,
is_migration_ready,
num_of_bytes_downloaded
) | [
"[email protected]"
] | |
ec31bc444aed72d7d0050366d378bddac1e55dab | 1d230067a680871af1127f89cc23dda6901f02e3 | /python/cursoemvideo/ex046.py | 4795ce11e5367dd3ae8b87045d5eb898782bdca1 | [] | no_license | pedrocambui/exercicios_e_desafios | 981f47d8ef5b37eda5c637da76918c29f4a4d700 | 65eb86f232aad4abc7cc2e95897ca0d5bb72dcf2 | refs/heads/main | 2023-03-13T13:28:46.058769 | 2021-03-30T16:25:08 | 2021-03-30T16:25:08 | 353,064,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | import time
for c in range(10, 0, -1):
print(c)
time.sleep(1)
print('FELIZ ANO NOVO!') | [
"[email protected]"
] | |
e3a9d91fb649e5c2c2eda79763d811cec1053536 | 9fdd0b9b581c169f4e3f8e1cf8bb20a32285da7a | /misc/server/communicator.py | e5a95ecc52f182c4cb479f0b17439ebdd45cf9e0 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Adrien4193/windshape | ddf716e5554e2a94b3bb5f6bae5950ed1a1dd5a3 | 4c73a4a85409f04518029f0ddb8bd7e3c60e4905 | refs/heads/master | 2020-03-23T14:55:37.532702 | 2018-08-17T07:07:58 | 2018-08-17T07:07:58 | 141,708,616 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,433 | py | import socket
import os
import MySQLdb
import threading
import time
import dbapi
from Module import Module
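# Wire protocol as parsed/produced below: modules send state frames
# "D:<powered>;<flashing>;<id>;<_>;<rpm>\0" and identification frames
# "M:<mac>\0"; the server answers with order frames "O:..." and a one-byte
# broadcast ping "B".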
# THREAD 1: Get modules state from NUCLEO
########################################################################
def recvUDP():
time.sleep(0.1)
print "[RECVTH] Receiving thread started..."
# While sending thread is running
while sendth.isAlive():
# Receive a message on socket
recvd, sAddr = sock.recvfrom(256)
# Safety
if len(recvd) == 0:
continue
# DATA (Module state)
if recvd[0] == 'D':
            # Get data of interest from the incoming message
data = recvd[2:recvd.index('\0')]
isPowered, isFlashing, modID, _, rpm = data.split(';')
ipAddr = sAddr[0]
#print 'RECEIVED: '+str(recvd)+'\nFROM: '+str(ipAddr)
# Get sender
if switch_fake:
modID = int(modID)
else:
if ipAddr in id_from_ip.keys():
modID = id_from_ip[ipAddr]
else:
continue
# Safety
if not modID in modules.keys():
continue
# Write immediately if new
modules[modID].setAttribute('isPowered', isPowered)
modules[modID].setAttribute('isFlashing', isFlashing)
# Will be written at update
modules[modID]['rpm'] = rpm
# MAC
elif recvd[0] == 'M':
macAddr = recvd[2:-1]
ipAddr = sAddr[0]
#print 'RECEIVED: '+str(recvd)+'\nFROM: '+str(ipAddr)
# Get ID from MAC
modID = id_from_mac[macAddr]
# Update IP
if ipAddr in id_from_ip:
del id_from_ip[ipAddr]
id_from_ip[ipAddr] = modID
# Write immediately if new
modules[modID].setAttribute('ipAddr', ipAddr)
modules[modID].resetLifePoints()
print "[RECVTH] terminated"
# THREAD 2: Send modules state from DB to NUCLEO
def sendUDP():
dbCon, dbCur = openDB_function()
time.sleep(0.1)
print "[SENDTH] Sending thread started..."
# Sending rate
dt = 0.01
# While receiving thread is running
while recvth.isAlive():
for i in range(10):
for mod in dbapi.getModules(dbCon, dbCur):
isPowered = mod['isPowered']
isFlashing = mod['isFlashing']
pwm_str = mod['pwm']
cmd = "O:{};{};{};{};{}\0".format(isPowered, isFlashing,
0, 0, pwm_str)
ipAddr = mod['ipAddr']
try:
sock.sendto(cmd, (ipAddr, port))
except:
print 'ERROR: CANNOT SEND '+str(cmd)+' TO '+str(ipAddr)
time.sleep(dt)
# Send broadcast message
sock.sendto("B", ('<broadcast>', port))
# Decay life points of modules
for module in modules.values():
module.decayLifePoints()
print "[SENDTH] terminated"
# THREAD 3: Update RPM in DB
def updateDB():
con, cur = openDB_function()
time.sleep(0.1)
print '[UPDATETH] Start ...'
while recvth.isAlive():
ref = time.time()
cmd = 'UPDATE modules SET rpm=CASE modID '
cmds = []
values = []
for modID in modules.keys():
cmds.append('WHEN %s THEN %s')
values.append(modID)
values.append(modules[modID]['rpm'])
cmds.append('ELSE rpm END;')
cmd += ' '.join(cmds)
with con:
cur.execute(cmd, values)
duration = time.time() - ref
#print duration
if duration < 1.0 / Module.rate:
time.sleep(1.0/Module.rate - duration)
print "[UPDATETH] terminated"
def create_modules(modulesDict):
global id_from_ip, id_from_mac
# Indexing IDs to find modules from IP or MAC
id_from_ip = {}
id_from_mac = {}
con, cur = openDB_function()
for modID in modulesDict.keys():
macAddr = modulesDict[modID]['macAddr']
x = modulesDict[modID]['posX']
y = modulesDict[modID]['posY']
# Index modules Ids with MAC for optimization
id_from_mac[macAddr] = modID
# Create Module instance
modules[modID] = Module(con, cur, modID, macAddr, x, y)
def create_socket(port_number):
global sock, port
# Same as fake and modules
port = port_number
# Set socket as UDP and use IPv4
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Make the socket reusable in case of improper closure
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Block until data available
sock.setblocking(True)
# Bind to port
sock.bind(('', port))
# Allow broadcast
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# INITIALIZATION
########################################################################
def start(open_function, modulesDict, fake, port_number):
global modules
modules = {}
global openDB_function
openDB_function = open_function
global switch_fake
switch_fake = fake
global recvth, sendth
global log_file
log_file = open('log.txt', 'w')
# Create empty module table on DB
dbCon, dbCur = open_function()
dbapi.createModulesTable(dbCon, dbCur)
dbapi.removeModuleFromDB(dbCon, dbCur, None)
# Fill table with modules
for modID in modulesDict.keys():
dbapi.insertModuleToDB(dbCon, dbCur, modID, modulesDict[modID])
# Communication with DB
create_modules(modulesDict)
# Init communication with modules
create_socket(port_number)
# Initiate and start a thread for receiving over UDP
recvth = threading.Thread(target=recvUDP)
recvth.setDaemon(True)
recvth.start()
# Initiate and start a thread for sending over UDP
sendth = threading.Thread(target=sendUDP)
sendth.setDaemon(True)
sendth.start()
# Update modules state in DB
updateth = threading.Thread(target=updateDB)
updateth.setDaemon(True)
updateth.start()
| [
"[email protected]"
] | |
906045475be3ddf2f4693588bbc982f9337e92ff | 9a731b24a38e356f0c53e2ef181d7bccf01f030c | /pythonProject/udiacce/android/gradle/UdiAcce/app/src/main/assets/python/app_packages/toga_android/window.py | 2ebfa068c6713de41f094c287a32ada714f03753 | [
"BSD-3-Clause"
] | permissive | yarinudi/udiacce | aa7a83237195c23ace7a4fe7ff57d6b81c76806c | 4d0fc9734646768a117f28880cd606c66cc36fef | refs/heads/master | 2023-07-13T05:55:35.400126 | 2021-08-06T12:14:47 | 2021-08-06T12:14:47 | 393,357,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,775 | py | from . import dialogs
from .libs.android import R__attr
from .libs.android.util import TypedValue
class AndroidViewport:
def __init__(self, native):
self.native = native
self.dpi = self.native.getContext().getResources().getDisplayMetrics().densityDpi
# Toga needs to know how the current DPI compares to the platform default,
# which is 160: https://developer.android.com/training/multiscreen/screendensities
self.baseline_dpi = 160
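        # For example, an "xxhdpi" device reports densityDpi == 480, i.e. a
        # scale factor of 480 / 160 = 3 relative to the platform baseline.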
@property
def width(self):
return self.native.getContext().getResources().getDisplayMetrics().widthPixels
@property
def height(self):
screen_height = self.native.getContext().getResources().getDisplayMetrics().heightPixels
return screen_height - self._status_bar_height() - self._action_bar_height()
def _action_bar_height(self):
"""
Get the size of the action bar. The action bar shows the app name and can provide some app actions.
"""
tv = TypedValue()
has_action_bar_size = self.native.getContext().getTheme().resolveAttribute(R__attr.actionBarSize, tv, True)
if not has_action_bar_size:
return 0
return TypedValue.complexToDimensionPixelSize(
tv.data, self.native.getContext().getResources().getDisplayMetrics())
def _status_bar_height(self):
"""
Get the size of the status bar. The status bar is typically rendered above the app,
showing the current time, battery level, etc.
"""
resource_id = self.native.getContext().getResources().getIdentifier("status_bar_height", "dimen", "android")
if resource_id <= 0:
return 0
return self.native.getContext().getResources().getDimensionPixelSize(resource_id)
class Window:
def __init__(self, interface):
self.interface = interface
self.interface._impl = self
self.create()
def create(self):
pass
def set_app(self, app):
self.app = app
def set_content(self, widget):
# Set the widget's viewport to be based on the window's content.
widget.viewport = AndroidViewport(widget.native)
# Set the app's entire contentView to the desired widget. This means that
# calling Window.set_content() on any Window object automatically updates
# the app, meaning that every Window object acts as the MainWindow.
self.app.native.setContentView(widget.native)
# Attach child widgets to widget as their container.
for child in widget.interface.children:
child._impl.container = widget
child._impl.viewport = widget.viewport
def set_title(self, title):
pass
def set_position(self, position):
pass
def set_size(self, size):
pass
def create_toolbar(self):
pass
def show(self):
pass
def set_full_screen(self, is_full_screen):
self.interface.factory.not_implemented('Window.set_full_screen()')
def info_dialog(self, title, message):
dialogs.info(self, title, message)
def question_dialog(self, title, message):
self.interface.factory.not_implemented('Window.question_dialog()')
def confirm_dialog(self, title, message):
self.interface.factory.not_implemented('Window.confirm_dialog()')
def error_dialog(self, title, message):
self.interface.factory.not_implemented('Window.error_dialog()')
def stack_trace_dialog(self, title, message, content, retry=False):
self.interface.factory.not_implemented('Window.stack_trace_dialog()')
def save_file_dialog(self, title, suggested_filename, file_types):
self.interface.factory.not_implemented('Window.save_file_dialog()')
| [
"[email protected]"
] | |
e72b97ba944366314ab410155e3a81c1c4b77eb5 | fe97a6d69fa1a18a280768a2aeb43c573a3b8e28 | /A3/TTP.py | d39f45609a146e5591ae423ca1b29c96f511612d | [] | no_license | dylnstwrt/cpsc418 | d4acccf4f7443d81635087070c86bce559b5f41f | a7bf4f3dd51ea10f02242d225822a5fe57df2cc6 | refs/heads/master | 2022-06-25T14:02:20.929829 | 2020-04-20T00:32:18 | 2020-04-20T00:32:18 | 235,682,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,076 | py | #!/usr/bin/env python3
'''
File: TTP.py
Class: CPSC418 - Winter 2020
Name: Dylan Stewart
UCID: 30024193
Assignment : 3
Problem: 8
'''
import socket
import sys
import os
import sympy
import secrets
import time
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
HOST = '127.0.4.18' # Standard loopback interface address (localhost)
PORT = 31802 # Port to listen on (non-privileged ports are > 1023)
def xgcd(a, b):
"""return (g, x, y) such that a*x + b*y = g = gcd(a, b)"""
x0, x1, y0, y1 = 0, 1, 1, 0
while a != 0:
(q, a), b = divmod(b, a), a
y0, y1 = y1, y0 - q * y1
x0, x1 = x1, x0 - q * x1
return b, x0, y0
def modinv(a, b):
"""return x such that (x * a) % b == 1"""
g, x, _ = xgcd(a, b)
return x % b
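# Illustrative check (not part of the assignment protocol): xgcd(3, 11)
# returns Bezout coefficients with gcd 1, and modinv(3, 11) == 4 since
# (3 * 4) % 11 == 1.
def _check_modinv_example():
    assert xgcd(3, 11)[0] == 1
    assert (3 * modinv(3, 11)) % 11 == 1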
def generate_RSA_Prime(size):
while True:
p = secrets.randbits(size)
while True:
if sympy.isprime(p):
p = (2*p) + 1
if sympy.isprime(p):
return p
else:
break
else:
if (p % 2 == 0):
p = p + 1
p = p + 2
def gen_rsa_pub(phi_n):
while True:
e = secrets.randbelow(phi_n)
        if (e >= 1) and (sympy.gcd(e, phi_n) == 1):
return e
def hashBytes(bytesToHash):
digest = hashes.Hash(hashes.SHA3_512(), backend=default_backend())
digest.update(bytesToHash)
return digest.finalize()
def rsa_keygen():
p = generate_RSA_Prime(256)
q = generate_RSA_Prime(256)
n = p*q
phi_n = (p - 1)*(q - 1)
e = gen_rsa_pub(phi_n)
d = modinv(e, phi_n)
print("TTP: TTP_p =",p,flush=True)
print("TTP: TTP_q =",q,flush=True)
print("TTP: TTP_N =",n,flush=True)
print("TTP: TTP_e =", e,flush=True)
print("TTP: TTP_d =",d,flush=True)
return e, d, n
def rsa_sig_gen(data, d, n):
t = hashBytes(data)
t_naught = hashBytes(t)
t_naught = int.from_bytes(t+t_naught, byteorder='big') % n
return pow(t_naught, d, n)
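# Note: hashBytes returns 64-byte SHA3-512 digests, so t + t_naught is 128
# bytes (1024 bits) before the reduction mod the ~512-bit modulus n -- a
# simple full-domain-hash-style encoding for the signature.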
def main():
e, d, n = rsa_keygen()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST,PORT))
        while True:
print("TTP Listening for connections...")
s.listen()
conn, addr = s.accept()
with conn:
msg = conn.recv(11).decode('utf-8')
if (msg == "REQUEST SIG"):
conn.recv(1)
print("REQUEST SIGN")
nameLength = int.from_bytes(conn.recv(4), byteorder='big')
name_bytes = conn.recv(nameLength)
print("TTP: S = \'"+name_bytes.decode('utf-8')+"\'", flush=True)
Server_N = conn.recv(128)
print("TTP: Server_N =", int.from_bytes(Server_N, byteorder='big'), flush=True)
Server_e = conn.recv(128)
print("TTP: Server_e =", int.from_bytes(Server_e, byteorder='big'), flush=True)
signature = rsa_sig_gen(name_bytes+Server_N+Server_e, d, n)
print("TTP: TTP_SIG =",signature,flush='True')
print("TTP: Sending TTP_N <"+n.to_bytes(128, byteorder='big').hex()+">",flush=True)
print("TTP: Sending TTP_SIG <"+signature.to_bytes(128, byteorder='big').hex()+">",flush=True)
conn.sendall(n.to_bytes(128, byteorder='big') + signature.to_bytes(128, byteorder='big'))
conn.close()
if (msg == "REQUEST KEY"):
print("REQUEST KEY")
print("TTP: Sending TTP_N <"+n.to_bytes(128, byteorder='big').hex()+">",flush=True)
print("TTP: Sending TTP_e <"+e.to_bytes(128, byteorder='big').hex()+">",flush=True)
conn.sendall(n.to_bytes(128, byteorder='big') + e.to_bytes(128, byteorder='big'))
#exit(0)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
72603b9a0cf234e5000de9a40607dcc4a986fda7 | 17a3418a6143ea2d953cf6509aeca7cc6e074686 | /Final-Project/backend/venv/bin/mid3v2 | e4b1fef5fc4f98a75b18321d25adc4e23bd0ab9d | [] | no_license | francolmenar-USYD/Internet-Software-Platforms | addb69a5582a63877e5f3408d64485a7ca942721 | 9e82ab6e7d0f8d4b3d55789cf5cfcd8e524a85df | refs/heads/master | 2022-04-22T02:07:25.419086 | 2020-04-22T10:02:43 | 2020-04-22T10:02:43 | 256,714,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | #!/mnt/c/Shared/ELEC3609/bird-repo/backend/venv/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.mid3v2 import entry_point
if __name__ == "__main__":
sys.exit(entry_point())
| [
"[email protected]"
] | ||
6c29e4c998dee4f35cd93f6f38bb8e40e1f9e3c0 | ce4202088bf28a070d9ff015f916c1e09f107dfb | /trivial_main.py | 43a88671aa5d854bc41452adbbb402e9c18192eb | [] | no_license | AHKerrigan/Epidemic-Control | f7b0fae49307317ca15565becb034f324157816d | dc3f2e315fb67f50e4f808c70e08088cbdb60bb2 | refs/heads/master | 2020-04-20T09:39:54.446033 | 2019-10-02T11:37:10 | 2019-10-02T11:37:10 | 168,770,608 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,279 | py | import argparse
import sys
import math
from collections import namedtuple
from itertools import count
import gym
import numpy as np
import scipy.optimize
from gym import wrappers
import networkx as nx
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
from torch.autograd import Variable
import epi_env
from models import Policy, Value, ActorCritic
from replay_memory import Memory
from running_state import ZFilter
# from utils import *
torch.set_default_tensor_type('torch.DoubleTensor')
PI = torch.DoubleTensor([3.1415926])
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
help='discount factor (default: 0.995)')
parser.add_argument('--env-name', default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--tau', type=float, default=0.97, metavar='G',
help='gae (default: 0.97)')
# parser.add_argument('--l2_reg', type=float, default=1e-3, metavar='G',
# help='l2 regularization regression (default: 1e-3)')
# parser.add_argument('--max_kl', type=float, default=1e-2, metavar='G',
# help='max kl value (default: 1e-2)')
# parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
# help='damping (default: 1e-1)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 1)')
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
help='batch size (default: 5000)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=20, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--entropy-coeff', type=float, default=0.0, metavar='N',
help='coefficient for entropy cost')
parser.add_argument('--clip-epsilon', type=float, default=0.2, metavar='N',
help='Clipping for PPO grad')
parser.add_argument('--use-joint-pol-val', action='store_true',
help='whether to use combined policy and value nets')
args = parser.parse_args()
import pickle
graph = pickle.load(open("Standard_Watts100.pkl", "rb"))
env = epi_env.Epi_Env(B=0.3, graph=graph, beta_hi=0.177, beta_low=0.071, delta_low=0.20, delta_hi=0.285)
print(env.observation_space)
num_inputs = env.observation_space
num_actions = env.action_space
env.seed(args.seed)
torch.manual_seed(args.seed)
if args.use_joint_pol_val:
ac_net = ActorCritic(num_inputs, num_actions)
opt_ac = optim.Adam(ac_net.parameters(), lr=0.001)
else:
policy_net = Policy(num_inputs, num_actions)
value_net = Value(num_inputs)
opt_policy = optim.Adam(policy_net.parameters(), lr=0.001)
opt_value = optim.Adam(value_net.parameters(), lr=0.001)
def select_action(state):
"""
state = torch.from_numpy(state).unsqueeze(0)
action_mean, _, action_std = policy_net(Variable(state))
action = torch.normal(action_mean, action_std)
"""
action = np.full(env.action_space, 100)
return action
def select_action_actor_critic(state):
"""
state = torch.from_numpy(state).unsqueeze(0)
action_mean, _, action_std, v = ac_net(Variable(state))
action = torch.normal(action_mean, action_std)
"""
action = np.full(env.action_space, 100)
return action
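# Note: in this "trivial" baseline, the learned-policy sampling shown in the
# docstrings of the two functions above is intentionally disabled; a constant
# action of 100 in every dimension is returned instead, so the run acts as a
# fixed-action reference for the PPO experiments.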
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * torch.log(2 * Variable(PI)) - log_std
return log_density.sum(1)
def update_params_actor_critic(batch):
rewards = torch.Tensor(batch.reward)
masks = torch.Tensor(batch.mask)
actions = torch.Tensor(np.concatenate(batch.action, 0))
states = torch.Tensor(batch.state)
action_means, action_log_stds, action_stds, values = ac_net(Variable(states))
returns = torch.Tensor(actions.size(0),1)
deltas = torch.Tensor(actions.size(0),1)
advantages = torch.Tensor(actions.size(0),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
targets = Variable(returns)
# kloldnew = policy_net.kl_old_new() # oldpi.pd.kl(pi.pd)
# ent = policy_net.entropy() #pi.pd.entropy()
# meankl = torch.reduce_mean(kloldnew)
# meanent = torch.reduce_mean(ent)
# pol_entpen = (-args.entropy_coeff) * meanent
action_var = Variable(actions)
# compute probs from actions above
log_prob_cur = normal_log_density(action_var, action_means, action_log_stds, action_stds)
action_means_old, action_log_stds_old, action_stds_old, values_old = ac_net(Variable(states), old=True)
log_prob_old = normal_log_density(action_var, action_means_old, action_log_stds_old, action_stds_old)
# backup params after computing probs but before updating new params
ac_net.backup()
advantages = (advantages - advantages.mean()) / advantages.std()
advantages_var = Variable(advantages)
opt_ac.zero_grad()
ratio = torch.exp(log_prob_cur - log_prob_old) # pnew / pold
surr1 = ratio * advantages_var[:,0]
surr2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * advantages_var[:,0]
policy_surr = -torch.min(surr1, surr2).mean()
vf_loss1 = (values - targets).pow(2.)
vpredclipped = values_old + torch.clamp(values - values_old, -args.clip_epsilon, args.clip_epsilon)
vf_loss2 = (vpredclipped - targets).pow(2.)
vf_loss = 0.5 * torch.max(vf_loss1, vf_loss2).mean()
total_loss = policy_surr + vf_loss
total_loss.backward()
torch.nn.utils.clip_grad_norm(ac_net.parameters(), 40)
opt_ac.step()
def update_params(batch):
rewards = torch.Tensor(batch.reward)
masks = torch.Tensor(batch.mask)
actions = torch.Tensor(np.concatenate(batch.action, 0))
states = torch.Tensor(batch.state)
values = value_net(Variable(states))
returns = torch.Tensor(actions.size(0),1)
deltas = torch.Tensor(actions.size(0),1)
advantages = torch.Tensor(actions.size(0),1)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(rewards.size(0))):
returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
prev_return = returns[i, 0]
prev_value = values.data[i, 0]
prev_advantage = advantages[i, 0]
targets = Variable(returns)
opt_value.zero_grad()
value_loss = (values - targets).pow(2.).mean()
value_loss.backward()
opt_value.step()
# kloldnew = policy_net.kl_old_new() # oldpi.pd.kl(pi.pd)
# ent = policy_net.entropy() #pi.pd.entropy()
# meankl = torch.reduce_mean(kloldnew)
# meanent = torch.reduce_mean(ent)
# pol_entpen = (-args.entropy_coeff) * meanent
action_var = Variable(actions)
action_means, action_log_stds, action_stds = policy_net(Variable(states))
log_prob_cur = normal_log_density(action_var, action_means, action_log_stds, action_stds)
action_means_old, action_log_stds_old, action_stds_old = policy_net(Variable(states), old=True)
log_prob_old = normal_log_density(action_var, action_means_old, action_log_stds_old, action_stds_old)
# backup params after computing probs but before updating new params
policy_net.backup()
advantages = (advantages - advantages.mean()) / advantages.std()
advantages_var = Variable(advantages)
opt_policy.zero_grad()
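    # PPO clipped surrogate (Schulman et al., 2017): take the elementwise
    # minimum of ratio * advantage and the clipped ratio * advantage, so steps
    # that move the new policy too far from the old one gain no extra reward.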
ratio = torch.exp(log_prob_cur - log_prob_old) # pnew / pold
surr1 = ratio * advantages_var[:,0]
surr2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * advantages_var[:,0]
policy_surr = -torch.min(surr1, surr2).mean()
policy_surr.backward()
torch.nn.utils.clip_grad_norm(policy_net.parameters(), 40)
opt_policy.step()
running_state = ZFilter((num_inputs,), clip=5)
running_reward = ZFilter((1,), demean=False, clip=10)
episode_lengths = []
running_reward = 0
f = open("noninvestmentWatts100.csv", "w+")
for i_episode in count(1):
memory = Memory()
num_steps = 0
reward_batch = 0
num_episodes = 0
while num_steps < args.batch_size:
state = env.reset()
state = running_state(state)
reward_sum = 0
for t in range(10000): # Don't infinite loop while learning
if args.use_joint_pol_val:
action = select_action_actor_critic(state)
else:
action = select_action(state)
next_state, reward, done, _ = env.step(action)
reward_sum += reward
next_state = running_state(next_state)
mask = 1
if done:
mask = 0
#memory.push(state, np.array([action]), mask, next_state, reward)
if args.render:
env.render()
if done:
break
state = next_state
num_steps += (t-1)
num_episodes += 1
reward_batch += reward_sum
reward_batch /= num_episodes
#batch = memory.sample()
#if args.use_joint_pol_val:
# update_params_actor_critic(batch)
#else:
# update_params(batch)
running_reward = (running_reward * 0.995) + (reward_batch * 0.005)
if i_episode % args.log_interval == 0:
f.write("{}\n".format(str(running_reward)))
print('Episode {}\tLast reward: {}\tAverage reward {:.2f}\tRunning reward: {}'.format(
i_episode, reward_sum, reward_batch, running_reward))
| [
"[email protected]"
] | |
3f20447565c06c4aa890c48c346f5587bbadcc92 | e35ae6f6b15e81842f941988e9ee30deb142827b | /python/54.py | 9b9541246364669514e6b9e37e7b7599c8c895eb | [] | no_license | MrHuxu/leetcode150 | 97f9862271f659a482c9e6775e47d421e779f1f1 | dc6be12ae5d46f32d3f9334e2f0f0a866fc2c837 | refs/heads/master | 2023-08-22T03:58:19.400027 | 2023-08-15T03:55:53 | 2023-08-15T03:55:53 | 211,640,798 | 1 | 0 | null | 2023-04-25T07:08:47 | 2019-09-29T09:47:46 | Go | UTF-8 | Python | false | false | 1,242 | py | import unittest
from typing import List
class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
ret: List[int] = []
level = 0
m = len(matrix)
n = len(matrix[0])
while len(matrix) > 0 and len(ret) < m * n:
for i in range(level, n - level):
ret.append(matrix[level][i])
for i in range(level + 1, m - level - 1):
ret.append(matrix[i][n - level - 1])
tmp = n - level - 1
while level != m - level - 1 and tmp >= level:
ret.append(matrix[m - level - 1][tmp])
tmp -= 1
tmp = m - level - 2
while level != n - level - 1 and tmp > level:
ret.append(matrix[tmp][level])
tmp -= 1
level += 1
return ret
class TestSolution(unittest.TestCase):
def test(self):
solution = Solution()
self.assertListEqual([1, 2, 3, 6, 9, 8, 7, 4, 5], solution.spiralOrder(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
self.assertListEqual([1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7], solution.spiralOrder(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]))
| [
"[email protected]"
] | |
6a4ced94e320a4124a9df23a43204e5406060955 | e65fb2168c4aaa3def9c752f7b161cb430e7d9fb | /onap_tests/utils/utils.py | ea3ec54b15cded631ba21a491e605545c038873c | [
"Apache-2.0"
] | permissive | Orange-OpenSource/xtesting-onap-tests | ead27e0f809d53f1ad4aa407be578207454acdfd | ce4237f49089a91c81f5fad552f78fec384fd504 | refs/heads/master | 2023-06-11T00:57:37.514952 | 2018-05-16T08:34:40 | 2018-05-16T08:34:40 | 126,787,905 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,297 | py | #!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# pylint: disable=missing-docstring
from difflib import SequenceMatcher
import logging
import random
import string
import os
import requests
import yaml
# ----------------------------------------------------------
#
# YAML UTILS
#
# -----------------------------------------------------------
def get_parameter_from_yaml(parameter, config_file):
"""
Returns the value of a given parameter in file.yaml
parameter must be given in string format with dots
Example: general.openstack.image_name
"""
with open(config_file) as my_file:
file_yaml = yaml.safe_load(my_file)
my_file.close()
value = file_yaml
# Ugly fix as workaround for the .. within the params in the yaml file
ugly_param = parameter.replace("..", "##")
for element in ugly_param.split("."):
value = value.get(element.replace("##", ".."))
if value is None:
raise ValueError("The parameter %s is not defined" % parameter)
return value
def get_config(parameter):
"""
Get configuration parameter from yaml configuration file
"""
local_path = os.path.dirname(os.path.abspath(__file__))
yaml_ = local_path.replace("utils", "onap-testing.yaml")
return get_parameter_from_yaml(parameter, yaml_)
def get_template_param(vnf_type, parameter):
"""
Get VNF template
"""
local_path = os.path.dirname(os.path.abspath(__file__))
if "ims" in vnf_type:
template_path = "templates/service-ClearwaterVims-template.yml"
elif "vfw" in vnf_type:
template_path = "templates/service-VfwService-template.yml"
else:
template_path = "templates/service-VmrfService-template.yml"
yaml_ = local_path.replace("utils",
template_path)
return get_parameter_from_yaml(parameter, yaml_)
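# Usage sketch (hypothetical parameter path): get_template_param("ims",
# "metadata.name") would look up that dotted key in the bundled
# service-ClearwaterVims-template.yml.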
# ----------------------------------------------------------
#
# LOGGER UTILS
#
# -----------------------------------------------------------
def get_logger(module):
"""
Get Logger
"""
log_formatter = logging.Formatter("%(asctime)s [" +
module +
"] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger()
log_file = get_config('general.log.log_file')
log_level = get_config('general.log.log_level')
file_handler = logging.FileHandler("{0}/{1}".format('.', log_file))
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
logger.setLevel(log_level)
return logger
# ----------------------------------------------------------
#
# Misc
#
# -----------------------------------------------------------
def random_string_generator(size=6,
chars=string.ascii_uppercase + string.digits):
"""
Get a random String for VNF
"""
return ''.join(random.choice(chars) for _ in range(size))
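# Example: random_string_generator() might return 'R3K9QZ' -- six characters
# drawn from uppercase ASCII letters and digits.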
def get_vf_module_index(vnf_list, target):
# until we understand how to match vnf & vf from the service template
best_index = 0
best_index_proba = 0
for i, elt in enumerate(vnf_list):
current_proba = SequenceMatcher(None,
target.lower(),
elt.lower()).ratio()
if current_proba > best_index_proba:
best_index = i
best_index_proba = current_proba
return best_index
# ----------------------------------------------------------
#
# requests
#
# -----------------------------------------------------------
def get_simple_request(url, headers, proxy):
    # Perform the GET outside the try block so a failed request raises,
    # instead of referencing an unbound 'response' in the handler.
    response = requests.get(url, headers=headers,
                            proxies=proxy, verify=False)
    try:
        request_info = response.json()
    except ValueError:  # body is not valid JSON; fall back to raw text
        request_info = response.text
    return request_info
| [
"[email protected]"
] | |
b8e31604babd214cfd29e449025f1312f75e49db | 4b0ccc880137b8d46501b84ba25af92449919e04 | /screenshot.name.py | b4716e451eb382008e1b368893102d7cc38b06b8 | [] | no_license | P-Jayaprakash/python_projects | 6113309e9b64547da9cc6c48a338b1de4f073926 | 198d7879eabc5e6633fdaf16ebba987c5de9728e | refs/heads/master | 2022-12-19T08:18:35.928697 | 2020-09-12T14:57:08 | 2020-09-12T14:57:08 | 291,396,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | #In this program a new name is generated for each screenshot according to the time. So there will be no chance for the overwritten of previous clicked screenshots.
import time
import pyautogui
def screenshot():
name=int(round(time.time()*100))
name='{}.png'.format(name)
time.sleep(5)
img=pyautogui.screenshot(name)
img.show()
screenshot()
| [
"[email protected]"
] | |
e8ed8b7bbc2c950827cdf8d68de707dbc93fde74 | d483cd30d2ddea0da535e18258ea6906deae1495 | /harmonic.py | 4a27ff52e35066ec302c4f12cd0758d6facde4f8 | [] | no_license | xLuis190/Python-School-work | 4b19a3cb52e1ad9f565789eb3aed61e7f8aacbc2 | 235136aae3de5af2949b4ec3b314d3809e216706 | refs/heads/master | 2020-03-18T13:39:36.103043 | 2018-05-25T03:42:26 | 2018-05-25T03:42:26 | 134,798,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | def harmonic(n):
    # Recursive harmonic sum: H(n) = 1/n + H(n-1), with H(1) = 1.
    if n <= 1:
        return 1.0
    return 1.0 / n + harmonic(n - 1)
print(harmonic(10))
| [
"[email protected]"
] | |
61bef9e4c7d1006aa90c415eebbab965bb28228a | 32b72152fa92d3f87e233325d3050dbdb9a7fe20 | /Django_blog/mysite/blog/migrations/0010_auto_20171011_1155.py | d141f39646f270c921e22f0a426d6c656bb2a1b2 | [] | no_license | mshava/django-learning-projects | aa710243df461d84b965749dbb7c1cdc1f8e65e8 | b95a10c1cb94e64169b1a2f92434efea312fd556 | refs/heads/master | 2021-07-13T17:04:04.866801 | 2017-10-18T12:23:01 | 2017-10-18T12:23:01 | 105,898,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-11 09:55
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_auto_20171011_1140'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='create_date',
field=models.DateTimeField(default=datetime.datetime(2017, 10, 11, 9, 55, 48, 903718, tzinfo=utc)),
),
migrations.AlterField(
model_name='post',
name='create_date',
field=models.DateTimeField(default=datetime.datetime(2017, 10, 11, 9, 55, 48, 903718, tzinfo=utc)),
),
]
| [
"[email protected]"
] | |
8a52917c00fc0bd7d1f0e5ffd6d1db0c38a5ed36 | bdf630acc7cb153b53fe4b6b300e4d4ecfd23e96 | /starmap.py | 414142776565b710ba0b752b78da36d55d5f3bf1 | [
"MIT"
] | permissive | icefoxen/games-ig | f4c5e569773c2f58abebbe710b30be75082cc121 | 89ef1e102b52bfb58d9511373475a3def300b2cf | refs/heads/master | 2020-07-03T12:01:49.218366 | 2016-11-18T23:34:45 | 2016-11-18T23:34:45 | 74,171,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,732 | py | # starmap.py
# A map full of stars. Yay!
#
# This could be converted to blit itself onto a surface at an arbitrary
# location, but it'd be a pain in the ass. It doesn't really matter,
# so I'm not gonna bother.
import star, sprite
import spaceobj
import wx
class Starmap:
w = 0
h = 0
# A quadtree would be rather nicer for this than a list. Oh well!
objects = []
selected = ()
    def __init__( self, x, y, numstars ):
        self.w = x
        self.h = y
        # Use a per-instance list; appending to the class-level list would
        # share stars between every Starmap instance.
        self.objects = []
        for i in range( numstars ):
            self.objects.append( star.Star( x, y ) )
        self.selected = self.objects[0]
def getSelectedObj( self ):
return self.selected
# Returns True if a new star has been selected, False otherwise
# Should call the object-defined getLocation method! This'll let fleets
# in orbit report slightly false values, which would work.
def selectObj( self, x, y ):
delta = 7
for obj in self.objects:
objx, objy = obj.getLocation()
if ((x > (objx - delta)) and (x < (objx + delta))) and \
((y > (objy - delta)) and (y < (objy + delta))):
self.selected = obj
print obj # DEBUG
return True
return False
def getStar( self, x, y ):
        for thingy in self.objects:
if thingy.x == x and thingy.y == y:
return thingy
return False
def getStarByName( self, name ):
        for thingy in self.objects:
if thingy.name == name:
return thingy
return False
def add( self, obj ):
self.objects.append( obj )
def remove( self, obj ):
try:
self.objects.remove( obj )
except ValueError:
print "Tried to remove %s twice!" % str( obj )
| [
"[email protected]"
] | |
861f58abc577b1b4cd88a46137677a2b41e1d526 | 05df41d9fdee79a5004bd5530864f3d8c42e52c2 | /import_textures.py | 6de3e3b611c80eee65cfcc62761867185e700779 | [] | no_license | Aralta/NSI_Projet | 675d69ae08042cfbe3e842bcf7a3a2a11dcfc004 | 619db4d5bc95fc3a42e79c18ff9b3f18caa3837f | refs/heads/master | 2023-04-16T02:23:49.438544 | 2021-04-26T18:05:33 | 2021-04-26T18:05:33 | 312,720,442 | 2 | 0 | null | 2021-04-26T18:05:34 | 2020-11-14T01:15:14 | Python | UTF-8 | Python | false | false | 5,355 | py | import pygame
zz = 0
aa = 1
ab = 2
ac = 3
ad = 4
ae = 5
af = 6
ag = 7
ah = 8
ai = 9
aj = 10
ak = 11
al = 12
am = 13
an = 14
ao = 15
ap = 16
aq = 17
ar = 18
aS = 19  # 'as' is a reserved keyword in Python, hence the capital S
at = 20
au = 21
av = 22
aw = 23
ax = 24
ay = 25
az = 26
ba = 27
bb = 28
bc = 29
bd = 30
be = 31
bf = 32
bg = 33
bh = 34
bi = 35
bj = 36
bk = 37
bl = 38
bm = 39
bn = 40
bo = 41
bp = 42
bq = 43
br = 44
bs = 45
bt = 46
bu = 47
bv = 48
bw = 49
bx = 50
by = 51
bz = 52
ca = 53
cb = 54
cc = 55
cd = 56
ce = 57
cf = 58
cg = 59
ch = 60
ci = 61
cj = 62
ck = 63
cl = 64
cm = 65
cn = 66
co = 67
TEXTURES ={
zz: pygame.image.load("./Textures/herbe.png"),
    # lake textures (water)
bg: pygame.image.load("./Textures/lac/eau.png"),
bi: pygame.image.load("./Textures/lac/eau_sup_gauche.png"),
bj: pygame.image.load("./Textures/lac/eau_cote_sup.png"),
bk: pygame.image.load("./Textures/lac/eau_sup_droit.png"),
bh: pygame.image.load("./Textures/lac/eau_cote_droit.png"),
be: pygame.image.load("./Textures/lac/eau_inf_droit.png"),
bd: pygame.image.load("./Textures/lac/eau_cote_inf.png"),
bc: pygame.image.load("./Textures/lac/eau_inf_gauche.png"),
bf: pygame.image.load("./Textures/lac/eau_cote_gauche.png"),
    # house textures
aa: pygame.image.load("./Textures/maison/aa.png"),
ab: pygame.image.load("./Textures/maison/ab.png"),
ac: pygame.image.load("./Textures/maison/ac.png"),
ad: pygame.image.load("./Textures/maison/ad.png"),
ae: pygame.image.load("./Textures/maison/ae.png"),
af: pygame.image.load("./Textures/maison/af.png"),
ag: pygame.image.load("./Textures/maison/ag.png"),
ah: pygame.image.load("./Textures/maison/ah.png"),
ai: pygame.image.load("./Textures/maison/ai.png"),
aj: pygame.image.load("./Textures/maison/aj.png"),
ak: pygame.image.load("./Textures/maison/ak.png"),
al: pygame.image.load("./Textures/maison/al.png"),
am: pygame.image.load("./Textures/maison/am.png"),
an: pygame.image.load("./Textures/maison/an.png"),
ao: pygame.image.load("./Textures/maison/ao.png"),
ap: pygame.image.load("./Textures/maison/ap.png"),
aq: pygame.image.load("./Textures/maison/aq.png"),
ar: pygame.image.load("./Textures/maison/ar.png"),
aS: pygame.image.load("./Textures/maison/as.png"),
at: pygame.image.load("./Textures/maison/at.png"),
au: pygame.image.load("./Textures/maison/au.png"),
av: pygame.image.load("./Textures/maison/av.png"),
aw: pygame.image.load("./Textures/maison/aw.png"),
ax: pygame.image.load("./Textures/maison/ax.png"),
ay: pygame.image.load("./Textures/maison/ay.png"),
az: pygame.image.load("./Textures/maison/az.png"),
ba: pygame.image.load("./Textures/maison/ba.png"),
bb: pygame.image.load("./Textures/maison/bb.png"),
    # path textures
bl: pygame.image.load("./Textures/chemin/bl.png"),
bm: pygame.image.load("./Textures/chemin/bm.png"),
bn: pygame.image.load("./Textures/chemin/bn.png"),
bo: pygame.image.load("./Textures/chemin/bo.png"),
bp: pygame.image.load("./Textures/chemin/bp.png"),
bq: pygame.image.load("./Textures/chemin/bq.png"),
br: pygame.image.load("./Textures/chemin/br.png"),
bs: pygame.image.load("./Textures/chemin/bs.png"),
bt: pygame.image.load("./Textures/chemin/bt.png"),
bu: pygame.image.load("./Textures/chemin/bu.png"),
bv: pygame.image.load("./Textures/chemin/bv.png"),
bw: pygame.image.load("./Textures/chemin/bw.png"),
bx: pygame.image.load("./Textures/chemin/bx.png"),
    # house 2 roof textures
by: pygame.image.load("./Textures/maison2/by.png"),
bz: pygame.image.load("./Textures/maison2/bz.png"),
ca: pygame.image.load("./Textures/maison2/ca.png"),
cb: pygame.image.load("./Textures/maison2/cb.png"),
cc: pygame.image.load("./Textures/maison2/cc.png"),
cd: pygame.image.load("./Textures/maison2/cd.png"),
ce: pygame.image.load("./Textures/maison2/ce.png"),
cf: pygame.image.load("./Textures/maison2/cf.png"),
cg: pygame.image.load("./Textures/maison2/cg.png"),
ch: pygame.image.load("./Textures/maison2/ch.png"),
ci: pygame.image.load("./Textures/maison2/ci.png"),
cj: pygame.image.load("./Textures/maison2/cj.png"),
ck: pygame.image.load("./Textures/maison2/ck.png"),
cl: pygame.image.load("./Textures/maison2/cl.png"),
cm: pygame.image.load("./Textures/maison2/cm.png"),
cn: pygame.image.load("./Textures/maison2/cn.png"),
co: pygame.image.load("./Textures/maison2/co.png"),
#ar: pygame.image.load("./Textures/toit-maison2/ar.png"),
#aS: pygame.image.load("./Textures/toit-maison2/as.png"),
#at: pygame.image.load("./Textures/toit-maison2/at.png"),
#au: pygame.image.load("./Textures/toit-maison2/au.png"),
#av: pygame.image.load("./Textures/toit-maison2/av.png"),
#aw: pygame.image.load("./Textures/toit-maison2/aw.png"),
#ax: pygame.image.load("./Textures/toit-maison2/ax.png"),
#ay: pygame.image.load("./Textures/toit-maison2/ay.png"),
#az: pygame.image.load("./Textures/toit-maison2/az.png"),
#ba: pygame.image.load("./Textures/toit-maison2/ba.png"),
#bb: pygame.image.load("./Textures/toit-maison2/bb.png"),
# i: pygame.image.load("./Textures/chemin/ .png"),
# j: pygame.image.load("./Textures/chemin/ .png"),
# k: pygame.image.load
# l: pygame.image.load
# m: pygame.image.load
} | [
"[email protected]"
] | |
d9d59927e2b551be7e2189f9d1e9d85d7cf08b1e | 8f8423134350b754aa6fd7f431ebdbcecd0f4646 | /files/babycrypto1.py | 66d68d9a4d5dd272cdec6671ded6818ac518218b | [
"MIT"
] | permissive | niktay/niktay.github.io | 363870e59c05d32cd775119dc196ea2dc6075890 | 4f7e81e9d8debb51618172bcdd731ce240b6d1b7 | refs/heads/master | 2022-03-26T09:28:02.285140 | 2022-03-13T01:44:50 | 2022-03-13T01:44:50 | 101,560,268 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | #!/usr/bin/env python
from base64 import b64decode
from base64 import b64encode
import socket
import multiprocessing
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad
import hashlib
import sys
class AESCipher:
def __init__(self, key):
self.key = key
def encrypt(self, data):
iv = get_random_bytes(AES.block_size)
self.cipher = AES.new(self.key, AES.MODE_CBC, iv)
return b64encode(iv + self.cipher.encrypt(pad(data,
AES.block_size)))
def encrypt_iv(self, data, iv):
self.cipher = AES.new(self.key, AES.MODE_CBC, iv)
return b64encode(iv + self.cipher.encrypt(pad(data,
AES.block_size)))
def decrypt(self, data):
raw = b64decode(data)
self.cipher = AES.new(self.key, AES.MODE_CBC, raw[:AES.block_size])
return unpad(self.cipher.decrypt(raw[AES.block_size:]), AES.block_size)
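# Illustrative sketch (not part of the challenge logic): the class above is
# plain AES-CBC with the IV prepended inside the base64 blob, so an
# encrypt/decrypt round trip recovers the plaintext.
def _demo_roundtrip():
    key = get_random_bytes(AES.block_size)
    cipher = AESCipher(key)
    assert cipher.decrypt(cipher.encrypt(b'hello')) == b'hello'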
flag = open("flag", "rb").read().strip()
COMMAND = [b'test',b'show']
def run_server(client, aes_key, token):
client.send(b'test Command: ' + AESCipher(aes_key).encrypt(token+COMMAND[0]) + b'\n')
client.send(b'**Cipher oracle**\n')
client.send(b'IV...: ')
iv = b64decode(client.recv(1024).decode().strip())
client.send(b'Message...: ')
msg = b64decode(client.recv(1024).decode().strip())
client.send(b'Ciphertext:' + AESCipher(aes_key).encrypt_iv(msg,iv) + b'\n\n')
while(True):
client.send(b'Enter your command: ')
tt = client.recv(1024).strip()
tt2 = AESCipher(aes_key).decrypt(tt)
client.send(tt2 + b'\n')
if tt2 == token+COMMAND[1]:
client.send(b'The flag is: ' + flag)
client.close()
break
if __name__ == '__main__':
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', 16001))
server.listen(1)
while True:
client, address = server.accept()
aes_key = get_random_bytes(AES.block_size)
token = b64encode(get_random_bytes(AES.block_size*10))[:AES.block_size*10]
process = multiprocessing.Process(target=run_server, args=(client, aes_key, token))
process.daemon = True
process.start()
| [
"[email protected]"
] | |
07fa1a691d141b0be42e0ba1259ed7f1121db648 | 10b8fa6ce869e23c2d603f1a8aacda4136f907f2 | /metrics/publisher/event_publisher_test.py | c62a483213687c9c21e3f936febdd5b05f45641e | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/ml-testing-accelerators | 2fdbaa42fcc1cee19c0b191e83801dfd767112b1 | 96a8320b941d0bf9023657bd2e18ea8fedd03247 | refs/heads/master | 2023-09-01T01:07:35.887904 | 2023-08-29T23:12:18 | 2023-08-29T23:12:18 | 244,933,863 | 65 | 62 | Apache-2.0 | 2023-09-13T20:45:03 | 2020-03-04T15:21:13 | Jsonnet | UTF-8 | Python | false | false | 5,662 | py | import collections
import datetime
import json
import os
from absl.testing import absltest
from absl.testing import parameterized
import kubernetes
import event_publisher
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import json_format
import metrics_pb2
_START_TIME = datetime.datetime.fromisoformat('2020-12-15T19:58:44')
_END_TIME = datetime.datetime.fromisoformat('2020-12-15T20:14:56')
# HACK: See https://github.com/kubernetes-client/python/issues/977#issuecomment-594045477
def _job_from_dict(d):
_FakeResponse = collections.namedtuple('FakeResponse', 'data')
resp = _FakeResponse(json.dumps(d, default=str))
return kubernetes.client.BatchV1Api().api_client.deserialize(resp, 'V1Job')
class EventPublisherTest(parameterized.TestCase):
def assertProtoEqual(self, first, second):
self.assertJsonEqual(
json_format.MessageToJson(first, including_default_value_fields=True),
json_format.MessageToJson(second, including_default_value_fields=True),
)
@parameterized.named_parameters(
('passing', 1, 0, [('Complete', None)], 'COMPLETED'),
('retried_passing', 1, 1, [('Complete', None)], 'COMPLETED'),
('failing', 0, 2, [('Failed', 'BackoffLimitExceeded')], 'FAILED'),
('timed_out', 0, 1, [('Failed', 'DeadlineExceeded')], 'TIMEOUT'),
('barely_timed_out', 0, 1, [('Failed', 'DeadlineExceeded'), ('Complete', None)], 'TIMEOUT'),
)
def test_create_test_completed_event(self, succeeded_count, failed_count, conditions, expected_status):
job = _job_from_dict({
'metadata': {
'name': 'job-name',
'namespace': 'namespace',
'labels': {
'benchmarkId': 'test-job',
},
},
'status': {
'startTime': _START_TIME,
'succeeded': succeeded_count,
'failed': failed_count,
'conditions': [
{
'status': True,
'reason': reason,
'type': cond_type,
'lastTransitionTime': _END_TIME,
}
for cond_type, reason in conditions
]
}
})
actual_event = event_publisher.create_test_completed_event(
job,
model_output_bucket='gs://fake-bucket',
cluster_name='cluster-name',
cluster_location='cluster-location',
project='project-id'
)
start_time = timestamp_pb2.Timestamp()
start_time.FromDatetime(_START_TIME)
duration = duration_pb2.Duration()
duration.FromTimedelta(_END_TIME - _START_TIME)
expected_event = metrics_pb2.TestCompletedEvent(
benchmark_id='test-job',
output_path='gs://fake-bucket/job-name',
status=metrics_pb2.TestCompletedEvent.TestStatus.Value(expected_status),
num_attempts=succeeded_count + failed_count,
start_time=start_time,
duration=duration,
labels={'benchmarkId': 'test-job'},
debug_info=metrics_pb2.DebugInfo(
logs_link='https://console.cloud.google.com/logs?project=project-id&advancedFilter=resource.type%3Dk8s_container%0Aresource.labels.project_id%3Dproject-id%0Aresource.labels.cluster_name%3Dcluster-name%0Aresource.labels.namespace_name%3Dnamespace%0Aresource.labels.pod_name%3Ajob-name%0Aresource.labels.location%3Acluster-location%0A',
details_link=f'https://console.cloud.google.com/kubernetes/job/cluster-location/cluster-name/namespace/job-name?project=project-id'
),
metric_collection_config=metrics_pb2.MetricCollectionConfig(),
)
self.assertProtoEqual(expected_event, actual_event)
@parameterized.named_parameters(
('with_subdir', 'some/subdir/path'),
('no_subdir', None),
)
def test_metric_collection_config(self, gcs_subdir):
job = _job_from_dict({
'metadata': {
'name': 'job-name',
'namespace': 'namespace',
'labels': {
'benchmarkId': 'test-job',
},
'annotations': {
'ml-testing-accelerators/metric-config': json.dumps({
'sources': [{
'literals': {
'assertions': {
'duration': {
'within_bounds': {
'lower_bound': 1,
'upper_bound': 2,
}
}
}
}
}]
})
}
},
'status': {
'startTime': _START_TIME,
'completionTime': _END_TIME,
'succeeded': 1,
'conditions': [
{
'status': True,
'type': 'Complete',
'lastTransitionTime': _END_TIME,
}
]
}
})
if gcs_subdir:
job.metadata.annotations['ml-testing-accelerators/gcs-subdir'] = gcs_subdir
actual_event = event_publisher.create_test_completed_event(
job,
model_output_bucket='gs://fake-bucket',
cluster_name='cluster-name',
cluster_location='cluster-location',
project='project-id'
)
actual_mcc = actual_event.metric_collection_config
expected_mcc = metrics_pb2.MetricCollectionConfig(
sources=[
metrics_pb2.MetricSource(
literals=metrics_pb2.LiteralSource(
assertions={
'duration': metrics_pb2.Assertion(
within_bounds=metrics_pb2.Assertion.WithinBounds(
lower_bound=1,
upper_bound=2,
)
)
}
)
)
]
)
self.assertEqual(actual_event.output_path, os.path.join('gs://fake-bucket', gcs_subdir or '', 'job-name'))
self.assertProtoEqual(expected_mcc, actual_mcc)
if __name__ == "__main__":
absltest.main()
| [
"[email protected]"
] | |
b31c5121c198fa52ab61ce34b003dc8c404bb232 | aece9569373acc984da8a2c45faa8272772e4486 | /RTD_ws/src/autonomous_mode/FRS_intersect_ipopt.py | 4ea32a10284cafd583ff4a9856daf5ad575ba7be | [] | no_license | ramvasudevan/roahm-rover | 368e24d35cc234a8efd517fafbecaefc8896e044 | f7e9b6606817e553f434b29dd0908089c630a669 | refs/heads/master | 2020-12-18T20:00:28.443179 | 2020-05-06T03:46:40 | 2020-05-06T03:46:40 | 235,506,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,067 | py | #! /usr/bin/env python
# -----------------------------------------------------------------------------
# This script implements the online component of RTD.
# It intersects the offline generated FRS (Forward Reachable Set) with
# obstacles online and optimizes for a trajectory based on the safe parameters.
#
# Adapted from MATLAB simulation code found at
# https://github.com/skousik/RTD_tutorial
# https://github.com/ramvasudevan/RTD
#
# Search ADJUSTABLE to find easily changed parameters
#
# Author of this script: Steven van Leeuwen ([email protected])
# -----------------------------------------------------------------------------
import sys
import rospy
from std_msgs.msg import Header
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import Odometry
from ackermann_msgs.msg import AckermannDriveStamped
from autonomous_mode.msg import TrajStamped
from autonomous_mode.msg import RoverPoseGlobalStamped
from autonomous_mode.msg import FloatStamped
from rover_dynamics import rover_dynamics
from FRS_solver_obj import solver_obj
import ipopt
import numpy as np
import scipy.io as sio
import math
#------------------------------------------------------------------------------
# These functions are support functions for the scipt, do not relate to
# the RTD pipeline
#------------------------------------------------------------------------------
def MSG_TIME(msg1=None,msg2=None,msg3=None,msg4=None):
#
    # Print to stdout if msg1 is given
#
global time_start
if (msg1 != True):
return
if time_start == 0:
time_start = rospy.get_time()
return
else:
print msg2,
time_rec = rospy.get_time()
print '-Elapsed time:',time_rec-time_start
if type(msg3) is int:
print ' Number of constraints:',msg3
elif type(msg3) is tuple:
print ' ',['%.3f' % k for k in msg3]
print ' ',['%.3f' % k for k in msg4]
elif type(msg3) is np.ndarray:
print ' Control commands:',msg3
print '- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -'
print ' '
print '- - - - - - - - - - - - - - B E G I N - - - - - - - - - - - - - - - - -'
time_start = 0
#------------------------------------------------------------------------------
# These functions have to do with processing the obstacle points.
#------------------------------------------------------------------------------
def gen_buf_discrete(oc_box_xy,oc_wh,oc_1,data,oc_buffer,oc,xy0,res):
#
    # Generate occupancy points at a certain spacing around each occupied point.
# When the buffer generation is done, add the buffer points as constraints
# to the solver format.
#
obs = []
# Buffer each point registered with oc_buffer.
for x in oc_box_xy[0]:
for y in oc_box_xy[1]:
if data[y*oc_wh[0]+x] >= oc_1:
oc[x-oc_buffer:x+oc_buffer+1,y-oc_buffer:y+oc_buffer+1] = 1
# Remove interior points and add to obstacle list.
for x in oc_box_xy[0]:
for y in oc_box_xy[1]:
if oc[x,y] == 1 and not sum_surround_oc(x,y,oc,oc_wh) == 8:
# Put obstacles in FRS frame. Shift to initial_x, initial_y
# as in FRS computation.
obs_FRS = obs_map_2_FRS(res*x+xy0[0],res*y+xy0[1],pose_future[0:3])
# Append as a constraint if it is reachable in the FRS frame.
                # Further trim the [-1,1] box according to the FRS contour to reduce
# number of constraints.
if (obs_FRS[0] >= box_FRS[0] and obs_FRS[0] <= box_FRS[1]
and obs_FRS[1] >= box_FRS[2] and obs_FRS[1] <= box_FRS[3]):
obs.append(obs_FRS)
return obs
def check_buf_discrete_ok(res,d,W):
#
# It is possible the rover can pass far enough through the discretized buffer.
# Ensure this is not the case.
#
a = res/2
if (W <= res):
return 0
buf = res+(d-1)*res
if (buf <= a):
return 0
return 1
def gen_oc_box_ind(ind,ind_expand,lim,oc_buffer):
#
    # Make the list of indices of the occupancy grid for
# occupancy consideration.
#
oc_box = np.arange(max(ind-((ind_expand-1)/2),oc_buffer),
min(ind+((ind_expand-1)/2),lim-oc_buffer))
return oc_box
def sum_surround_oc(x,y,oc,oc_wh):
#
    # A utility function to determine whether a point is an interior point.
#
sum_ = 0
if not (x < 1 or x >= oc_wh[0]-1):
if not (y < 1 or y >= oc_wh[1]-1):
            # Sum the 8 neighboring grid cells.
            sum_ = (oc[x-1,y-1]+oc[x-1,y]+oc[x-1,y+1]+oc[x,y-1]+
                    oc[x,y+1]+oc[x+1,y-1]+oc[x+1,y]+oc[x+1,y+1])
return sum_
#------------------------------------------------------------------------------
# These functions have to do with transforming between the occupancy map
# and FRS frames.
#------------------------------------------------------------------------------
def obs_map_2_FRS(x,y,pose):
#
# The FRS frame has the rover heading as zero heading, and in this
# implementation at (0,0) position.
#
pose_xy = np.array([[x-pose[0]],[y-pose[1]]])
R = np.array([[np.cos(pose[2]),np.sin(pose[2])],
[-np.sin(pose[2]),np.cos(pose[2])]])
pose_frs = (1/dist_scale)*np.matmul(R,pose_xy)
return [pose_frs[0]+initial_x,-pose_frs[1]+initial_y]
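# Hedged illustration: the heart of the transform above is a plain 2-D
# rotation of the world-frame offset into the rover body frame. This sketch
# is self-contained, uses hypothetical pose/obstacle values, and does not
# touch the module globals (dist_scale, initial_x, initial_y).
def _rotate_world_to_body_example():
    # Rover at (1, 2) with heading pi/2; obstacle at world point (3, 4).
    pose = [1.0, 2.0, math.pi/2.0]
    offset = np.array([[3.0-pose[0]], [4.0-pose[1]]])
    R = np.array([[np.cos(pose[2]), np.sin(pose[2])],
                  [-np.sin(pose[2]), np.cos(pose[2])]])
    # Returns the obstacle offset expressed in the body frame.
    return np.matmul(R, offset)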
def k_FRS_2_map(k):
#
# Scale the k parameters to their control commands.
#
U = [0,0]
U[0] = k[0]*k_scale[0]+k_const
U[1] = k[1]*k_scale[1]
return U
def cart_rover_global_2_map():
#
# Get pose into map frame. Additionally
# get velocity estimate.
#
return (pose_raw.x,-pose_raw.y,-pose_raw.psi,vel_est)
#------------------------------------------------------------------------------
# These functions have to do with constraint generation, setting up, and
# solving for the optimization of k.
#------------------------------------------------------------------------------
def proj_obs_to_k(obs):
#
# For each obstacle point, a constraint polynomial as a function of
# k is generated
#
n = len(obs)
z_0 = np.zeros((n,2))
# Create solver_obj object that is in the ipopt solver form.
constr_obj = solver_obj(rover_dynamics,oc_box_xy,w_p,w_c,w_k)
for k in range(n):
z_0[k,:] = [round(obs[k][0],2),round(obs[k][1],2)]
p_tmp = w_p.astype(float)
# Evaluate all powers of the current z variable in w
c_tmp = w_c.astype(float)*np.power(z_0[k,0],w_p[:,w_z[0]])*\
np.power(z_0[k,1],w_p[:,w_z[1]])
# Zero out z columns
p_tmp[:,w_z[0]] = 0
p_tmp[:,w_z[1]] = 0
# Realize the obstacle point as a constraint.
constr_obj.add_constraint(p_tmp,c_tmp,w_k)
constr_obj.finalize()
return constr_obj
def solver_opt_k_FRS(solver_obj):
#
# Calls the solver to optimize over the cost function given constraints.
#
global brake
# Solver options
k_lb = [-1,-1]
k_ub = [1,1]
c_lb = -1e19*np.ones(solver_obj.cnt)
c_ub = np.ones(solver_obj.cnt)
solver_obj.set_goal(Z_FRS_goal)
nlp = ipopt.problem(len(opt_result),len(c_ub),problem_obj=solver_obj,
lb=k_lb,ub=k_ub,cl=c_lb,cu=c_ub)
# Below are ADJUSTABLE*********************************************
    t_opt = 0.07 # Min time allotted to do optimization. If less than this time
# before t_plan will not do optimization.
t_latency = 0.12 # Unusable time at the end of the planning period to
# account for latency. This time is subtracted from
# the computed optimization time allowed.
nlp.addOption('tol',0.1)
nlp.addOption('acceptable_iter',4)
nlp.addOption('acceptable_tol',0.1)
nlp.addOption('linear_solver','ma97')
# Options specific to Ipopt.
# See online documentation
# for a description of all options
if solver_print_:
nlp.addOption('print_level',3)
else:
nlp.addOption('print_level',0)
# End ADJUSTABLE***************************************************
# Decide if the algorithm should undergo optimization.
t_j_to_now = rospy.get_time()-t_j
if t_j_to_now+t_opt > t_plan:
brake = True
else:
# Call solver
# Initial point likely to be feasible
nlp.addOption('max_cpu_time',max(0.02,round(t_plan-t_j_to_now-t_latency,2)))
(result,result_info) = nlp.solve([-1,0])
# Check status since latency can cause time beyond t_opt
        # Allow a suboptimal solution that satisfies the constraint-violation tolerance
if rospy.get_time()-t_j > t_plan:
brake = True
elif (result_info['status'] == 0 or result_info['status'] == 1):
brake = False
elif result_info['status'] == -4 and solver_obj.constr_viol < 1e-3:
brake = False
else:
brake = True
if brake == True:
result = [-float(k_const)/k_scale[0],opt_result[1]]
MSG_TIME(print_,'- -\n- - **BRAKING**\n- - **Optimization not used**')
return result
#------------------------------------------------------------------------------
# This function is executed every time a new k is desired. All above
# functions are called through the algorithm_main.
#------------------------------------------------------------------------------
def algorithm_main(oc):
#
# This function processes the occupancy grid from the environment
# and produces a trajectory to track.
#
global oc_box_xy,tpoints_traj_push,pose_future,ts_h,Z_FRS_goal,Z_h_roverinit
MSG_TIME(print_)
# Get pose in map frame, the same frame the occupancy grid is in
pose_init = cart_rover_global_2_map()
# Parse Occupancy Grid
oc_wh = (oc.info.width,oc.info.height)
oc_res = oc.info.resolution
oc_xy0 = (oc.info.origin.position.x,oc.info.origin.position.y)
oc_ = np.zeros((oc_wh[0],oc_wh[1]),dtype=int)
oc_rover_xy_ind = []
oc_box_xy = []
# Below are ADJUSTABLE*********************************************
oc_buffer = 1 # Distance in grid points for the buffer.
# For example oc_buffer=1 registers the 8 surrounding
# grid points as occupied.
oc_box_size = 4.0 # Centered box area side length, (m), to register occupied
# grid points
oc_1 = 60 # Confidence level criteria of oc grid point 0(none)-100(max)
ts_h = 0.05 # Time step for ref traj generation and future pose
# calculation with high fid dynamics
# End ADJUSTABLE***************************************************
# Estimate future pose of rover and form box to evaluate obstacles
# Calculate future pose at t_plan elapsed time
displ = rover_dynamics.flowf('low','no_record',[0,0,0,pose_init[3]],
[0,t_plan],ts_h)
pose_future = np.array([pose_init[0],pose_init[1],0,0])+\
rover_dynamics.roverinit_2_map(displ,pose_init[2])
MSG_TIME(print_,'- - **Poses calculated, current pose and xy goal**',
pose_init,Z_map_goal)
# Only looking at the occupancy grid in a conservative box area around
# the pose of rover will further reduce the computation time.
oc_box_ind_expand = int(math.floor((oc_box_size/oc_res)))
if oc_box_ind_expand%2 == 0:
oc_box_ind_expand = oc_box_ind_expand+1
for k in range(2):
oc_rover_xy_ind.append(int(math.floor((pose_future[k]-oc_xy0[k])/oc_res)))
oc_box_xy.append(gen_oc_box_ind(oc_rover_xy_ind[k],oc_box_ind_expand,
oc_wh[k],oc_buffer))
    # Buffer obstacles and obtain objects against which to evaluate the FRS
# Rover is estimated to have a rectangular footprint, L is longitudinal,
# W is lateral (m)
if not check_buf_discrete_ok(oc_res,oc_buffer,footprint_W):
MSG_TIME(print_,'- - **Discretization of buffer not ok**')
obs = gen_buf_discrete(oc_box_xy,oc_wh,oc_1,oc.data,oc_buffer,
oc_,oc_xy0,oc_res)
MSG_TIME(print_,'- - **Generated buffer**')
# Find the unsafe k parameters (constraint polynomials in k)
solver_obj = proj_obs_to_k(obs)
MSG_TIME(print_,'- - **Generated constraints**',solver_obj.cnt)
# Decision to optimize and if yes then do
Z_FRS_goal = obs_map_2_FRS(Z_map_goal[0],Z_map_goal[1],pose_future)
opt_result = solver_opt_k_FRS(solver_obj)
MSG_TIME(print_,'- - **Optimized**')
# Realize the traj with chosen k
opt_U = k_FRS_2_map(opt_result)
rover_dynamics.pass_U(opt_U)
if brake:
t_horz = t_stop-(t_stop%ts_h)+ts_h
else:
t_horz = t_plan-(t_plan%ts_h)+ts_h
tpoints_traj_push = np.linspace(0,t_horz,num=math.ceil(1+t_horz/ts_h))
X_h_roverinit = rover_dynamics.flowf('high','record',[0,0,0,pose_future[3]],
tpoints_traj_push,ts_h)
# Convention in use is X is [x,y,psi,vel], while Z is [X' U']'
Z_h_roverinit = np.concatenate((X_h_roverinit,
opt_U*np.ones((np.size(X_h_roverinit,0),2))),axis=1)
# Push traj_to_low_RTD
while rospy.get_time()-t_j < t_plan and not brake:
# Wait until t_plan elapsed time to push
pass
traj_publisher(Z_h_roverinit)
MSG_TIME(print_,'- - **Done**',Z_h_roverinit[0,4:])
#------------------------------------------------------------------------------
# Publisher functions
#------------------------------------------------------------------------------
def traj_publisher(Z):
#
# Format the message that contains the trajectory to track, and publish.
#
global t_j
traj_msg = TrajStamped()
traj_msg_header = Header(stamp=rospy.Time.now(),frame_id='base_link')
traj_msg.ref_traj = Z.flatten()
traj_msg.ref_size = np.size(Z[:,0],0)
traj_msg.Ts = ts_h;
traj_pub.publish(traj_msg)
while rospy.get_time()-t_j < t_plan and brake:
# Wait until t_plan to report t_j
pass
t_j = rospy.get_time()
#------------------------------------------------------------------------------
# Callback functions
#------------------------------------------------------------------------------
def callback_oc(occupancy_grid):
#
# This function gathers the occupancy grid from the environment
#
global cur_oc
cur_oc = occupancy_grid
def callback_pose(pose):
#
# This function gathers the pose from the environment
#
global pose_raw
pose_raw = pose
def callback_goal(goal):
#
# This function gets the goal from the planner
#
global Z_map_goal
Z_map_goal = goal.data
def callback_odom(odom):
#
# Calculate vx
#
global vel_est
vel_est = abs(np.cos(rover_dynamics.U[1]))*odom.twist.twist.linear.x
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
if __name__ == '__main__':
#
# Default main function. Will enter into algorithm_main.
#
global w_p,w_c,w_z,w_k,dist_scale,k_scale,k_const,t_plan,t_stop,footprint_W,\
initial_x,initial_y,rover_dynamics,Z_h_roverinit,pose_raw,pose_rate,\
Z_map_goal,time_start,t_j,print_,solver_print_,opt_result,brake,box_FRS
try:
rospy.init_node('FRS_intersect_opt',anonymous=False)
# Initialize
time_start = 0
tpoints_traj_push = 0
Z_h_roverinit = np.zeros((1,6))
cur_oc = None
pose_rate = 15.0
opt_result = [0,0]
# Load FRS data structure
# -Assume not sparse
# -Assume w_z and w_k are each size 2, with the lower indexed column x,
# higher indexed column y for w_z
frs = sio.loadmat(
'/home/nvidia/RTD_ws/src/autonomous_mode/Mat_Files/FRS/rover_FRS_deg_10.mat')
w_p = frs['pows']
w_c = np.squeeze(frs['coef'])
w_z = np.squeeze(frs['z_cols'])
w_k = np.squeeze(frs['k_cols'])
dist_scale = np.squeeze(frs['dist_scale'])
k_scale = np.squeeze(frs['k_scale'])
k_const = np.squeeze(frs['k_const'])
t_f = np.squeeze(frs['t_f'])
t_plan = np.squeeze(frs['t_plan'])
t_stop = np.squeeze(frs['t_stop'])
footprint_W = np.squeeze(frs['footprint_W'])
wb = np.squeeze(frs['wb'])
initial_x = np.squeeze(frs['initial_x'])
initial_y = np.squeeze(frs['initial_y'])
box_FRS = np.squeeze(frs['box_FRS'])
coeffile = sio.loadmat(
'/home/nvidia/RTD_ws/src/autonomous_mode/Mat_Files/newest_traj_lqr.mat')
c = coeffile['coeffs']
c = np.squeeze(c,0)
# Below are ADJUSTABLE**************************************************
Z_map_goal = [-4.04,4.633]
# Goal position for the rover for single goal mode,
# At where it would not move any further.
print_ = True # Print elapsed time and diagnostic information to
# to standard out.
solver_print_ = True # Print solver specific information.
# End ADJUSTABLE********************************************************
# Check for high-level planner
try:
rospy.wait_for_message('goal_to_mid_RTD',FloatStamped,timeout=0.2)
planner = True
print('Starting RTD with planner')
except:
# Single goal mode
# Check for Z_map_goal entered on command line
y_n = raw_input('Starting RTD. No planner. Enter Z_map_goal (y/n): ')
try:
if y_n == 'y':
gx,gy = raw_input().split()
Z_map_goal = [float(gx),float(gy)]
except:
pass
planner = False
        # Create the rover dynamics object (the instance deliberately shadows
        # the imported class name).
        # NOTE the lambda functions as found in the rover_dynamics class
        # are FRS specific for fast runtime
rover_dynamics = rover_dynamics()
rover_dynamics.pass_coeff(c,wb,(t_f/dist_scale),dist_scale)
rover_dynamics.setup()
# Publications to ros topics
traj_pub = rospy.Publisher('traj_to_low_RTD',TrajStamped,queue_size=100)
# Subscriptions to ros topics.
# Wait for enough info before proceeding to algorithm_main.
vesc_sub = rospy.Subscriber('/vesc/odom',Odometry,callback_odom)
rospy.wait_for_message('/vesc/odom',Odometry)
grid_sub = rospy.Subscriber('map',OccupancyGrid,callback_oc)
rospy.wait_for_message('map',OccupancyGrid)
pose_raw = rospy.wait_for_message('cart_rover_global',RoverPoseGlobalStamped)
pose_sub = rospy.Subscriber('cart_rover_global',RoverPoseGlobalStamped,callback_pose)
rospy.wait_for_message('cart_rover_global',RoverPoseGlobalStamped)
if planner:
goal_sub = rospy.Subscriber('goal_to_mid_RTD',FloatStamped,callback_goal)
rospy.wait_for_message('goal_to_mid_RTD',FloatStamped)
t_j = rospy.get_time()
while not rospy.is_shutdown():
# Loop
algorithm_main(cur_oc)
rospy.spin()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
] | |
ffd22fe756867d898a76925bc17830736ee33079 | 5fc266d4f08a9222744b3597b72e81c91dd94269 | /Camera Calibration/undistort_img.py | 9c3d262d29e807d60082c7e1f855b6c1d27afd01 | [] | no_license | aldinataITS/BAYUSUTA | 3986bb38486d12d0c007e5768b20f65ca2a4204b | 48511b089ff80a4bd542d42d0822e80eff55f55a | refs/heads/master | 2020-03-30T04:49:58.242740 | 2018-10-23T11:37:25 | 2018-10-23T11:37:25 | 150,764,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import numpy as np
import cv2
#Loading Calibration Data
calibrationData = np.load('camcalib_3.npz')
mtx = calibrationData['mtx']
dist = calibrationData['dist']
newcameramtx = calibrationData['newcameramtx']
roi = calibrationData['roi']
#Close File
calibrationData.close()
print(mtx)
print(dist)
print(newcameramtx)
print(roi)
x, y, w, h = roi
print(x)
print(y)
print(w)
print(h)
img = cv2.imread('c.jpg')
img = cv2.undistort(img, mtx, dist, None, newcameramtx)
#img = img[y:y+h, x:x+w]
#img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
#cv2.imshow('img', img)
cv2.imwrite("undistort_c0.png",img)
| [
"[email protected]"
] | |
e9562b760fcfc452be8a8be746853fa3948c58d2 | bf9ff34412dbeadba43b4a355c351947d2f8c80e | /src/Functions.py | 7eeeda272ad3203dac831cd7d6294fb6b87b35c0 | [
"MIT"
] | permissive | zibaparsons/DPFL-IADMM-Classification | 3b53d7fd2057a2a4135e598f2f9254ea1df23f6a | 4610458edaad176f5108199393f2bc18c93659e2 | refs/heads/main | 2023-07-31T00:37:12.831700 | 2021-08-06T20:31:35 | 2021-08-06T20:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,506 | py | import numpy as np #(activate this if CPU is used)
# import cupy as np #(activate this if GPU is used)
import math
from scipy.stats import matrix_normal
import time
def calculate_hypothesis(W_val, x_train):
Temp_H = np.exp( np.matmul(x_train, W_val) )
H = Temp_H/Temp_H.sum(axis=1)[:,None]
H = np.clip(H,1e-9, 1.) ## I X K matrix
return H
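# Hedged usage sketch with hypothetical shapes: W is (num_features x
# num_classes) and x_train is (num_data x num_features); every row of the
# returned H is a probability vector that sums to 1.
def _hypothesis_example():
    W = np.zeros((3, 2))   # three features, two classes
    x = np.ones((4, 3))    # four data points
    return calculate_hypothesis(W, x)  # each row -> [0.5, 0.5]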
def calculate_cost(par, num_data, H, y_train_Bin):
return -np.sum( np.multiply(y_train_Bin, np.log(H)) ) / num_data + par.gamma * np.sum( np.square(par.W_val) )
def calculate_accuracy(par, W, x_train, y_train):
H = calculate_hypothesis(W, x_train)
H_argmax = np.argmax(H, axis=1)
return np.mean( np.equal(H_argmax, y_train ) )
def calculate_gradient(par, num_data, H, x_train, y_train_Bin):
grad = np.zeros((par.num_features, par.num_classes)) ## J X K matrix
H_hat= np.subtract(H, y_train_Bin) ## I X K matrix
grad = np.matmul(x_train.transpose(), H_hat) / num_data + 2. * par.gamma * par.W_val ## J X K matrix
return grad
def calculate_residual(par):
Temp = np.absolute(par.W_val - par.Z_val)
residual = np.sum(Temp) / par.split_number
return residual
def generate_laplacian_noise(par, H, num_data, x_train, y_train_Bin, tilde_xi):
H_hat=np.subtract(H, y_train_Bin) ## I_p X K matrix
H_hat_abs = np.absolute(H_hat)
x_train_sum = np.sum(x_train, axis = 1)
H_hat_abs_sum = np.sum(H_hat_abs, axis = 1)
x_train_H_hat_abs = np.multiply(x_train_sum,H_hat_abs_sum) / num_data
bar_lambda = np.max(x_train_H_hat_abs)/float(par.bar_eps_str)
tilde_xi_shape = par.M + bar_lambda
tilde_xi = np.random.laplace( par.M, tilde_xi_shape, [par.num_features, par.num_classes])
return tilde_xi
def calculate_eta_Base(par,num_data, Iteration):
delta = 1e-6 ## (epsilon, delta)-differential privacy
c1 = num_data*1
c3 = num_data*0.25
cw = math.sqrt( par.num_features*par.num_classes*4 )
if par.bar_eps_str != "infty":
par.eta = 1.0 / ( c3 + 4.0*c1*math.sqrt( par.num_features*par.num_classes*(Iteration+1)*math.log(1.25/delta) )/(num_data*float(par.bar_eps_str)*cw) )
else:
par.eta = 1.0 / c3
par.eta = par.eta * float(par.a_str)
return par.eta
def generate_matrix_normal_noise(par, num_data,tilde_xi):
c1 = num_data*1
delta = 1e-6 ## 1e-308, 1e-6
sigma = 2*c1*math.sqrt(2*math.log(1.25/delta)) / (num_data*float(par.bar_eps_str)*(par.rho + 1.0/par.eta))
tilde_xi_shape = par.M + sigma*sigma
tilde_xi = np.random.normal( par.M, tilde_xi_shape, [par.num_features, par.num_classes])
return tilde_xi
def hyperparameter_rho(par, iteration):
if par.rho_str == "dynamic_1" or par.rho_str == "dynamic_2":
if par.Instance =="MNIST":
c1 = 2.0; c2=5.0; Tc = 10000.0; rhoC=1.2
if par.Instance =="FEMNIST":
c1 = 0.005; c2=0.05; Tc = 2000.0; rhoC=1.2
if par.bar_eps_str == "infty":
par.rho = c1 * math.pow(rhoC, math.floor( (iteration+1) / Tc ) )
else:
par.rho = c1 * math.pow(rhoC, math.floor( (iteration+1) / Tc ) ) + c2/float(par.bar_eps_str)
if par.rho_str == "dynamic_2":
par.rho = par.rho/100.0
else:
par.rho = float(par.rho_str)
# the parameter is bounded above
if par.rho > 1e9:
par.rho = 1e9
| [
"[email protected]"
] | |
39e0d8160a3b0812c2a752531ce2c6e65bf47201 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L43/43-27_MD_NVT_rerun/set.py | c90bf6739886a0bcd70e144171deac7ecf6d72b5 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L43/MD/ti_one-step/43_27/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../43-27_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
7530a00760c2d03d1c4025884ae463e500444761 | 80be7278883df59d4cb045d1e97f01ec18f9e32a | /PivotRule.py | 551a13362b3da9c63a2ef057214927547913e1f5 | [
"MIT"
] | permissive | vladan-jovicic/SimplexMethod | 05a60faa916aebd77e118ceaff2a27f247025511 | 9acf12db07da046f8563d3500183e54bb3d01860 | refs/heads/master | 2021-01-22T05:05:46.699376 | 2017-02-10T22:47:29 | 2017-02-10T22:47:29 | 81,613,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | #Pivot rule
from fractions import Fraction
import random
class PivotRule:
def __init__(self):
self.bounded = True
def bland_rule(self, curr_dict, n, m):
enter, leave = (-1, -1)
poss_enter = [idx for idx in range(n+m+1) if curr_dict[-1][idx] > 0]
if (len(poss_enter) == 0):
return (-1, -1)
else:
enter = min(poss_enter)
poss_leave = [(Fraction(-1)*curr_dict[i][-1]/curr_dict[i][enter],i) for i in range(m) if curr_dict[i][enter] < 0]
if (len(poss_leave) == 0):
return (enter, -1)
else:
return (enter,min(poss_leave)[1])
def max_coef(self, curr_dict, n, m):
enter, leave = (-1, -1)
poss_enter = [(val,idx) for idx,val in enumerate(curr_dict[-1][:n+m+1]) if val > 0]
if (len(poss_enter) == 0):
return (-1, -1)
enter = max(poss_enter)[1]
poss_leave = [(Fraction(-1)*curr_dict[i][-1]/curr_dict[i][enter],i) for i in range(m) if curr_dict[i][enter] < 0]
if (len(poss_leave) == 0):
return (enter, -1)
else:
return (enter,min(poss_leave)[1])
def my_rule(self, curr_dict, n, m):
enter, leave = (-1, -1)
poss_enter = [(val,idx) for idx,val in enumerate(curr_dict[-1][:n+m+1]) if val > 0]
if (len(poss_enter) == 0):
return (-1, -1)
inc_val = []
for tmp_e in poss_enter:
poss_leave = [(Fraction(-1)*curr_dict[i][-1]/curr_dict[i][tmp_e[1]],i) for i in range(m) if curr_dict[i][tmp_e[1]] < 0]
if(len(poss_leave) == 0):
return (tmp_e[1], -1)
tmp_l = min(poss_leave)
inc_val.append((tmp_e[0]*tmp_l[0], tmp_e[1], tmp_l[1]))
leave = max(inc_val)
return (leave[1], leave[2])
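# Hedged usage sketch with a hypothetical 1-variable / 1-constraint
# dictionary. Assumed layout (inferred from the methods above): rows 0..m-1
# hold constraints, the last row is the objective, and column -1 holds the
# constant term. Maximizing x0 subject to x0 <= 2: x0 enters, row 0 leaves.
if __name__ == '__main__':
    rule = PivotRule()
    objective = [Fraction(1), Fraction(0), Fraction(0), Fraction(0)]
    constraint = [Fraction(-1), Fraction(0), Fraction(0), Fraction(2)]
    print(rule.bland_rule([constraint, objective], n=1, m=1))  # -> (0, 0)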
| [
"[email protected]"
] | |
d729f9bd86b204be425d7c8cc856b17c2f0b8974 | 1982eca7dbeaa012c52bd045dd2acc54432792ad | /blog/tests.py | 8dfe1372c6e5da8d7789c27a40000c6195b7aa61 | [] | no_license | joshkatz1/blog | 3385b75d7baafb702f4c8edcc53213daa3860d3f | 4053e7a9ebf440e62b528d741c9144fb6e1b42fa | refs/heads/master | 2023-06-26T08:30:13.862244 | 2021-07-22T13:57:31 | 2021-07-22T13:57:31 | 387,840,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | from django.test import TestCase
# Create your tests here.
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from .models import Post
class BlogTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='secret'
)
self.post = Post.objects.create(
title='A good title',
body='Nice body content',
author=self.user,
)
def test_string_representation(self):
post = Post(title='A sample title')
self.assertEqual(str(post), post.title)
def test_post_content(self):
self.assertEqual(f'{self.post.title}', 'A good title')
self.assertEqual(f'{self.post.author}', 'testuser')
self.assertEqual(f'{self.post.body}', 'Nice body content')
def test_post_list_view(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Nice body content')
self.assertTemplateUsed(response, 'home.html')
def test_post_detail_view(self):
response = self.client.get('/post/1/')
no_response = self.client.get('/post/100000/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'A good title')
self.assertTemplateUsed(response, 'post_detail.html')
def test_post_create_view(self): # new
response = self.client.post(reverse('post_new'), {
'title': 'New title',
'body': 'New text',
'author': self.user,
})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'New title')
self.assertContains(response, 'New text')
def test_post_update_view(self): # new
response = self.client.post(reverse('post_edit', args='1'), {
'title': 'Updated title',
'body': 'Updated text',
})
self.assertEqual(response.status_code, 302)
def test_post_delete_view(self): # new
response = self.client.post(
reverse('post_delete', args='1'))
self.assertEqual(response.status_code, 302) | [
"[email protected]"
] | |
874356f7c15835a2cbfbcaec020e7f92a683ee3a | a18369348e9ca657d15c47a0b6d5f05ecbe7f26e | /main.py | 86cd7a5e577fc92a47e9f7e8248602ef9ee63aa2 | [] | no_license | stevie-h/1.7.2021 | 39060bbb667874f8817a2db73f49a2c6738ac2d8 | 15ed20e3097a20c333ce1fdb64b38ec8eb60adc3 | refs/heads/main | 2023-06-17T18:16:43.786929 | 2021-07-07T19:28:54 | 2021-07-07T19:28:54 | 383,188,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py |
# HW
# exercises from https://qaarchint.wordpress.com/2021/07/02/python-marathon/
| [
"[email protected]"
] | |
8bc1b80e842f887f3ee0f579212732acf07cdd82 | a302095a04f7781f1c264eb17fa9ebee8f6b26c4 | /ch1-ex2.py | 1e6704f6344a141e5c2d492c645bf737dab7af01 | [] | no_license | opi-lab/preliminares-aosoriob | 1f7459a5a1f928bfce7f7aafc58cfbd7105cab28 | e9519bf98eeba18570e7b7e375bf22d2699124ff | refs/heads/master | 2021-01-25T12:44:20.443481 | 2018-03-08T22:49:05 | 2018-03-08T22:49:05 | 123,505,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | from PIL import Image
from pylab import *
from scipy.ndimage import filters
im = array(Image.open("imagen2.jpg").convert('L'), "f")
# Create the blurred image
sigma = 3
blurred = filters.gaussian_filter(im,sigma)
weight = 0.25  # scalar that multiplies the blurred image
unsharp = im - weight*blurred
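# Note: the line above is one variant of sharpening; the conventional
# unsharp-mask formula adds a weighted difference between the image and its
# blur. A hedged alternative, kept separate so the original result stands:
unsharp_classic = im + weight*(im - blurred)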
# Build the figures
figure()
imshow(im)
gray()
title("Image before")
figure()
imshow(unsharp)
gray()
title("Image after, weight = {}".format(weight))
show() | [
"[email protected]"
] | |
3ee4f2addec32c0e331746ce8d188d7dfa38e847 | 9a692b550b257a2e19ab146925b88ed172bbc6cf | /test_api/test_api/settings.py | f10126f29e2fa2b2c518c11740d0753a10c20fa0 | [] | no_license | VVD-byte/py_dev_test | e1613d90ef548d891295813eacae16e438745dfd | a6930193bd0f64bec4ed8f2e410e2e1a9e7f8af4 | refs/heads/main | 2023-03-24T15:22:46.717750 | 2021-03-23T08:16:25 | 2021-03-23T08:16:25 | 349,413,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py | """
Django settings for test_api project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd8_on3x2k)ocw0&#=^e%u7rh=pfv52mc*v4pnsf%^6wg*t%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['192.168.1.149', '127.0.0.1']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.BasicAuthentication',
]
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
3d9ae9ed97e2086ac26476056509fc5b0de09fd3 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /ddpm_w_distillation/ddpm_w_distillation/config/dir_sample_cifar_condw_stage2.py | 21ef73b70eed5bf50147b39ceb9c3b6117bd6ed4 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 6,859 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring
# pylint: disable=g-no-space-after-comment,g-bad-todo
# pylint: disable=invalid-name,line-too-long
import ml_collections
class hyper:
pass
def D(**kwargs):
return ml_collections.ConfigDict(initial_dictionary=kwargs)
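# Hedged usage note (illustrative values): D(...) simply wraps its keyword
# arguments in an ml_collections.ConfigDict, so fields support attribute
# access on the result.
_d_example = D(lr=1e-4, steps=10)
assert _d_example.lr == 1e-4 and _d_example.steps == 10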
# added, edited
end_num_steps = 1 # eventual number of steps in the distilled sampler
start_num_steps = 1024 #512 #1024 #1024 #512 #1024 #NOTE: todo change to #1024 # number of steps in baseline sampler
distill_steps_per_iter = 50000 #50000 #10000 #10000 #1000 #50000
# NOTE although the checkpoint path says stage1, it is actually stage 2
teacher_ckpt_path = 'projects/diffusion/cifar_stage1_43492025/1/retained_checkpoints/checkpoint_620000'
train_batch = 128 #2048 # 256
lr = 1e-4 #3e-4 #1e-4
sampling_num_steps_train_start = 128
use_eval_ckpt_dir = True
# # # our two stage approach
# eval_ckpt_dir = 'projects/diffusion/cifar10_stage2_50k_ema_decay0_43877018/1/retained_checkpoints/'
# sampler='ddim'
# nvidia_2step approach
eval_ckpt_dir = 'projects/diffusion/cifar_stage2_twostep_nvidia_50k_43902200/1/retained_checkpoints/'
sampler = 'new_two_step_nvidia'
# previous runs with ema-decay 0.9999
# 'projects/diffusion/cifar_stage1_43555434/1/retained_checkpoints/'
use_retained_ckpt = True
w_sample_const = 1.0 #0.
waiting_for_new_ckpt = False
progressive_sampling_step = True
def get_config():
config = D(
launch=D(
sweep=hyper.product([
hyper.sweep('config.seed', [0]), #TODO [1, 2, 3] change to [0]
hyper.sweep('config.model.args.uncond_prob',
[0.]), # check NOTE not 0.1
hyper.sweep('config.eval.w_sample_const', [0., 0.3, 1., 2., 4.])
]),),
# added
distillation=D(
# teacher checkpoint is used for teacher and initial params of student
teacher_checkpoint_path=teacher_ckpt_path,
steps_per_iter=distill_steps_per_iter, # number of distillation training steps per halving of sampler steps
only_finetune_temb=False,
start_num_steps=start_num_steps,
end_num_steps=end_num_steps,
another_teacher_init=False, #True, # added
),
# added
seed=0,
progressive_distill=True, # a flag for stage 2 training
main_class='Model',
dataset=D(
name='CIFAR10',
args=D(
# image_size=64,
class_conditional=True,
randflip=True,
),
),
sampler=sampler, #'noisy',
use_eval_ckpt_dir=use_eval_ckpt_dir,
eval_ckpt_dir=eval_ckpt_dir,
waiting_for_new_ckpt=waiting_for_new_ckpt,
progressive_sampling_step=progressive_sampling_step,
model=D(
# architecture
name='w_unet3',
args=D(
ch=256,
emb_ch=1024, # default is ch * 4
ch_mult=[1, 1, 1],
num_res_blocks=3,
attn_resolutions=[8, 16],
num_heads=1,
# head_dim=64,
dropout=0., # NOTE changes 0.1,
logsnr_input_type='inv_cos',
w_input_type='inv_cos', # w embedding added
resblock_resample=True,
uncond_prob=0.1, #NOTE: default, but as sweep 0.,
# extra_class=True,
),
teacher_extra_class=True, #NOTE added
            mean_type='v', #'eps', #both might not work since the teacher model uses eps, 'both', #NOTE: need to implement 'eps',
teacher_mean_type='v', # added
logvar_type='fixed_large', #'fixed_medium:0.3', # TODO: check
            mean_loss_weight_type='snr_trunc', #NOTE: default 'snr_trunc', 'snr' performs worse #'constant', #NOTE changed default 'snr_trunc', #constant='mse', snr, snr_trunc
logvar_loss_type='none',
# logsnr schedule
train_num_steps=end_num_steps,
eval_sampling_num_steps=end_num_steps,
train_logsnr_schedule=D(
name='cosine', logsnr_min=-20., logsnr_max=20.),
eval_logsnr_schedule=D(
name='cosine', logsnr_min=-20., logsnr_max=20.),
eval_clip_denoised=True,
# added
eval_sampling_num_steps_train_start=sampling_num_steps_train_start, # NOTE: need to change
noisy_sampler_interpolation=0.2,
train_w_schedule=D(
name='uniform',
# logsnr_min=0., logsnr_max=0.5),
# logsnr_min=0., logsnr_max=1.0),
# logsnr_min=0., logsnr_max=2.0),
logsnr_min=0.,
logsnr_max=4.),
),
train=D(
# optimizer
batch_size=train_batch, #2048, # TODO: change back 2048,
optimizer='adam',
learning_rate=lr, # 1e-4 for 50k, 2e-4 for 10k #3e-4, #NOTE: todo #1e-4, #edited 3e-4,
learning_rate_warmup_steps=0, #edited 10000, # used to be 1k, but 10k helps with stability
learning_rate_anneal_type='linear', # TODO: checked
learning_rate_anneal_steps=distill_steps_per_iter, # TODO: checked
weight_decay=0.0,
ema_decay=0.9999, #0.,
grad_clip=1.0,
substeps=10,
enable_update_skip=False,
# logging
log_loss_every_steps=100,
checkpoint_every_secs=900, # 15 minutes
retain_checkpoint_every_steps=20000, # old checkpoints won't get deleted
eval_every_steps=20000,
w_conditoned_training=True, # added
            w_warmup_steps=0, # NOTE, set 0 10000, # added to warm up the w embedding
),
eval=D(
batch_size=128, # TODO change to 128,
num_inception_samples=50000,
sampling_fixed_classes=[0, 5], # visualize malamute, siamese cat
sampling_fixed_w=[0.1, 0.3, 0.5], # NOTE, not used
use_retained_ckpt=use_retained_ckpt,
w_sample_const=w_sample_const,
),
)
# added
if hasattr(config, 'progressive_distill'):
# run some sanity checks on inputs
assert config.distillation.start_num_steps > 0
assert config.distillation.end_num_steps > 0
assert config.distillation.start_num_steps % config.distillation.end_num_steps == 0
return config
| [
"[email protected]"
] | |
4ba728c054427c96406b9abe05531f8c65c9a67a | e03e118dfb61e4f414035011c80718cff026ba9e | /main/analysis.py | dda3fc0df74a8891cf93466725399d16861d61cb | [] | no_license | 15minutOdmora/RedditAnalysis | 6a946a9896a59f878425d38a75de874679c6cf4c | fdec33443cb889ed74c687a960f8e2fe679ca62a | refs/heads/master | 2021-01-26T04:42:42.918884 | 2020-04-14T09:22:28 | 2020-04-14T09:22:28 | 243,310,546 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,298 | py | import numpy as np
import matplotlib.pyplot as plt
import os
import json
import math
class Analysis:
def __init__(self, dir):
self.dir = dir
self.counted_data_dir = self.dir + r'\counted_data'
self.data_files = self.dir + r'\data'
self.sorted_data = json.load(open(self.dir + r'\analysed_data\sorted_data.json'))
self.subs = json.load(open(self.dir + r'\subreddit_dict_fin.json'))
self.date_files = ['08_03_2020_14', '09_03_2020_19', '10_03_2020_20', '11_03_2020_17',
'12_03_2020_20', '13_03_2020_20', '14_03_2020_19', '15_03_2020_19',
'16_03_2020_23', '17_03_2020_20', '18_03_2020_18']
# Create a dict with only the lower names of subs, keys = normal, nsfw ...
self.sorted_subs = dict()
for key, value in self.subs.items():
self.sorted_subs[key] = [dat[0].lower() for dat in value]
def search(self, where, filter, what1, what2=None, what3=None, specs=None):
"""FUNCTION:
Searches for the 'what' data in the 'where' data group, adds them up in lists, returs a touple(or triple) of
lists where = 'data', 'counted_data', 'analysed_data'
what1, what2, what3 = depends where we search.
specs = are only used if we want a speciffic sub. """
data1, data2, data3 = [], [], []
if where == 'data': # Reads from the 'data' file
            if filter == 'all': # Adds up all the lists from every subreddit from every day
for date in self.date_files:
for file in os.listdir(os.fsencode(self.data_files + r'\{}'.format(date))):
filename = os.fsdecode(file)
if filename.endswith(".json"):
sub = json.load(open(self.data_files + r'\{}\{}'.format(date, filename)))
data1 += sub[what1]
                            if what2 is not None:
data2 += sub[what2]
if what3 is not None:
data3 += sub[what3]
return data1, data2, data3
elif filter == 'specific_sub': # Only adds up from the specific sub specs
for _ in specs:
for date in self.date_files:
for file in os.listdir(os.fsencode(self.data_files + r'\{}'.format(date))):
filename = os.fsdecode(file)
if filename[:-5] in specs:
if filename.endswith(".json"):
sub = json.load(open(self.data_files + r'\{}\{}'.format(date, filename)))
data1 += sub[what1]
                                    if what2 is not None:
data2 += sub[what2]
if what3 is not None:
data3 += sub[what3]
return data1, data2, data3
elif filter in ['normal', 'europe', 'usa', 'nsfw']: # Only adds up from the subs in the category filter
for date in self.date_files:
for file in os.listdir(os.fsencode(self.data_files + r'\{}'.format(date))):
filename = os.fsdecode(file)
if filename[:-5] in self.sorted_subs[filter]:
if filename.endswith(".json"):
sub = json.load(open(self.data_files + r'\{}\{}'.format(date, filename)))
data1 += sub[what1]
                            if what2 is not None:
data2 += sub[what2]
if what3 is not None:
data3 += sub[what3]
return data1, data2, data3
elif where == 'counted_data': # Reads from the 'counted_data' file
if filter == 'all': # Appends all the lists
sub = json.load(open(self.counted_data_dir + r'\all_sub_data.json'))
if what2 is None:
return sub[what1]
elif what3 is None and what2 is not None:
return sub[what1], sub[what2]
else:
return sub[what1], sub[what2], sub[what3]
if filter == 'specific_sub':
sub = json.load(open(self.counted_data_dir + r'\{}.json'.format(specs)))
if what2 is None:
return sub[what1]
elif what3 is None and what2 is not None:
return sub[what1], sub[what2]
else:
return sub[what1], sub[what2], sub[what3]
if filter in ['normal', 'europe', 'usa', 'nsfw']:
for file in os.listdir(os.fsencode(self.counted_data_dir)):
filename = os.fsdecode(file)
if filename[:-5] in self.sorted_subs[filter]:
sub = json.load(open(self.counted_data_dir + r'\{}'.format(filename)))
data1.append(sub[what1])
if what2 is None:
pass
elif what3 is None and what2 is not None:
data2.append(sub[what2])
elif what3 is not None:
data2.append(sub[what2])
data3.append(sub[what3])
if what2 is None:
return data1
elif what3 is None and what2 is not None:
return data1, data2
else:
return data1, data2, data3
elif where == 'analysed_data': # Reads from the analysed where we have rankings in a json file
if filter == 'all':
if what2 is None:
return self.sorted_data['all'][what1]
elif what3 is None:
return self.sorted_data['all'][what1], self.sorted_data['all'][what2]
else:
return self.sorted_data['all'][what1], self.sorted_data['all'][what2], self.sorted_data['all'][what3]
if filter in ['normal', 'europe', 'usa', 'nsfw']:
if what2 is None:
return self.sorted_data[filter][what1]
elif what3 is None:
return self.sorted_data[filter][what1], self.sorted_data[filter][what2]
else:
                    return self.sorted_data[filter][what1], self.sorted_data[filter][what2],\
                           self.sorted_data[filter][what3]
def meaning(self, sho):
"""Returns the meaning of shortcuts(sho) used in bar charts"""
if sho == "s_avg_comments":
return "average number of comments"
elif sho == "s_avg_upvotes":
return "average number of upvotes"
elif sho == "s_avg_ud_ratio":
return "average upvote/downvote ratio"
elif sho == "s_avg_uc_ratio":
return "upvote/comment ratio"
elif sho == "s_topcomupv_to_upv":
return "number of upvotes of the top comment / number of upvotes on post"
elif sho == "s_topcomupv_to_2topcomupv":
return "number of upvotes of the top comment / number of upvotes on the 2nd top comment"
else:
return "error: No title"
def scatter_plot_upv_com_ud(self, filter, specs=None, lin_regression=False, log_scale=False):
""" PYPLOT:
plots the scatter plot of all the data from the filter(normal, nsfw...) and the lin. regression function"""
plt.style.use('seaborn')
# Use the search function to get all the data
points = self.search(where='data', filter=filter,
what1='upvotes', what2='comments', what3='ud_ratio', specs=specs)
num_of_posts = len(points[0])
# Set the log scaling to the axes if true
if log_scale:
plt.yscale('log')
plt.xscale('log')
# Plot and set the settings to the scatter
plt.scatter(points[0], points[1], c=points[2], cmap='summer', s=30, edgecolor='black', alpha=0.7)
# Add a colorbar and set the label
cbar = plt.colorbar()
cbar.set_label('Upvote/Downvote ratio')
# Zoom out
        plt.margins(5, 5) # Tuned for the log scale
# Set labels
plt.xlabel('Upvotes')
plt.ylabel('Comments')
if filter == "specific_sub":
tit = "r/" + specs[0].upper() + specs[1:]
else:
tit = "the group " + filter[0].upper() + filter[1:]
plt.title("The relation of upvotes to comments in {}.\n Number of posts: {}".format(tit, num_of_posts))
# Calculates and plots the lin. regression function if lin_regression is set to True
if lin_regression:
x = np.array(points[0])
y = np.array(points[1])
k, n = np.polyfit(x, y, 1)
print('Linear regression function = {} x + {}'.format(round(k, 3), round(n, 3)))
plt.show()
def plot_post_and_avgupv_freq(self, filter, specs=None):
"""Plots the average upvotes per hour and post freq."""
freq, upv = self.search('counted_data', what1="time_freq_hour", what2="time_freq_hour_upv",
filter=filter, specs=specs)
all_freq, all_upv = np.zeros((1, 25)), np.zeros((1, 25))
if filter in ["specific_sub", "all"]:
index = 0
for number in freq[0]:
all_freq[0, index] += number
index += 1
index = 0
for number in upv[0]:
all_upv[0, index] += number
index += 1
else:
for sub in freq:
index = 0
for post in sub[0]:
all_freq[0, index] += post
index += 1
for sub in upv:
index = 0
for post in sub[0]:
all_upv[0, index] += post
index += 1
x_axis = np.array([x for x in range(0, 25)])
# connect the last number with the first one as they should be the same
ar = []
for x in range(25):
if x < 24:
ar.append(all_upv[0, x]/all_freq[0, x])
else:
ar.append(all_upv[0, 0]/all_freq[0, 0])
avg_upv = np.array(ar)
plt.style.use('ggplot')
plt.subplot(211)
plt.xlim(0, 24)
plt.plot(x_axis, avg_upv, c='red')
plt.title("Average post upvotes per hour and number of posts per hour\n(UTC)")
plt.ylabel('Average number of upvotes')
plt.subplot(212)
plt.xlim(0, 24)
all_freq[0, 24] = all_freq[0, 0]
plt.plot(x_axis, all_freq[0], c='blue')
plt.ylabel("Number of posts")
plt.xlabel("Hour")
plt.show()
def compare_plots_post_avgupv_freq(self, filter1, filter2, what1, what2, specs1=None, specs2=None):
""" METHOD:
Displays 4 plots for two different groups/subreddits left = group1, right = group2"""
# Search the data:
group1 = self.search(where='counted_data', filter=filter1, what1=what1, what2=what2, specs=specs1)
group2 = self.search(where='counted_data', filter=filter2, what1=what1, what2=what2, specs=specs2)
# Set the time period, and the data variables
if what1 == 'time_freq_hour':
time_period = 24
else:
time_period = 72
post_freq1, post_upv1 = np.zeros((1, time_period)), np.zeros((1, time_period))
post_freq2, post_upv2 = np.zeros((1, time_period)), np.zeros((1, time_period))
if filter1 in ["specific_sub", "all"]:
for i in range(len(group1[0][0])):
post_freq1[0, i] += group1[0][0][i]
for i in range(len(group2[0][0])):
post_freq2[0, i] += group2[0][0][i]
else:
# Add up the lists for the two groups
for sub in range(len(group1[0])): # Post freq. p. time period group1
for i in range(len(group1[0][sub][0])):
post_freq1[0, i] += group1[0][sub][0][i]
for sub in range(len(group2[0])): # Post freq. p. time period group2
for i in range(len(group2[0][sub][0])):
post_freq2[0, i] += group2[0][sub][0][i]
if filter2 in ["specific_sub", "all"]:
for i in range(len(group1[1][0])):
post_upv1[0, i] += group1[1][0][i]
for i in range(len(group2[1][0])):
post_upv2[0, i] += group2[1][0][i]
else:
for sub in range(len(group1[1])): # Sum of upvotes p. h. group1
for i in range(len(group1[1][sub][0])):
post_upv1[0, i] += group1[1][sub][0][i]
for sub in range(len(group2[1])): # Sum of upvotes p. h. group2
for i in range(len(group2[1][sub][0])):
post_upv2[0, i] += group2[1][sub][0][i]
# Create the average upv. p. h. for the two groups
        avg_upv1, avg_upv2 = np.zeros((1, time_period)), np.zeros((1, time_period))
for i in range(len(post_upv1[0])):
avg_upv1[0, i] = post_upv1[0, i] / post_freq1[0, i]
for i in range(len(post_upv2[0])):
avg_upv2[0, i] = post_upv2[0, i] / post_freq2[0, i]
# Label names
        hours = [x for x in range(time_period)]
if filter1 == "specific_sub":
name1 = specs1
name2 = specs2
else:
name1 = filter1
name2 = filter2
# Use subplots
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(hours, avg_upv1[0]) # Upper left plot
axs[0, 0].set_title('{}'.format(name1))
axs[0, 1].plot(hours, avg_upv2[0], 'tab:orange') # Upper right plot
axs[0, 1].set_title('{}'.format(name2))
axs[1, 0].plot(hours, post_freq1[0], 'tab:green') # Bottom left plot
axs[1, 1].plot(hours, post_freq2[0], 'tab:red') # Bottom right plot
# Set the labels
counter = 0
for ax in axs.flat:
if counter == 0:
ax.set(xlabel='0', ylabel='Average upvotes per hour')
elif counter == 2:
ax.set(xlabel='Hour', ylabel='Number of posts per hour')
elif counter == 3:
ax.set(xlabel='Hour', ylabel='3')
counter += 1
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
plt.show()
def sorted_bar_chart(self, filter, what, top, specs=None):
""" METHOD:
Displays sorted bar-charts of subs. ranked by different categories """
data = self.search(where='analysed_data', filter=filter, what1=what)
if what == 's_awards':
if specs == 'platinum':
color = 'aquamarine'
elif specs == 'coins':
color = 'yellow'
else:
color = specs
# title text and label text
if specs == 'coins':
title_text = "All rewards recieved in coins worth for each" + \
" {} subreddit in a 11 day period".format(filter)
label_text = "Number of coins worth"
else:
title_text = "All {} awards recieved for each {} subreddit in a 11 day period".format(specs, filter)
label_text = "Number of {} awards".format(specs)
specs_dict = {'silver': 0, 'gold': 1, 'platinum': 2, 'coins': 3}
if len(data[specs_dict[specs]]) < top:
top = len(data[specs_dict[specs]])
sub_names, sub_data = list(), list()
for index in range(top):
num = data[specs_dict[specs]][index][1]
sub_names.append(data[specs_dict[specs]][index][0])
sub_data.append(num)
plt.barh(y=[2 * x for x in range(top, 0, -1)], width=sub_data, tick_label=sub_names, height=1.6,
color=color, edgecolor='black', linewidth=0.5)
# Setting the numbers on bars
"""for i,+ v in enumerate(sub_data[::-1]):
if specs == 'coins':
k = str(v / 1000) + 'k'
else:
k = str(v)
plt.text(25 + v, 2 * i + 1.3, k, color='black', fontsize=7)"""
plt.title(title_text)
plt.xlabel(label_text)
plt.show()
else:
if len(data) < top:
top = len(data)
sub_names, sub_data = list(), list()
for index in range(top):
sub_names.append(data[index][0])
sub_data.append(data[index][1])
plt.barh(y=[2*x for x in range(top, 0, -1)], width=sub_data, tick_label=sub_names, height=1.6)
# Setting the numbers on bars
"""for i, v in enumerate(sub_data[::-1]):
plt.text(10, 2*i +1.4, str(round(v)), color='white', fontsize=7)"""
plt.title("Top {} subreddits from the category {}\n ranked by {}".format(top, filter, self.meaning(what)))
plt.show()
def number_of_submissions_prediction(self):
"""Coming soon"""
pass
def standard_deviation(self, filter, specs=None):
"""Returns the standard deviation for upvotes, comments, and us_ratio"""
upv, com, ud = self.search(where="data", filter=filter,
what1="upvotes", what2="comments", what3="ud_ratio", specs=specs)
# Calc. the averages, lengths of the lists should in theory be the same
length = len(upv)
avg_upv = sum(upv) / length
avg_com = sum(com) / length
avg_ud = sum(ud) / length
# Calculate the s. deviation
d_upv, d_com, d_ud = 0, 0, 0
for i in range(length):
d_upv += (upv[i] - avg_upv)**2
d_com += (com[i] - avg_com)**2
d_ud += (ud[i] - avg_ud)**2
return math.sqrt(d_upv/(length-1)), math.sqrt(d_com/(length-1)), math.sqrt(d_ud/(length-1))
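    # Hedged cross-check (hypothetical data): numpy's ddof=1 uses the same
    # (n - 1) denominator as the hand-rolled computation above.
    @staticmethod
    def _std_sanity_check():
        return float(np.std([12.0, 15.0, 9.0, 20.0], ddof=1))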
def stats(self, filter, specs=None):
""" FUNCTION: returns a list of different stats from group/subreddit
touple of averages: (sum_num_of_submissions, sum_number_comments,
sum_num_upvotes, sum_ud ,[sum_of_awards_list_of3lists],
sum_title_length_words)"""
# Fetch the data, for the groups
if filter in ["europe", "nsfw", "normal", "usa"]:
num_sub, com, upv = self.search('counted_data', filter=filter,
what1="number_of_submissions", what2="comments", what3="upvotes")
ud, awards, title_len = self.search('counted_data', filter=filter,
what1="ud_ratio", what2="awards", what3="title_length")
# calculate the averages
sum_num_sub = sum(num_sub)
sum_com = round(sum(com), 2)
sum_upv = round(sum(upv), 2)
sum_ud = round(sum(ud), 2)
# do the averages for the awards and title lengths
silver, gold, plat, words = 0, 0, 0, 0
for i in range(len(awards)):
silver += awards[i][0]
gold += awards[i][1]
plat += awards[i][2]
words += title_len[i][0]
return sum_num_sub, sum_com, sum_upv, sum_ud, [silver, gold, plat], words
else:
num_sub, com, upv = self.search('counted_data', filter=filter,
what1="number_of_submissions", what2="comments", what3="upvotes",
specs=specs)
ud, awards, title_len = self.search('counted_data', filter=filter,
what1="ud_ratio", what2="awards", what3="title_length",
specs=specs)
return num_sub, com, upv, ud, awards, title_len[0]
#if __name__ == '__main__': | [
"[email protected]"
] | |
e58beda762c47efd010b46b6c3e5309149104c90 | 19b7d0e225839b75a767cc62aeff15159e33a451 | /day-5/part-2.py | 78873479e442acf4437cb77018c5bd2544c25f46 | [] | no_license | TomBombadilV/advent-of-code-2020 | 6942aa2f4cf703adc54beafd653ef21b0da02d11 | 09e9d30ad8871d6561c0c52c6e4f9815d7bf7f89 | refs/heads/master | 2023-02-04T01:55:15.008358 | 2020-12-19T16:29:04 | 2020-12-19T16:29:04 | 318,918,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | # Day 5 Part 2
from typing import List
def decode(seat: str, rows: int, cols: int) -> int:
""" Converts a seat into its seat code """
# Keep track of current seat window
row_left, row_right = 0, rows - 1
col_left, col_right = 0, cols - 1
# Iterate through seat string, updating current seat window
for c in seat:
if c == "F": # Move window to front half
row_right = (row_left + row_right + 1) // 2
elif c == "B": # Move window to back half
row_left = (row_left + row_right + 1) // 2
elif c == "L": # Move window to left half
col_right = (col_left + col_right + 1) // 2
elif c == "R": # Move window to right half
col_left = (col_left + col_right + 1) // 2
else:
print("Invalid letter")
return -1
# Convert to seast code
return row_left * 8 + col_left
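# Hedged sanity check using the example from the puzzle statement:
# "FBFBBFFRLR" decodes to row 44, column 5, i.e. seat ID 357.
def _decode_example() -> bool:
    """Minimal sketch demonstrating decode() on the sample seat."""
    return decode("FBFBBFFRLR", 128, 8) == 44 * 8 + 5  # 357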
def missing_seat(seats: List[str], rows: int, cols: int) -> int:
""" Finds missing seat that is not from beginning or end """
# Array marking whether seat code has been encountered
found = [False for _ in range(rows * cols)]
# Decode each seat and mark it as encountered
for seat in seats:
code = decode(seat, rows, cols)
found[code] = True
# Move past beginning nonexistent seats
i = 0
while found[i] == False:
i += 1
# Iterate until you find a missing seat
while found[i] == True:
i += 1
return i
if __name__ == "__main__":
# Parse input
with open("input.txt", "r") as file:
lines = [line.strip() for line in file]
# Define plane size
rows, cols = 128, 8
    # Find the missing seat code
print("Missing seat code is {0}".format(missing_seat(lines, rows, cols)))
| [
"[email protected]"
] | |
88ab09b0541b8ab896ba88063c9590026c36615d | 785f5a4bfd97ac77559110fb831f18a3822b4e17 | /01-python_crash_course/01-ejercicios_teoria/chapter_03_lists/bicycles.py | ca801e145ff518b9cf4696b1a32b4130de4d5840 | [] | no_license | lionelherrerobattista/practica_python | c552ae06336eb805172decd0d527b26a17c09cb9 | 44f09bae58748c2c7c2449adc5117591abd2828d | refs/heads/master | 2020-03-28T02:28:09.910563 | 2020-03-14T21:31:31 | 2020-03-14T21:31:31 | 147,572,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | bicycles = ['trek', 'cannondale', 'redline', 'specialized']
# ~ print(bicycles)
# ~ print(bicycles[0].title())
# ~ print(bicycles[1])
# ~ print(bicycles[3])
# ~ print(bicycles[-1])
message = "My first bicyle was a " + bicycles[0].title() + "."
print(message)
| [
"-"
] | - |
630d30611df501066fd29f0272f88adbd73b6db0 | 977e49e3d9940b79ad7cc55fdd6a31a85e199ee0 | /devtracker/time.py | b6a47db0e42cf97d5b56271813a142ee58685759 | [
"MIT"
] | permissive | ConSou/devtracker | e051045ea5a27e633426dbd7da572df2490c2493 | ea892d6d48aa5d4627b429469b59ae3f0ce7f10f | refs/heads/master | 2023-01-11T13:41:52.849977 | 2019-03-27T20:02:38 | 2019-03-27T20:02:38 | 177,614,070 | 1 | 1 | MIT | 2022-12-27T15:34:29 | 2019-03-25T15:29:25 | Python | UTF-8 | Python | false | false | 3,074 | py | import csv
from .dir_and_path_helpers import mk_file_path, check_file
from .time_and_date_helpers import get_date, get_time, total_time
def make_project_csv(dir):
if check_file(mk_file_path(dir)):
start_time(dir)
else:
print("[+] First time working on {0}, creating file...".format(dir))
        initialize_list = ["start_date", "start_time", "end_date", "end_time", "total"]
with open(mk_file_path(dir), 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            wr.writerow(initialize_list)
start_time(dir)
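# Hedged sketch of the on-disk format this module maintains (hypothetical
# values): a header row followed by one row per completed work session.
def _example_rows():
    return [
        ["start_date", "start_time", "end_date", "end_time", "total"],
        ["2019-03-25", "09:15:00", "2019-03-25", "10:05:00", "0:50:00"],
    ]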
def start_time(dir):
start_arr = [get_date(), get_time()]
with open(mk_file_path(dir), 'r', newline='') as myFile:
reader = list(csv.reader(myFile))
last_row = reader.pop()
if len(last_row) == 2:
print("[-] You are already tracking this project.")
else:
with open(mk_file_path(dir), 'a', newline='') as myfile:
wr = csv.writer(myfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
wr.writerow(start_arr)
print("[+] Starting work on {0}".format(dir))
def end_time(dir):
end_date = get_date()
end_time = get_time()
if check_file(mk_file_path(dir)):
with open(mk_file_path(dir), 'r', newline='') as myFile:
reader = list(csv.reader(myFile))
last_row = reader.pop()
if len(last_row) == 2:
total = total_time(last_row[1], end_time)
last_row.extend([end_date, end_time, total])
reader.append(last_row)
with open(mk_file_path(dir), 'w', newline='') as myfile:
wr = csv.writer(myfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
for i in reader:
wr.writerow(i)
print("[+] Stopped tracking in {0}, with a total time of {1}!".format(dir, total))
else:
reader.append(last_row)
print("[-] 'devtracker stop' was entered. You must run 'devtracker start' first to start tracking time first.")
else:
print("[-] 'devtracker stop' was entered. You must run 'devtracker start' to create project tracking file.")
def current_status(dir):
if check_file(mk_file_path(dir)):
with open(mk_file_path(dir), 'r', newline='') as myFile:
reader = list(csv.reader(myFile))
last_row = reader.pop()
if len(last_row) == 2:
temp_time = get_time()
total = total_time(last_row[1], temp_time)
print("[+] You have been working on {0} for {1} so far. \n"
"Use 'devtracker stop' to stop working or continue working on this project.".format(dir, total))
else:
print("[+] Status report for '{0}': You have not started work on this project yet.".format(dir))
else:
print("[-] 'devtracker status' was entered. You must run 'devtracker start' to create project tracking file.") | [
"[email protected]"
] | |
d9227e657d330eebfff8a7108bd6c60f522b0fd3 | f83845488fb40ca954cc7785499e6f44a4efe3ee | /solution/admin.py | 6f555088b794c33f64c3e7874e04807daf2e503f | [] | no_license | justin3527/security_solution_api_app | 3f7ef0a1bc1651cb63c47ae06f129af340deda7e | 264004e5b0244cee5776c113bad194e46931d3e4 | refs/heads/master | 2020-04-23T19:40:28.961961 | 2019-06-26T02:13:24 | 2019-06-26T02:13:24 | 171,412,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django.contrib import admin
from .models import ServerData
# Register your models here.
admin.site.register(ServerData) | [
"[email protected]"
] | |
e54b24fa76a561c7a37d1d7e336ccb90544fbd02 | e8f73030d427792a9c934fd6e096ea67494bc378 | /launcher.py | 5f895fecfb44ee98992b48f86252f6a1e8b0945e | [] | no_license | renanklehm/WM-Lages | e474dad636e0a043a0edd976a56a711484404ab9 | 933d2a7482f4741e5295fec5a6a3edf797ce28d9 | refs/heads/master | 2022-11-30T04:17:43.076969 | 2020-08-19T20:31:36 | 2020-08-19T20:31:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | from DataBase import DataBaseConnection
import main
import shutil
import os
import glob
"""def recursive_copy_files(source_path, destination_path, override=True):
files_count = 0
if not os.path.exists(destination_path):
os.mkdir(destination_path)
print('Creating destination directory')
items = glob.glob(source_path + '/*')
for item in items:
if os.path.isdir(item):
path = os.path.join(destination_path, item.split('/')[-1])
files_count += recursive_copy_files(source_path=item, destination_path=path, override=override)
else:
file = os.path.join(destination_path, item.split('/')[-1])
if not os.path.exists(file) or override:
shutil.copyfile(item, file)
files_count += 1
return files_count
if not (main.VER == DataBaseConnection().GetVersion()):
print(recursive_copy_files('C:\\Users\\renan.klehm\\Desktop\\backup', 'C:\\Users\\renan.klehm\\Desktop\\WM-Lages'))
main.launch()
else:
main.launch()"""
main.launch() | [
"[email protected]"
] | |
7a4e7e606d7f94222297752f4a930850506e838e | 215ea0eebf40b9e1037de4edc95db4fb8b3506b4 | /boards/models.py | d7980143ad65186d29849b0454921e21ee82f5db | [] | no_license | kaznakamura193/ky_app | ba648f9c2f27b389ea6393a83ef14f9f49df3773 | 55b6e5674062c6aab6a13aacc13ac31bcd895939 | refs/heads/master | 2020-04-01T01:50:55.068110 | 2018-10-12T13:32:33 | 2018-10-12T13:32:33 | 152,754,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | from django.db import models
from django.contrib.auth.models import User
class Board(models.Model):
name = models.CharField(max_length=30, unique=True)
description = models.CharField(max_length=100)
class Topic(models.Model):
subject = models.CharField(max_length=255)
last_updated = models.DateTimeField(auto_now_add=True)
board = models.ForeignKey(Board, on_delete=models.CASCADE, related_name='topics')
starter = models.ForeignKey(User, on_delete=models.CASCADE, related_name='topics')
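
# Example usage (added sketch; assumes migrations are applied and a `user` instance exists):
#   board = Board.objects.create(name='General', description='Anything goes')
#   topic = Topic.objects.create(subject='Hello', board=board, starter=user)
#   Post.objects.create(message='First post!', topic=topic, created_by=user)
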
class Post(models.Model):
message = models.TextField(max_length=4000)
topic = models.ForeignKey(Topic, on_delete=models.CASCADE, related_name='posts')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(null=True)
created_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name='posts')
updated_by = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name='+') | [
"[email protected]"
] | |
b35416f37ad9cfc67851d947813070decac673a1 | caf8cbcafd448a301997770165b323438d119f5e | /.history/chapter01/python_05_if_condition_20201128215219.py | 71f44fdea34ec2a59f9af31a0d6f8d10ae606e99 | [
"MIT"
] | permissive | KustomApe/nerdape | 03e0691f675f13ce2aefa46ee230111247e90c72 | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | refs/heads/main | 2023-01-23T10:13:26.584386 | 2020-11-28T22:29:49 | 2020-11-28T22:29:49 | 309,897,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | """[if文について]
About if statements: if <condition> holds, then do this
"""
# if condition:
#     block to execute
# Use this when a piece of logic should only run under some condition,
# e.g. the oil has to be changed every 3000 km
distance = 3403
if distance > 3000:
    print('Time for an oil change')
# Comparing strings / comparing lists
if 'abc' == 'ABC':
    print('1: they match')
if 'CDE' == 'CDE':
    print('2: they match')
if 'あいうえお' == 'あいうえお':
    print('3: they match')
if ['apple', 'banana'] == ['apple', 'banana']:
    print('1: the lists match')
# Searching within a string / searching for a list element
# if 'abc' in "ABC":
# print('ヒットしました!')
# if 'ドリフト' in '僕はドリフトが好きです':
# print('ヒットしました!')
# if 'japan' in 'japanese domestic market vehicle':
# print('ヒットしました!')
# else statement
# elif statement
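# A short else/elif illustration (added sketch, reusing `distance` from above):
if distance > 3000:
    print('Time for an oil change')
elif distance > 2500:
    print('Oil change due soon')
else:
    print('No oil change needed yet')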
| [
"[email protected]"
] | |
5fbc5c34b348b43fdf08241bcc02815f1289c467 | 6732ecf2f4277a59fd0877373bdd69d1c3d218f4 | /book/urls.py | d0126601e3ee4fcd157b26a2bff91be0fe06cf48 | [] | no_license | MatinaBajracharya/book-review-system | 2cf29e9560b7639d2e7bc06962f4e6b4da852d71 | 434f9bba1068fee814533311e3579df23ee59939 | refs/heads/master | 2023-04-25T16:51:06.400151 | 2021-05-11T10:44:05 | 2021-05-11T10:44:05 | 309,442,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | from django.urls import path
from .views import *
from django.conf.urls import url
urlpatterns = [
# path('browse/', BookListView.as_view(template_name='browse.html'), name='browse'),
path('browse/', book_list, name='browse'),
path('browse/<int:pk>/', detail, name='book-detail'),
path('history/', history, name='history'),
url(r'^books/$', searchbook, name="searchbook"),
path('autosuggestbook/', autosuggestbook, name='autosuggestbook'),
] | [
"[email protected]"
] | |
bf078dfcd61f760ea0651879022062800b6f6d61 | d54dd2e67a0df874abbe6e1a8d8b94700b1d4913 | /brollop1/settings.py | 2c91c1ddfbeb53a57ee1e600ffc1678ec16f20ea | [] | no_license | AAnkanJ/brollop | d381c720b9ecb06c9714ea8e3408bd92fb9d6ad9 | 95ad8fac01988774f75e64478c5bcccf22307174 | refs/heads/master | 2023-04-28T03:04:11.538490 | 2023-04-13T15:18:21 | 2023-04-13T15:18:21 | 223,752,159 | 0 | 0 | null | 2023-04-21T20:42:50 | 2019-11-24T13:53:41 | HTML | UTF-8 | Python | false | false | 4,129 | py | """
Django settings for brollop1 project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from decouple import config, Csv
import dj_database_url
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
fn = os.path.join(os.path.dirname(__file__), '../../etc/secret_key.txt')
with open(fn) as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['www.thornberg.pm', 'thornberg.pm','127.0.0.1']
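# NOTE: the assignments above re-define SECRET_KEY, DEBUG and ALLOWED_HOSTS,
# so the decouple/dj_database_url values read at the top of this file are
# overridden at import time (the same happens to DATABASES further down).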
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'accounts',
'boards',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'brollop1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'brollop1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
LOGOUT_REDIRECT_URL = 'home'
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'login'
EMAIL_BACKEND = config('EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
EMAIL_HOST = config('EMAIL_HOST', default='')
EMAIL_PORT = config('EMAIL_PORT', default=587, cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=True, cast=bool)
DEFAULT_FROM_EMAIL = 'Brollop <[email protected]>'
EMAIL_SUBJECT_PREFIX = '[Brollop] ' | [
"[email protected]"
] | |
b4423b66842562f192d05801e125386cca36aff8 | 9a5b9c29c87bf3a2d325e85fdf582e6f44dcec5f | /chapter4/exercises/p09.py | 4682911a9fe128b56e5edf5072c58004c9590726 | [] | no_license | ksomemo/NltkStudy | 814a9f8ca7a41fa790157607a6d46538ab87f2af | d4ab99d0bc68feae02ff5bc9fe6328f7548aa98a | refs/heads/master | 2020-06-03T10:30:57.969586 | 2014-04-06T02:38:41 | 2014-04-06T02:38:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # -*- coding: utf-8 -*-
import unittest
class TestSequenceFunctions(unittest.TestCase):
"""
    Strip leading and trailing whitespace from a string and collapse runs of spaces between words to a single space
"""
def setUp(self):
self.expected = 'a b c d'
self.target = ' a b c d '
def test_split_join(self):
actual = ' '.join(self.target.split())
self.assertEqual(self.expected, actual)
def test_regex(self):
import re
actual = re.sub('^ +| +$', '', self.target)
actual = re.sub(' +', ' ', actual)
self.assertEqual(self.expected, actual)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
413cc13fdfa37037259e369fca378ec613bff873 | d4f2f62ce6042382abd5e2f4be6993fd45c706d5 | /pystone/nested_enviroment.py | ab3547d4ea99f4088659bb15b03b60ac23ac5ff8 | [] | no_license | CosmosShadow/pystone | 62667bb5f098eddca4b11586c4e539e35d7e5891 | 62cfcd21b37f834569a7a54ae8c707003683f44c | refs/heads/master | 2021-03-27T15:46:57.594007 | 2017-10-15T17:21:14 | 2017-10-15T17:21:14 | 104,151,274 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | # coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import copy
import json
from .enviroment import Enviroment
from .exception import *
class NestedEnv(Enviroment):
def __init__(self, outer=None):
super(NestedEnv, self).__init__()
self._outer = outer
def set_outer(self, outer):
self._outer = outer
def __getitem__(self, key):
assert isinstance(key, str)
if key in self._values:
return self._values[key]
else:
if self._outer is not None:
return self._outer[key]
else:
                raise StoneException('value %s must be assigned before use' % key)
def put_new(self, key, value):
self._values[key] = value
def __setitem__(self, key, value):
assert isinstance(key, str)
obj = self.where(key)
obj = obj or self
obj.put_new(key, value)
def where(self, key):
if key in self._values:
return self
elif self._outer is None:
return None
else:
return self._outer.where(key)
def __repr__(self):
values = copy.deepcopy(self._values)
if self._outer is not None:
values['_outer'] = repr(self._outer)
return json.dumps(values)
| [
"[email protected]"
] | |
b39ca9d3645b3881d5ac6e6afc2ad3e6641815d9 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/DeeplabV3_for_Pytorch/mmseg/models/utils/inverted_residual.py | 01b9be23cc2a64dc5a71e842fd2b365f099dd07c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,777 | py | # Copyright 2021 Huawei
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mmcv.cnn import ConvModule
from torch import nn
from torch.utils import checkpoint as cp
from .se_layer import SELayer
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): Adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
dilation (int): Dilation rate of depthwise conv. Default: 1
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
dilation=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False,
**kwargs):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
**kwargs))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
**kwargs),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None,
**kwargs)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
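
# Shape sketch (illustrative, not from the original file):
#   block = InvertedResidual(in_channels=32, out_channels=32, stride=1, expand_ratio=6)
#   y = block(torch.randn(1, 32, 56, 56))  # -> (1, 32, 56, 56), with the residual added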
class InvertedResidualV3(nn.Module):
"""Inverted Residual Block for MobileNetV3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels. Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False):
super(InvertedResidualV3, self).__init__()
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2]
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=dict(
type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
| [
"[email protected]"
] | |
a457dfaf302c3936e9ab61ace2a95fb9b664ca19 | 462a3ff3a280623c0f76389e61fe129ad87f3f39 | /tests/test_config.py | eabd0fc8ea84c8cdcf36488dbe62038d7b58ae2c | [] | no_license | cgnitash/MABE_testing | 8c2365ae062633523d2bfcea0b4ed2a1771d2d53 | b37bf7da89edc3634ba0e89bff806e20adf700ec | refs/heads/master | 2020-03-19T04:44:46.428424 | 2018-05-30T16:25:16 | 2018-05-30T16:25:16 | 135,861,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,386 | py | import os, subprocess, pytest
from utils.helpers import this_repo_path, mabe, dotSlashMabe, dirname_baseline, dirname_testline, path_baseline_exe, path_testline_exe
from utils.helpers import cd, diff, repoDiff, runCmdAndHideOutput, runCmdAndShowOutput, runCmdAndSaveOutput, getFileContents
from utils.helpers import copyfileAndPermissions, movefile, movefileSwap
from utils.helpers import thisTestName, repoDiffForDifference, repoDiffForSimilarity, diffForDifference, diffForSimilarity
##
## all tests run IN ORDER OF DEFINITION and are run if they begin with 'test_'
## a test_fn() will fail on first failure and stop testing that test_fn, continuing on to the next test_fn.
## use `assert condition, "error message"` in a test_fn() to print a useful message on failure
##
@pytest.fixture ## indicates this is the constructor fn for all the test fns in this module
def ctx(): ## create a context for all the tests - you could potentially use this to pass an obj to all test fns
if not ctx.ran:
## generate cfg (have to 'cd' there, because mabe '-s' ignores 'GLOBAL-outputDirectory' setting)
## and run mabe with defaults
dirs = [dirname_baseline, dirname_testline]
for eachdir in dirs: ## loop through each of baseline and testline and generate the files for later diffing
cd(this_repo_path)
cd(eachdir)
runStr = dotSlashMabe+" -s"
runCmdAndSaveOutput( runStr, filename='screen-settings' ) ## save settings to file
for eachfile in ["settings.cfg", "settings_organism.cfg", "settings_world.cfg"]: ## make a backup of the settings files
copyfileAndPermissions(eachfile, eachfile+".bak")
runStr = dotSlashMabe+" -f settings*.cfg -s"
runCmdAndSaveOutput( runStr, filename='screen-settings-reload' ) ## load and save settings to file
for eachfile in ["settings.cfg", "settings_organism.cfg", "settings_world.cfg"]: ## make a backup of the settings files
copyfileAndPermissions(eachfile, eachfile+".bak")
##
## MORE GENERATION OF FILES OR BEHAVIOR HERE
##
cd('..') ## could also have done cd(this_repo_path)
## FYI, could have done it the following way if we were up one dir in mabe_testing
#runCmdAndSaveOutput( "{exe} -p GLOBAL-outputDirectory {path}".format(exe=path_baseline_exe, path=dirname_baseline), filename=dirname_baseline+'screen-simulation' )
ctx.ran = True
yield None ## could have actually passed a context object here to all the test fns
##
## teardown happens after the last test in the module finishes
##
return
ctx.ran = False
## testing consistency of screen output
def test_screen_settings(ctx):
repoDiffForSimilarity('screen-settings')
def test_screen_settings_reload(ctx):
repoDiffForSimilarity('screen-settings-reload')
## cfg from -s
def test_settings_cfg(ctx):
repoDiffForSimilarity('settings.cfg')
def test_settings_organism_cfg(ctx):
repoDiffForSimilarity('settings_organism.cfg')
def test_settings_world_cfg(ctx):
repoDiffForSimilarity('settings_world.cfg')
## cfg from -f *fg -s
def test_settings_reload_cfg(ctx):
diffForSimilarity(dirname_baseline+'settings.cfg.bak',
dirname_baseline+'settings.cfg',
outfilename='diffForSimilarity-baseline-settings.cfg')
diffForSimilarity(dirname_testline+'settings.cfg.bak',
dirname_testline+'settings.cfg',
outfilename='diffForSimilarity-testline-settings.cfg')
def test_settings_reload_organism_cfg(ctx):
diffForSimilarity(dirname_baseline+'settings_organism.cfg.bak',
dirname_baseline+'settings_organism.cfg',
outfilename='diffForSimilarity-baseline-settings_organism.cfg')
diffForSimilarity(dirname_testline+'settings_organism.cfg.bak',
dirname_testline+'settings_organism.cfg',
outfilename='diffForSimilarity-testline-settings_organism.cfg')
def test_settings_reload_world_cfg(ctx):
diffForSimilarity(dirname_baseline+'settings_world.cfg.bak',
dirname_baseline+'settings_world.cfg',
outfilename='diffForSimilarity-baseline-settings_world.cfg')
diffForSimilarity(dirname_testline+'settings_world.cfg.bak',
dirname_testline+'settings_world.cfg',
outfilename='diffForSimilarity-testline-settings_world.cfg')
| [
"[email protected]"
] | |
ea5d1188a37085eef617480b440ef7078d84af08 | 6be5dfe95f8b4d60267b00aab6e162f62cb66e43 | /jobscheduler/jobscheduler.py | c22cf03eb71efccf2997689afb5726eff5649575 | [
"MIT"
] | permissive | wenbobuaa/pykit | 5f0db60be4d86b46eb5c49739f5c05c1f8b7aae3 | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | refs/heads/master | 2021-04-26T23:52:28.091347 | 2018-07-17T01:57:14 | 2018-07-18T10:02:34 | 123,873,359 | 0 | 0 | MIT | 2018-03-28T05:58:17 | 2018-03-05T06:12:41 | Python | UTF-8 | Python | false | false | 8,685 | py | import ctypes
import datetime
import logging
import threading
import time
import pytz
import tzlocal
from pykit import threadutil
from pykit import timeutil
from pykit import utfjson
SYS_gettid = 186
logger = logging.getLogger(__name__)
class NextFireTimeError(Exception):
pass
class JobExistError(Exception):
pass
SECOND = {
'second': 1,
'minute': 60,
'hour': 60 * 60,
'day': 60 * 60 * 24,
'week': 60 * 60 * 24 * 7,
'month': 60 * 60 * 24 * 31,
}
def get_time_info(ts=None, tz=None):
if ts is None:
ts = time.time()
if tz is None:
tz = tzlocal.get_localzone()
dt = datetime.datetime.fromtimestamp(ts, tz)
time_info = {
'ts': ts,
'string': str(dt),
}
return time_info
def get_next_fire_time(conf, last_fire_ts):
n, unit = conf['every']
interval = n * SECOND[unit]
next_ts = last_fire_ts + interval
at = conf.get('at')
if at is None:
return get_time_info(next_ts)
timezone_name = conf.get('timezone')
if timezone_name is None:
tz = tzlocal.get_localzone()
else:
tz = pytz.timezone(timezone_name)
next_dt = datetime.datetime.fromtimestamp(next_ts, tz)
actual_next_dt = next_dt.replace(**at)
actual_next_ts = timeutil.datetime_to_ts(actual_next_dt)
next_fire_time = get_time_info(actual_next_ts)
if actual_next_ts <= last_fire_ts:
msg = ('next fire time: %s, is before last fire ts: %f, conf: %s' %
(repr(next_fire_time), last_fire_ts, repr(conf)))
raise NextFireTimeError(msg)
return next_fire_time
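# Illustrative conf (added sketch): fire once a day at 03:00 UTC.
#   get_next_fire_time({'every': (1, 'day'),
#                       'at': {'hour': 3, 'minute': 0, 'second': 0},
#                       'timezone': 'UTC'}, last_fire_ts=time.time())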
class JobScheduler(object):
def __init__(self, jobs, dump_status=None, reload_status=None,
should_job_run=None):
self.lock = threading.RLock()
self.jobs = jobs
self.status = {}
self.dump_status = dump_status
self.reload_status = reload_status
self.should_job_run = should_job_run
if self.reload_status is not None:
try:
status = self.reload_status()
logger.info('loaded job_status: %s', repr(status))
if status is not None:
self.status = status
except Exception as e:
logger.exception('failed to reload job status: %s' % repr(e))
def append_log(self, job_status, job_conf, log_msg):
job_status['log'].append(log_msg)
# log at start and end, add keep 3 more entries.
n = job_conf['concurrence_n'] * 2 + 3
job_status['log'] = job_status['log'][-n:]
def run_job(self, curr_time, job_name, job_conf, job_status):
thread_id = None
thread_ident = threading.current_thread().ident
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
thread_id = libc.syscall(SYS_gettid)
except Exception as e:
            logger.info('failed to get thread id: %s', repr(e))
with self.lock:
job_status['active_threads'][thread_ident] = {
'start_time': curr_time,
'thread_ident': thread_ident,
'thread_id': thread_id,
}
log_msg = 'thread: %s-%s started at: %s' % (
thread_ident, thread_id, curr_time)
self.append_log(job_status, job_conf, log_msg)
try:
job_conf['func'](*job_conf['args'], **job_conf['kwargs'])
with self.lock:
log_msg = 'thread: %s-%s finished at: %s' % (
thread_ident, thread_id, get_time_info(ts=time.time()))
self.append_log(job_status, job_conf, log_msg)
del(job_status['active_threads'][thread_ident])
except Exception as e:
logger.exception('failed to run job: %s, %s' %
(job_name, repr(e)))
with self.lock:
log_msg = 'thread: %s-%s failed at: %s' % (
thread_ident, thread_id, get_time_info(ts=time.time()))
self.append_log(job_status, job_conf, log_msg)
del(job_status['active_threads'][thread_ident])
job_status['message'] = repr(e)
def fire(self, curr_time, job_name, job_conf, job_status):
thread_n = len(job_status['active_threads'])
if thread_n >= job_conf['concurrence_n']:
log_msg = 'at time: %s, already have %d threads for job: %s' % (
curr_time, thread_n, job_name)
self.append_log(job_status, job_conf, log_msg)
logger.error('too many threads for job: %s' % job_name)
return
logger.info('at time: %s, start to run job: %s' %
(curr_time, job_name))
threadutil.start_daemon_thread(
self.run_job, args=(curr_time, job_name, job_conf, job_status))
job_status['message'] = ''
def schedule_one_job(self, curr_time, job_name, job_conf):
if 'concurrence_n' not in job_conf:
job_conf['concurrence_n'] = 1
if job_name not in self.status:
self.status[job_name] = {
'active_threads': {},
'log': [],
}
job_status = self.status[job_name]
job_status['curr_time'] = curr_time
if self.should_job_run is not None:
should_run = self.should_job_run(job_conf)
if not should_run:
job_status['message'] = 'this job do not need to run'
return
if 'next_fire_time' not in job_status:
if job_conf.get('at') is None:
job_status['fire_time'] = curr_time
self.fire(curr_time, job_name, job_conf, job_status)
job_status['next_fire_time'] = get_next_fire_time(
job_conf, curr_time['ts'])
else:
n, unit = job_conf['every']
interval = n * SECOND[unit]
next_fire_time = get_next_fire_time(
job_conf, curr_time['ts'] - interval)
if next_fire_time['ts'] < curr_time['ts']:
next_fire_time = get_next_fire_time(
job_conf, curr_time['ts'])
job_status['next_fire_time'] = next_fire_time
if curr_time['ts'] >= job_status['next_fire_time']['ts']:
job_status['fire_time'] = curr_time
self.fire(curr_time, job_name, job_conf, job_status)
job_status['next_fire_time'] = get_next_fire_time(
job_conf, job_status['next_fire_time']['ts'])
def _schedule(self, curr_time):
for job_name, job_conf in self.jobs.iteritems():
try:
self.schedule_one_job(curr_time, job_name, job_conf)
except Exception as e:
logger.exception('failed to schedule job %s: %s' %
(job_name, repr(e)))
def schedule(self):
for job_name, job_status in self.status.items():
if job_name not in self.jobs:
del(self.status[job_name])
continue
if len(job_status['active_threads']) > 0:
msg = 'threads aborted by restart: %s' % (
job_status['active_threads'])
self.status[job_name]['message'] = msg
self.status[job_name]['active_threads'] = {}
while True:
curr_time = get_time_info(time.time())
with self.lock:
self._schedule(curr_time)
if self.dump_status is not None:
try:
self.dump_status(self.status)
except Exception as e:
logger.exception('failed to dump job status: %s' % repr(e))
for job_name, job_status in self.status.items():
logger.info('status of job %s, %s' %
(job_name, utfjson.dump(job_status)))
end_time = time.time()
logger.info('scheduled at: %s, time used: %f' %
(repr(curr_time), end_time - curr_time['ts']))
to_sleep = 60 - (end_time % 60) + 1
time.sleep(to_sleep)
def add_job(self, job_name, job_conf):
with self.lock:
if job_name in self.jobs:
raise JobExistError('job: %s already exists' % job_name)
self.jobs[job_name] = job_conf
def delete_job(self, job_name):
with self.lock:
if job_name in self.jobs:
del(self.jobs[job_name])
if job_name in self.status:
del(self.status[job_name])
| [
"[email protected]"
] | |
1f3a8832fcfc01a0ed8057f1fccc758b30dd56cb | b3cb41c81069ad2e447a7bab98fd269235996a51 | /dev/phillips_compare.py | 84e5f261e2193c6b85e1c4b41bff7cd61ce6b9d9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | usuaero/PyProp | e289e8bd64d2d0db51547a808a1f019b37b14fc4 | e568dda610632adf1ab208a6861cca8d8dd84e75 | refs/heads/master | 2023-06-03T01:35:08.525608 | 2021-06-21T16:45:51 | 2021-06-21T16:45:51 | 280,196,572 | 15 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | """Duplicates Phillips' "Mechanics of Flight" Example 2.3.1"""
import pyprop
import numpy as np
from pyprop.helpers import to_rpm
import matplotlib.pyplot as plt
# Declare prop getter functions
def airfoil_CL(**kwargs):
alpha = kwargs.get("alpha", 0.0)
a_b = np.asarray(alpha+np.radians(2.1))
return np.where(a_b <= 0.25, 2.0*np.pi*a_b, 0.5*np.pi*np.cos(a_b)/np.cos(0.25))
def airfoil_Cm(**kwargs):
return 0.0
def airfoil_CD(**kwargs):
alpha = kwargs.get("alpha", 0.0)
a_b = np.asarray(alpha+np.radians(2.1))
return np.where(a_b <= 0.25, 0.224*a_b**2+0.006, np.where(a_b <= 0.3, 16.6944*a_b**2-1.0234, 0.5*np.pi*np.sin(a_b)/np.cos(0.25)))
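# Spot check (illustrative): at alpha = 0 the effective angle is
# radians(2.1) ~= 0.0367 rad, inside the linear regime, so
# airfoil_CL() ~= 2*pi*0.0367 ~= 0.23 and airfoil_CD() ~= 0.224*0.0367**2 + 0.006 ~= 0.0063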
# Declare prop input
phillips_prop = {
"airfoils" : {
"phillips" : {
"type" : "functional",
"CL" : airfoil_CL,
"Cm" : airfoil_Cm,
"CD" : airfoil_CD
}
},
"geometry" : {
"n_blades" : 2,
"hub_radius" : 0.05,
"weight" : 4.0,
"diameter" : 1.0,
"geom_pitch" : 0.5,
"chord" : ["elliptic", 0.075],
"rotation" : "CCW",
"airfoil" : "phillips",
"grid" : 100
}
}
# Reproduce Figures 2.3.7-9
num_pitches = 10
num_Js = 100
Js = np.linspace(0.0, 1.4, num_Js)
K_cs = np.linspace(0.3, 1.2, num_pitches)
C_T = np.zeros((num_pitches, num_Js))
C_P = np.zeros((num_pitches, num_Js))
eta = np.zeros((num_pitches, num_Js))
w = 2000 # arbitrary since we have no Reynolds dependence
# Loop through pitches
for i, K_c in enumerate(K_cs):
phillips_prop["geometry"]["geom_pitch"] = K_c
prop = pyprop.BladeElementProp("phillips_prop", phillips_prop)
# Loop through advance ratios
for j, J in enumerate(Js):
V = prop.get_velocity(to_rpm(w), J)
C_T[i,j] = prop.get_thrust_coef(w, V)
C_P[i,j] = prop.get_power_coef(w, V)
# Create figure 2.3.5
if K_c == 0.5:
prop.plot_angles_over_zeta(w, 0.0)
V = prop.get_velocity(to_rpm(w), 0.25)
prop.plot_angles_over_zeta(w, V)
# Calculate efficiency
eta = C_T*Js[np.newaxis,:]/C_P
# Plot thrust coefficient
plt.figure()
for i, K_c in enumerate(K_cs):
plt.plot(Js, C_T[i,:], label=str(round(K_c, 1)))
plt.xlabel("J")
plt.ylabel("C_T")
plt.legend()
plt.gca().set_xlim([0.0, 1.4])
plt.gca().set_ylim([0.0, 0.11])
plt.show()
# Plot power coefficient
plt.figure()
for i, K_c in enumerate(K_cs):
plt.plot(Js, C_P[i,:], label=str(round(K_c, 1)))
plt.xlabel("J")
plt.ylabel("C_P")
plt.legend()
plt.gca().set_xlim([0.0, 1.4])
plt.gca().set_ylim([0.0, 0.09])
plt.show()
# Plot efficiency
plt.figure()
for i, K_c in enumerate(K_cs):
plt.plot(Js, eta[i,:], label=str(round(K_c, 1)))
plt.xlabel("J")
plt.ylabel("Propulsive Efficiency")
plt.legend()
plt.gca().set_xlim([0.0, 1.4])
plt.gca().set_ylim([0.0, 1.0])
plt.show() | [
"[email protected]"
] | |
bc9a9fb49c34d6e77443b627f5818349c827d6ab | afb584832c8b70e15b93cf92bc2029c165c66482 | /014-timecycle.py | 1c7c49cd2fc0c40c44fe0db471ee72ce53d82e8d | [] | no_license | zenogears/BuRasPi | 474c20856884b44c735180d608e3a14af5c9ebe7 | d5284b7c2c3fbdb218e09f3b6a10d8b3079536bb | refs/heads/master | 2020-12-29T01:54:50.352209 | 2016-04-20T23:33:12 | 2016-04-20T23:33:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,906 | py | #!/usr/bin/python3.4
#coding=utf-8
import time
from datetime import datetime
import RPi.GPIO as GPIO
import os
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM) # For some reason only this mode works
# with my wiring of the 4-relay module. Pinout walkthrough: http://www.youtube.com/watch?v=Ln2owTgYv9M&index=4&list=PLTejl8qzLUsQuvwGsrdSC7KPgWu7mahWn
GPIO.setup(17, GPIO.OUT)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.output(17, 1)
GPIO.output(18, 1)
GPIO.output(22, 1)
GPIO.output(27, 1)
f = open("commonlog.txt", "a")
f.write("Перезапуск скрипта: " + str(datetime.strftime(datetime.now(), "%d.%m.%Y %H:%M:%S")) + " \n")
f.close()
# Reads a plan file and returns a list of moments. Called repeatedly at run time, in the sense of "according to THIS plan", by device_off and device_on
def planreader(plan):
    with open(plan, "r") as f:
        return f.read().split("\n")
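# Example plan file (added sketch) -- one "%H:%M:%S" moment per line:
#   07:30:00
#   19:45:00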
#print (planreader("ONplan.txt")) #debug
#print (planreader("OFFplan.txt")) #debug
#print("Текущее время: ", datetime.strftime(datetime.now(), "%H:%M:%S")) #контроль времени
#Функция вЫключения. Принимает пин, план и логфайл
#Пин указывать номером согласно базовой схеме подключения. Если указать не тот, то неверное устройство будет работать по неверному плану.
#Логфайл лучше всего именовать по имени девайса, например, LampaLog.txt.
#В будущем лучше сделать единый лог без параметра функции, но уже сейчас можно писать в один файл, просто указав его для всех вызовов
def device_off(pin, offplan, logfile="commonlog.txt"):
    for moment in planreader(offplan): # Yes, I know: this re-reads the plan file on every call, just to be safe
if moment == datetime.strftime(datetime.now(), "%H:%M:%S"):
GPIO.output(pin,1)
print("Выключено в ", moment) #при ручном запуске лучше раскомментировать, чтобы не смотреть в логи
#Пишем в лог
f = open(logfile, "a")
f.write("Устройство на пине " + str(pin) + " выключено " + str(datetime.strftime(datetime.now(), "%d.%m.%Y %H:%M:%S")) + "\n")
f.close()
# Switch-ON function. Takes a pin, a plan and a log file.
# Identical to the previous one, except it switches the device on instead of off.
def device_on(pin, onplan, logfile="commonlog.txt"):
    for moment in planreader(onplan): # Yes, I know: this re-reads the plan file on every call, just to be safe
if moment == datetime.strftime(datetime.now(), "%H:%M:%S"):
GPIO.output(pin,0)
print("Включено в ", moment) #при ручном запуске лучше раскомментировать, чтобы не смотреть в логи
#Пишем в лог
f = open(logfile, "a")
f.write("Устройство на пине " + str(pin) + " включено " + str(datetime.strftime(datetime.now(), "%d.%m.%Y %H:%M:%S")) + "\n")
f.close()
while True:
    time.sleep(0.1) # Eases the CPU load a little, cutting activity to 9-10 passes per second
    #---- DUPLICATE THIS BLOCK FOR EVERY DEVICE YOU NEED ----
    # Read the given plan and call the switch-on / switch-off function
    # A plan file must exist for EVERY device!
    # Switching on
for moment in planreader("ONplan.txt"):
if moment == datetime.strftime(datetime.now(), "%H:%M:%S"):
            # Call the switch-on function
            device_on(18, "ONplan.txt", "commonlog.txt")
            # Suppresses this block for the rest of the current second after the first hit, otherwise it would spam the log with repeated calls. To be safe, avoid using the same moment for every device, just in case...
while moment == datetime.strftime(datetime.now(), "%H:%M:%S"):
continue
    # Switching off
for moment in planreader("OFFplan.txt"):
if moment == datetime.strftime(datetime.now(), "%H:%M:%S"):
            # Call the switch-off function
            device_off(18, "OFFplan.txt", "commonlog.txt")
            # Suppresses this block for the rest of the current second after the first hit, otherwise it would spam the log with repeated calls. To be safe, avoid using the same moment for every device, just in case...
while moment == datetime.strftime(datetime.now(), "%H:%M:%S"):
continue
    #---- End of the duplicated block ----
| [
"[email protected]"
] | |
4be336dfbf169295360a2a105ef7008b6e59a2a1 | 3241c360a6fa528ab282d5864ecaf054afd09360 | /Algorithm/SANGMIN/11501.py | e85725c35e0b40b5e85b67c3071c7fdb036deff7 | [] | no_license | sangjun-Leee/SangSangPlus | 33128044a58fc4f428e3ac8318b06fdc75d9bd31 | f7d976ef72489d393de4475463ca279ae4cb167f | refs/heads/main | 2023-08-20T12:01:31.325122 | 2021-10-15T06:44:28 | 2021-10-15T06:44:28 | 406,429,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | '''
https://www.acmicpc.net/problem/11501
주식
[풀이]
1. 핵심은, 두 가지이다.
=> 미래에 가격이 오르면 무조건 현재 산다.
=> 미래에 가격이 최대인 날에 주식을 판다.
2. 또한, 주어진 주식 데이터 길이가 1,000,000 이므로 N^2은 불가능
=> N 으로 진행해야 겠다는 생각을 할 수 있음
3. 이 때, 현재를 기준으로 미래의 데이터를 조사(미래에 가격이 최대인 날을 찾는 과정)하려고 하면 N^2이 소요된다.
=> 따라서, 현재를 기준으로 과거 데이터를 조사하는 방향으로 하자.
4. 그렇게 되면 다음과 같은 통찰을 얻는다.
=> 지금 있는 시점이 가격이 최대라면 과거 시점들은 모두 이 시점에 판매하겠군.
=> 지금 있는 시점이 가격이 최대가 아니라면 지금 시점 보다 값이 비싼 과거 시점에 판매를 하겠군
=> 그리고 그 시점 이후에는 내 시점에서 판매하겠군. (만약 아니라면 또 반복)
5. 이에 대한 구현을 한다.
=> 매번 최대 가격을 구하며, 각각의 시점에서의 가격은 최대 가격에서 팔것이므로 뺀 값을 매번 추가
'''
import sys
input = sys.stdin.readline
T = int(input())
for _ in range(T):
N = int(input())
prices = list(map(int, input().split()))
answer = selling_price = 0
for price in prices[::-1]:
selling_price = max(selling_price, price)
answer += selling_price-price
print(answer)
| [
"[email protected]"
] | |
be07696d50b5ada8255d3fd83bee4267a66a07dd | 2ad789090db7a377f746413e397b603c76f547fa | /V-REP_PRO_EDU_V3_6_0_Ubuntu16_04/programming/bluezero/bindings/python-ctypes/test-publisher.py | 27a990ea5006c9e13b6a60bca395ffb9342c2393 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JasonLeeUT/Robotics | 442b8217918a32b3e8df454b9890ab83280ee7f3 | 7e21e5d18b42cfcaafd01b0243f221a051692f11 | refs/heads/master | 2023-01-23T00:14:41.801025 | 2020-11-19T02:23:55 | 2020-11-19T02:23:55 | 314,112,186 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | # -*- coding: utf-8 -*-
import b0
import time
node = b0.Node('python-publisher')
pub = b0.Publisher(node, 'A')
node.init()
print('Publishing to topic "%s"...' % pub.get_topic_name())
i = 0
while not node.shutdown_requested():
msg_str = u'µsg-%d' % i
i += 1
print('Sending message "%s"...' % msg_str)
msg = msg_str.encode('utf-8')
pub.publish(msg)
node.spin_once()
time.sleep(1)
node.cleanup()
| [
"[email protected]"
] | |
6f8999c0633473f5d33dcc21aa6d0f460b475531 | b853c3b9e7892280bd4095a975e8dd762363f5f5 | /api/migrations/0001_initial.py | 3b1df9126365ff50502f09a889a81fb7dea0d1bf | [] | no_license | yeswanthannadata/genderapi | d3c3d89b21690b264b3eb94b01dd6e7cff6b612c | b9c3e4b840c8246e51e9d9a76e954c0a474923ab | refs/heads/master | 2020-04-11T04:23:12.639388 | 2016-09-12T15:33:15 | 2016-09-12T15:33:15 | 68,022,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Details',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('gender', models.CharField(max_length=255)),
],
options={
'db_table': 'details',
},
),
]
| [
"[email protected]"
] | |
d0dec12e629ac375ae18b31e5bc6d115b4badd64 | f6f0cadc0d74aaad6512a3faab3fce2cfc858f10 | /run_copy.py | fb1a1f244319e886d79d9f5d5c6c0c3ca227447e | [
"MIT"
] | permissive | wxwxzhang/extreme-summarization-of-source-code | f70f0a4ce7861866e012e52de4931a24c6cf7e35 | e62b8174faa548bc4f240762835e61af46e5223e | refs/heads/master | 2020-09-03T15:30:30.325805 | 2018-09-21T11:46:27 | 2018-09-21T11:46:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,161 | py | import torch
import torch.optim as optim
import torch.nn as nn
from torchtext.data import Field, LabelField
from torchtext.data import TabularDataset
from torchtext.data import BucketIterator
import os
import argparse
import random
import models
import utils
parser = argparse.ArgumentParser(description='Implementation of \'A Convolutional Attention Network for Extreme Summarization of Source Code\'', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--project', default='cassandra', type=str, help='Which project to run on')
parser.add_argument('--data_dir', default='data', type=str, help='Where to find the training data')
parser.add_argument('--checkpoints_dir', default='checkpoints', type=str, help='Where to save the model checkpoints')
parser.add_argument('--no_cuda', action='store_true', help='Use this flag to stop using the GPU')
parser.add_argument('--min_freq', default=2, help='Minimum times a token must appear in the dataset to not be unk\'d')
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--emb_dim', default=128, type=int)
parser.add_argument('--k1', default=32, type=int)
parser.add_argument('--k2', default=16, type=int)
parser.add_argument('--w1', default=18, type=int)
parser.add_argument('--w2', default=19, type=int)
parser.add_argument('--w3', default=2, type=int)
parser.add_argument('--dropout', default=0.4, type=float)
parser.add_argument('--clip', default=0.75, type=float)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--seed', default=1234, type=int)
args = parser.parse_args()
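# Example invocation (illustrative):
#   python run_copy.py --project cassandra --batch_size 32 --epochs 50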
assert os.path.exists(f'{args.data_dir}/{args.project}_train.json')
assert os.path.exists(f'{args.data_dir}/{args.project}_test.json')
if not os.path.exists(f'{args.checkpoints_dir}'):
os.mkdir(f'{args.checkpoints_dir}')
#make deterministic
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
random.seed(args.seed)
#get available device
device = torch.device('cuda' if (torch.cuda.is_available() and not args.no_cuda) else 'cpu')
#set up fields
BODY = Field()
NAME = Field()
fields = {'name': ('name', NAME), 'body': ('body', BODY)}
#get data from json
train, test = TabularDataset.splits(
path = 'data',
train = f'{args.project}_train.json',
test = f'{args.project}_test.json',
format = 'json',
fields = fields
)
#build the vocabulary
BODY.build_vocab(train.body, train.name, min_freq=args.min_freq)
NAME.build_vocab(train.body, train.name, min_freq=args.min_freq)
# make iterator for splits
train_iter, test_iter = BucketIterator.splits(
(train, test),
batch_size=args.batch_size,
sort_key=lambda x: len(x.name),
repeat=False,
device=-1 if device == 'cpu' else None)
#calculate these for the model
vocab_size = len(BODY.vocab)
pad_idx = BODY.vocab.stoi['<pad>']
unk_idx = BODY.vocab.stoi['<unk>']
#initialize model
model = models.CopyAttentionNetwork(vocab_size, args.emb_dim, args.k1, args.k2, args.w1, args.w2, args.w3, args.dropout, pad_idx)
#place on GPU if available
model = model.to(device)
#initialize optimizer
optimizer = optim.RMSprop(model.parameters(), momentum=0.9, lr=1e-3)
def train(model, iterator, optimizer, clip):
#turn on dropout/bn
model.train()
epoch_loss = 0
n_examples = 0
precision = 0
recall = 0
f1 = 0
for i, batch in enumerate(iterator):
bodies = batch.body
names = batch.name
optimizer.zero_grad()
I = torch.zeros(names.shape[0], names.shape[1], bodies.shape[0]).to(device)
_ones = torch.ones(bodies.shape[0]).to(device)
_zeros = torch.zeros(bodies.shape[0]).to(device)
#create the I tensor
#the length of the method body where elements are:
# 1 in the position where the current token you are trying to predict are in the body
# 0 otherwise
for j, name in enumerate(names):
for k, token in enumerate(name):
I[j,k,:] = torch.where(bodies[:,k] == token, _ones, _zeros)
#output is predictions
#kappas are copy-attention over the body
#lambdas are probability of copy over generate from vocab
output, kappas, lambdas = model(bodies, names)
examples = names.shape[1]
n_examples += examples
copy_preds = kappas.max(2)[1]
vocab_preds = output.max(2)[1]
for ex in range(examples):
predicted = []
actual = [n.item() for n in names[:,ex][1:]]
for n, l in enumerate(lambdas[:,ex][1:], start=1):
if l.item() >= 0.5: #do copy
copied_token_position = copy_preds[n,ex]
predicted.append(bodies[copied_token_position, ex].item())
else:
predicted.append(vocab_preds[n,ex].item())
_precision, _recall, _f1 = utils.token_precision_recall(predicted, actual, unk_idx)
precision += _precision
recall += _recall
f1 += _f1
#reshape parameters
output = output[1:].view(-1, output.shape[2])
kappas = kappas[1:].view(-1, kappas.shape[2])
lambdas = lambdas[1:].view(-1)
I = I[1:].view(-1, I.shape[2])
names = names[1:].view(-1, 1)
#probability of using copy and model predictions from vocab
use_copy = torch.log(lambdas + 10e-8) + torch.sum(I * kappas, dim=1)
use_model = torch.log(1 - lambdas + 10e-8) + torch.gather(output, 1, names).squeeze(1)
#calculate loss
loss = torch.mean(utils.logsumexp(use_copy, use_model))
#calculate gradients
loss.backward()
#clip to prevent exploding gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
#update parameters
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator), precision/n_examples, recall/n_examples, f1/n_examples
def evaluate(model, iterator):
#turn off bn/dropout
model.eval()
epoch_loss = 0
n_examples = 0
precision = 0
recall = 0
f1 = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
bodies = batch.body
names = batch.name
I = torch.zeros(names.shape[0], names.shape[1], bodies.shape[0]).to(device)
_ones = torch.ones(bodies.shape[0]).to(device)
_zeros = torch.zeros(bodies.shape[0]).to(device)
for j, name in enumerate(names):
for k, token in enumerate(name):
I[j,k,:] = torch.where(bodies[:,k] == token, _ones, _zeros)
output, kappas, lambdas = model(bodies, names, 0) #set teacher forcing to zero
examples = names.shape[1]
n_examples += examples
copy_preds = kappas.max(2)[1]
vocab_preds = output.max(2)[1]
for ex in range(examples):
predicted = []
actual = [n.item() for n in names[:,ex][1:]]
for n, l in enumerate(lambdas[:,ex][1:], start=1):
if l.item() >= 0.5: #do copy
copied_token_position = copy_preds[n,ex]
predicted.append(bodies[copied_token_position, ex].item())
else:
predicted.append(vocab_preds[n,ex].item())
_precision, _recall, _f1 = utils.token_precision_recall(predicted, actual, unk_idx)
precision += _precision
recall += _recall
f1 += _f1
output = output[1:].view(-1, output.shape[2])
kappas = kappas[1:].view(-1, kappas.shape[2])
lambdas = lambdas[1:].view(-1)
I = I[1:].view(-1, I.shape[2])
names = names[1:].view(-1,1)
use_copy = torch.log(lambdas + 10e-8) + torch.sum(I * kappas, dim=1)
use_model = torch.log(1 - lambdas + 10e-8) + torch.gather(output, 1, names).squeeze(1)
loss = torch.mean(utils.logsumexp(use_copy, use_model))
epoch_loss += loss.item()
return epoch_loss / len(iterator), precision/n_examples, recall/n_examples, f1/n_examples
best_test_loss = float('inf')
if not os.path.isdir(f'{args.checkpoints_dir}'):
os.makedirs(f'{args.checkpoints_dir}')
for epoch in range(args.epochs):
train_loss, train_precision, train_recall, train_f1 = train(model, train_iter, optimizer, args.clip)
test_loss, test_precision, test_recall, test_f1 = evaluate(model, test_iter)
if test_loss < best_test_loss:
best_test_loss = test_loss
torch.save(model.state_dict(), f'{args.checkpoints_dir}/{args.project}-copy-model.pt')
print(f'| Epoch: {epoch+1:03} | Train Loss: {train_loss:.3f} | Train F1: {train_f1:.3f} | Test Loss: {test_loss:.3f} | Test F1: {test_f1:.3f}') | [
"[email protected]"
] | |
3def15806b2c6935dae0896a56b6fcfb20bc592b | c225a68767e1d2a4123e19772f2e08e8967fb592 | /mainsite/models.py | bd5259a67b860ad815b55e4bf228b644fc536050 | [] | no_license | Sajor-X/Sajor_blog | 379087e1b41874c4878e398ae09895d7d43d21b0 | f318eb4494e120c6ee8f7e05eba111b54b32d1c5 | refs/heads/master | 2022-11-18T23:20:30.225447 | 2018-04-03T12:04:01 | 2018-04-03T12:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=200)
slug = models.CharField(max_length=200)
body = models.TextField()
pub_date = models.DateTimeField(default=timezone.now)
class Meta:
ordering = ('-pub_date', )
def __unicode__(self):
return self.title | [
"[email protected]"
] | |
480cd6b1eaf6e81feaa834bfd246f0e85cf9695e | 9e96e21a9c51a1ca4e2183f163e566bc24c07d42 | /maskrcnn_benchmark/utils/comm.py | b12072aea4fd0f46b6578ee6c1b86782b587594d | [] | no_license | zhouhaocv/RLM-Net | 7a13263f591a99bbd5abd126004240aff9ca66d7 | c1a692e6b84e59957da7369dc6426a8515cb5d8e | refs/heads/master | 2020-06-25T12:51:36.971532 | 2020-06-22T07:21:10 | 2020-06-22T07:21:10 | 199,311,490 | 16 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,143 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import os
import pickle
import tempfile
import time
import torch
import torch.distributed as dist
def get_world_size():
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size()
def get_rank():
if not torch.distributed.is_initialized():
return 0
return torch.distributed.get_rank()
def is_main_process():
if not torch.distributed.is_initialized():
return True
return torch.distributed.get_rank() == 0
def synchronize():
"""
Helper function to synchronize between multiple processes when
using distributed training
"""
if not torch.distributed.is_initialized():
return
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
if world_size == 1:
return
def _send_and_wait(r):
if rank == r:
tensor = torch.tensor(0, device="cuda")
else:
tensor = torch.tensor(1, device="cuda")
torch.distributed.broadcast(tensor, r)
while tensor.item() == 1:
time.sleep(1)
_send_and_wait(0)
# now sync on the main process
_send_and_wait(1)
def _encode(encoded_data, data):
# gets a byte representation for the data
encoded_bytes = pickle.dumps(data)
# convert this byte string into a byte tensor
storage = torch.ByteStorage.from_buffer(encoded_bytes)
tensor = torch.ByteTensor(storage).to("cuda")
# encoding: first byte is the size and then rest is the data
s = tensor.numel()
assert s <= 255, "Can't encode data greater than 255 bytes"
# put the encoded data in encoded_data
encoded_data[0] = s
encoded_data[1 : (s + 1)] = tensor
def _decode(encoded_data):
size = encoded_data[0]
encoded_tensor = encoded_data[1 : (size + 1)].to("cpu")
return pickle.loads(bytearray(encoded_tensor.tolist()))
# TODO try to use tensor in shared-memory instead of serializing to disk
# this involves getting the all_gather to work
def scatter_gather(data):
"""
This function gathers data from multiple processes, and returns them
in a list, as they were obtained from each process.
This function is useful for retrieving data from multiple processes,
when launching the code with torch.distributed.launch
Note: this function is slow and should not be used in tight loops, i.e.,
do not use it in the training loop.
Arguments:
data: the object to be gathered from multiple processes.
It must be serializable
Returns:
result (list): a list with as many elements as there are processes,
where each element i in the list corresponds to the data that was
gathered from the process of rank i.
"""
# strategy: the main process creates a temporary directory, and communicates
# the location of the temporary directory to all other processes.
# each process will then serialize the data to the folder defined by
# the main process, and then the main process reads all of the serialized
# files and returns them in a list
if not torch.distributed.is_initialized():
return [data]
synchronize()
# get rank of the current process
rank = torch.distributed.get_rank()
# the data to communicate should be small
data_to_communicate = torch.empty(256, dtype=torch.uint8, device="cuda")
if rank == 0:
# manually creates a temporary directory, that needs to be cleaned
# afterwards
tmp_dir = tempfile.mkdtemp()
_encode(data_to_communicate, tmp_dir)
synchronize()
# the main process (rank=0) communicates the data to all processes
torch.distributed.broadcast(data_to_communicate, 0)
# get the data that was communicated
tmp_dir = _decode(data_to_communicate)
# each process serializes to a different file
file_template = "file{}.pth"
tmp_file = os.path.join(tmp_dir, file_template.format(rank))
torch.save(data, tmp_file)
# synchronize before loading the data
synchronize()
# only the master process returns the data
if rank == 0:
data_list = []
world_size = torch.distributed.get_world_size()
for r in range(world_size):
file_path = os.path.join(tmp_dir, file_template.format(r))
d = torch.load(file_path)
data_list.append(d)
# cleanup
os.remove(file_path)
# cleanup
os.rmdir(tmp_dir)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.IntTensor([tensor.numel()]).to("cuda")
size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list | [
"[email protected]"
] | |
39bea274ace88e368afd36d0ea7fb79c2d3db73a | be6b100e09aeeb7e7172abd037a00a6d29dca4f4 | /feeds/views.py | 392b277ba842e20b217a1da03e5135ad09fb98e5 | [] | no_license | forstleblue/DemoAppUser | fc93fe97398a1151fa9be10cdc60cb8f152ce9ec | 6a31a82e0b7a82ae1e09631c86a787e7ae08eb1a | refs/heads/master | 2021-07-08T01:55:00.244423 | 2017-10-01T04:01:05 | 2017-10-01T04:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,922 | py | import json
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import (HttpResponse, HttpResponseBadRequest,
HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from decorators import ajax_required
from feeds.models import Feed
FEEDS_NUM_PAGES = 10
@login_required
def feeds(request):
all_feeds = Feed.get_feeds()
paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
feeds = paginator.page(1)
from_feed = -1
if feeds:
from_feed = feeds[0].id
#import pdb; pdb.set_trace()
return render(request, 'feeds.html', {
'feeds': feeds,
'from_feed': from_feed,
'page': 1,
'username': request.user.username,
})
def feed(request, pk):
feed = get_object_or_404(Feed, pk=pk)
return render(request, 'feed.html', {'feed': feed})
@login_required
@ajax_required
def load(request):
from_feed = request.GET.get('from_feed')
page = request.GET.get('page')
feed_source = request.GET.get('feed_source')
all_feeds = Feed.get_feeds(from_feed)
if feed_source != 'all':
all_feeds = all_feeds.filter(user__id=feed_source)
paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
try:
feeds = paginator.page(page)
except PageNotAnInteger:
return HttpResponseBadRequest()
except EmptyPage:
feeds = []
html = ''
csrf_token = (csrf(request)['csrf_token'])
for feed in feeds:
html = '{0}{1}'.format(html,
render_to_string('partial_feed.html',
{
'feed': feed,
'user': request.user,
'csrf_token': csrf_token
}))
return HttpResponse(html)
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
feeds = Feed.get_feeds_after(last_feed)
if feed_source != 'all':
feeds = feeds.filter(user__id=feed_source)
html = ''
for feed in feeds:
html = '{0}{1}'.format(html,
render_to_string('partial_feed.html',
{
'feed': feed,
'user': user,
'csrf_token': csrf_token
}))
return html
@login_required
@ajax_required
def load_new(request):
last_feed = request.GET.get('last_feed')
user = request.user
csrf_token = (csrf(request)['csrf_token'])
html = _html_feeds(last_feed, user, csrf_token)
return HttpResponse(html)
@login_required
@ajax_required
def check(request):
last_feed = request.GET.get('last_feed')
feed_source = request.GET.get('feed_source')
feeds = Feed.get_feeds_after(last_feed)
if feed_source != 'all':
feeds = feeds.filter(user__id=feed_source)
count = feeds.count()
return HttpResponse(count)
@login_required
@ajax_required
def post(request):
last_feed = request.POST.get('last_feed')
user = request.user
csrf_token = (csrf(request)['csrf_token'])
feed = Feed()
feed.user = user
post = request.POST['post']
post = post.strip()
if len(post) > 0:
feed.post = post[:255]
feed.save()
html = _html_feeds(last_feed, user, csrf_token)
return HttpResponse(html)
@login_required
@ajax_required
def comment(request):
if request.method == 'POST':
feed_id = request.POST['feed']
feed = Feed.objects.get(pk=feed_id)
post = request.POST['post']
post = post.strip()
if len(post) > 0:
post = post[:255]
user = request.user
feed.comment(user=user, post=post)
user.profile.notify_commented(feed)
user.profile.notify_also_commented(feed)
return render(request, 'partial_feed_comments.html',
{'feed': feed})
else:
feed_id = request.GET.get('feed')
feed = Feed.objects.get(pk=feed_id)
return render(request, 'partial_feed_comments.html',
{'feed': feed})
@login_required
@ajax_required
def update(request):
first_feed = request.GET.get('first_feed')
last_feed = request.GET.get('last_feed')
feed_source = request.GET.get('feed_source')
feeds = Feed.get_feeds().filter(id__range=(last_feed, first_feed))
if feed_source != 'all':
feeds = feeds.filter(user__id=feed_source)
dump = {}
for feed in feeds:
dump[feed.pk] = {'likes': feed.likes, 'comments': feed.comments}
data = json.dumps(dump)
return HttpResponse(data, content_type='application/json')
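# The JSON payload produced by `update` looks roughly like this (illustrative):
#   {"42": {"likes": 3, "comments": 1}, "43": {"likes": 0, "comments": 5}}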
@login_required
@ajax_required
def track_comments(request):
feed_id = request.GET.get('feed')
feed = Feed.objects.get(pk=feed_id)
return render(request, 'partial_feed_comments.html', {'feed': feed})
@login_required
@ajax_required
def remove(request):
try:
feed_id = request.POST.get('feed')
feed = Feed.objects.get(pk=feed_id)
if feed.user == request.user:
likes = feed.get_likes()
parent = feed.parent
for like in likes:
like.delete()
feed.delete()
if parent:
parent.calculate_comments()
return HttpResponse()
else:
return HttpResponseForbidden()
except Exception:
return HttpResponseBadRequest()
| [
"[email protected]"
] | |
592b026eea60637891126979c84878aa55bba9f2 | b76e39e535499704368eddc26237dc0016ef7d06 | /CASTERSIMULATION/Instrumentplc/latestfiles/calallABPdrives_V3.py | 6729dbb4d7d6816a1535e1bf0a9b89eb163319bc | [] | no_license | BUBAIMITRA2018/castersimulation | 0532e53df7d346c2824e577cc91cd0ac2ce4694c | eca5fddff5c0f33f785168f6b1e9f572c1622be0 | refs/heads/master | 2022-12-10T02:45:04.207196 | 2020-09-09T05:35:54 | 2020-09-09T05:35:54 | 260,110,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | from logger import *
from fn_ABP_drive_V3 import *
import logging
logger = logging.getLogger("main.log")
class Cal_ABBDrives:
def __init__(self,df,com):
self.df = df
self.com = com
self.listofdrives = []
self.listofdrivearea = []
self.devicelistperarea = [[]]
self.setup()
def setup(self):
try:
self.listofdrivearea = list(set(self.df['Sub-Area']))
n= 0
self.listofdrives.clear()
while n< len(self.df.index):
self.df.iloc[n, 0] = Fn_ABP_Drive(self.com, self.df, n)
self.listofdrives.append(self.df.iloc[n,0])
n = n + 1
# per area wise device list
# Declear empty list
for area in self.listofdrivearea:
list1 = []
for item in self.listofdrives:
if item.areaname == area:
list1.append(item)
self.devicelistperarea.append(list1)
keys = self.listofdrivearea
values = self.devicelistperarea[1:]
# Declear empty list
self.dictionary = dict(zip(keys, values))
print(self.dictionary)
except Exception as e :
level = logging.ERROR
messege = 'Event:' + "callalldrives" + str(e.args)
logger.log(level, messege)
log_exception(e)
def __getstate__(self):
state = self.__dict__.copy()
# Remove the unpicklable entries.
del state['mylock']
return state
def __setstate__(self, state):
# Restore instance attributes.
self.__dict__.update(state)
@property
def listofalldrives(self):
        # return the flat list of drives (the original body recursed into this
        # same property and would never terminate)
        return self.listofdrives
@property
def listofdriveareas(self):
return self.listofdrivearea
@property
def getmotordictionary(self):
return self.dictionary
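# Hypothetical usage sketch (assumes a pandas DataFrame `df` with a
# 'Sub-Area' column and a communication object `com`, as used in __init__):
#
#   drives = Cal_ABBDrives(df, com)
#   for area, devices in drives.getmotordictionary.items():
#       print(area, len(devices))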
| [
"[email protected]"
] | |
309d07035024a6f4c4a5c2c4c3f509c78a162f46 | 0fc78025778bc2905be091e7840c0364d987133e | /weather_spider/__init__.py | 4f6477075fee5c631cadede144c0956910e5a917 | [] | no_license | jeffreyzzh/myspider | 5e5acc52906ffb944c1c112be0ee9ab15ebe2868 | d72befdd45edd059dfae4077cfde5969928323a8 | refs/heads/master | 2021-01-13T07:22:34.998234 | 2017-04-27T14:13:21 | 2017-04-27T14:13:21 | 71,528,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | # -*- coding: utf-8 -*-
# 2017/4/26 0026
| [
"[email protected]"
] | |
7e0d728ad0e25fb39f5845003077a6be5587cfaa | 5483eb59f8e0c950b1f2a337f0805da4081b06c1 | /polls_project/polls/views.py | 19a8bf634c1b40aefa084da76380528957031b54 | [] | no_license | aust10/django_fun | 666957414d7f4f069f2770c55db5cb5c46815817 | 93f7f9dde6b1b046582d5dd0685d274a7ac30bca | refs/heads/master | 2021-09-27T05:19:55.651347 | 2020-07-22T18:12:36 | 2020-07-22T18:12:36 | 250,668,790 | 0 | 0 | null | 2021-09-22T18:55:42 | 2020-03-27T23:07:26 | Python | UTF-8 | Python | false | false | 1,957 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Question, Choice
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
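# For reference, these views are typically wired up in the app's urls.py
# roughly like this (illustrative sketch, assuming app_name = 'polls'):
#
#   path('', views.IndexView.as_view(), name='index'),
#   path('<int:pk>/', views.DetailView.as_view(), name='detail'),
#   path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
#   path('<int:pk>/vote/', views.vote, name='vote'),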
# def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {
# 'latest_question_list': latest_question_list
# }
# return render(request, 'polls/index.html', context)
# def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# # model = Question
# # template_name = 'polls/results.html'
# return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {'question': question, 'error_message': "You didn't select a choice."})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,))) | [
"[email protected]"
] | |
edddb4dd1cd91eae4a9a569c9a35726d8a593b9c | 47502e9033b20ad03d5861e3940c7ec6c95ffc86 | /treemodeladmin/templatetags/treemodeladmin_tags.py | e952d2bc6dd6c12030bd1f6b39138ac371a29ba5 | [
"CC0-1.0"
] | permissive | DalavanCloud/wagtail-treemodeladmin | 30000aeb14815c1b733267e7a436cae06399f4ad | e2c49ceea5f378ff639f4bf467de189fe86fd3a0 | refs/heads/master | 2020-04-25T06:42:54.939620 | 2018-11-05T18:17:58 | 2018-11-05T18:17:58 | 172,590,191 | 1 | 0 | null | 2019-02-25T21:45:49 | 2019-02-25T21:45:49 | null | UTF-8 | Python | false | false | 1,198 | py | from django.template import Library
from wagtail.contrib.modeladmin.templatetags.modeladmin_tags import (
result_list,
result_row_display,
)
register = Library()
@register.inclusion_tag("treemodeladmin/includes/tree_result_list.html",
takes_context=True)
def tree_result_list(context):
""" Displays the headers and data list together with a link to children """
context = result_list(context)
return context
@register.inclusion_tag(
"treemodeladmin/includes/tree_result_row.html", takes_context=True)
def tree_result_row_display(context, index):
context = result_row_display(context, index)
obj = context['object_list'][index]
view = context['view']
child_url_helper = view.child_url_helper
if view.has_child_admin:
context.update({
'children': view.get_children(obj),
'child_index_url': child_url_helper.get_index_url_with_parent(
view.child_model_admin.parent_field, obj.pk
),
'child_create_url': child_url_helper.get_create_url_with_parent(
view.child_model_admin.parent_field, obj.pk
),
})
return context
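# Illustrative template usage (assumed, following the modeladmin tag
# conventions; these tags render the tree-aware index listing):
#
#   {% load treemodeladmin_tags %}
#   {% tree_result_list %}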
| [
"[email protected]"
] | |
4b59ec8381847efc181138f5b97710431cf9436a | a1e4b21a5fe07c9d9332e10dbcce25a6f5ed92de | /test.py | 7ff6e92a921f2739360b039c3b751c85604e572c | [] | no_license | uditsharma29/AttributePredictionsVRAI | defaab8e7b3230f2345aa335817a96508338348c | fbd16971a9fa2d5bf14a993d570d08a33a1d898a | refs/heads/master | 2023-01-14T12:05:19.681051 | 2020-11-20T01:53:15 | 2020-11-20T01:53:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,276 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 17:29:46 2020
@author: udits
"""
import argparse
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.transforms as transforms
from dataset import FashionDataset, AttributesDataset, mean, std
from model import AttributePrediction
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, balanced_accuracy_score
from torch.utils.data import DataLoader
def checkpoint_load(model, name):
print('Restoring checkpoint: {}'.format(name))
model.load_state_dict(torch.load(name, map_location='cpu'))
epoch = int(os.path.splitext(os.path.basename(name))[0].split('-')[1])
return epoch
def validate(model, dataloader, iteration, device, predictions, ground_truths, checkpoint=None):
if checkpoint is not None:
checkpoint_load(model, checkpoint)
model.eval()
with torch.no_grad():
avg_loss = 0
accuracy_color = 0
accuracy_type = 0
accuracy_wheel = 0
accuracy_luggage = 0
accuracy_sky = 0
accuracy_bumper = 0
epochs = []
loss = []
acc_col = []
acc_type = []
acc_wheel = []
acc_luggage = []
acc_sky = []
acc_bumper = []
for batch in dataloader:
img = batch['image']
target_labels = batch['labels']
target_labels = {t: target_labels[t].to(device) for t in target_labels}
output = model(img.to(device))
val_train, val_train_losses = model.get_loss(output, target_labels)
avg_loss += val_train.item()
accuracies, predictions, ground_truths = calculate_metrics(output, target_labels, predictions, ground_truths)
accuracy_color += accuracies['color']
accuracy_type += accuracies['type']
accuracy_wheel += accuracies['wheel']
accuracy_luggage += accuracies['luggage']
accuracy_sky += accuracies['sky']
accuracy_bumper += accuracies['bumper']
n_samples = len(dataloader)
avg_loss /= n_samples
accuracy_color /= n_samples
accuracy_type /= n_samples
accuracy_wheel /= n_samples
accuracy_luggage /= n_samples
accuracy_sky /= n_samples
accuracy_bumper /= n_samples
print('-' * 72)
print("Validation loss: {:.4f}, color: {:.4f}, type: {:.4f}, wheel: {:.4f}, luggage: {:.4f}, sky: {:.4f}, bumper: {:.4f}\n".format(
avg_loss, accuracy_color, accuracy_type, accuracy_wheel, accuracy_luggage, accuracy_sky, accuracy_bumper))
epochs.append(iteration)
acc_col.append(accuracy_color)
acc_type.append(accuracy_type)
acc_wheel.append(accuracy_wheel)
acc_luggage.append(accuracy_luggage)
acc_sky.append(accuracy_sky)
acc_bumper.append(accuracy_bumper)
loss.append(avg_loss)
metrics = np.array((epochs, acc_col,acc_type, acc_wheel, acc_luggage, acc_sky, acc_bumper, loss))
#logger.add_scalar('val_loss', avg_loss, iteration)
#logger.add_scalar('val_accuracy_color', accuracy_color, iteration)
#logger.add_scalar('val_accuracy_type', accuracy_type, iteration)
model.train()
return metrics, predictions, ground_truths
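# Hypothetical call sketch (assumes a DataLoader `val_loader`; the dicts
# accumulate per-attribute predictions and ground truths across batches):
#
#   attrs = ('color', 'type', 'wheel', 'luggage', 'sky', 'bumper')
#   preds = {k: [] for k in attrs}
#   gts = {k: [] for k in attrs}
#   metrics, preds, gts = validate(model, val_loader, iteration=0, device=device,
#                                  predictions=preds, ground_truths=gts)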
def visualize_grid(model, dataloader, attributes, device, show_cn_matrices=True, show_images=True, checkpoint=None,
show_gt=False):
if checkpoint is not None:
checkpoint_load(model, checkpoint)
model.eval()
imgs = []
labels = []
gt_labels = []
gt_color_all = []
gt_type_all = []
predicted_color_all = []
predicted_type_all = []
accuracy_color = 0
accuracy_type = 0
with torch.no_grad():
        # local accumulators so that calculate_metrics (which now takes and
        # returns the prediction dicts) can be reused here
        predictions = {k: [] for k in ('color', 'type', 'wheel', 'luggage', 'sky', 'bumper')}
        ground_truths = {k: [] for k in ('color', 'type', 'wheel', 'luggage', 'sky', 'bumper')}
        for batch in dataloader:
            img = batch['image']
            gt_colors = batch['labels']['color_labels']
            gt_types = batch['labels']['type_labels']
            output = model(img.to(device))
            accuracies, predictions, ground_truths = \
                calculate_metrics(output, batch['labels'], predictions, ground_truths)
            accuracy_color += accuracies['color']
            accuracy_type += accuracies['type']
# get the most confident prediction for each image
_, predicted_colors = output['color'].cpu().max(1)
_, predicted_types = output['type'].cpu().max(1)
for i in range(img.shape[0]):
image = np.clip(img[i].permute(1, 2, 0).numpy() * std + mean, 0, 1)
predicted_color = attributes.color_id_to_name[predicted_colors[i].item()]
predicted_type = attributes.type_id_to_name[predicted_types[i].item()]
                gt_color = attributes.color_id_to_name[gt_colors[i].item()]
                gt_type = attributes.type_id_to_name[gt_types[i].item()]
                gt_color_all.append(gt_color)
                gt_type_all.append(gt_type)
                predicted_color_all.append(predicted_color)
                predicted_type_all.append(predicted_type)
                imgs.append(image)
                labels.append("{}\n{}".format(predicted_type, predicted_color))
                gt_labels.append("{}\n{}".format(gt_type, gt_color))
if not show_gt:
n_samples = len(dataloader)
print("\nAccuracy:\ncolor: {:.4f}, type: {:.4f}, article: {:.4f}".format(
accuracy_color / n_samples,
accuracy_type / n_samples))
# Draw confusion matrices
if show_cn_matrices:
# color
cn_matrix = confusion_matrix(
y_true=gt_color_all,
y_pred=predicted_color_all,
labels=attributes.color_labels,
normalize='true')
ConfusionMatrixDisplay(cn_matrix, attributes.color_labels).plot(
include_values=False, xticks_rotation='vertical')
plt.title("Colors")
plt.tight_layout()
plt.show()
        # type
        cn_matrix = confusion_matrix(
            y_true=gt_type_all,
            y_pred=predicted_type_all,
            labels=attributes.type_labels,
            normalize='true')
        ConfusionMatrixDisplay(cn_matrix, attributes.type_labels).plot(
            xticks_rotation='horizontal')
        plt.title("Types")
plt.tight_layout()
plt.show()
if show_images:
labels = gt_labels if show_gt else labels
title = "Ground truth labels" if show_gt else "Predicted labels"
n_cols = 5
n_rows = 3
fig, axs = plt.subplots(n_rows, n_cols, figsize=(10, 10))
axs = axs.flatten()
for img, ax, label in zip(imgs, axs, labels):
ax.set_xlabel(label, rotation=0)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.imshow(img)
plt.suptitle(title)
plt.tight_layout()
plt.show()
model.train()
def calculate_metrics(output, target, predictions, ground_truths):
_, predicted_color = output['color'].cpu().max(1)
gt_color = target['color_labels'].cpu()
_, predicted_types = output['type'].cpu().max(1)
gt_type = target['type_labels'].cpu()
_, predicted_luggage = output['luggage'].cpu().max(1)
gt_luggage = target['luggage_labels'].cpu()
_, predicted_wheel = output['wheel'].cpu().max(1)
gt_wheel = target['wheel_labels'].cpu()
_, predicted_sky = output['sky'].cpu().max(1)
gt_sky = target['sky_labels'].cpu()
_, predicted_bumper = output['bumper'].cpu().max(1)
gt_bumper = target['bumper_labels'].cpu()
#print(gt_color)
predictions['color'].append(predicted_color.numpy())
ground_truths['color'].append(gt_color.numpy())
predictions['type'].append(predicted_types.numpy())
ground_truths['type'].append(gt_type.numpy())
predictions['wheel'].append(predicted_wheel.numpy())
ground_truths['wheel'].append(gt_wheel.numpy())
predictions['luggage'].append(predicted_luggage.numpy())
ground_truths['luggage'].append(gt_luggage.numpy())
predictions['sky'].append(predicted_sky.numpy())
ground_truths['sky'].append(gt_sky.numpy())
predictions['bumper'].append(predicted_bumper.numpy())
ground_truths['bumper'].append(gt_bumper.numpy())
with warnings.catch_warnings(): # sklearn may produce a warning when processing zero row in confusion matrix
warnings.simplefilter("ignore")
accuracy_color = balanced_accuracy_score(y_true=gt_color.numpy(), y_pred=predicted_color.numpy())
accuracy_type = balanced_accuracy_score(y_true=gt_type.numpy(), y_pred=predicted_types.numpy())
accuracy_luggage = balanced_accuracy_score(y_true=gt_luggage.numpy(), y_pred=predicted_luggage.numpy())
accuracy_wheel = balanced_accuracy_score(y_true=gt_wheel.numpy(), y_pred=predicted_wheel.numpy())
accuracy_sky = balanced_accuracy_score(y_true=gt_sky.numpy(), y_pred=predicted_sky.numpy())
accuracy_bumper = balanced_accuracy_score(y_true=gt_bumper.numpy(), y_pred=predicted_bumper.numpy())
accuracies = {'color': accuracy_color, 'type': accuracy_type, 'luggage':accuracy_luggage, 'wheel': accuracy_wheel, 'sky':accuracy_sky, 'bumper': accuracy_bumper}
return accuracies, predictions, ground_truths
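# Illustrative post-processing sketch (assumes each entry in predictions[attr]
# and ground_truths[attr] is a per-batch numpy array, as appended above):
#
#   from sklearn.metrics import classification_report
#   y_pred = np.concatenate(predictions['color'])
#   y_true = np.concatenate(ground_truths['color'])
#   print(classification_report(y_true, y_pred))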
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference pipeline')
parser.add_argument('--checkpoint', type=str, required=True, help="Path to the checkpoint")
parser.add_argument('--attributes_file', type=str, default='./fashion-product-images/styles.csv',
help="Path to the file with attributes")
parser.add_argument('--device', type=str, default='cuda',
help="Device: 'cuda' or 'cpu'")
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() and args.device == 'cuda' else "cpu")
# attributes variable contains labels for the categories in the dataset and mapping between string names and IDs
attributes = AttributesDataset(args.attributes_file)
# during validation we use only tensor and normalization transforms
val_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
test_dataset = FashionDataset('./val.csv', attributes, val_transform)
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=8)
    # NOTE: the keyword arguments below must match the definition in model.py
    model = AttributePrediction(n_color_classes=attributes.num_colors, n_gender_classes=attributes.num_genders,
                                n_article_classes=attributes.num_articles).to(device)
# Visualization of the trained model
visualize_grid(model, test_dataloader, attributes, device, checkpoint=args.checkpoint)
| [
"[email protected]"
] | |
eea81f8d92a5f4b5914eb02992153a06f56c77aa | f4d362f3c3a1bd127923ed3fa30c03a1587caa38 | /pyactivemodel/base.py | be0a2dc7be4dbe0df77855ef7353b2b5e29633e2 | [
"MIT"
] | permissive | zhuyifei1999/wmcn-activemodel-py | e8be64d06bb529d12a9ae11a6509aaf4345085ee | 143d8f55fbd34fb21c401a9574530eba7f8d287b | refs/heads/master | 2021-01-02T09:42:16.068768 | 2017-08-04T02:52:16 | 2017-08-04T02:55:22 | 99,283,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # -*- coding: UTF-8 -*-
noop = lambda *args, **kwargs: None # noqa: E731
class ActiveModelBase(type):
def __new__(metacls, name, bases, namespace, **kwargs):
"""Create the new class, run classinit_includes."""
cls = super().__new__(metacls, name, bases, namespace)
for classes in reversed(cls.__mro__):
classes.__dict__.get('classinit_include', noop).__get__(None, cls)()
for classes in cls.__mro__:
classes.__dict__.get('classinit_include_post', noop).__get__(None, cls)()
return cls
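# Minimal usage sketch (an assumption for illustration: hooks are declared as
# classmethods, so the __get__(None, cls) binding above passes the class in).
if __name__ == "__main__":
    class Model(metaclass=ActiveModelBase):
        @classmethod
        def classinit_include(cls):
            # runs once at class-creation time, base classes first
            cls.registry = []
    assert Model.registry == []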
| [
"[email protected]"
] | |
108209cb2a4a35deba0f4aa4d5f213e33ac9aea3 | c8d8e570a1936952f170e4863a1fde871756160f | /core/management/commands/index_titles.py | 783e8bb74b4258ed5e253d7d0ea3b40e88d6b5e5 | [] | no_license | CDRH/nebnews | 90db170cf4091dec328898f5e12a487f2f632929 | b8f064c4dcc2561c0f130cfc0db02f114da527da | refs/heads/main | 2023-08-22T20:25:04.638718 | 2018-08-09T17:29:30 | 2018-08-09T17:29:30 | 34,015,551 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import logging
from django.core.management.base import BaseCommand
from chronam.core.management.commands import configure_logging
from chronam.core.index import index_titles
configure_logging("index_titles_logging.config", "index_titles.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, **options):
_logger.info("indexing titles")
index_titles()
_logger.info("finished indexing titles")
| [
"[email protected]"
] | |
2c9f982ff712ff03b6c2fb9764ef24fc322a0f82 | 650b516b1214c4d44fd6f04941e87e28e9049cde | /addons/script.mtvguide/streaming.py | 8f841b20c43e14c1555bef8e094786504de6c532 | [] | no_license | MultiWu/build | b85cc45a33b871f4ade58de8457fcd094761f385 | f50a64f674b6499668e0a5758fe0879b016f5c38 | refs/heads/master | 2022-10-31T20:35:53.382826 | 2019-12-20T22:50:16 | 2019-12-20T22:50:16 | 228,462,984 | 0 | 3 | null | 2022-10-07T08:47:18 | 2019-12-16T19:46:39 | Python | UTF-8 | Python | false | false | 11,622 | py | # Copyright (C) 2012 Tommy Winther
# http://tommy.winther.nu
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
import os, ConfigParser
try:
    from thread import get_ident as _get_ident  # used by OrderedDict.__repr__
except ImportError:
    from dummy_thread import get_ident as _get_ident
import xbmc, xbmcaddon
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from strings import *
class StreamsService(object):
def __init__(self):
path = os.path.join(ADDON_PATH, 'resources', 'addons.ini')
self.addonsParser = ConfigParser.ConfigParser(dict_type=OrderedDict)
self.addonsParser.optionxform = lambda option: option
try:
self.addonsParser.read(path)
except:
print 'unable to parse addons.ini'
def loadFavourites(self):
entries = list()
path = xbmc.translatePath('special://userdata/favourites.xml')
if os.path.exists(path):
f = open(path)
xml = f.read()
f.close()
try:
doc = ElementTree.fromstring(xml)
for node in doc.findall('favourite'):
value = node.text
if value[0:11] == 'PlayMedia("':
value = value[11:-2]
elif value[0:10] == 'PlayMedia(':
value = value[10:-1]
elif value[0:22] == 'ActivateWindow(10025,"':
value = value[22:-9]
elif value[0:22] == 'ActivateWindow(10025,':
value = value[22:-9]
else:
continue
entries.append((node.get('name'), value))
except ExpatError:
pass
return entries
def getAddons(self):
return self.addonsParser.sections()
def getAddonStreams(self, id):
return self.addonsParser.items(id)
def detectStream(self, channel):
try:
"""
@param channel:
@type channel: source.Channel
"""
favourites = self.loadFavourites()
# First check favourites, if we get exact match we use it
for label, stream in favourites:
if label == channel.title:
return stream
# Second check all addons and return all matches
matches = list()
for id in self.getAddons():
try:
xbmcaddon.Addon(id)
except Exception:
continue # ignore addons that are not installed
for (label, stream) in self.getAddonStreams(id):
try:
if label == channel.title or label.startswith(channel.title+' @'):
matches.append((id, label, stream))
except:
continue
if len(matches) == 1:
return matches[0][2]
else:
return matches
except:
return None
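# Illustrative usage (assumes a channel object with a .title attribute):
#
#   service = StreamsService()
#   result = service.detectStream(channel)
#   # result is a single stream string on an exact/unique match,
#   # otherwise a list of (addon_id, label, stream) candidates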
class OrderedDict(dict):
# From: http://code.activestate.com/recipes/576693/
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
| [
"[email protected]"
] | |
65a327b0db2fe61c17d851aef22248b05b86c466 | 9ff0e35334f06fa98d46b844d6bcd27e5c191266 | /pageAction/order_action.py | 01d14f70a0ab5faf524a5d68f80fbeeb64cda2ee | [] | no_license | zhangyuyu0222/mtx0117 | ec6bfbd068b3422ac4d96d49a1756bbbac0be4a8 | 8a93c8bbab3dfcf11d59b9061380c74a1a3506ea | refs/heads/master | 2023-02-16T14:24:44.150213 | 2021-01-16T03:39:43 | 2021-01-16T03:39:43 | 330,388,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | # 导包
import time
from lesson13.lesson13_1买裙子调用类.pageAction.action_manager import ActionManage
'''
Composite business flow
brings in the page objects
'''
class Buy_pinkskirt(ActionManage):
def buy_pinkskirt_business(self):
self.pageindex.click_pinkskirt()
time.sleep(1)
self.pageindex.change_window()
time.sleep(1)
self.pagegoogsdetail.click_pink()
time.sleep(1)
self.pagegoogsdetail.click_M()
time.sleep(1)
self.pagegoogsdetail.click_buy_now()
time.sleep(1)
self.pagebuy.click_givemoney()
time.sleep(1)
self.pagebuy.submit()
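# Hypothetical usage sketch (assumes ActionManage sets up the page objects
# used above):
#
#   flow = Buy_pinkskirt()
#   flow.buy_pinkskirt_business()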
| [
"[email protected]"
] | |
6267eb1c55485c3686510613a53527be2a437057 | 6e1fae6052a9fca260585439311128f06d449a34 | /Python/createTablesSQLite3.py | b737711f4b3c0bf8fe8ca8e31beebc44087827ac | [] | no_license | thecodearrow/Programming-Tasks | 797d687f405e021ae37b07ae6c15175ad693154c | 8779dfb14a79b4451f8feafde9b1daf30eea9e4b | refs/heads/master | 2020-05-22T02:33:56.289954 | 2018-07-28T09:18:51 | 2018-07-28T09:18:51 | 84,663,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | """
Table creation -- Books, Titles, Publishers, Zip Codes, Author Titles, Authors
"""
import sqlite3
def create_table():
#Exception handling for the sqlite connection
try:
conn=sqlite3.connect('library.db')
c=conn.cursor()
#CREATING TABLES
c.execute('CREATE TABLE ZIPCODES(ZIP_CODE_ID TEXT PRIMARY KEY NOT NULL,CITY TEXT,STATE TEXT,ZIP_CODE INT)')
c.execute('CREATE TABLE PUBLISHERS(PUBLISHER_ID TEXT PRIMARY KEY NOT NULL,NAME TEXT,STREET_ADDRESS CHAR(50),SUITE_NUMBER INT,ZIP_CODE_ID INT NOT NULL,FOREIGN KEY(ZIP_CODE_ID) REFERENCES ZIPCODES(ZIP_CODE_ID));')
c.execute('CREATE TABLE TITLES(TITLE_ID TEXT PRIMARY KEY NOT NULL,TITLE TEXT,ISBN INT,PUBLISHER_ID TEXT NOT NULL,PUBLICATION_YEAR INT,FOREIGN KEY(PUBLISHER_ID) REFERENCES PUBLISHERS(PUBLISHER_ID));')
c.execute('CREATE TABLE BOOKS(BOOK_ID TEXT PRIMARY KEY NOT NULL, TITLE_ID TEXT NOT NULL,LOCATION TEXT,GENRE TEXT,FOREIGN KEY(TITLE_ID) REFERENCES TITLES(TITLE_ID));')
        c.execute('CREATE TABLE AUTHORS(AUTHOR_ID TEXT PRIMARY KEY NOT NULL,FIRST_NAME TEXT,MIDDLE_NAME TEXT,LAST_NAME TEXT)')
        c.execute('CREATE TABLE AUTHOR_TITLES(AUTHOR_TITLE_ID TEXT PRIMARY KEY NOT NULL,AUTHOR_ID TEXT,TITLE_ID TEXT,FOREIGN KEY(TITLE_ID) REFERENCES TITLES(TITLE_ID),FOREIGN KEY(AUTHOR_ID) REFERENCES AUTHORS(AUTHOR_ID))')
conn.commit() #committing changes
except sqlite3.Error as e:
print("There was a problem! ", e)
finally:
if conn:
conn.close() #if conn was established, finally close it
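# assumed entry point for running this module directly (illustrative)
if __name__ == '__main__':
    create_table()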
| [
"[email protected]"
] | |
0a89bf2cf3823d660a7228315a6db6e090c2d5b3 | 08d6dba0d2fbde98f572f2d1423a95d4f0addfb5 | /modulo-1/desafio007.py | b9f94863e19e9b67dcbb436cfbee22eb313f71e6 | [
"MIT"
] | permissive | Maximos007/Exercicios-Python-Modulo1 | b859cf695d442a7abfb1f441d38160fb7cb6be19 | fc269296e4fc2f4b08d949040c5e7fff51495f2c | refs/heads/main | 2023-03-02T03:27:06.463453 | 2021-02-08T22:39:22 | 2021-02-08T22:39:22 | 336,445,295 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | nota1 = float(input('Primeira nota : '))
nota2 = float(input('Segunda nota : '))
soma = nota1 + nota2
media = soma / 2
print('Soma {} média {}'.format(soma, media)) | [
"[email protected]"
] | |
b269e0594b7094cc006a3156adb471214675a64e | 457c3e20036060adfb6a019fc1917546b8585cc9 | /dark.py | 608d6af4e80b6867eb0e1e911c984a45c0e350be | [] | no_license | D1ARK-VA4U3/Birthday.wish | 6973acdf5df4ca55c87b114e6055f333e479c6e6 | 041b2bc3091144d5ac5172f53119e7b8d0e16076 | refs/heads/main | 2023-06-21T19:30:22.693097 | 2021-07-17T15:58:46 | 2021-07-17T15:58:46 | 386,980,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | import itertools
ban = '''
'''
print('\n------------------\n\n D1ARK-VA4U3 \033[32m2.0\033[m | Its Me Dark King\n\n~ Tools Make by: D1ARK-VA4U3 FROM BCA TEAM\n\n------------------\n')
scale = input('\033[36m[!] provide a size scale [eg: "1 to 8" = 1:8] : ')
start = int(scale.split(':')[0])
final = int(scale.split(':')[1])
use_nouse = str(input("\n\033[36m[?] Do you want to enter personal data ? [y/N]: "))
if use_nouse == 'y':
first_name = str(input("\n\033[36m[*] Fist Name: "))
last_name = str(input("\n\033[36m[*] Last Name: "))
birthday = str(input("\n\033[36m[*] Birthday: "))
month = str(input("\n\033[36m[*] Month: "))
year = str(input("\n\033[36m[*] Year: "))
chrs = first_name + last_name + birthday + month + year
else:
chrs = 'abcdefghijklmnopqrstuvwxyz'
pass
chrs_up = chrs.upper()
chrs_specials = '!\][/?.,~-=";:><@#$%&*()_+\' '
chrs_numerics = '1234567890'
file_name = input('\n\033[36m[!] Insert a name for your wordlist file: ')
arq = open(file_name, 'w')
if input('\n\033[36m[?] Do you want to use uppercase characters? (y/n): ') == 'y':
chrs = ''.join([chrs, chrs_up])
if input('\n\033[36m[?] Do you want to use special characters? (y/n): ') == 'y':
chrs = ''.join([chrs, chrs_specials])
if input('\n\033[36m[?] Do you want to use numeric characters? (y/n): ') == 'y':
chrs = ''.join([chrs, chrs_numerics])
for i in range(start, final+1):
for j in itertools.product(chrs, repeat=i):
temp = ''.join(j)
print(temp)
arq.write(temp + '\n')
arq.close() | [
"[email protected]"
] | |
b0e2416d470cc5f24e7731d0491051295cc4f4b0 | 197420c1f28ccb98059888dff214c9fd7226e743 | /happy_pythoning_cource/Part_4/4.1.1.Password_comparing/4.1.1.password_comparing.py | c522343418d97a0fd058b829a9b6f0fe5eff3384 | [] | no_license | Vovanuch/python-basics-1 | fc10b6f745defff31364b66c65a704a9cf05d076 | a29affec12e8b80a1d3beda3a50cde4867b1dee2 | refs/heads/master | 2023-07-06T17:10:46.341121 | 2021-08-06T05:38:19 | 2021-08-06T05:38:19 | 267,504,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | '''
Пароль
При регистрации на сайтах требуется вводить пароль дважды. Это сделано для безопасности, поскольку такой подход уменьшает возможность неверного ввода пароля.
Напишите программу, которая сравнивает пароль и его подтверждение. Если они совпадают, то программа выводит: «Пароль принят», иначе: «Пароль не принят».
Формат входных данных
На вход программе подаются две строки.
Формат выходных данных
Программа должна вывести одну строку в соответствии с условием задачи.
Sample Input 1:
qwerty
qwerty
Sample Output 1:
Пароль принят
Sample Input 2:
qwerty
Qwerty
Sample Output 2:
Пароль не принят
Sample Input 3:
PythonROCKS
PythonROCKS
Sample Output 3:
Пароль принят
'''
a = input().strip()
b = input().strip()
if (a == b) and (a != ""):
print('Пароль принят')
else:
print('Пароль не принят') | [
"[email protected]"
] | |
f1b1f2e7cdbd3782caf07c6d8fac5894512e2308 | 27044bb88c709e7ffa5278afc7c81f37e0b6e9e4 | /venv/lib/python3.10/site-packages/rich/errors.py | ab1fb97994c508091d92fe3fc2098585aa21c320 | [] | no_license | mesaye85/organika_Inventory_project-with-React- | 48c93efb6aba64d5e9310c57e4c5a06d3f2cc502 | 6aa3d29e8be3e22b8dc9163d558cdcc8c9122fd1 | refs/heads/main | 2023-02-19T10:11:47.880754 | 2023-02-14T01:55:43 | 2023-02-14T01:55:43 | 298,101,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/e6/93/f7/29ce5de1027f734285b31adfca18e23d57bb275ccea9215b140cdc57e6 | [
"[email protected]"
] | |
214bbafb9ca631e71c4cff2fd4d55f1af76b2482 | 6c85401ae391b8c63808789ac4eaf583085b761e | /data_sync/sync_main/entrance.py | 08ab1afef4643161607fd038d3587eb464199fe3 | [] | no_license | rkzhang/data_sync | 60691d8f1c3027b76722d5d41f3afcdda26c127b | cd98a4dc8e7ff9fe7b36df08d8c21eba2cee1f6d | refs/heads/master | 2016-09-02T04:55:20.486096 | 2015-01-06T02:34:21 | 2015-01-06T02:34:21 | 28,586,594 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 362 | py | #-*-coding:UTF-8-*-
'''
Created on 2014-12-24
@author: zhangr01
'''
from sync_main.watcher import WatcherThread
from sync_main.data_consumer import ConsumerThread
import time
t_watcher = WatcherThread();
t_watcher.start()
for i in range(5) :
t_consumer = ConsumerThread();
t_consumer.start()
while True :
time.sleep(10) | [
"[email protected]"
] | |
c5c85e191d1f5478a197ed951010c41e372ca36b | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/smart_document_recognizer_kv_result.py | 3808e6c70a5cbc3fc914f863fa9c73ed150831e2 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,572 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SmartDocumentRecognizerKvResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'kv_block_count': 'int',
'kv_block_list': 'list[SmartDocumentRecognizerKVBlock]'
}
attribute_map = {
'kv_block_count': 'kv_block_count',
'kv_block_list': 'kv_block_list'
}
def __init__(self, kv_block_count=None, kv_block_list=None):
"""SmartDocumentRecognizerKvResult
The model defined in huaweicloud sdk
        :param kv_block_count: Number of key-value pairs recognized by the model.
:type kv_block_count: int
        :param kv_block_list: List of key-value pair recognition results.
:type kv_block_list: list[:class:`huaweicloudsdkocr.v1.SmartDocumentRecognizerKVBlock`]
"""
self._kv_block_count = None
self._kv_block_list = None
self.discriminator = None
if kv_block_count is not None:
self.kv_block_count = kv_block_count
if kv_block_list is not None:
self.kv_block_list = kv_block_list
@property
def kv_block_count(self):
"""Gets the kv_block_count of this SmartDocumentRecognizerKvResult.
        Number of key-value pairs recognized by the model.
:return: The kv_block_count of this SmartDocumentRecognizerKvResult.
:rtype: int
"""
return self._kv_block_count
@kv_block_count.setter
def kv_block_count(self, kv_block_count):
"""Sets the kv_block_count of this SmartDocumentRecognizerKvResult.
        Number of key-value pairs recognized by the model.
:param kv_block_count: The kv_block_count of this SmartDocumentRecognizerKvResult.
:type kv_block_count: int
"""
self._kv_block_count = kv_block_count
@property
def kv_block_list(self):
"""Gets the kv_block_list of this SmartDocumentRecognizerKvResult.
        List of key-value pair recognition results.
:return: The kv_block_list of this SmartDocumentRecognizerKvResult.
:rtype: list[:class:`huaweicloudsdkocr.v1.SmartDocumentRecognizerKVBlock`]
"""
return self._kv_block_list
@kv_block_list.setter
def kv_block_list(self, kv_block_list):
"""Sets the kv_block_list of this SmartDocumentRecognizerKvResult.
        List of key-value pair recognition results.
:param kv_block_list: The kv_block_list of this SmartDocumentRecognizerKvResult.
:type kv_block_list: list[:class:`huaweicloudsdkocr.v1.SmartDocumentRecognizerKVBlock`]
"""
self._kv_block_list = kv_block_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SmartDocumentRecognizerKvResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
f721f383bba5a9421e5b1e98752bd34c7f0aa7fb | 4ca59fa23ed7d26a5d1d29ad0b182d87df6885ad | /venv/Menu/Burgers.py | 189d5fa858e62c6ce6bf5883603e20f5401fee79 | [] | no_license | RAVISH-KUMAR-PANDEY/POS | 2be8311fdf5ced6098b49dbdbf5a94720065f8d1 | 0a2fd3fd40223b4d329166ea07bd6ca9cff0453d | refs/heads/master | 2020-03-21T23:19:00.950938 | 2018-06-29T17:54:08 | 2018-06-29T17:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | from Menu import lsBrg
class Brg:
def __init__(self,name,price):
self.name=name
self.price=price
@classmethod
def setterBrg(cls):
objbrg=[]
for i, j in zip(lsBrg.keys(), lsBrg.values()):
objbrg.append(Brg(i,j))
return objbrg
@classmethod
def ShowBrg(cls,ls):
count=1
for i in ls:
print(count,". Name = ",i.name ," price = ",i.price)
count=count+1
#l1=Brg.setterBrg()
#Brg.ShowBrg(l1) | [
"[email protected]"
] | |
b50ccde95c834b12913d5b58f2734a388a0872a7 | bc464d2f33ee41e4b2595b01dd3f29e5347edaaf | /PDF/01development/read_PDFtk_dumpdata.py | b917a6c1eff059193dbec535670b2dc954b7d0e4 | [] | no_license | peterhsprenger/VuRProjects | b6256260ea36193bbb631398fbbae3572e1a822d | b082b1592b2bc1094a53086a1698a630f23e5e39 | refs/heads/master | 2021-01-10T05:31:52.989976 | 2015-11-05T01:58:04 | 2015-11-05T01:58:04 | 44,212,869 | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 1,938 | py | # -*- coding: iso-8859-1 -*-
import os
#import subprocess
import re
#import string
dump_data = []
dump_data_path = "F:\UTBeScholars\LIEFERUNG\Inhaltsverzeichnisse\TMP"
bookmarks = {}
# collect all files in the directory for further processing; only the file names without path are stored
for dd in os.listdir(dump_data_path):
if os.path.isfile(os.path.join(dump_data_path, dd)):
#pdf_files_path.append(os.path.join(dump_data_path, dd))
dump_data.append(dd)
print 'Dump Data', dump_data
print 'Length Dump Data', len(dump_data)
print 'Bookmarks Dictionary', bookmarks
# first, for every available PDF, read the Table of Contents bookmarks from the dump files produced by pdftk
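# the regex below expects pdftk dump_data output of this shape (illustrative):
#
#   BookmarkTitle: Table of Contents
#   BookmarkLevel: 1
#   BookmarkPageNumber: 5
#   BookmarkBegin
#   BookmarkTitle: Body
#   BookmarkLevel: 1
#   BookmarkPageNumber: 9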
for dd in dump_data:
    o_dd = open(os.path.join(dump_data_path, dd))
    r_dd = o_dd.read()
    o_dd.close()
    f_dd = re.findall('BookmarkTitle: Table of Contents\sBookmarkLevel: [0-9]\sBookmarkPageNumber: ([0-9]+)\sBookmarkBegin\sBookmarkTitle: Body\sBookmarkLevel: [0-9]\sBookmarkPageNumber: ([0-9]+)', r_dd)
    if not f_dd:
        # skip dump files without a matching Table of Contents bookmark, so pp is never undefined
        continue
    # before merging, the page number of the last page of a table of contents
    # has to be decreased by 1 (it ends on the page BEFORE the next bookmark!)
    for x, y in f_dd:
        pp = int(x), int(y) - 1
    # the ISBN is extracted from the file name for the later dictionary of ISBN/page-number pairs
isbn = re.findall('(\d\d\d\d\d\d\d\d\d\d\d\d\d).*', dd)
print "DD", dd
print "FDD", f_dd
print "PP", pp
print "ISBN", isbn
bookmarks_tmp = []
bookmarks_tmp.append(isbn)
print "Bookmarks_TMP", bookmarks_tmp
print "Seitenzahl", f_dd
for isbn in bookmarks_tmp[0]:
#print "Bookmark", isbn
if isbn not in bookmarks:
bookmarks[isbn] = pp
    print 'Bookmarks Dictionary - new:', bookmarks
| [
"[email protected]"
] | |
0a874592b2b9ca19355cf79fad234aa814e89655 | 26b87e8933578c041232e60bc8f6f033c8bbdfb1 | /main_app/migrations/0012_auto_20190825_1438.py | a573815651584c1a7765acaa948d4aaa78574bf8 | [] | no_license | mchlsdrv/lucidpy_site | bb37bf84736eccf493a11d9e6eff915f6a561cdd | c04dfa534d2e192f0943541461821657f8c4c094 | refs/heads/master | 2023-09-02T18:43:42.196169 | 2021-11-01T09:49:07 | 2021-11-01T09:49:07 | 217,487,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Generated by Django 2.1.5 on 2019-08-25 11:38
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0011_auto_20190825_1429'),
]
operations = [
migrations.AlterField(
model_name='post',
name='date',
field=models.DateTimeField(default=datetime.datetime(2019, 8, 25, 14, 38, 16, 956766), verbose_name='publishing date'),
),
]
| [
"[email protected]"
] | |
f3006e05e8c84437c800d6eddecf68c544327558 | 53cc7d0c2cbeb3c2d6e26739e2b64c36e7580869 | /networks/db1/3-3/submit.py | ee603cce95375c6067163d7adf8117440b15066a | [] | no_license | jkitchin/amp-tutorial | 951720dc1071f62ca220b0aa7ec841e227086162 | ea89852cbfea602db061dde48837604565b26f40 | refs/heads/master | 2020-12-25T03:21:25.386984 | 2017-03-16T16:53:33 | 2017-03-16T16:53:33 | 50,206,355 | 1 | 0 | null | 2016-01-22T20:41:32 | 2016-01-22T20:41:32 | null | UTF-8 | Python | false | false | 456 | py | #!/usr/bin/env python
from amp import Amp
from amp.descriptor import *
from amp.regression import *
calc = Amp(label="./",
descriptor=Behler(cutoff=6.0),
regression=NeuralNetwork(hiddenlayers=(2, 3)))
calc.train("../train.db", # The training data
cores=1,
global_search=None, # not found the simulated annealing feature useful
extend_variables=False) # feature does not work properly and will crash
| [
"[email protected]"
] | |
465fd2c3192b8abcca71593d9b3ee7bc1848867e | b4ccb7e67d066649e5ece45d556e76ba45778683 | /students/forms.py | 89bf1de2af8b1dea340ec6bcb3daaafec34b3bfa | [] | no_license | elbination/Django-ELearning | 51ca6e5feb411bf279729410abcc2e2e54241804 | 07d57a415885af37fac1e818ed172937df8798dd | refs/heads/main | 2023-07-19T22:38:03.816242 | 2021-08-24T02:53:35 | 2021-08-24T02:53:35 | 392,744,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django import forms
from courses.models import Course
class CourseEnrollForm(forms.Form):
# Use HiddenInput widget to not to show this field the user.
# This form is used in the CourseDetailView
course = forms.ModelChoiceField(queryset=Course.objects.all(),
widget=forms.HiddenInput)
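# Illustrative template usage (an assumed sketch; the URL name is hypothetical):
#
#   <form action="{% url 'students:student_enroll_course' %}" method="post">
#       {{ enroll_form }}{% csrf_token %}
#       <input type="submit" value="Enroll now">
#   </form>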
| [
"[email protected]"
] | |
7ada391c8583896595b322ab1117e8ecfb68820e | 9f2ade5d29f61d1426ecbbe13120b14102b274ae | /core/settings.py | 59a443f19ed3aca9f73d6d5a5a2dde1a084a588b | [] | no_license | Dieggg/DLD-Desarrollo-ET | 4c7004bd02de97ad73a9bcd74763a4eabe5a2e78 | f50f98e53ac2a5e4047c1547c2dcbc1ce908d77b | refs/heads/master | 2022-12-10T19:43:08.624276 | 2019-12-13T03:37:21 | 2019-12-13T03:37:21 | 227,510,172 | 0 | 0 | null | 2019-12-12T03:17:01 | 2019-12-12T03:15:33 | Python | UTF-8 | Python | false | false | 3,676 | py | """
Django settings for core project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e2hae7m**_v1n_x7*6!24^buw!mhiksxpy1dj-uz5ehl75-_h7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'compras',
'registro',
'crispy_forms',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'pwa',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
gettext = lambda s: s
LANGUAGES = (
('es', gettext('Español')),
)
LANGUAGE_CODE = 'es-cl'
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
#CRISPY FORMS
CRISPY_TEMPLATE_PACK="bootstrap4"
# redirect after logging in
LOGIN_REDIRECT_URL = "panel"
# redirect after logging out
LOGOUT_REDIRECT_URL = "/"
# AUTH LOGIN FB AND GOOGLE
SITE_ID = 1
PWA_SERVICE_WORKER_PATH = os.path.join(BASE_DIR,'serviceworker.js') | [
"[email protected]"
] |