blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ad1d37cb7490059bf5b6d6b9b92d5794041e6f26 | da246a1aab0465ee2cc4875263f5d4930cb614e1 | /news/serializers.py | f9c2ae1189341de5891250b979e935594d2f82c8 | [] | no_license | MUDASSARHASHMI/newsproject | 88f1d6727a655c25e6207b805931cfd3528259a1 | 0c7c388db5e7e78f8ce40bbde505ec229229cd1e | refs/heads/master | 2020-03-28T04:13:22.379849 | 2016-05-24T02:50:25 | 2016-05-24T02:50:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from rest_framework import serializers
from .models import Article
class ArticleSerializer(serializers.Serializer):
    """DRF serializer exposing Article fields for the news API.

    Declared field order is the order fields appear in the rendered output.
    """

    # Primary key is exposed, but never writable through the API.
    pk = serializers.IntegerField(read_only=True)
    # The related Feed is referenced by its primary key only.
    feed = serializers.PrimaryKeyRelatedField(read_only=True)
    title = serializers.CharField(max_length=200)
    url = serializers.URLField()
    # Reads the model's 'description_truncated' attribute/property instead
    # of a plain 'description' field.
    description = serializers.CharField(source='description_truncated')
    # NOTE(review): '%-I' (unpadded hour) is a glibc strftime extension and
    # fails on Windows — confirm deployment platform.
    publication_date = serializers.DateTimeField(format='%A %B %d, %Y %-I:%M %p')
| [
"[email protected]"
] | |
41414ca0e0785f02a351c9d7939a16dade750fa7 | f10e9729b83ed5779d6177e95168602eb3d0bacd | /society/views.py | ef91729d36462ab0d7020a07219459c0ccbf68c2 | [
"BSD-3-Clause"
] | permissive | nrjshka/doSociety | f0aee5d12875eb24ac9a567bd919fa776474a5ea | b0e61f93a5837195c14b23975820734fa9144996 | refs/heads/master | 2021-04-26T23:18:58.890549 | 2018-03-05T20:09:59 | 2018-03-05T20:09:59 | 123,970,174 | 2 | 0 | null | 2018-03-05T20:10:00 | 2018-03-05T19:46:50 | JavaScript | UTF-8 | Python | false | false | 232 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
#
class Index(TemplateView):
    """Static landing page for the society app."""

    # Render index.html for any request to this view.
    template_name = 'society/index/index.html'
| [
"[email protected]"
] | |
955e29cada5373ee3ebea1ad22cc0e8d418a9792 | 8d1192ad8c9092aafde7ef07d8fe6992faa2c811 | /6. tile-cost.py | 176718009a8012b529341fa01022500865b7d8fe | [] | no_license | nickczj/karan-project | 0e731aa8272d461d7864791db55d0c14d4280c94 | 9c6e38d460ab2677dc23851576fb841630fa3ccb | refs/heads/master | 2021-05-31T20:13:48.030896 | 2016-04-29T09:10:14 | 2016-04-29T09:10:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | '''
Created on 29/03/2016
@author: Nick
'''
"""
Calculate the total cost of tile it would take to cover a floor plan of
width and height, using a cost entered by the user.
"""
# Prompt for the currency symbol and the room/tile figures.
currency = raw_input("Enter the currency")
# Fix: the original used float(input(...)) here. This script targets
# Python 2 (it uses raw_input above), where input() eval()s whatever the
# user types — unsafe and inconsistent. raw_input + float() accepts the
# same numeric input without evaluating arbitrary expressions.
cost = float(raw_input("Enter cost per tile: "))
width = float(raw_input("Enter width of room: "))
height = float(raw_input("Enter height of room: "))


def totalCost(w, h, c):
    """Return the cost of tiling a w x h area at cost c per tile unit."""
    return w * h * c


print ("Total cost to tile is " + currency + str(totalCost(width, height, cost)))
| [
"[email protected]"
] | |
5edd2a12c852a50eee55d4df5f7ef1e7d82606c1 | 7a8a3ef62919c3e08d439bd04cc0abead2e2cded | /0x00-python-hello_world/5-print_string.py | f72dad6baa4592b768ec80559835bd0e6696d493 | [] | no_license | hfsantiago/holbertonschool-higher_level_programming | bbedeada3684cf53041f4df5c9e35281d3b93808 | 3c99686ae6fd5eede9aea23fa6a08f353f67550f | refs/heads/master | 2022-12-22T02:40:33.446624 | 2020-09-23T03:19:00 | 2020-09-23T03:19:00 | 259,438,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | #!/usr/bin/python3
# Fix: do not rebind the builtin name `str`; use a descriptive local name.
text = "Holberton School"
# Print the string three times on one line, then its first 9 characters.
print("{:s}".format(text) * 3)
print(text[:9])
| [
"[email protected]"
] | |
c2102244a867fcaeb1d908c6d7cb853f69110892 | e02dc5c3227105005c6c6e11c38e9782397004e1 | /yolov4/util/file.py | bfcf65f912038824a4ed7801debd923dc5eee2da | [
"MIT"
] | permissive | archyrei/tf-yolov4 | 0669020ff961f5bff75db660ecd841c30da06b3f | 355ac532228031d9a0928e962271244f49a898d5 | refs/heads/master | 2023-05-29T00:36:59.280312 | 2020-09-22T13:50:27 | 2020-09-22T13:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | """
MIT License
Copyright (c) 2020 Licht Takeuchi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import typing
def get_ndarray_from_fd(fd: typing.BinaryIO, dtype, count: int) -> np.ndarray:
    """Read exactly ``count`` items of ``dtype`` from an open binary file.

    :param fd: binary file object positioned at the data to read
    :param dtype: numpy dtype of the stored values
    :param count: exact number of items expected
    :raises RuntimeError: when the file holds fewer than ``count`` items
    :return: 1-D ndarray of length ``count``
    """
    values = np.fromfile(fd, dtype=dtype, count=count)
    if values.size != count:
        raise RuntimeError('Wrong Darknet weight file.')
    return values
| [
"[email protected]"
] | |
2c91585d921c59ae9ec1a32593eeec1086d145cd | fba5689ca72e3fafb5f4205b7fd94ca126ec020d | /atendimento/views.py | c462dde180e8cfcef7a8a4b62830498045d8f6eb | [
"MIT"
] | permissive | misaelbarreto/mini_curso_django | ce10b911e6d8e7414ad86f6ad0c61b43a062f1b4 | 08917621d0055289988b2aede6c11512082fc06a | refs/heads/master | 2023-04-27T14:37:17.175793 | 2019-07-16T22:10:44 | 2019-07-16T22:10:44 | 192,121,253 | 8 | 3 | MIT | 2023-04-21T20:32:57 | 2019-06-15T20:29:09 | Python | UTF-8 | Python | false | false | 2,536 | py | import datetime
from django.http import HttpResponse
from django.template import Template, Context, loader
from django.shortcuts import render
from django.views import generic
from .models import Cliente
# Create your views here.
# Exemplo de controle que retorna como resposta um texto simples.
def index(request):
    """Simplest possible view: answer every request with a plain-text body."""
    message = 'Hi Houston! It s all right!'
    return HttpResponse(message)
# Exemplo de controle que retorna como resposta um html simples.
def index2(request):
    """View that answers with a hand-built (red-styled) HTML document."""
    return HttpResponse(
        '<html><body style="color: red">Hi Houston! It s all right!</body></html>'
    )
# Exemplo de controle que retorna como resposta um html simples através de um template criado em tempo de execução.
def index3(request):
    """Render an HTML response from a template built at request time.

    Note: the date is displayed according to Django's locale settings,
    e.g. "3 de Julho de 2014 às 11:59".
    """
    page = Template('<html><body>Houston, now is {{ current_date }}.</body></html>')
    # The Context holds the set of variables the template substitutes.
    rendered = page.render(Context({'current_date': datetime.datetime.now()}))
    return HttpResponse(rendered)
# Exemplo de controle que retorna como resposta um html simples através de um template armazenado em arquivo.
def index4a(request):
    """Render a template stored on disk via the template loader."""
    # The loader only finds the template when this app is listed in
    # settings.INSTALLED_APPS.
    tpl = loader.get_template('atendimento/index4.html')
    return HttpResponse(tpl.render({'current_date': datetime.datetime.now()}, request))
# Example view that optionally receives an `idade` (age) parameter and
# responds with simple HTML rendered from a template file.
def index4b(request, idade=None):
    # The template reads `current_date` from the context, so this local
    # variable name is part of the contract: locals() below exposes every
    # local (request, idade, current_date) as the template context.
    current_date = datetime.datetime.now()
    # locals() returns a dict of all local variables currently in scope.
    # print(locals())
    return render(request, 'atendimento/index4.html', locals())
'''
- - - - - - - - - - - - - -
Controle Manual de Clientes
- - - - - - - - - - - - - -
'''
def modo_manual_cliente_list(request):
    """Function-based view: list every Cliente with the manual-mode template."""
    return render(
        request,
        'atendimento/modo_manual/cliente/list.html',
        {'clientes': Cliente.objects.all()},
    )
class ModoManualClienteListView(generic.ListView):
    """Class-based equivalent of modo_manual_cliente_list."""

    template_name = 'atendimento/modo_manual/cliente/list.html'
    # Name under which the queryset is exposed to the template.
    context_object_name = 'clientes'
def get_queryset(self):
return Cliente.objects.all() | [
"[email protected]"
] | |
8eae4f39f1cf89b3b2d94851ceba971cd0771a83 | 3aec1c00d6a71ac548dc1b779500cbbc0c07542c | /inputs_alexei_gk/upol_hermes_torino_alexei15.py | 9d14081a5be8d1c0c8f625d12adac5ae5904e85b | [] | no_license | prokudin/psu_project_2018 | 1396201c2aa6cf5604b1542106fe6654023b4c7a | 5b38b71f0a6dbce93045b0efb3c46a4051a9aa17 | refs/heads/master | 2020-03-20T02:51:23.612305 | 2018-12-17T21:44:16 | 2018-12-17T21:44:16 | 137,126,564 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | conf={}
############################################################################
# mcsamp: sampler run settings (run count, tolerance, iteration caps).
conf['nruns']=10
conf['factor']=3
conf['tol']=1e-4
conf['itmax']=int(1e7)
conf['block size']=5000
conf['kappa']=1.3
conf['nll shift']=0
############################################################################
# params
# Each parameter entry holds its starting 'value', its fit bounds
# 'min'/'max', and 'fixed': either True/False, or the NAME of another
# parameter to tie it to (e.g. widths1_dv is tied to widths1_uv).
conf['params']={}
# Parameters in gaussian approximation, parton model:
# GK approximate
conf['params']['gk']={}
conf['params']['gk']['gk0'] ={'value': -2.69819e-01,'min': -1.3,'max':0,'fixed':False}
conf['params']['gk']['Q0'] ={'value': 1.69000e+00,'min':0.8,'max':2,'fixed':True}
# TMD PDF:
conf['params']['pdf']={}
conf['params']['pdf']['widths1_uv'] ={'value': 5.41371e-01,'min': 0.1,'max':0.7,'fixed':False}
conf['params']['pdf']['widths2_uv'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
conf['params']['pdf']['widths1_dv'] ={'value': 5.41371e-01,'min': 0,'max':1,'fixed':'widths1_uv'}
conf['params']['pdf']['widths2_dv'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':'widths2_uv'}
conf['params']['pdf']['widths1_sea'] ={'value': 6.32422e-01,'min': 0.1,'max':0.9,'fixed':False}
conf['params']['pdf']['widths2_sea'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
# TMD FF: fragmentation-function widths per hadron (pi, K, h),
# split into favored/unfavored flavor combinations.
conf['params']['ffpi']={}
conf['params']['ffpi']['widths1_fav'] ={'value': 1.21644e-01,'min': 0,'max':0.5,'fixed':False}
conf['params']['ffpi']['widths2_fav'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
conf['params']['ffpi']['widths1_ufav'] ={'value': 1.40026e-01,'min': 0,'max':0.5,'fixed':False}
conf['params']['ffpi']['widths2_ufav'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
conf['params']['ffk']={}
conf['params']['ffk']['widths1_fav'] ={'value': 1.32705e-01,'min': 0,'max':0.5,'fixed':False}
conf['params']['ffk']['widths2_fav'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
conf['params']['ffk']['widths1_ufav'] ={'value': 1.90175e-01,'min': 0,'max':0.5,'fixed':False}
conf['params']['ffk']['widths2_ufav'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
conf['params']['ffh']={}
conf['params']['ffh']['widths1_fav'] ={'value': 1.32925e-01,'min': 0,'max':1,'fixed':True}
conf['params']['ffh']['widths2_fav'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
conf['params']['ffh']['widths1_ufav'] ={'value': 1.86073e-01,'min': 0,'max':1,'fixed':True}
conf['params']['ffh']['widths2_ufav'] ={'value': 0.00000e+00,'min':-1,'max':1,'fixed':True}
############################################################################
# set data sets
# SIDIS measurement tables, keyed by experiment index. The trailing
# comments document: target | hadron | observable | experiment.
conf['datasets']={}
conf['datasets']['sidis']={}
conf['datasets']['sidis']['xlsx']={}
conf['datasets']['sidis']['xlsx'][1000]='sidis/expdata/1000.xlsx' # | proton   | pi+ | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1001]='sidis/expdata/1001.xlsx' # | proton   | pi- | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1004]='sidis/expdata/1004.xlsx' # | deuteron | pi+ | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1005]='sidis/expdata/1005.xlsx' # | deuteron | pi- | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1002]='sidis/expdata/1002.xlsx' # | proton   | k+  | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1003]='sidis/expdata/1003.xlsx' # | proton   | k-  | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1006]='sidis/expdata/1006.xlsx' # | deuteron | k+  | M_Hermes | hermes
conf['datasets']['sidis']['xlsx'][1007]='sidis/expdata/1007.xlsx' # | deuteron | k-  | M_Hermes | hermes
# Every dataset gets a fixed unit normalization.
conf['datasets']['sidis']['norm']={}
for idx in conf['datasets']['sidis']['xlsx']: conf['datasets']['sidis']['norm'][idx]={'value':1,'fixed':True,'min':0,'max':1}
# Kinematic cuts applied to every SIDIS table before fitting.
conf['datasets']['sidis']['filters']=["z<0.6 and Q2>1.69 and pT>0.2 and pT<0.9 and dy>1.5"]
#conf['datasets']['sidis']['filters']=["z>0.2 and z<0.6 and Q2>1.69 and (pT/z)**2<0.25*Q2"]
| [
"[email protected]"
] | |
26cdefb0ecec064ed1f917ef3afd55f863db75a4 | 8425dd7fe9ab2cc794f08714931b4449c59eb51b | /TopPop.py | f3fafe544e17a633ed368c3f87d612a57dbb745e | [] | no_license | GioValca/Recommender-Systems-2019 | f498d84ef5a421da5632de6173ce763e27b8a794 | d8b54578f7d1e6453785c216f253404aabfbaba7 | refs/heads/master | 2023-02-22T12:49:32.801064 | 2021-01-26T19:38:12 | 2021-01-26T19:38:12 | 333,192,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,443 | py | import zipfile
import scipy.sparse as sps
import numpy as np
from matplotlib import pyplot
from Algorithms import TopPopRecommender
# Extract the training CSV from the challenge archive and open it.
dataFile = zipfile.ZipFile("/Users/giovanni/Desktop/RecSys/RecSysTest/Data/recommender-system-2019-challenge-polimi.zip")
URM_path = dataFile.extract("data_train.csv", path="/Users/giovanni/Desktop/RecSys/RecSysTest/Data")
URM_file = open(URM_path, 'r')

# Count the rows in the file (one CSV line per interaction, plus header).
URM_file.seek(0)
numberInteractions = sum(1 for _ in URM_file)
print("The number of interactions is {}".format(numberInteractions))
def rowSplit(rowString):
    """Parse one 'user,item,rating' CSV line into an (int, int, float) tuple."""
    fields = rowString.split(",")
    user_id = int(fields[0])
    item_id = int(fields[1])
    rating = float(fields[2].replace("\n", ""))
    return (user_id, item_id, rating)
# Re-read the file: skip the header line, then parse every data row.
URM_file.seek(0)
URM_file.readline()
URM_tuples = []
for line in URM_file:
    URM_tuples.append(rowSplit(line))

# Transpose the row tuples into per-column lists.
userList, itemList, ratingList = zip(*URM_tuples)
userList = list(userList)
itemList = list(itemList)
ratingList = list(ratingList)

userList_unique = list(set(userList))
itemList_unique = list(set(itemList))
numUsers = len(userList_unique)
numItems = len(itemList_unique)

# Basic dataset statistics.
# NOTE(review): numberInteractions above counted every file line including
# the header, so the averages below are off by one interaction — confirm.
print("-- STATISTIC --")
print ("Number of items\t {}, Number of users\t {}".format(numItems, numUsers))
print ("Max ID items\t {}, Max Id users\t {}".format(max(itemList_unique), max(userList_unique)))
print ("Min items\t {}, Min user\t {}".format(min(itemList_unique), min(userList_unique)))
print ("Average interactions per user {:.2f}".format(numberInteractions/numUsers))
print ("Average interactions per item {:.2f}".format(numberInteractions/numItems))
print ("Sparsity {:.2f} %".format((1-float(numberInteractions)/(numItems*numUsers))*100))
print("---------------")
# Build the user-rating matrix in COO format.
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
# NOTE(review): tocsr() returns a NEW matrix and the result is discarded
# here — URM_all itself stays in COO format. Confirm this is intended.
URM_all.tocsr()

# Item Activity: interactions per item, sorted ascending.
itemPopularity = (URM_all > 0).sum(axis=0)
itemPopularity = np.array(itemPopularity).squeeze()
itemPopularity = np.sort(itemPopularity)

pyplot.plot(itemPopularity, 'ro')
pyplot.ylabel('Num Interactions ')
pyplot.xlabel('Item Index')
pyplot.show()

# Compare average popularity of the top/bottom/median slices of items.
# NOTE(review): itemPopularity[-tenPercent] selects ONE element, not the
# top-10% slice — probably meant itemPopularity[-tenPercent:]. The same
# pattern repeats in the nonzero section below.
tenPercent = int(numItems/10)
print("Average per-item interactions over the whole dataset {:.2f}".
      format(itemPopularity.mean()))
print("Average per-item interactions for the top 10% popular items {:.2f}".
      format(itemPopularity[-tenPercent].mean()))
print("Average per-item interactions for the least 10% popular items {:.2f}".
      format(itemPopularity[:tenPercent].mean()))
print("Average per-item interactions for the median 10% popular items {:.2f}".
      format(itemPopularity[int(numItems*0.45):int(numItems*0.55)].mean()))
print("Number of items with zero interactions {}".
      format(np.sum(itemPopularity==0)))

# Repeat the statistics over items that have at least one interaction.
# NOTE(review): the median slice below still uses numItems rather than
# len(itemPopularityNonzero) — confirm.
itemPopularityNonzero = itemPopularity[itemPopularity>0]
print("Non zero Statistic:")
tenPercent = int(len(itemPopularityNonzero)/10)
print("Average per-item interactions over the whole dataset {:.2f}".
      format(itemPopularityNonzero.mean()))
print("Average per-item interactions for the top 10% popular items {:.2f}".
      format(itemPopularityNonzero[-tenPercent].mean()))
print("Average per-item interactions for the least 10% popular items {:.2f}".
      format(itemPopularityNonzero[:tenPercent].mean()))
print("Average per-item interactions for the median 10% popular items {:.2f}".
      format(itemPopularityNonzero[int(numItems*0.45):int(numItems*0.55)].mean()))

pyplot.plot(itemPopularityNonzero, 'ro')
pyplot.ylabel('Num Interactions ')
pyplot.xlabel('Item Index')
pyplot.show()
# User Activity: interactions per user, sorted ascending, then plotted.
userActivity = (URM_all>0).sum(axis=1)
userActivity = np.array(userActivity).squeeze()
userActivity = np.sort(userActivity)

pyplot.plot(userActivity, 'ro')
pyplot.ylabel('Num Interactions ')
pyplot.xlabel('User Index')
pyplot.show()
# Splitting the matrices into train/test.
train_test_split = 0.80
numInteractions = URM_all.nnz
# NOTE(review): train_mask is computed but never used — the pre-split
# matrices are loaded from disk below instead. Confirm which split is
# the intended one.
train_mask = np.random.choice([True,False], numInteractions, p=[train_test_split, 1-train_test_split])

URM_train = sps.load_npz("/Users/giovanni/Desktop/RecSys/RecSysTest/MyData/TrainAndTest/URM_train.npz")
URM_test = sps.load_npz("/Users/giovanni/Desktop/RecSys/RecSysTest/MyData/TrainAndTest/URM_test.npz")
URM_train = URM_train.tocsr()
URM_test = URM_test.tocsr()

# Fit the popularity recommender and show sample recommendations.
topPopRecommender = TopPopRecommender()
topPopRecommender.fit(URM_train)
for user_id in userList_unique[0:10]:
    print(topPopRecommender.recommend(user_id, at=10))
# Evaluation metrics
# Precision: fraction of recommended items that are relevant.
def precision(recommended_items, relevant_items):
    """Precision of one user's recommendation list.

    :param recommended_items: ranked array/list of recommended item ids
    :param relevant_items: 1-D array of the user's held-out item ids
    :return: |recommended ∩ relevant| / |recommended|
    """
    # np.isin replaces np.in1d, deprecated since NumPy 2.0.
    is_relevant = np.isin(recommended_items, relevant_items, assume_unique=True)
    precision_score = np.sum(is_relevant, dtype=np.float32) / len(is_relevant)
    return precision_score
# Recall: fraction of the user's relevant items that were recommended.
def recall(recommended_items, relevant_items):
    """Recall of one user's recommendation list.

    :param recommended_items: ranked array/list of recommended item ids
    :param relevant_items: 1-D array of the user's held-out item ids
    :return: |recommended ∩ relevant| / |relevant|
    """
    # np.isin replaces np.in1d, deprecated since NumPy 2.0.
    is_relevant = np.isin(recommended_items, relevant_items, assume_unique=True)
    recall_score = np.sum(is_relevant, dtype=np.float32) / relevant_items.shape[0]
    return recall_score
# Mean Average Precision (per-user average-precision term; the mean over
# users is taken by evaluate_algorithm).
def MAP(recommended_items, relevant_items):
    """Average precision of one ranked recommendation list.

    :param recommended_items: ranked array/list of recommended item ids
    :param relevant_items: 1-D array of the user's held-out item ids
    :return: sum of precision@k at each hit, normalised by
        min(|relevant|, |recommended|)
    """
    # np.isin replaces np.in1d, deprecated since NumPy 2.0.
    is_relevant = np.isin(recommended_items, relevant_items, assume_unique=True)
    # Cumulative sum: precision at 1, at 2, at 3 ...
    p_at_k = is_relevant * np.cumsum(is_relevant, dtype=np.float32) / (1 + np.arange(is_relevant.shape[0]))
    map_score = np.sum(p_at_k) / np.min([relevant_items.shape[0], is_relevant.shape[0]])
    return map_score
# Runs all the evaluation metrics over every user and prints the averages.
# The recommender object is passed in as a parameter.
def evaluate_algorithm(URM_test, recommender_object, at=10):
    """Evaluate `recommender_object` against `URM_test`, printing mean
    precision/recall/MAP over users with at least one test interaction.

    NOTE(review): iterates the module-level global `userList_unique`, so
    it only works after the loading script above has populated it.
    """
    cumulative_precision = 0.0
    cumulative_recall = 0.0
    cumulative_MAP = 0.0
    num_eval = 0
    for user_id in userList_unique:
        # Items the user interacted with in the held-out test set.
        relevant_items = URM_test[user_id].indices
        if len(relevant_items) > 0:
            recommended_items = recommender_object.recommend(user_id, at=at)
            num_eval += 1
            cumulative_precision += precision(recommended_items, relevant_items)
            cumulative_recall += recall(recommended_items, relevant_items)
            cumulative_MAP += MAP(recommended_items, relevant_items)
    # Averages over evaluated users; would divide by zero if no user had
    # any test interaction.
    cumulative_precision /= num_eval
    cumulative_recall /= num_eval
    cumulative_MAP /= num_eval
    print("Recommender performance is: Precision = {:.4f}, Recall = {:.4f}, MAP = {:.4f}".format(
        cumulative_precision, cumulative_recall, cumulative_MAP))
evaluate_algorithm(URM_test, topPopRecommender, at=10) | [
"[email protected]"
] | |
b5116fe784459d1e4eb67712b4155dee71af23f0 | db53bbcf8e61208034d8bdd8556d207b5cc513a2 | /friartuck/iextrading/iextrading.py | ee22ba9f3117c8335013d6b19ceabdad1f3eae23 | [
"MIT"
] | permissive | prateek470/friartuck | 4f38d45c1ddad1e291831615657c2e721fc95057 | a4f4a3a3e9e9fbe45c4354431a9fe832d4089c81 | refs/heads/master | 2020-03-30T15:41:21.378467 | 2018-09-12T16:46:18 | 2018-09-12T16:46:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,675 | py | import json
from datetime import timedelta, datetime
import calendar
import pandas as pd
import urllib.request
from tinydb import TinyDB, Query
class IEXTrading(object):
    """Minimal client for the IEX Trading v1 REST API.

    Daily bars come straight from the /chart endpoint. Per-day intraday
    minute data is cached in a per-symbol TinyDB file and aggregated into
    coarser bars by the module-level summarize_quote() helper.
    """

    def __init__(self):
        pass

    def get_quote_daily(self, symbol, bars=22):
        """Fetch the last `bars` daily OHLCV quotes for `symbol`.

        :param symbol: ticker symbol (case-insensitive)
        :param bars: number of trailing daily bars wanted
        :return: DataFrame indexed by date with columns
            price/open/high/low/close/volume/date; a single NaN row stamped
            "now" when IEX returned nothing.
        """
        # Choose the smallest IEX chart range that can contain `bars` rows.
        if bars <= 20:
            query_length = '1m'
        elif bars <= 60:
            query_length = '3m'
        elif bars <= 120:
            query_length = '6m'
        elif bars <= 240:
            query_length = '1y'
        elif bars <= 480:
            query_length = '2y'
        else:
            query_length = '5y'

        url = "https://api.iextrading.com/1.0/stock/%s/chart/%s?chartLast=%s" % (symbol.lower(), query_length, bars)
        print(url)

        # Accumulated result frame (renamed from `bars`, which shadowed
        # the integer parameter above).
        result = None
        with urllib.request.urlopen(url) as response:
            content = response.read()
        quotes = json.loads(content.decode('utf-8'))
        print(len(quotes))
        for quote_data in quotes:
            quote_date = datetime.strptime(quote_data['date'], "%Y-%m-%d")
            bar = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
                               data={'price': quote_data['close'],
                                     'open': quote_data['open'],
                                     'high': float(quote_data['high']),
                                     'low': float(quote_data['low']),
                                     'close': quote_data['close'],
                                     'volume': int(quote_data['volume']),
                                     'date': quote_date})
            # pd.concat: DataFrame.append was removed in pandas 2.0 (the
            # file already uses pd.concat elsewhere).
            result = bar if result is None else pd.concat([result, bar])

        if result is None:
            # Nothing returned: hand back one NaN row so callers always
            # see the expected columns.
            quote_date = datetime.now()
            quote_date = quote_date.replace(second=0, microsecond=0)
            result = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
                                  columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'],
                                  data={'price': float("nan"),
                                        'open': float("nan"),
                                        'high': float("nan"),
                                        'low': float("nan"),
                                        'close': float("nan"),
                                        'volume': int(0),
                                        'date': quote_date})
        return result

    def get_quote_intraday(self, symbol, minute_series, last_quote_time):
        """Fetch today's minute data since `last_quote_time` and aggregate
        it into `minute_series`-minute bars.

        :param last_quote_time: start of the window; defaults to 08:30
            today when falsy. The window always ends at 15:00.
        """
        if not last_quote_time:
            last_quote_time = datetime.now().replace(hour=8, minute=30, second=0, microsecond=0)
        start_time = last_quote_time
        end_time = last_quote_time.replace(hour=15, minute=0, second=0, microsecond=0)
        diff = (end_time - start_time).seconds / 60
        print("diff: %s" % diff)

        url = "https://api.iextrading.com/1.0/stock/%s/chart/1d?" \
              "chartLast=%s" % (symbol.lower(), diff)
        print(url)
        with urllib.request.urlopen(url) as response:
            content = response.read()
        quotes = json.loads(content.decode('utf-8'))
        print(len(quotes))
        # BUG FIX: summarize_quote is a module-level function, not a
        # method; calling it via `self.` raised AttributeError.
        return summarize_quote(quotes, minute_series)

    def _get_quote_intraday_by_date(self, symbol, date):
        """Return the raw minute quotes for `symbol` on `date`.

        Results are cached in a per-symbol TinyDB file; only completed
        sessions are stored, so partial (in-progress) days get re-fetched.
        """
        db = TinyDB('data/iex_db_%s.json' % symbol)
        quote_query = Query()
        datestr = date.strftime("%Y%m%d")
        quotes = db.search(quote_query.date == datestr)
        if quotes and len(quotes) > 0:
            print("from db: %s" % len(quotes))
            return quotes

        url = "https://api.iextrading.com/1.0/stock/%s/chart/date/%s" % (symbol.lower(), datestr)
        print(url)
        with urllib.request.urlopen(url) as response:
            content = response.read()
        quotes = json.loads(content.decode('utf-8'))
        print(len(quotes))
        if len(quotes) > 0:
            # Cache only past days, or today after the 16:00 close.
            current_datetime = datetime.now()
            if date.date() < current_datetime.date() or current_datetime > current_datetime.replace(hour=16, minute=0, second=0, microsecond=0):
                print("storing quotes:")
                db.insert_multiple(quotes)
        return quotes

    def get_quote_intraday_hist_by_bars(self, symbol, minute_series, bars=1, before_date=None):
        """Walk backwards over trading days, collecting aggregated intraday
        bars until `bars` bars are available (or 35 days were scanned).

        :param before_date: when set, only days strictly before it are used
        :return: DataFrame with the trailing `bars` rows (one NaN row when
            nothing was found).
        """
        date = datetime.now()
        date_ctr = 0
        quote_bars = None
        while date_ctr < 35 and (quote_bars is None or len(quote_bars) < bars):
            # Skip weekends and anything at/after `before_date`.
            while date.weekday() in [5, 6] or (before_date and date >= before_date):
                date = date - timedelta(days=1)
            quotes = self._get_quote_intraday_by_date(symbol, date)
            date_ctr = date_ctr + 1
            date = date - timedelta(days=1)
            if len(quotes) == 0:
                continue
            my_bars = summarize_quote(quotes, minute_series)
            # Older days are prepended so the frame stays in time order.
            if quote_bars is None:
                quote_bars = my_bars
            else:
                quote_bars = pd.concat([my_bars, quote_bars])

        if quote_bars is None:
            quote_date = datetime.now()
            quote_date = quote_date.replace(second=0, microsecond=0)
            quote_bars = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
                                      columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'],
                                      data={'price': float("nan"),
                                            'open': float("nan"),
                                            'high': float("nan"),
                                            'low': float("nan"),
                                            'close': float("nan"),
                                            'volume': int(0),
                                            'date': quote_date})
        return quote_bars.tail(bars)
def summarize_quote(quotes, minute_series):
    """Aggregate IEX 1-minute quote dicts into `minute_series`-minute OHLCV bars.

    :param quotes: list of raw IEX minute-chart dicts for a single day
    :param minute_series: bar width in minutes; one of 1, 5, 15, 30, 60
    :return: DataFrame with columns price/open/high/low/close/volume/date;
        a single NaN row stamped "now" when nothing could be aggregated or
        `minute_series` is unsupported.
    """
    if minute_series not in [1, 5, 15, 30, 60]:
        # Unsupported bar width: return the NaN placeholder frame.
        quote_date = datetime.now()
        quote_date = quote_date.replace(second=0, microsecond=0)
        return pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
                            columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'],
                            data={'price': float("nan"),
                                  'open': float("nan"),
                                  'high': float("nan"),
                                  'low': float("nan"),
                                  'close': float("nan"),
                                  'volume': int(0),
                                  'date': quote_date})

    bars = None
    active_quote = None
    for quote_data in quotes:
        # Skip rows that carry no date or no close information at all.
        if "date" not in quote_data or ("close" not in quote_data and "marketClose" not in quote_data):
            continue
        # NOTE(review): the -1h shift presumably converts IEX US/Eastern
        # timestamps to a US/Central session clock — confirm.
        quote_date = datetime.strptime("%sT%s" % (quote_data['date'], quote_data['minute']), "%Y%m%dT%H:%M") - timedelta(hours=1)
        # Ignore anything after the 15:00 close (on the shifted clock).
        if quote_date.time() > quote_date.time().replace(hour=15, minute=0, second=0, microsecond=0):
            continue
        starts_new_bar = (not active_quote
                          or (minute_series in [1, 5, 15, 30] and quote_date.minute % minute_series == 0)
                          or (minute_series in [60] and quote_date.hour != active_quote['date'].hour))
        if starts_new_bar:
            # Flush the finished bar (pd.concat: DataFrame.append was
            # removed in pandas 2.0), then open a new one. -1 marks
            # "no valid value seen yet".
            if active_quote and active_quote['close'] != -1:
                bar = pd.DataFrame(index=pd.DatetimeIndex([active_quote['date']]), data=active_quote)
                bars = bar if bars is None else pd.concat([bars, bar])
            market_open = -1
            market_close = -1
            if "marketClose" in quote_data:
                market_close = is_valid_value(float(quote_data['marketClose']), float(get_field_value('close', quote_data, -1)))
            if "marketOpen" in quote_data:
                market_open = is_valid_value(float(quote_data['marketOpen']), float(get_field_value('open', quote_data, -1)))
            active_quote = {'price': market_close,
                            'open': market_open,
                            'high': is_valid_value(float(quote_data['marketHigh']), float(quote_data['high'])),
                            'low': is_valid_value(float(quote_data['marketLow']), float(quote_data['low'])),
                            'close': market_close,
                            'volume': is_valid_value(int(quote_data['marketVolume']), int(quote_data['volume'])),
                            'date': quote_date}
        else:
            # Fold this minute into the open bar (only when market data is
            # present, matching the original behaviour).
            if "marketClose" in quote_data:
                if active_quote['open'] == -1:
                    active_quote['open'] = is_valid_value(float(quote_data['marketOpen']), float(get_field_value('open', quote_data, -1)))
                active_quote['price'] = is_valid_value(float(quote_data['marketClose']), float(get_field_value('close', quote_data, -1)))
                active_quote['close'] = active_quote['price']
                active_quote['volume'] = active_quote['volume'] + is_valid_value(int(quote_data['marketVolume']), int(quote_data['volume']))
                if active_quote['high'] == -1 or active_quote['high'] < is_valid_value(float(quote_data['marketHigh']), float(quote_data['high'])):
                    active_quote['high'] = is_valid_value(float(quote_data['marketHigh']), float(quote_data['high']))
                if active_quote['low'] == -1 or active_quote['low'] > is_valid_value(float(quote_data['marketLow']), float(quote_data['low'])):
                    active_quote['low'] = is_valid_value(float(quote_data['marketLow']), float(quote_data['low']))

    # Flush the final, still-open bar.
    if active_quote and active_quote['close'] != -1:
        bar = pd.DataFrame(index=pd.DatetimeIndex([active_quote['date']]), data=active_quote)
        bars = bar if bars is None else pd.concat([bars, bar])

    if bars is None:
        quote_date = datetime.now()
        quote_date = quote_date.replace(second=0, microsecond=0)
        bars = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
                            columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'],
                            data={'price': float("nan"),
                                  'open': float("nan"),
                                  'high': float("nan"),
                                  'low': float("nan"),
                                  'close': float("nan"),
                                  'volume': int(0),
                                  'date': quote_date})
    return bars
def is_valid_value(value, default):
    """Return `value` unless it is the -1 "missing" sentinel, else `default`."""
    return default if value == -1 else value
def get_field_value(field, map_data, default):
    """Return `map_data[field]` when the key exists, else `default`."""
    return map_data[field] if field in map_data else default
| [
"[email protected]"
] | |
e6d9b1efa325f76c10eb17089443990d9cb3ee85 | b3c0618649531f3759c2f3b5b9820025d5b5d8b2 | /swagger_client/models/elimination_alliance_status.py | 0ad6d640d5e025a5830cac8b091b798353993550 | [
"MIT"
] | permissive | simra/FRCModeling | ef2287a88134671b9c20e2544f3c22f5758a7100 | e98b57cbfb5db5ed66c4fcf72c5ed96fcfb41ace | refs/heads/master | 2023-04-09T05:45:26.845734 | 2023-03-30T04:07:50 | 2023-03-30T04:07:50 | 182,472,667 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,674 | py | # coding: utf-8
"""
The Blue Alliance API v3
# Overview Information and statistics about FIRST Robotics Competition teams and events. # Authentication All endpoints require an Auth Key to be passed in the header `X-TBA-Auth-Key`. If you do not have an auth key yet, you can obtain one from your [Account Page](/account). A `User-Agent` header may need to be set to prevent a 403 Unauthorized error. # noqa: E501
OpenAPI spec version: 3.04.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.wlt_record import WLTRecord # noqa: F401,E501
class EliminationAllianceStatus(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its swagger-declared type; used by the
    # generated (de)serialization helpers.
    swagger_types = {
        'current_level_record': 'WLTRecord',
        'level': 'str',
        'playoff_average': 'float',
        'record': 'WLTRecord',
        'status': 'str'
    }

    # Maps each attribute name to its JSON key (identical here).
    attribute_map = {
        'current_level_record': 'current_level_record',
        'level': 'level',
        'playoff_average': 'playoff_average',
        'record': 'record',
        'status': 'status'
    }
    def __init__(self, current_level_record=None, level=None, playoff_average=None, record=None, status=None):  # noqa: E501
        """EliminationAllianceStatus - a model defined in Swagger"""  # noqa: E501
        self._current_level_record = None
        self._level = None
        self._playoff_average = None
        self._record = None
        self._status = None
        self.discriminator = None
        # Assign through the properties (not the underscored fields); a
        # None argument means "field absent" and leaves the default alone.
        if current_level_record is not None:
            self.current_level_record = current_level_record
        if level is not None:
            self.level = level
        if playoff_average is not None:
            self.playoff_average = playoff_average
        if record is not None:
            self.record = record
        if status is not None:
            self.status = status
    # Generated accessor boilerplate: each property below is a plain
    # pass-through to the matching private attribute, with no validation.
    @property
    def current_level_record(self):
        """Gets the current_level_record of this EliminationAllianceStatus.  # noqa: E501

        :return: The current_level_record of this EliminationAllianceStatus.  # noqa: E501
        :rtype: WLTRecord
        """
        return self._current_level_record

    @current_level_record.setter
    def current_level_record(self, current_level_record):
        """Sets the current_level_record of this EliminationAllianceStatus.

        :param current_level_record: The current_level_record of this EliminationAllianceStatus.  # noqa: E501
        :type: WLTRecord
        """
        self._current_level_record = current_level_record

    @property
    def level(self):
        """Gets the level of this EliminationAllianceStatus.  # noqa: E501

        :return: The level of this EliminationAllianceStatus.  # noqa: E501
        :rtype: str
        """
        return self._level

    @level.setter
    def level(self, level):
        """Sets the level of this EliminationAllianceStatus.

        :param level: The level of this EliminationAllianceStatus.  # noqa: E501
        :type: str
        """
        self._level = level

    @property
    def playoff_average(self):
        """Gets the playoff_average of this EliminationAllianceStatus.  # noqa: E501

        :return: The playoff_average of this EliminationAllianceStatus.  # noqa: E501
        :rtype: float
        """
        return self._playoff_average

    @playoff_average.setter
    def playoff_average(self, playoff_average):
        """Sets the playoff_average of this EliminationAllianceStatus.

        :param playoff_average: The playoff_average of this EliminationAllianceStatus.  # noqa: E501
        :type: float
        """
        self._playoff_average = playoff_average

    @property
    def record(self):
        """Gets the record of this EliminationAllianceStatus.  # noqa: E501

        :return: The record of this EliminationAllianceStatus.  # noqa: E501
        :rtype: WLTRecord
        """
        return self._record

    @record.setter
    def record(self, record):
        """Sets the record of this EliminationAllianceStatus.

        :param record: The record of this EliminationAllianceStatus.  # noqa: E501
        :type: WLTRecord
        """
        self._record = record

    @property
    def status(self):
        """Gets the status of this EliminationAllianceStatus.  # noqa: E501

        :return: The status of this EliminationAllianceStatus.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this EliminationAllianceStatus.

        :param status: The status of this EliminationAllianceStatus.  # noqa: E501
        :type: str
        """
        self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EliminationAllianceStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EliminationAllianceStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
422de7724d120ddf4cb787369374d3de41313db8 | 48cddade0476a14974acc087f8bf1d4a40d914ee | /partsrc/RodLatch.py | 40ea936dc7933aeeec25c3f8da37928186d809be | [] | no_license | Zerick/MendelMax | ac6360675f53e6bafdbd167b7136951190d70a23 | fa62862863fd72140b11fed88febc645b4d2c9d7 | refs/heads/master | 2020-12-25T02:29:55.587169 | 2012-05-24T23:03:42 | 2012-05-24T23:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | from __future__ import division # allows floating point division from integers
import FreeCAD, Part, math
from FreeCAD import Base
def RodLatch(
    rod_diameter = 8,
    #how far apart should the bolt holes be?
    hole_spacing = 30,
    #how thick should the non-flange parts be?
    thick_typical = 4.25,
    #how thick should the plastic under the bolt be?
    thick_compress = 3
    ):
    """Build a printable latch that clamps a smooth rod between two bolts.

    Returns a FreeCAD Part shape: a half-round saddle over the rod, a base
    plate, two round bolt-hole bosses, with through-holes, counterbores for
    the bolt heads, and the rod channel subtracted.

    :param rod_diameter: diameter of the rod being clamped (mm)
    :param hole_spacing: centre-to-centre distance between the bolt holes (mm)
    :param thick_typical: wall thickness of the non-flange parts (mm)
    :param thick_compress: plastic thickness left under the bolt head (mm)
    """
    #how big are the bolts?
    #TODO: replace with use of bolt module after it's written
    bolt_hole_diameter = 5.5
    bolt_head_diameter = 8.5

    # Width of the latch body, sized so a bolt head fits with walls either side.
    latch_width = bolt_head_diameter + thick_typical*2

    #bounding box for the main curved part of the latch
    box = Part.makeBox(hole_spacing,latch_width,rod_diameter/2+thick_typical)
    box.translate(Base.Vector(latch_width/2,0,0))

    #main curved part of the latch, around the rod
    cylinder = Part.makeCylinder(rod_diameter/2+thick_typical,latch_width)
    cylinder.rotate(Base.Vector(0,0,0),Base.Vector(1,0,0),-90)
    cylinder.translate(Base.Vector(latch_width/2+hole_spacing/2,0,0))
    # Intersect box and cylinder to get the saddle profile.
    latch = box.common(cylinder)

    #connects the curved part to the bolt holes
    box = Part.makeBox(hole_spacing,latch_width,max(thick_typical,thick_compress))
    box.translate(Base.Vector(latch_width/2,0,0))
    latch = latch.fuse(box)

    #housings for the bolt holes (one round boss at each end)
    cylinder = Part.makeCylinder(latch_width/2,thick_typical)
    cylinder.translate(Base.Vector(latch_width/2,latch_width/2,0))
    latch = latch.fuse(cylinder)
    cylinder = Part.makeCylinder(latch_width/2,thick_typical)
    cylinder.translate(Base.Vector(latch_width/2+hole_spacing,latch_width/2,0))
    latch = latch.fuse(cylinder)

    #bolt holes: through-hole plus counterbore for the head, at each end
    cylinder = Part.makeCylinder(bolt_hole_diameter/2,rod_diameter/2+thick_typical)
    cylinder.translate(Base.Vector(latch_width/2,latch_width/2,0))
    latch = latch.cut(cylinder)
    cylinder = Part.makeCylinder(bolt_head_diameter/2,rod_diameter/2+thick_typical)
    cylinder.translate(Base.Vector(latch_width/2,latch_width/2,thick_compress))
    latch = latch.cut(cylinder)
    cylinder = Part.makeCylinder(bolt_hole_diameter/2,rod_diameter/2+thick_typical)
    cylinder.translate(Base.Vector(latch_width/2+hole_spacing,latch_width/2,0))
    latch = latch.cut(cylinder)
    cylinder = Part.makeCylinder(bolt_head_diameter/2,rod_diameter/2+thick_typical)
    cylinder.translate(Base.Vector(latch_width/2+hole_spacing,latch_width/2,thick_compress))
    latch = latch.cut(cylinder)

    #rod hole: subtract the rod channel through the saddle
    cylinder = Part.makeCylinder(rod_diameter/2,latch_width)
    cylinder.rotate(Base.Vector(0,0,0),Base.Vector(1,0,0),-90)
    cylinder.translate(Base.Vector(latch_width/2+hole_spacing/2,0,0))
    latch = latch.cut(cylinder)

    return latch

if __name__ == "__main__":
    # Display the latch with default dimensions when run inside FreeCAD.
    Part.show(RodLatch())
| [
"[email protected]"
] | |
5ffd9da9b29264937d092e0e599d5ef03390c703 | 3ec374cf0ddaed895243bac370c98292c0e62d1c | /blog/urls.py | 9b50717f17946eb0364ef561519f575eb5c5c9df | [] | no_license | wmahad/DjangoApps | aaf5f5c96d91c4f5eb233e5e59b20916d5f19cf3 | debac500f94f680ae1fa1d9b5bbee56280c77ca6 | refs/heads/master | 2021-06-08T05:40:18.238293 | 2016-11-09T08:25:22 | 2016-11-09T08:25:22 | 73,264,160 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django.conf.urls import url
from . import views
# Blog URL routes. Order matters: the literal 'create/' prefix must be
# matched before the catch-all slug pattern below would swallow it.
urlpatterns = [
    url(r'^create/$', views.posts_create, name='create'),
    # Detail view for a single post, looked up by slug.
    url(r'^(?P<slug>[\w-]+)/$', views.posts_detail, name='detail'),
    # Post index at the app root.
    url(r'^$', views.posts_list, name='list'),
    url(r'^(?P<slug>[\w-]+)/edit/$', views.posts_update, name='update'),
    url(r'^(?P<slug>[\w-]+)/delete/$', views.posts_delete, name='delete'),
]
"[email protected]"
] | |
b9e0b30c03c0acc655581261d4497f32ed24eeea | ef5397e65e0ed37b18bd57922cd4d786fc805195 | /passowrd_generator/urls.py | 9f9b36fdaddac0a426afccc07afb2f77280fc527 | [] | no_license | obiorbitalstar/password-generator-django | e42d86c6301bd42a74283a517fca8d8d510c20ff | 1f1e3a712acedb7f5925771e315ddbfc6162552f | refs/heads/main | 2023-07-17T16:52:37.803443 | 2021-08-29T07:18:20 | 2021-08-29T07:18:20 | 356,600,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | """passowrd_generator URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from generator import views
# Application routes for the password generator.
urlpatterns = [
    path('', views.home, name='home'),            # landing page
    path('password', views.password, name='password'),  # generated-password page
    path('about', views.about, name='about'),     # about page
]
| [
"[email protected]"
] | |
aadf7e232eb2e18f4f3b975abb3ac92433eac982 | 70c8fedb44f9c351d756ced956cfe12aa51bee1b | /problem9.py | 57fa8c3cf03d5b548f90b9698a2d4e3cee30cd0e | [] | no_license | nitin42/Project-Euler | 0f4ef4744855d5899d22446caf08dbbf42b34dd9 | 6a49a0cdd904dc8477f8c57457e0294519586adb | refs/heads/master | 2020-04-06T07:03:47.652367 | 2016-08-29T19:05:14 | 2016-08-29T19:05:14 | 64,479,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | def pythagorean():
# a+b+c = 1000 ---- (1)
# a^2 + b^2 = c^2 ---- (2)
# a = 1000*(500-b) / (1000 - b) ----> After mathematically solving the equation by squaring the eq. 1
'''
>>> pythagorean()
200 375
375 200
289001
'''
for b in range(1,500):
if 1000*(500-b) % (1000-b) == 0:
a = 1000*(500-b) / (1000-b)
print b, a
A = a**2
B = b**2
C = A+B
print C
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
| [
"[email protected]"
] | |
fb6d9e5ffceae3600e94f3b4c38180affb6a07c0 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/analytics_avg_concurrent_viewers_response.py | 49b659be8d320fa4c635950d412173f20deeb451 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 5,314 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
import pprint
import six
class AnalyticsAvgConcurrentViewersResponse(object):
    """Generated API model: rows of average-concurrent-viewer values plus
    row count and column labels. Setters type-check their input."""

    @poscheck_model
    def __init__(self,
                 rows=None,
                 row_count=None,
                 column_labels=None):
        # type: (list[float], int, list[AnalyticsColumnLabel]) -> None
        # Backing fields; lists default to empty, scalars to None.
        self._rows = list()
        self._row_count = None
        self._column_labels = list()
        self.discriminator = None

        # Route supplied values through the validating property setters.
        if rows is not None:
            self.rows = rows
        if row_count is not None:
            self.row_count = row_count
        if column_labels is not None:
            self.column_labels = column_labels

    @property
    def openapi_types(self):
        # attribute name -> declared OpenAPI type
        types = {
            'rows': 'list[float]',
            'row_count': 'int',
            'column_labels': 'list[AnalyticsColumnLabel]'
        }

        return types

    @property
    def attribute_map(self):
        # attribute name -> key used in the JSON wire format
        attributes = {
            'rows': 'rows',
            'row_count': 'rowCount',
            'column_labels': 'columnLabels'
        }
        return attributes

    @property
    def rows(self):
        # type: () -> list[float]
        """Gets the rows of this AnalyticsAvgConcurrentViewersResponse.


        :return: The rows of this AnalyticsAvgConcurrentViewersResponse.
        :rtype: list[float]
        """
        return self._rows

    @rows.setter
    def rows(self, rows):
        # type: (list) -> None
        """Sets the rows of this AnalyticsAvgConcurrentViewersResponse.


        :param rows: The rows of this AnalyticsAvgConcurrentViewersResponse.
        :type: list[float]
        """
        if rows is not None:
            if not isinstance(rows, list):
                raise TypeError("Invalid type for `rows`, type has to be `list[float]`")

        self._rows = rows

    @property
    def row_count(self):
        # type: () -> int
        """Gets the row_count of this AnalyticsAvgConcurrentViewersResponse.

        Number of rows returned

        :return: The row_count of this AnalyticsAvgConcurrentViewersResponse.
        :rtype: int
        """
        return self._row_count

    @row_count.setter
    def row_count(self, row_count):
        # type: (int) -> None
        """Sets the row_count of this AnalyticsAvgConcurrentViewersResponse.

        Number of rows returned

        :param row_count: The row_count of this AnalyticsAvgConcurrentViewersResponse.
        :type: int
        """
        if row_count is not None:
            if not isinstance(row_count, int):
                raise TypeError("Invalid type for `row_count`, type has to be `int`")

        self._row_count = row_count

    @property
    def column_labels(self):
        # type: () -> list[AnalyticsColumnLabel]
        """Gets the column_labels of this AnalyticsAvgConcurrentViewersResponse.


        :return: The column_labels of this AnalyticsAvgConcurrentViewersResponse.
        :rtype: list[AnalyticsColumnLabel]
        """
        return self._column_labels

    @column_labels.setter
    def column_labels(self, column_labels):
        # type: (list) -> None
        """Sets the column_labels of this AnalyticsAvgConcurrentViewersResponse.


        :param column_labels: The column_labels of this AnalyticsAvgConcurrentViewersResponse.
        :type: list[AnalyticsColumnLabel]
        """
        if column_labels is not None:
            if not isinstance(column_labels, list):
                raise TypeError("Invalid type for `column_labels`, type has to be `list[AnalyticsColumnLabel]`")

        self._column_labels = column_labels

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Serialise via attribute_map keys, skipping unset values and empty
        # lists, and unwrapping Enum members and nested models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if value is None:
                continue
            if isinstance(value, list):
                if len(value) == 0:
                    continue
                result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
            elif hasattr(value, "to_dict"):
                result[self.attribute_map.get(attr)] = value.to_dict()
            elif isinstance(value, Enum):
                result[self.attribute_map.get(attr)] = value.value
            elif isinstance(value, dict):
                result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
            else:
                result[self.attribute_map.get(attr)] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AnalyticsAvgConcurrentViewersResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
56e52bdba43b4682cabb1caca99075c7f00d64a7 | 77b5fdaa83c3cd27bb6ea122acfd871af492a23e | /src/todobackend/settings/test.py | 0668d6678ff09f8a48edbd35f01382c978cfc73a | [] | no_license | ChristopherGerlier/todobackend | fe2b376f0c9d4c0da755ca6a7bad7d089b0ddb29 | e5a5e5c69a1d83119e938d7b7c249fea06de6608 | refs/heads/master | 2022-07-18T18:56:26.452742 | 2020-05-26T12:13:58 | 2020-05-26T12:13:58 | 267,033,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | from base import *
import os
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEST_OUTPUT_DIR = os.environ.get('TEST_OUTPUT_DIR', '.')
NOSE_ARGS = [
'--verbosity=2', # verbose output
'--nologcapture', # don't output log capture
'--with-coverage', # activate coverage report
'--cover-package=todo', # coverage reports will apply to these packages
'--with-spec', # spec style tests
'--spec-color',
'--with-xunit', # enable xunit plugin
'--xunit-file=%s/unittests.xml' % TEST_OUTPUT_DIR,
'--cover-xml', # produce XML coverage info
'--cover-xml-file=%s/coverage.xml' % TEST_OUTPUT_DIR,
]
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('MYSQL_DATABASE', 'todobackend'),
'USER': os.environ.get('MYSQL_USER', 'todo'),
'PASSWORD': os.environ.get('MYSQL_PASSWORD', 'password'),
'HOST': os.environ.get('MYSQL_HOST', 'localhost'),
'PORT': os.environ.get('MYSQL_PORT', '3306'),
}
}
| [
"[email protected]"
] | |
a385414e39a8f59f0ae922b97e76365a818bfdb7 | 05722ad65c0b486bf78c20a6655f3331697ea995 | /galaxy/polls/models.py | 62b2f7fed06f43880ee4c51863efbcb026b014dd | [] | no_license | contrerasjlu/gldesa | 371fc77b146e0b4895f2038b0bfbcdd9694a0d27 | 1d3c063d5d8c3d954b42f679149be9af67d779cb | refs/heads/master | 2021-01-22T09:05:20.249099 | 2015-09-04T04:14:58 | 2015-09-04T04:14:58 | 41,629,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
return "I call your Name"
class Choice(models.Model):
question = models.ForeignKey(Question)
choice_text= models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| [
"[email protected]"
] | |
77752d8ff8e46b198c31a4bf2e73e72df3eec500 | 7972f2a08d201325f13847086d9c6e3161fa0e95 | /OpenCV.xcodeproj/lib/python2.7/site-packages/numpy/testing/tests/test_utils.py | 7fab79d3047eb88e766dea2900a7dd5b77693153 | [] | no_license | valiok98/testing | d430e1a2bfa6c4ec758f6629cb0e11f3d19e1480 | b022b04e92f14d5c7fa69d589bfa8983160890a4 | refs/heads/master | 2022-01-14T22:11:00.365164 | 2019-05-14T11:53:08 | 2019-05-14T11:53:08 | 184,013,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | ../../../../../../Cellar/numpy/1.16.3_1/lib/python2.7/site-packages/numpy/testing/tests/test_utils.py | [
"[email protected]"
] | |
5cd4b7729e9d9cbd16fa750af29aa6659268a1c2 | 167173e56f9085f2d28b647878b7a538a4599637 | /KAI/KAI/settings.py | 1af54d3a7a8a3508ce6c0200f2d132201f889600 | [] | no_license | DrKornballer/KAI | 738ded00bd9141d020f6a3637e0b058fda70675d | 8c0c5fdac63e2620201e2dbdd9b48849650b7d96 | refs/heads/main | 2023-06-10T04:40:45.012094 | 2021-06-19T18:31:35 | 2021-06-19T18:31:35 | 378,478,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | """
Django settings for KAI project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-4smi$@@_l(5%)qtu*$_jvty7bfb=dqtz=f6szswq&vg3f)1anx'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition
# Project apps (pack, kkquit, my_kai_web) are listed before the Django
# contrib apps.
INSTALLED_APPS = [
    'pack',
    'kkquit',
    'my_kai_web',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'KAI.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'KAI.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
"[email protected]"
] | |
b1f301ed9a61bc2c2e6415a2b18e839582f587d7 | 1fc81227ff573f133521003f83681897d7e3ef3a | /ExtractAnnualPCCInMangMask/CreateCmdsList.py | fd8354d0b7658b86d3d02d755dcf8a6ca39c450b | [] | no_license | petebunting/australia_odc_mangrove_extent | 56f42901e18f67f5f5c3e0f868bca3b6bc60ec55 | e22ff1832a9567a5e0d8a46570dbe917c243391b | refs/heads/master | 2022-08-01T01:53:30.362116 | 2018-02-18T22:39:39 | 2018-02-18T22:39:39 | 266,527,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | import pandas
import os.path
# Square-degree tiles covering the Australian mangrove regions.
gmwTiles = pandas.read_csv('./AustraliaSqGrid_MangroveRegionsV1.csv', delimiter = ',')

cmdBase = 'python /home/552/pjb552/agdc_mangrovemonitoring/ExtractAnnualPCCInMangMask/ExtractPCCInMangAreas.py '
outFileImgBase = '/g/data/r78/pjb552/MangChangePVFC_V3/pccimgstif'

# One extraction command per tile.
cmds = []
for tile in range(len(gmwTiles)):
    # Tile centre in lat/long, used to build a unique output file name.
    midLat = gmwTiles['MinY'][tile] + ((gmwTiles['MaxY'][tile] - gmwTiles['MinY'][tile]) / 2)
    midLon = gmwTiles['MinX'][tile] + ((gmwTiles['MaxX'][tile] - gmwTiles['MinX'][tile]) / 2)
    # Strip sign and decimal point so the values are file-name safe.
    midLatStr = str(midLat).replace('-', '').replace('.', '')
    midLonStr = str(midLon).replace('-', '').replace('.', '')
    posFileName = midLatStr + '_' + midLonStr + '_' + str(gmwTiles['GridID'][tile])
    outImgTileName = 'PCC4GMW_MangExtent_' + posFileName + '.tif'

    cmd = (cmdBase + '--startyear 1987 --endyear 2016 '
           + ' --minlat ' + str(gmwTiles['MinY'][tile])
           + ' --maxlat ' + str(gmwTiles['MaxY'][tile])
           + ' --minlon ' + str(gmwTiles['MinX'][tile])
           + ' --maxlon ' + str(gmwTiles['MaxX'][tile])
           + ' --outimg ' + os.path.join(outFileImgBase, outImgTileName))
    cmds.append(cmd)

# Shell script listing every per-tile command. Context managers replace the
# manual open/flush/close sequence and guarantee the files are closed even
# if a write fails.
outRunLstFile = 'RunExtractPCC4Mang.sh'
with open(outRunLstFile, 'w') as f:
    for item in cmds:
        f.write(str(item) + '\n')

# Helper script that turns the command list into a PBS qsub submission.
outQSubFile = 'QSubExtractAnnualPCC4GMWMangs.pbs'
outGenPBSFile = 'GenQSubExtractAnnualPCC4GMWMangsCmds.sh'
with open(outGenPBSFile, 'w') as f:
    f.write(str('python ../PBS/CreateQSubScripts.py --input ' + outRunLstFile
                + ' --output ' + outQSubFile
                + ' --memory 32Gb --time 30:00:00 --cores 16 --project r78') + '\n')
| [
"[email protected]"
] | |
6001ae74b5f0fed18ec6e469b34b33ca9d1eadf6 | 8ac749d12d6836455b6240204697501e79b37c2d | /Data_Gathering_Implementations/Authcode_Linux/logic_code/logger.py | 3b8911928c22b1724146d178d968f199fedded4d | [] | no_license | CyberDataLab/AuthCode | 075ee81158d2322b5b7d8a51f825c96e9e2c28ea | 214c08f06bdfe048efe4d0807f8497d0dedf20bc | refs/heads/master | 2021-05-26T23:03:25.487378 | 2020-04-28T12:29:54 | 2020-04-28T12:29:54 | 254,184,562 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,406 | py | # coding=utf-8
import time
import psutil
import os
from pynput import mouse
import threading
import subprocess as sp
from logic_code import extract_features
from pathlib import Path
from logic_code.keyboard import *
import notify2
import sys
# PEP 8 (E731): use a def instead of assigning a lambda to a name, so the
# function has a proper __name__ and a docstring.
def current_time():
    """Return the current epoch time as an integer number of milliseconds."""
    return int(time.time() * 1000)
# Generate work directory (~/Authcode) for all log and output files.
home = Path.home()
path_logs = str(home) + "/Authcode"
if not os.path.exists(path_logs):
    os.mkdir(path_logs)

# Generate temporal log files; stale logs from a previous run are discarded.
path = path_logs + "/mouse_keyboard_log_service.txt"
if os.path.exists(path):
    os.remove(path)
path_apps = path_logs + "/apps_log_service.txt"
if os.path.exists(path_apps):
    os.remove(path_apps)

# Mouse/keyboard event log, kept open in append mode for the listener callbacks.
file_logs = open(path, 'a')
# PIDs of windows observed during the current sampling interval (module global).
pids = []
def read_window_pids():
global pids
f = os.popen("wmctrl -l")
lines=f.read().split("\n")
windows = []
for line in lines:
line = line.replace(" ", " ")
win = tuple(line.split(" ", 3))
windows.append(win)
for win in windows:
if(win[0]!=''):
f = os.popen("xprop -id "+win[0]+" _NET_WM_PID")
lines = f.read().split("\n")
pid_split=lines[0].split(" ")
pid=int(pid_split[2])
pids.append(pid)
return True
# Obtain process pids using app name
def find_pids_by_name(processName):
    """Return the PIDs of every running process whose name contains
    *processName* (case-insensitive substring match)."""
    needle = processName.lower()
    matches = []
    # Walk the process table; entries can vanish or be inaccessible mid-scan.
    for proc in psutil.process_iter():
        try:
            info = proc.as_dict(attrs=['pid', 'name', 'create_time'])
            if needle in info['name'].lower():
                matches.append(info.get('pid'))
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass
    return matches
#write key strokes into the log file, including key, action Press/release, and foreground app
def print_pressed_keys(e):
    """Keyboard-hook callback: log a KP (key press) or KR (key release) line
    with the key name and the executable currently in the foreground.
    Any failure (no active window, vanished process) is silently ignored.
    """
    global file_logs
    try:
        with open(os.devnull, 'w') as devnull:
            # PID of the window currently holding focus.
            pid = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"], stderr=devnull).decode("utf-8").strip())
        if (pid > 0):
            exeFirstPlane = psutil.Process(pid).name()
            # The event repr looks like "KeyboardEvent(<key name> <down|up>)";
            # strip the wrapper, then split off the trailing action word.
            text_event = str(e)
            text_event = text_event[14:-1]
            parts_text = text_event.split(' ')
            # Re-join every token except the last to recover multi-word key names.
            key = ''.join(parts_text[i] for i in range(0, len(parts_text) - 1))
            key = key.lower()
            press = parts_text[len(parts_text) - 1]
            if press == 'down':
                file_logs.write(str(current_time()) + ",KP,{0},{1}\n".format(key, exeFirstPlane))
            elif press == 'up':
                file_logs.write(str(current_time()) + ",KR,{0},{1}\n".format(key, exeFirstPlane))
    except:
        pass
#Write mouse movement events into the log file
def on_move(x, y):
    """Mouse-listener callback: log an MM line with the pointer position and
    the foreground executable. Failures are silently ignored."""
    global file_logs
    try:
        with open(os.devnull, 'w') as devnull:
            pid = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"], stderr=devnull).decode("utf-8").strip())
        if (pid > 0):
            exeFirstPlane = psutil.Process(pid).name()
            file_logs.write(str(current_time()) + ",MM,{0},{1},{2}\n".format(x, y, exeFirstPlane))
    except:
        pass
    # file_logs.write(str(current_time()) + ",MM,{0},{1},{2}\n".format(x, y, "-"))
# Pynput listener functions disabled due to current error
"""
def on_press(key):
try:
with open(os.devnull, 'w') as devnull:
pid = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"],stderr=devnull).decode("utf-8").strip())
if (pid[-1] > 0):
exeFirstPlane = psutil.Process(pid[-1]).name()
print(key)
file_logs.write(str(current_time()) + ",KP,{0},{1}\n".format(key, exeFirstPlane))
except:
pass
# file_logs.write(str(current_time()) + ",KP,{0},{1}\n".format(key, "-"))
def on_release(key):
try:
with open(os.devnull, 'w') as devnull:
pid = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"],stderr=devnull).decode("utf-8").strip())
if (pid[-1] > 0):
exeFirstPlane = psutil.Process(pid[-1]).name()
file_logs.write(str(current_time()) + ",KR,{0},{1}\n".format(key, exeFirstPlane))
#if (key == Key.f12):
# ListenerKeyboard.stop()
# ListenerMouse.stop()
except:
pass
# file_logs.write(str(current_time()) + ",KR,{0},{1}\n".format(key, "-"))
"""
#Write mouse click events into the log file
def on_click(x, y, button, pressed):
    """Mouse-listener callback: log an MC line with position, button,
    press/release state and the foreground executable. Failures are
    silently ignored."""
    global file_logs
    try:
        with open(os.devnull, 'w') as devnull:
            pid = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"], stderr=devnull).decode("utf-8").strip())
        exeFirstPlane = psutil.Process(pid).name()
        file_logs.write(
            str(current_time()) + ",MC,{0},{1},{2},{3},{4}\n".format(x, y, button, pressed, exeFirstPlane))
    except:
        pass
    # file_logs.write(str(current_time()) + ",MC,{0},{1},{2},{3},{4}\n".format(x, y, button, pressed, "-"))
#Write scrolling events into the log file
def on_scroll(x, y, dx, dy):
    """Mouse-listener callback: log an MS line with position, scroll deltas
    and the foreground executable. Failures are silently ignored."""
    global file_logs
    try:
        with open(os.devnull, 'w') as devnull:
            pid = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"], stderr=devnull).decode("utf-8").strip())
        if (pid > 0):
            exeFirstPlane = psutil.Process(pid).name()
            file_logs.write(str(current_time()) + ",MS,{0},{1},{2},{3},{4}\n".format(x, y, dx, dy, exeFirstPlane))
    except:
        pass
    # file_logs.write(str(current_time()) + ",MS,{0},{1},{2},{3},{4}\n".format(x, y, dx, dy, "-"))
#Enable listener for mouse events (started later, inside launch())
ListenerMouse = mouse.Listener(on_move=on_move, on_click=on_click, on_scroll=on_scroll)
#Pynput keyboard listener disabled based on accents errors:
#https://github.com/moses-palmer/pynput/issues/118
#ListenerKeyboard = pynput.keyboard.Listener(on_press=on_press, on_release=on_release)
#Class implementing the keyboard event listener thread
class thread_keyboard(threading.Thread):
    """Worker thread that hooks global keyboard events through the bundled
    `keyboard` module and forwards them to print_pressed_keys."""

    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID  # numeric identifier supplied by the caller
        self.name = name          # thread name (also used by threading.Thread)
        self.counter = counter    # stored but not read anywhere in this class

    def run(self):
        # Register the logging callback, then block processing events.
        hook(print_pressed_keys)
        wait()

    def stop(self):
        # No clean shutdown implemented; the hook runs until process exit.
        pass
#Launching function
def launch():
    """Main monitoring loop.

    Starts the keyboard and mouse listeners, then samples the active
    windows every ~2 seconds: per-process CPU/memory, total device CPU,
    and network byte deltas are appended to the apps log. When the current
    time window elapses, the collected logs are handed to extract_features
    (which returns the next window length in seconds), an authentication
    score is fetched, and a desktop notification is shown at most once per
    hour. Runs for as long as the mouse listener thread stays alive.
    """
    #Consts
    global pids
    global file_logs
    total_bytes_sent = 0
    total_bytes_recv = 0
    # Minimum gap between desktop notifications: one hour in milliseconds.
    const_notification = 3600000

    #Close possible open files and prepare output files
    file_logs.close()
    try:
        os.remove(path)
    except Exception as e:
        #print("ERROR" + str(e))
        pass
    file_logs = open(path, 'a')
    # Redirect stdout/stderr of the whole service into the work directory.
    sys.stdout = open(path_logs + '/output', 'w')
    sys.stderr = open(path_logs + '/errors', 'w')

    # Thread launch for mouse and keyboard event listening
    #ListenerKeyboard.start()
    t = thread_keyboard(1, "keyboard_thread", 1)
    if not t.is_alive():
        t.start()
    if not ListenerMouse.is_alive():
        ListenerMouse.start()

    #Get current time stamp (start of the first sampling window)
    mark_time = current_time()

    # Read window size. If no window size is saved, the default value is 60 s.
    window = 60
    mark_notification = 0
    if os.path.exists(path_logs + "/window"):
        file_window = open(path_logs + "/window", 'r')
        try:
            window = int(file_window.read())
        except:
            window = 60
        file_window.close()

    #While mouse listener thread is alive, execute the application logic.
    #Instead of while True, keeping track of the mouse thread allows to
    #detect failures and re-launch the app.
    while ListenerMouse.isAlive():
        #Read pids of active windows and get current process in foreground
        read_window_pids()
        processs_aplication = 0
        try:
            with open(os.devnull, 'w') as devnull:
                pid_current = int(sp.check_output(["xdotool", "getactivewindow", "getwindowpid"], stderr=devnull).decode(
                    "utf-8").strip())
            name_process_current = psutil.Process(pid_current).name()
            # Make sure the foreground PID is sampled first.
            if not pid_current in pids:
                pids.insert(0, pid_current)
            # How many processes share the foreground application's name.
            processs_aplication = len(find_pids_by_name(name_process_current))
        except Exception as e:
            #print("ERROR" + str(e))
            name_process_current = "-"

        #Read cpu and memory usage for each windowed process and in total
        cpu_per_process = {}
        memory_per_process = {}
        memory_total = 0
        cpu_total = 0
        for pid in pids:
            try:
                p = psutil.Process(pid)
                cpu_total_process = 0
                memory_total_process = 0
                # Average CPU over five 0.2 s samples.
                for i in range(0, 5):
                    cpu_total_process += p.cpu_percent(0.2)
                cpu_total_process = round(cpu_total_process / 5, 2)
                # Memory is summed over every process sharing this name.
                for pid2 in find_pids_by_name(p.name()):
                    p = psutil.Process(pid2)
                    memory_total_process += p.memory_percent()
                p = psutil.Process(pid)
                cpu_per_process[p.name()] = cpu_total_process
                cpu_total += cpu_total_process
                memory_total_process = round(memory_total_process, 2)
                memory_per_process[p.name()] = memory_total_process
                memory_total += memory_total_process
            except:
                pass
        cpu_total_device = psutil.cpu_percent()

        #Read sent/received bytes and compute deltas since the last sample
        red = psutil.net_io_counters()
        if total_bytes_recv == 0:
            total_bytes_recv = red.bytes_recv
        if total_bytes_sent == 0:
            total_bytes_sent = red.bytes_sent
        diff_bytes_recv = red.bytes_recv - total_bytes_recv
        diff_bytes_sent = red.bytes_sent - total_bytes_sent
        total_bytes_recv = red.bytes_recv
        total_bytes_sent = red.bytes_sent

        memory_total = round(memory_total, 2)
        file_logs_2 = open(path_apps, 'a')
        #Build app and resource usage log entry and write it to the log file
        if name_process_current != "-" and name_process_current in cpu_per_process.keys():
            file_logs_2.write(
                str(current_time()) + ",APPS,{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}\n".format(len(pids), name_process_current, cpu_per_process[name_process_current], cpu_total, memory_per_process[name_process_current], memory_total, cpu_total_device, diff_bytes_recv, diff_bytes_sent, str(processs_aplication)))
        file_logs_2.close()
        pids = []

        # Check if the time window is over
        if mark_time + window * 1000 < current_time():
            # Update timestamp
            mark_time = current_time()
            #Close log file and call the function responsible of generating and
            #sending the feature vector to the server. The server answers with
            #the new time window duration in seconds.
            file_logs.close()
            window = extract_features.extract_features(path, path_apps, window)
            # Persist the (possibly updated) time window for the next run.
            file_window = open(path_logs + "/window", 'w')
            file_window.write(str(window))
            file_window.close()
            # Reset log files
            try:
                os.remove(path)
                os.remove(path_apps)
            except Exception as e:
                #print("ERROR"+str(e))
                pass
            file_logs = open(path, 'a')
            # Get evaluation results from the server
            result = extract_features.obtain_auth_puntuation()
            text_notification = ""
            # Map the sentinel result codes to user-facing messages.
            if result == 999:
                text_notification = "Sent training data."
            elif result == 500:
                text_notification = "Unable to reach the server."
            else:
                text_notification = "Evaluation result: " + str(result)
            #Show desktop notification, rate-limited to one per hour
            if mark_notification + const_notification < current_time():
                mark_notification = current_time()
                try:
                    os.system("notify-send 'Authcode: " + str(text_notification) + "'")
                except Exception as e:
                    print("BEHAVIOURAL DATA SENT")
        time.sleep(2)
| [
"[email protected]"
] | |
ad5e5337a7d729ca9ec143cbc26655e1a5877b6c | 55591c65dd46c12891e10cc360680f4f36fa1525 | /ruedalaSessions/migrations/0001_initial.py | 3e2fa03bda115aaa0f7f8167cf5dc486c554c1d8 | [] | no_license | maridelvalle15/ruedala-app | bfdc8f52b5c1d60e9ea8e6e66a13925de0d8531e | e5aa22084383b0b3f897b224c2664abee0101e5a | refs/heads/master | 2021-06-08T22:30:41.087802 | 2016-12-17T16:25:17 | 2016-12-17T16:25:17 | 66,802,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,706 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
    """Initial auto-generated migration for the ruedalaSessions app.

    Creates the credit-application data model: broker/seller links,
    credit requests, applicant/executive profile data, uploaded
    documents and references, and a user profile with an activation key.
    """

    dependencies = [
        # Depends on whichever user model the project configures (AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Broker <-> seller association.
        # NOTE(review): `vendedor` is a ForeignKey with unique=True; Django
        # conventionally models this as a OneToOneField -- confirm intent.
        migrations.CreateModel(
            name='CorredorVendedor',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('corredor', models.ForeignKey(related_name='corredor', to=settings.AUTH_USER_MODEL)),
                ('vendedor', models.ForeignKey(related_name='vendedor', to=settings.AUTH_USER_MODEL, unique=True)),
            ],
            options={
                'verbose_name_plural': 'CorredorVendedors',
            },
        ),
        # A credit request: amount, term in days, request/deadline dates and status.
        # NOTE(review): `fecha_solicitud`/`fecha_tope` are DateTimeFields whose
        # default is `datetime.date.today` (a date, not a datetime) -- verify.
        migrations.CreateModel(
            name='Credito',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('monto', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
                ('dias', models.IntegerField(default=5)),
                ('fecha_solicitud', models.DateTimeField(default=datetime.date.today)),
                ('fecha_tope', models.DateTimeField(default=datetime.date.today)),
                ('status', models.CharField(default=b'Recibido', max_length=30)),
            ],
        ),
        # Contact data for an executive user (one-to-one with the user model).
        migrations.CreateModel(
            name='DatosEjecutivo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('direccion', models.CharField(max_length=100, null=True, blank=True)),
                ('tlf', models.CharField(default=0, max_length=20, validators=[django.core.validators.RegexValidator(regex=b'^[0-9]*$', message=b'Este campo es num\xc3\xa9rico. Introduzca un n\xc3\xbamero')])),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Personal and employment data for a credit applicant.
        migrations.CreateModel(
            name='DatosSolicitante',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('fecha_nacimiento', models.DateField(default=datetime.date.today)),
                ('identificador', models.CharField(default=b'', max_length=100)),
                ('ingreso', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
                ('telefono', models.CharField(default=0, max_length=100, validators=[django.core.validators.RegexValidator(regex=b'^[0-9]*$', message=b'Este campo es num\xc3\xa9rico. Introduzca un n\xc3\xbamero')])),
                ('lugar_trabajo', models.CharField(default=b'', max_length=100)),
                ('ocupacion', models.CharField(default=b'', max_length=100)),
                ('direccion', models.CharField(default=b'', max_length=100)),
                ('cargo', models.CharField(default=b'', max_length=100)),
                ('salario', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
                ('fecha_ingreso', models.DateField(default=datetime.date.today)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Personal documents uploaded by an applicant.
        migrations.CreateModel(
            name='DocumentosPersonales',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('doc', models.FileField(null=True, upload_to=b'', blank=True)),
                ('nombre', models.CharField(default=b'', max_length=100)),
                ('solicitante', models.ForeignKey(blank=True, to='ruedalaSessions.DatosSolicitante', null=True)),
            ],
        ),
        # Bank reference documents uploaded by an applicant.
        migrations.CreateModel(
            name='ReferenciasBancarias',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('doc', models.FileField(null=True, upload_to=b'', blank=True)),
                ('nombre', models.CharField(default=b'', max_length=100)),
                ('solicitante', models.ForeignKey(blank=True, to='ruedalaSessions.DatosSolicitante', null=True)),
            ],
        ),
        # Personal reference documents uploaded by an applicant.
        migrations.CreateModel(
            name='ReferenciasPersonales',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('doc', models.FileField(null=True, upload_to=b'', blank=True)),
                ('nombre', models.CharField(default=b'', max_length=100)),
                ('solicitante', models.ForeignKey(blank=True, to='ruedalaSessions.DatosSolicitante', null=True)),
            ],
        ),
        # Account-activation bookkeeping for each user.
        # NOTE(review): `key_expires` defaults to the fixed date 2016-10-03
        # evaluated at migration-definition time -- likely meant to be dynamic.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('activation_key', models.CharField(max_length=40, blank=True)),
                ('key_expires', models.DateTimeField(default=datetime.date(2016, 10, 3))),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'User profiles',
            },
        ),
        # Link each credit request to its applicant.
        migrations.AddField(
            model_name='credito',
            name='solicitante',
            field=models.ForeignKey(to='ruedalaSessions.DatosSolicitante'),
        ),
    ]
| [
"[email protected]"
] | |
9a0068199f4b5e1953a28e30c2903fd6378f5894 | 349253d0463efacd27303a786ad57d311f10221b | /HW6/hw6-theirs.py | 4291d09310146de323972ccad630bc94cb1c4fc2 | [] | no_license | ngutro25/csf | 5f247cfb224f213c2ed3e71f1c021feef619e96f | 1195029253b9812d6b56a1b01f2467660fd928c1 | refs/heads/master | 2021-01-16T19:00:01.366949 | 2013-12-13T07:56:34 | 2013-12-13T07:56:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,878 | py | """
Five Key Concepts for being able to complete problem 1 from homework 5
1. Understanding dictionaries and being able to use the keys and the benefits of using them
2. Understanding For loops.
3. Understanding calling and defining functions.
4. Understanding the scopes like when to indent and unindent
5. Understanding Indexing and the difference with indexing on dictionaries and lists
note: not in this order
"""
"""
dictionary = {}
dictionary["Kyle"] = 10
dictionary["Ahmed"] = 5
# Problem 1 (Dictionaries and lists)
# Difference between lists and dictionaries
# A dictionary is noted with {} similar to a set, however an example of a dictionary would have a ":" inside of the brackets
# the reason for this is because a dictionary has keys and values, such as,
print "This is an example of a dictionary."
print dictionary
part1 = raw_input("You go ahead and create your own dictionary: ")
print part1
print "This is the name of the dictionary and the contents within it:", 'dictionary = {"Kyle" : 10, "Ahmed" : 5}'
# This dictionary shows everyones favorite number.
# The term Kyle is the first key listed in this dictionary (dictionaries order does not matter) and would yield the value of 10 if the key "Kyle" was called.
# If the key "Ahmed" was called it would yield the value of 5
print "Example of how to call a key in a dictionary: dictionary[\"Kyle\"]"
print "Would return: ", dictionary["Kyle"]
print "Another example: dicionary[\"Ahmed\"]"
print "Would return: ", dictionary["Ahmed"]
# The next concept to understand with dictionaries is how to add another key and value to the dictionary.
print "An example of adding contents to a dictionary would look like this: dictionary[\"Ian\"] = 14"
dictionary["Ian"] = 14
print "To see the new content in the dictionary, you would simply type the name of the dictionary: dictionary"
print "Would return: ", dictionary
"""
print "First concept we will introduce is dictionaries."
print "Here is an example of the contents in a dictionary: "
dictionary = {"Kyle" : 10, "Ahmed" : 5}
print dictionary
print "\nThe way you name a dictionary is the same as a variable,"
print "as long as its not reserved by python its fine to use it."
print "Go ahead and create a dictionary, lets make an empty dictionary for now,"
print "should look like this: <dictionary name> = {}."
print "Name the dictionary \"test_diction\""
user_diction = raw_input("\nEnter here: ")
test_diction = {}
leave = "n"
while leave == "n":
if user_diction == "test_diction = {}":
print "Correct!"
leave = "y"
else:
print "Incorrect"
print "Remember it should be and empty ditctionary like this: <dictionary name> = {}."
user_diction = raw_input("Try again: ")
move_on = raw_input("Press enter to move on: ")
print "\nGood so now we want to add contents to the dictionary."
print "Adding contents is simple, you name the key and give it the value."
print "An example would look like this: "
print "test_diction[\"Ian\"] = 14"
print 'The key being "Ian" and the value being "14"'
move_on = raw_input("Press enter to move on: ")
print "\nSo now we are going to have you go ahead and add some content to your dictionary."
print "The dictionary we will make will be of fruits and their prices, name your first key apple,"
print "and the value of it is 0.45 cents. (dont put the word cents just the number)"
print "Using the dictionary you already made named \"test_diction\""
user_add = raw_input("Enter here: ")
test_diction["apple"] = "0.45"
leave = "n"
while leave == "n":
if user_add == 'test_diction["apple"] = 0.45' or user_add == 'test_diction["Apple"] = 0.45':
print "Correct!"
leave = "y"
else:
print "Incorrect"
print 'Remember we want a key name "apple" and a value of "0.45"'
user_add = raw_input("Try again: ")
move_on = raw_input("Press enter move on: ")
print "\nGreat now you can make a dictionary and add content into it!"
print "Your current dictionary should look like this: "
print '{"apple" : 0.45}'
move_on = raw_input("Press enter to move on: ")
print "\nNow we want you to go ahead and add a few more items,"
print "go ahead and make two new keys, first named pear with the value of 1.00,"
print "and cherries valued at 2.50. Do the pear first then the cherries"
pear_key = raw_input("Enter here: ")
leave = "n"
while leave == "n":
if pear_key == 'test_diction["pear"] = 1.00' or pear_key == 'test_diction["Pear"] = 1.00':
print "Great!"
leave = "y"
else:
print "Incorrect."
print "If you cannot remember how to add content into you dictionary scroll back up."
pear_key = raw_input("Try again: ")
print "\nNow do the cherries key and value."
cherries_key = raw_input("Enter here: ")
leave = "n"
while leave == "n":
if cherries_key == 'test_diction["cherries"] = 2.50' or cherries_key == 'test_diction["cherries"] = 2.50':
print "Great!"
leave = "y"
else:
print "Incorrect!"
print "Scroll back up if you cannot remember how to add conent into the dictionary."
cherries_key = raw_input("Try again: ")
move_on = raw_input("Press enter to move on.")
test_diction["pear"] = 1.00
test_diction["cherries"] = 2.50
print "\nAlright now you have a dictionary with a few keys and values in it."
print "This is what your dictionary should look like so far: "
print test_diction
move_on = raw_input("Press enter to move on: ")
print "\nNow that you are able to add content into the dictionary we'll show you how to index it."
print "Lists and dictionaries are a little different with indexing."
print "With a given list named lista = [25,42,13,46]."
print "lista[2] would return 13. With dictionaries you have to call the key."
move_on = raw_input("Press enter to move on: ")
print '\nAn example from my first dictionary would look like this: dictionary["Ahmed"]'
print "Would return: ", dictionary["Ahmed"]
print "Go ahead and access the pear key so we can get its value."
key_user = raw_input("Enter here: ")
leave = "n"
while leave == "n":
if key_user == 'test_diction["pear"]':
print "Correct!"
print test_diction["pear"]
leave = "y"
else:
print "Incorrect!"
print "If you need help scroll up on how to index a dictionary."
key_user = raw_input("Try again: ")
move_on = raw_input("Press enter to move on:")
print "\nNow we will introduce nested data structures."
print "An example of a nested data structure is similar to nested loops."
print "There is a number of different types of data structures, we will use lists and dictionaries."
print "Here is an actual example of what one might look like: "
print
dictionary["Ian"] = 14
lista = [dictionary, test_diction]
print lista
print '\nSo this example is a list, with two dictionaries inside of it. We will call this "lista"'
move_on = raw_input("\nPress enter to move on:")
print "Now we want to index a specific key inside of the nested data structure."
print "The syntax of what it would look like grabbing a key from a dictionary that is inside a list."
print '<name of list>[index][key]'
print '\nA random example: listname[0]["honda"]'
print "This would return the 0th dictionary and the key value of honda within the dictionary."
print "So from our example earlier of a nested data structure,the one called lista,"
print "how would you get the value 14 to be printed from it?"
user_attempt = raw_input("Enter here: ")
leave = "n"
while leave == "n":
if user_attempt == 'lista[0]["Ian"]':
print lista[0]["Ian"]
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print "Remember we want the value of a key, so we need to index the dictionary"
print "that the value 14 is in, and use that key to print 14."
user_attempt = raw_input("Try again: ")
move_on = raw_input("\nPress enter to move on: ")
print "So now you can make dictionaries, and you know how to index nested data structures, "
print "but now you need to know how to make the nested data structures."
print "We will continue to use the same form, a list with dictionaries inside of it."
print "\nNote that this is not the only type of a nested data structure, "
print "you could have dictionaries with lists inside of them, or even keys, with lists as its value."
print "Or many others that we wont go over here."
move_on = raw_input("\npress enter to move on: ")
print "\nThere are several ways to make a nested data structure, we will stick to a simple one."
print "And a way we think is a clean way to do it, specifically with lists with dictionaries inside."
print "First you can make the dictionaries, such as: "
dict1 = {"Apple" : 1.00, "Bananas": 1.25, "Pear" : 1.00}
dict2 = {"Apple" : 1.25, "Bananas" : 1.45, "Pear" : 1.50}
print "\n","dic1 =", dict1
print "dic2 =", dict2
move_on = raw_input("Press enter to move on: ")
print "\nThen you need to make the list with the dictionaries inside of it, we will call it nest_list: "
nest_list = [dict1, dict2]
print "\nnest_list = [dict1,dict2]"
print "\nThe result would look like this: "
print nest_list
print '\nSo using our two dictionaries names "dictionary" and "test_diction",'
print "make a nested data structure. You can call it nested_list."
print "\nNote that since its a list, with dictionaries inside, the order does matter, "
print 'so lets order it with "dictionary" first and "test_diction" next.'
nested_list = [dictionary, test_diction]
user_nest = raw_input("\nEnter here: ")
leave = "n"
while leave == "n":
if user_nest == 'nested_list = [dictionary, test_diction]':
print nested_list
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print "Look at the examples if you're having a hard time remember how to make the structure."
user_nest = raw_input("Try again: ")
move_on = raw_input("Press enter to move on: ")
print "Alright so now you can make a nested data structure for our purpose."
print "Next we want to understand for loops and how useful they can be with lists and dictionaries."
print "\nFirst we will look at a simple for loop: "
print "\nfor i in range(10):"
print " print i"
print "Should return this: "
for i in range(10):
print i
move_on = raw_input("\nPress enter to move on: ")
print "For loops don't just use the range function, they can be used for most any sequence."
print "The syntax of a for loop would look like this:"
print "for <variable name> in <seqence>:"
print " <do code until loop is over> "
move_on = raw_input("\nPress enter to move on: ")
print "An example of another tpye would be:"
print 'for i in "Sequence": '
print " print i "
print "\nShould return: "
for i in "Sequence":
print i
move_on = raw_input("\nPress enter to move on:")
print "\nNow we can show you how useful a for loop can be with data structures, such as a list:"
print "test_list = [5,3,2,7,3,1,4,6,7]"
print "for i in test_list:"
print " print i"
print "Would return this: "
test_list = [5,3,2,7,3,1,4,6,7]
for i in test_list:
print i
move_on = raw_input("\nPress enter to move on:")
print "\nNow we want to go through a dictionary with a for loop: "
print "We can even use the exisiting dictionary we already made named test_diction."
print "for i in test_diction:"
print " print i"
print "\nWould return this: "
test_diction = {"apple" : 0.45, "cherries" : 2.50, "pear" : 1.00}
for i in test_diction:
print i
dict1 = {"Apple" : 1.00, "Bananas": 1.25, "Pear" : 1.00}
dict2 = {"Apple" : 1.25, "Bananas" : 1.45, "Pear" : 1.50}
nest_list = [dict1, dict2]
move_on = raw_input("Press enter to move on:")
print "Now we can go through a nested data structure using the nested structure we made earlier named nest_list, like so: "
print "for i in nest_list:"
print " print i"
print "\nShould return: "
for i in nest_list:
print i
move_on = raw_input("Press enter to move on: ")
print "\nSo now we want to be able to go through specific items in the nested data structure, "
print "to do this we would type something like this: "
print "for i in range(len(nest_list)):"
print ' print nest_list[i]["Apple"]'
for i in range(len(nest_list)):
print nest_list[i]["Apple"]
print "\nSo this loop is using the len() function, which grabs the length of the nest_list, having 2 dictionaries inside of it, "
print "gives it the length of 2. The indent on the inside of the for loop prints the nest_list[i], the first iteration is 0 "
print 'so it would print the 0th dictionary, then the nest part is asking for the key "Apple", which returns the value of apple.'
print 'The next iteration through the loop turns the "i" into a 1, and prints the "Apple" value in the second dictionary.'
move_on = raw_input("Press enter to move on: ")
print "\nNow we will have you go ahead and print out the values of the \"Pear\" in each dictionary using a for loop."
print "Using the same list, nest_list."
user_loop = raw_input("Enter here: ")
print "\nFirst we will have you do the for loop setup, then after that we will write the second half of the code."
leave = "n"
while leave == "n":
if user_loop == "for i in range(len(nest_list)):":
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print "Use the example above to help you with the setup of the for loop, should be the same."
user_loop = raw_input("Try again: ")
loop_scope = raw_input("Enter here: ")
leae = "n"
while leave == "n":
if loop_scope == 'print nest_list[i]["Pear"]':
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print "Look at the example above to help with the format."
loop_scope = raw_input("Try again: ")
print "\nAwesome! Now we will run the same loop but we will use a function inside of the loop to tell us which store has the lowest prices."
from math import sqrt
print "So to start off we will recall how to call functions. An example of a function call would be: sqrt():"
print "The name of this function is sqrt, the parameters would go into the brackets and would return the square root of the arguement entered."
print "An example would be: "
print "sqrt(25)"
print "Would return: "
print sqrt(25)
move_on = raw_input("Press enter to move on: ")
print "\nYou can also use functions inside of a loop, say we want to get the sqaure root of all the numbers from 25 to 50:"
print "for i in range(25,51):"
print " print sqrt(i)"
print "Would return: "
move_on = raw_input("Press enter to move on: ")
print
for i in range(25,51):
print sqrt(i)
move_on = raw_input("Press enter to move on: ")
print "\nWe could even get the square root of each number between 25-50 and put it into a list to store it:"
print "First we need to set an empty list up outside of the loop to have content added into it inside of the loop:"
print "alist = []"
print "\nfor i in range(25,51):"
print " alist.append(sqrt(i))"
print "\nWould return this:"
alist = []
for i in range(25,51):
alist.append(sqrt(i))
print alist
move_on = raw_input("Press enter to move on: ")
def square(x):
    """Return the square of x."""
    return x * x
print "So now we will have you go ahead and use a for loop similar to ours, but instead we will use a different function."
print "This function is called square(). It takes one arguement just like the sqrt function, we want you to make an empty list, "
print 'name the empty list "empty_list". We want you to add the first ten numbers (1-10) and get the square of each number then add '
print "it to the list you made."
move_on = raw_input("Press enter to move on: ")
print 'Remeber to start with making an empty list name "empty_list".'
emptylist = raw_input("Start here: ")
leave = "n"
while leave == "n":
if emptylist == "empty_list = []":
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print "Use the example from above to help setting up."
emptylilst = raw_input("Try again: ")
print "Great! Now lets make the for loop, remember the range is 1-10:"
move_on = raw_input("Press enter to move on: ")
for_loop = raw_input("Enter here: ")
leave = "n"
while leave == "n":
if for_loop == "for i in range(1,11):":
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print "Look at the example above, only difference is the numbers between the range."
for_loop = raw_input("Try again: ")
print "Great now the last part is putiing the information into the list."
move_on = raw_input("Press enter to move on:")
add_to_list = raw_input("Enter here: ")
leave = "n"
while leave == "n":
if add_to_list == "empty_list.append(sqrt(i))":
print "Correct!"
leave = "y"
else:
print "Incorrect!"
print 'The code should look similar to the for loop we showed you earlier just a different name instad of "alist"'
add_to_list = raw_input("Try again: ")
print "Waaaahooooooo look behind you!"
print "\nJust kidding you're done...."
Finished this on my command line | [
"[email protected]"
] | |
54abce4f573f34dd3b64d565bc50f59b993c1d20 | 6529f7bf8fa107d58a9ff3f478c41392115b27f5 | /src/modules/optimizers.py | e8f4e29e1731bfe6d8235e0b96e98657e3431594 | [] | no_license | Limworld98/ARGON | d52451fbbb766ebc1258a2a3066570a91ab6aa4b | 4df80e3fef42a1640e16a434e178d6da600b062f | refs/heads/main | 2023-08-05T22:01:37.119598 | 2021-09-23T15:13:18 | 2021-09-23T15:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import torch
def build_optimizer(args, model, visual_extractor):
    """Build a two-group optimizer: one LR for the visual extractor, one for the rest.

    Args:
        args: config namespace providing ``optim`` (a ``torch.optim`` class
            name such as ``'Adam'``), ``lr_ve``, ``lr_ed``, ``weight_decay``
            and ``amsgrad``.
        model: the full model; its parameters include those of ``visual_extractor``.
        visual_extractor: sub-module trained with its own learning rate ``lr_ve``.

    Returns:
        A ``torch.optim`` optimizer with two parameter groups
        (extractor params at ``lr_ve``, remaining params at ``lr_ed``).
    """
    # Identify extractor parameters by object id; a set gives O(1) membership
    # tests (the original used a list, making the filter O(n*m)).
    ve_param_ids = {id(p) for p in visual_extractor.parameters()}
    # Every parameter that is not part of the visual extractor trains at lr_ed.
    ed_params = [p for p in model.parameters() if id(p) not in ve_param_ids]
    optimizer = getattr(torch.optim, args.optim)(
        [{'params': visual_extractor.parameters(), 'lr': args.lr_ve},
         {'params': ed_params, 'lr': args.lr_ed}],
        weight_decay=args.weight_decay,
        amsgrad=args.amsgrad
    )
    return optimizer
def build_lr_scheduler(args, optimizer):
    """Instantiate the LR scheduler named by ``args.lr_scheduler`` (e.g. ``StepLR``)."""
    scheduler_cls = getattr(torch.optim.lr_scheduler, args.lr_scheduler)
    return scheduler_cls(optimizer, args.step_size, args.gamma)
def build_optimizer_t2t(args, model):
    """Build a single-group optimizer over all model parameters at ``args.lr_ed``."""
    optimizer_cls = getattr(torch.optim, args.optim)
    param_groups = [{'params': model.parameters(), 'lr': args.lr_ed}]
    return optimizer_cls(param_groups,
                         weight_decay=args.weight_decay,
                         amsgrad=args.amsgrad)
"[email protected]"
] | |
30bb3b70e6cb3d92e7de46dbddb7fb30ea2fee2e | 469f37d26708778d8d697183954987e313ffe39f | /Blog Project/blog/blog/settings.py | 7927be564776c8c33527ddf1bb7194b11d7f047a | [] | no_license | deni1231/Blog-Application | a08c164e65e697e19988a69533c39467a3cf68cf | 9dce0f04cfc79d5d7e0fcd5bccba6766e173bebd | refs/heads/master | 2020-08-30T01:01:43.540911 | 2019-10-29T07:05:53 | 2019-10-29T07:05:53 | 218,221,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,681 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vsfw^u@l=2yi!wxaexgdh(cu%p%*0w0a^$-!6x^kizem6ibseg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'users.apps.UsersConfig',
'bloga.apps.BlogaConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT= os.path.join (BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK='bootstrap4'
LOGIN_REDIRECT_URL= 'blog-home'
LOGIN_URL = 'login'
# SMTP e-mail configuration (Gmail over STARTTLS).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Credentials come from the environment so secrets stay out of source control.
EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
# Bug fix: this line previously re-assigned EMAIL_HOST_USER, clobbering the
# user name with the password value and leaving EMAIL_HOST_PASSWORD unset.
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
| [
"[email protected]"
] | |
6c7cdc13cae3d4af6037778d8fd6daa11d8a3293 | 4be4363be240c1ad05c51cb7d9c3a3e9111569d4 | /venv/Scripts/easy_install-script.py | b3e55093dc64b6faca84f5e5b89a3752d58afea2 | [] | no_license | Vec7/django_weather_site | bbef0386d8b2e2269c1c0a87f1489e36a9a72265 | c7f9e7a5c1c6dbe5b21654a80f9b07321dfd8bfc | refs/heads/master | 2021-05-20T02:53:42.943864 | 2020-04-04T21:09:11 | 2020-04-04T21:09:11 | 252,154,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | #!C:\Users\Lana\PycharmProjects\wether\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.0.1','console_scripts','easy_install'
__requires__ = 'setuptools==39.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.0.1', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
3f6cd3be8f129f44a4bc40883819ad366950174d | 2cde67131eb1cdc9d84f7d3a106b758bfe78e279 | /train.py | 32f0fbf38d4580a4151d745ef96143bb88076b0f | [] | no_license | ChingChingYa/HCAN | d89adda93d1fb9de3f972120321f21c986dcdd82 | c1e9f35c4d524d125d65c4611a4e80d40d8fe19a | refs/heads/master | 2022-05-16T22:19:22.418745 | 2020-04-16T16:00:30 | 2020-04-16T16:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,714 | py | import matplotlib.pyplot as plt
import numpy as np
from load_data import load_data, read_dataset, cut_data_len
from sklearn.model_selection import train_test_split
from pmf import PMF
from sklearn.utils import shuffle
# from test import Config
import pickle
# Containers filled during training / evaluation.
train_list = []
test_list = []
test_rmse = []
# NOTE(review): `config = Config()` is commented out here (and the
# `from test import Config` import above is also commented out), yet
# `config.Setup(...)` is still called inside the training loop below --
# as written that call will raise NameError at runtime. Confirm intent.
# config = Config()
aspect_vec = []
sentiment_labels = []
# PMF parameter
ratio = 0.8  # train/test split ratio (logged below)
lambda_U = 0.01  # regularization weight for the user latent matrix
lambda_V = 0.01  # regularization weight for the item latent matrix
latent_size = 6  # dimensionality of the latent factors
learning_rate = 3e-5 # 3e-5
iterations = 1000
lambda_value_list = []
lambda_value_list.append([0.01, 0.01])
if __name__ == "__main__":
alldata = load_data('./data/prouduct_rating_data_1.pickle')
train, test = read_dataset('./data/prouduct_rating_data_11.pickle')
num_users = cut_data_len(alldata, 'reviewerID')
num_items = cut_data_len(alldata, 'asin')
fp = open("log.txt", "a")
fp.write("dataset:" + "Musical_Instruments_5"+"\n")
fp.write("ratio:" + str(ratio)+"\n")
fp.write("latent_factor:" + str(latent_size)+"\n")
fp.write("learning_rate:" + str(learning_rate)+"\n")
for lambda_value in lambda_value_list:
lambda_U = lambda_value[0]
lambda_V = lambda_value[1]
# initialization
pmf_model = PMF(U=None, V=None, lambda_U=lambda_U,
lambda_V=lambda_V, latent_size=latent_size,
momentum=0.8, learning_rate=learning_rate,
iterations=iterations)
s = ('parameters are:ratio={:f}, reg_u={:f}, reg_v={:f}, latent_size={:d},'
+ 'learning_rate={:f}, iterations={:d}')
print(s.format(ratio, lambda_U, lambda_V, latent_size,
learning_rate, iterations))
U = None
V = None
fp.write("=============================== Lambda Value ============="+"\n")
fp.write("lambda_U:" + str(lambda_U)+"\n")
fp.write("lambda_V:" + str(lambda_V)+"\n")
# rmse均方根誤差
rmse_minus = 1
rmse_temp = 0
round_value = 0
model_loss = 0
loss_minus = 1
loss_temp = 0
while loss_minus > 0.001:
print("=============================== Round =================================")
print(round_value)
fp.write("=============================== Round ================================="+"\n")
fp.write(str(round_value)+"\n")
if round_value == 0:
U, V, train_loss = pmf_model(num_users=num_users, num_items=num_items,
train_data=train, aspect_vec=None, U=U, V=V, flag=round_value,
lambda_U=lambda_U, lambda_V=lambda_V)
else:
U, V, train_loss = pmf_model(num_users=num_users, num_items=num_items,
train_data=train, aspect_vec=aspect_vec, U=U, V=V, flag=round_value,
lambda_U=lambda_U, lambda_V=lambda_V)
aspect_vec.clear()
sentiment_labels.clear()
for data in (train):
s = np.multiply(U[int(data[0])], V[int(data[1])])
sentiment_labels.append(s)
print('testing PMFmodel.......')
preds = pmf_model.predict(data=test)
test_rmse = pmf_model.RMSE(preds, test[:, 2])
print("=============================== RMSE =================================")
print('test rmse:{:f}'.format(test_rmse))
fp.write("================================ RMSE =========================="+"\n")
fp.write(str('test rmse:{:f}'.format(test_rmse))+"\n")
print("=============================== LOSS =================================")
print('loss:{:f}'.format(train_loss+model_loss))
fp.write("================================ LOSS =========================="+"\n")
fp.write(str('loss:{:f}'.format(train_loss+model_loss))+"\n")
# abs 絕對值
rmse_minus = abs(rmse_temp - test_rmse)
rmse_temp = test_rmse
# abs 絕對值
loss_minus = abs(loss_temp - train_loss+model_loss)
loss_temp = train_loss+model_loss
print('loss_minus='+str(loss_minus))
if loss_minus > 0.001:
aspect_vec, aspects, loss = config.Setup(sentiment_labels)
model_loss = loss
pickle.dump((aspects),
open('./data/aspect_vec'+str(round_value)+'.pickle', 'wb'))
# aspect_vec.append(sentiment_labels)
round_value = round_value + 1
fp.close()
| [
"[email protected]"
] | |
11abaf21150751218fb4ddb947d41d7745d6339e | 6583b4a83138dc8c17a79af160cf3fd97443248b | /blog/migrations/0001_initial.py | 42be7e249c5d3766705fc48829265c39088c3cc6 | [] | no_license | leejh4851/mywebsite | b4397e3e9d6d8c5b5373b384867b3259565d1cc8 | e416e342a8c8905a5baab0c1adb09d0d5be5bf83 | refs/heads/master | 2020-09-09T08:38:27.708500 | 2019-11-13T07:56:07 | 2019-11-13T07:56:07 | 221,402,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | # Generated by Django 2.2.5 on 2019-11-12 07:22
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='블로그 글의 분류를 입력하세요.(ex:일상)', max_length=50)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('title_image', models.ImageField(blank=True, upload_to='')),
('content', models.TextField()),
('createDate', models.DateTimeField(auto_now_add=True)),
('updateDate', models.DateTimeField(auto_now_add=True)),
('category', models.ManyToManyField(help_text='글의 분류를 설정하세요.', to='blog.Category')),
],
),
]
| [
"[email protected]"
] | |
eb536eb1df7f40bc65b15277fbe448b5a6114b65 | 876cfcdd0eb947b90ca990694efd5a4d3a92a970 | /Python_stack/django/django_intro/dojo_ninjas_shell/dojo_ninjas_shell/settings.py | c8f1a8ab0d452d40f05c9301ab9d86682be43e6b | [] | no_license | lindseyvaughn/Dojo-Assignments | 1786b13a6258469a2fd923df72c0641ce60ccbb2 | 3b37284cdd813b6702f5843c113f7bc7137a56c0 | refs/heads/master | 2023-01-13T20:19:50.152115 | 2019-12-13T23:16:48 | 2019-12-13T23:16:48 | 209,396,128 | 0 | 0 | null | 2023-01-07T11:56:19 | 2019-09-18T20:15:24 | Python | UTF-8 | Python | false | false | 3,154 | py | """
Django settings for dojo_ninjas_shell project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = '3xu9p-+f=^_g7!l#n&6e=j(771onc&d0orjmreht8dnlek%oyu'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG=True; must list served hostnames otherwise.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'apps.dojo_ninja_app',  # project-local app
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'dojo_ninjas_shell.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # search each installed app's templates/ directory
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'dojo_ninjas_shell.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Development default: file-based SQLite in the project root.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
| [
"[email protected]"
] | |
7fdc3a2de6bd16536aa00d4e3af7062310107e9f | b8ed71f3d1a36c119d846e97f1aa7d8ba6774f52 | /82_Remove_Duplicates_from_Sorted_list_2.py | 4eb085d68f83160805f63ac693bcf226f61bafc2 | [] | no_license | imjaya/Leetcode_solved | 0831c4114dd919864452430c4e46d3f69b4bd0cd | 374eb0f23ae14d9638d20bbfe622209f71397ae0 | refs/heads/master | 2023-05-24T17:57:56.633611 | 2023-05-16T06:31:42 | 2023-05-16T06:31:42 | 284,203,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Given a sorted list, drop every value that occurs more than once.

        Builds a fresh result list behind a dummy head: values whose
        neighbour repeats them are skipped entirely; unique values are
        copied over.
        """
        dummy = ListNode()
        tail = dummy
        node = head
        while node:
            if node.next and node.val == node.next.val:
                # Duplicated value: advance past every node carrying it.
                dup = node.val
                while node and node.val == dup:
                    node = node.next
            else:
                # Unique value: append a copy to the result list.
                tail.next = ListNode(node.val)
                tail = tail.next
                node = node.next
        return dummy.next
"[email protected]"
] | |
752b5d96bc0bf04c2ce5f9aca348c59dfd611ece | 6f3f78b7960a10f750caf359ced19c1f7f49273c | /tes.py | b5f5fd6376fb0085bbd69f029da71ded405094dc | [] | no_license | titoalvi/Gemastik-VIII | 515e64b8f856c78e8e1943646672be671c57df19 | 88cd3b033ceac4fa9da7ae5f83b0b48ca87216be | refs/heads/master | 2021-01-10T02:35:36.550863 | 2016-03-08T16:49:25 | 2016-03-08T16:49:25 | 53,428,829 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | import pycurl, json
from StringIO import StringIO
import RPi.GPIO as GPIO
import time
time_now = time.strftime("%H:%M:%S")
#setup GPIO using Broadcom SOC channel numbering
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#GPIO.setup(22,GPIO.OUT)
# set to pull-up (normally closed position)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#setup InstaPush variables
# add your Instapush Application ID
appID = "54efcdd9a4c48ae45dd714d6"
# add your Instapush Application Secret
appSecret = "7412d112d9ebd19e6cfca40a18473445"
pushEvent = "Tes"
pushMessage = "message"
# use this to capture the response from our push API call
buffer = StringIO()
# use Curl to post to the Instapush API
c = pycurl.Curl()
# set API URL
c.setopt(c.URL, 'https://api.instapush.im/v1/post')
#setup custom headers for authentication variables and content type
c.setopt(c.HTTPHEADER, ['x-instapush-appid: ' + appID,
'x-instapush-appsecret: ' + appSecret,
'Content-Type: application/json'])
# create a dict structure for the JSON data to post
json_fields = {}
# setup JSON values
json_fields['event']=pushEvent
json_fields['trackers'] = {}
json_fields['trackers']['message']=pushMessage
#print(json_fields)
postfields = json.dumps(json_fields)
# make sure to send the JSON with post
c.setopt(c.POSTFIELDS, postfields)
# set this so we can capture the resposne in our buffer
c.setopt(c.WRITEFUNCTION, buffer.write)
# uncomment to see the post sent
#c.setopt(c.VERBOSE, True)
# setup an indefinite loop that looks for the door to be opened / closed
while True:
GPIO.wait_for_edge(23, GPIO.RISING)
print("Door Opened!\n")
print ("time_now")
#GPIO.output(22,GPIO.HIGH)
# in the door is opened, send the push request
c.perform()
# capture the response from the server
body= buffer.getvalue()
# print the response
print(body)
# reset the buffer
buffer.truncate(0)
buffer.seek(0)
# print when the door in closed
GPIO.wait_for_edge(23, GPIO.FALLING)
print("Door Closed!\n")
#GPIO.output(22,GPIO.LOW)
# cleanup
c.close()
GPIO.cleanup()
| [
"Tito Alvi Nugroho"
] | Tito Alvi Nugroho |
0305545b721124ac3c2c39173024523d9042e064 | 3571c2d2892a52e0cff72c2db7d91828cf95a6b3 | /smartcab/random_agent.py | 0aa1061c98d007b455878985a807f8e2aa2565ca | [] | no_license | al825/smart_cab | dfacb0dba3fc2b754f41ff3ffc19099096ed2bc0 | fe1ed9fb688d97c291b9a127011502c37212d37e | refs/heads/master | 2021-01-11T10:36:52.889666 | 2016-12-14T07:28:09 | 2016-12-14T07:28:09 | 76,328,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | py | import random
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
class RandomLearningAgent(Agent):
    """A baseline agent that picks a uniformly random action at every step.

    Despite the name, no learning happens: ``update`` ignores state and
    reward entirely (the TODOs mark where a real policy would go).
    """

    def __init__(self, env):
        super(RandomLearningAgent, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
        self.color = 'red'  # override color
        self.planner = RoutePlanner(self.env, self)  # simple route planner to get next_waypoint
        # TODO: Initialize any additional variables here

    def reset(self, destination=None):
        # Point the planner at the next trip's destination.
        self.planner.route_to(destination)
        # TODO: Prepare for a new trip; reset any variables here, if required

    def update(self, t):
        # Gather inputs
        self.next_waypoint = self.planner.next_waypoint()  # from route planner, also displayed by simulator
        inputs = self.env.sense(self)
        deadline = self.env.get_deadline(self)

        # TODO: Update state

        # TODO: Select action according to your policy
        action = random.choice(self.env.valid_actions)  # uniform random -- no policy consulted

        # Execute action and get reward
        reward = self.env.act(self, action)

        # TODO: Learn policy based on state, action, reward

        print ("LearningAgent.update(): deadline = {}, inputs = {}, action = {}, reward = {}".format(deadline, inputs, action, reward) ) # [debug]
def run():
    """Run the random-policy agent for a finite number of trials (1000)."""

    # Set up environment and agent
    e = Environment()  # create environment (also adds some dummy traffic)
    a = e.create_agent(RandomLearningAgent)  # create agent
    e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
    # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

    # Now simulate it
    sim = Simulator(e, update_delay=0.0001, display=False)  # create simulator (uses pygame when display=True, if available)
    # NOTE: To speed up simulation, reduce update_delay and/or set display=False

    sim.run(n_trials=1000)  # run for a specified number of trials
    # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
if __name__ == '__main__':
    # Script entry point: launch the 1000-trial random-baseline simulation.
    run()
| [
"[email protected]"
] | |
636dd8d302c9765233548a4bab273d19dfb6290c | 4436cbbe464e432290ee7bb7c53e9b99800cbe33 | /bin/des-make-image | 4b1b87294f889930de3b56d85e29f45984939556 | [] | no_license | NiallMac/desimage | f5ec5e04f01891c610be3407be935426ba38f55f | ff5d02ff5b143e5b5252df950ded3771b66f1381 | refs/heads/master | 2020-06-04T23:48:08.550864 | 2019-04-19T16:08:43 | 2019-04-19T16:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | #!/usr/bin/env python
import argparse
import desimage
# Command-line interface: one positional DES tile name plus optional
# campaign/type/cleanup flags consumed by the __main__ block below.
parser=argparse.ArgumentParser()
parser.add_argument('tilename',
                    help='e.g. DES0428-4748')
parser.add_argument('--campaign', help='e.g. y5a1_coadd')
parser.add_argument('--types',
                    default='jpg',
                    help="types to make, e.g. jpg,tiff. Default jpg")
parser.add_argument('--noclean',
                    action='store_true',
                    help="don't clean up the downloaded fits files")
if __name__=="__main__":
    args=parser.parse_args()

    # --types is a comma-separated list, e.g. "jpg,tiff".
    types = args.types.split(',')

    # Keep the downloaded FITS files only when --noclean was given
    # (simplified from an if/else that assigned True/False by hand).
    clean = not args.noclean

    desimage.make_image_auto(
        args.tilename,
        campaign=args.campaign,
        clean=clean,
        type=types,
    )
| [
"[email protected]"
] | ||
4bf33edc1729442467db7b83b7869951182f6352 | 5910ae723a4e378309479272ffb03c9e8e409c16 | /flip.py | fa147d3a6ee50af733b8393fa7b10d563414390a | [] | no_license | arturcuringa/compvi | 9425d24a067781799d5f8139fe91e0b70db883c6 | fbbb4b2bb16b46967ed0adb7b353ebe8ac5e180e | refs/heads/master | 2020-04-24T00:52:40.089002 | 2019-03-27T13:33:11 | 2019-03-27T13:33:11 | 171,579,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import sys
import cv2
if __name__ == '__main__':
    # Image path comes from the first command-line argument.
    img_path = sys.argv[1]
    img = cv2.imread(img_path, 1)  # 1 = load as 3-channel BGR color
    # flip code 0 = flip vertically (around the x-axis)
    cv2.imshow('flip', cv2.flip(img, 0))
    cv2.waitKey(0)  # block until any key is pressed
| [
"[email protected]"
] | |
ae21cab553ec681e85a41c12031a5930bb5e0eff | eff5f0a2470c7023f16f6962cfea35518ec0b89c | /Elementary_full/Index Power.py | 5fda4f90a638e1c5d3a63e232c95a4a619ceae99 | [] | no_license | olegJF/Checkio | 94ea70b9ee8547e3b3991d17c4f75aed2c2bab2f | fc51a7244e16d8d0a97d3bb01218778db1d946aa | refs/heads/master | 2021-01-11T00:46:42.564688 | 2020-03-02T13:36:02 | 2020-03-02T13:36:02 | 70,490,008 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # -*- coding: utf-8 -*-
def checkio(a, b):
    """Return a[b] raised to the power b, or -1 when index b is out of range.

    Uses integer ``**`` instead of ``math.pow`` so large results stay exact:
    ``math.pow`` round-trips through a float and loses precision past 2**53.
    The ``int()`` wrap keeps the original behaviour of truncating the float
    produced by a negative exponent.
    """
    try:
        return int(a[b] ** b)
    except (IndexError, TypeError, ZeroDivisionError):
        # Out-of-range index (or 0 ** negative) -> sentinel value, as before,
        # but without the original bare `except` that hid every error.
        return -1
print(checkio([1, 2], 3))
| [
"[email protected]"
] | |
cb2e56d36a985db04c6cb556a6aba7d38cd2068e | b5f233737d73be695c8a377f083f4476399adfeb | /scripts/cheating_detection/display_cheaters.py | a04a8e3cd82b9d6199b55ce458e950900a3fcdaa | [] | no_license | djamrozik/CS411-VegeCheck | cda4510c99504257c041ef33b0f0f556eedf49aa | 8083ed381da3c7bcf31b5b196fafba06ce76f25c | refs/heads/master | 2021-05-31T06:44:37.310470 | 2016-04-23T04:53:39 | 2016-04-23T04:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | import sys
from suspected_cheaters import get_suspects
def display(suspects):
    """Write each suspect and their partners as one '<br>'-terminated chunk.

    `suspects` maps an employee id to an iterable of partner ids; output
    goes straight to stdout (HTML-ish, consumed by a web page).
    """
    for suspect, partners in suspects.items():
        sys.stdout.write('EMPLOYEE' + str(suspect) + ': ')
        for partner in partners:
            sys.stdout.write('EMPLOYEE' + str(partner) + ' ')
        sys.stdout.write("<br>")
        sys.stdout.flush()
display(dict(get_suspects()))
| [
"[email protected]"
] | |
0b6ebd6ce4dd3e6f9efa92f71a31f227c8692ca8 | 7d7e9f10ec12c0fba97716986ae4076b89df9e63 | /starter.py | ff10f4e9e1bce411e9a63170dd19dbf18d767e00 | [] | no_license | showmikgupta/League-of-Legends-Stats | 4e19e8586a08dc7b3e4a4bc86429ddcb83e1b0cd | a9b71cef7fb36a75557686904ffcc5f7b70bc92d | refs/heads/master | 2022-12-03T19:38:42.690276 | 2020-08-27T00:40:01 | 2020-08-27T00:40:01 | 290,302,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,153 | py | import requests
import sys
import json
import RiotConsts as Consts
import Key
def main():
    """Interactive CLI: look up a summoner, print ranked stats, then print
    a summary of that account's most recent match via the Riot API.

    Exits with sys.exit() on any non-200 API response.
    """
    championCodes = generateChampionCodes(Consts.CHAMPION_LIST_URL)

    # Get Summoner Name
    name = input("Enter Summoner Name: ")

    # Request for summoner info
    r = requests.get(f'{Consts.URL["base"]}/summoner/v{Consts.API_VERSIONS["summoner"]}/summoners/by-name/{name}?api_key={Key.KEY}')

    if r.status_code != 200:
        print("There was a problem with your request")
        sys.exit()

    summoner_info = r.json()
    summoner_id = summoner_info['id']
    # NOTE(review): account_id is computed but never used; the matchlist call
    # below uses the hard-coded Consts.ACCOUNT_ID instead -- confirm intent.
    account_id = summoner_info['accountId']

    # Get basic ranked info
    r = requests.get(f'{Consts.URL["base"]}/league/v{Consts.API_VERSIONS["league"]}/entries/by-summoner/{summoner_id}?api_key={Key.KEY}')

    if r.status_code != 200:
        print("There was a problem with your request")
        sys.exit()

    ranked_info = r.json()
    # NOTE(review): indexing [0]/[1] assumes the account has ranked entries;
    # an unranked account would raise IndexError here.
    ranked_solo_info = ranked_info[0]

    # If the first entry is flex queue, take the second entry (solo queue).
    if ranked_solo_info['queueType'] == 'RANKED_FLEX_SR':
        ranked_solo_info = r.json()[1]

    tier = ranked_solo_info['tier']
    rank = ranked_solo_info['rank']
    lp = ranked_solo_info['leaguePoints']
    wins = int(ranked_solo_info['wins'])
    losses = int(ranked_solo_info['losses'])
    wr = round((wins / (wins + losses)) * 100)  # win rate as a whole percent

    print("--------------------------")
    print(summoner_info['name'])
    print(f'{tier} {rank}: {lp} LP')
    print(f'Win Rate: {wr}%')
    print("--------------------------")

    # Get matchlist by account id
    r = requests.get(f'{Consts.URL["base"]}/match/v{Consts.API_VERSIONS["match"]}/matchlists/by-account/{Consts.ACCOUNT_ID}?api_key={Key.KEY}')

    if r.status_code != 200:
        print("There was a problem with your request")
        sys.exit()

    matchlist = r.json()
    gameIds = getGameIds(matchlist)
    game = gameIds[0]  # most recent game id

    # Getting game info from the latest match
    r = requests.get(f'{Consts.URL["base"]}/match/v{Consts.API_VERSIONS["match"]}/matches/{game}?api_key={Key.KEY}')

    if r.status_code != 200:
        print("There was a problem with your request")
        sys.exit()

    match = r.json()

    # with open('match.json', 'w') as f:
    #     json.dump(match, f)
    #     f.close()

    # game duration in minutes
    gameDuration = int(match['gameDuration']) / 60

    target = getPlayerStats(match, name)
    printMatchStats(target, gameDuration, championCodes)
# Returns all the gameIds of the give matches
def getGameIds(matchlist):
    """Extract the gameId of every match in a matchlist API payload."""
    return [match['gameId'] for match in matchlist['matches']]
# Returns game stats of the given player
def getPlayerStats(match, name):
    """Return the participant entry for the summoner called `name`.

    The lookup is case-insensitive. participantId values are 1-based, so the
    matching entry lives at index participantId - 1 of match['participants'].

    Raises:
        ValueError: if no participant matches `name`. (The original fell
        through with participantNo == 0 and silently returned
        participants[-1] -- the *last* player -- after printing a warning.)
    """
    wanted = name.lower()
    for identity in match['participantIdentities']:
        player = identity['player']
        if player['summonerName'].lower() == wanted:
            return match['participants'][identity['participantId'] - 1]
    raise ValueError(f"Could not find player {name!r} in this match")
def printMatchStats(target, gameDuration, championCodes):
    """Print a short performance summary for one player in one match.

    `target` is a participant entry (with a 'stats' dict), `gameDuration`
    is in minutes, and `championCodes` maps numeric key strings to names.
    """
    print("Previous Match Stats:")
    stats = target['stats']

    lane_cs = int(stats['totalMinionsKilled'])
    jungle_cs = int(stats['neutralMinionsKilled'])
    cs_per_min = round((lane_cs + jungle_cs) / gameDuration, 1)

    # Resolve the champion name by matching the numeric champion id
    # against the (string) keys of championCodes.
    champion_played = ""
    for key, champ_name in championCodes.items():
        if int(key) == target['championId']:
            champion_played = champ_name
            break

    print(f"Champion: {champion_played}")
    print(f"CS/Min: {cs_per_min}")
    print(f"Damage Dealth: {int(stats['totalDamageDealtToChampions'])}")
    print(f"Gold Earned: {int(stats['goldEarned'])}")
    print(f"Vision Score: {int(stats['visionScore'])}")
    print(f"Control Wards Bought: {int(stats['visionWardsBoughtInGame'])}")
    print("--------------------------")
def generateChampionCodes(url):
    """Build a mapping of numeric champion key (string) -> champion name.

    Reads the local 'champion.json' data dump; the `url` argument is
    accepted for interface compatibility but not used by this routine.
    """
    with open('champion.json') as data_file:
        champion_data = json.load(data_file)
    return {info['key']: champ for champ, info in champion_data['data'].items()}
main() | [
"[email protected]"
] | |
954df544fde77274249fc7496b0ff01c0f5e03b3 | 70e4c9b6b87e3632a947f7e0fd5cdc294c3ed887 | /pyelong/util/__init__.py | 347f73f5ef541c7ae53118a75d18800c1997d0e7 | [
"MIT"
] | permissive | DeanThompson/pyelong | 314b95b153a52cc26cd69280c2849c3611bfb6a4 | 654a7f6dfe2b813db62aeda995859866b36c2042 | refs/heads/master | 2021-01-13T01:36:50.498786 | 2016-02-24T08:13:59 | 2016-02-24T08:13:59 | 37,655,113 | 1 | 0 | MIT | 2019-10-02T21:36:28 | 2015-06-18T11:17:20 | Python | UTF-8 | Python | false | false | 1,047 | py | # -*- coding: utf-8 -*-
__author__ = 'leon'
from Crypto.Cipher import DES
try:
from Crypto.Util.Padding import pad, unpad
except ImportError:
from .padding import pad, unpad
def utf8(value):
    """Converts a string argument to a byte string.

    If the argument is already a byte string, it is returned unchanged.
    Otherwise it must be a unicode string and is encoded as utf8.

    Adapted from `tornado.escape.utf8`.
    """
    # NOTE: Python 2 only -- `unicode` is not defined on Python 3.
    if isinstance(value, bytes):
        return value  # already encoded; pass through untouched
    # NOTE(review): unlike tornado's utf8, None is NOT passed through here --
    # it falls into this check and raises TypeError.
    if not isinstance(value, unicode):
        raise TypeError('Expected bytes, unicode; got %r' % type(value))
    return value.encode('utf-8')
def des_encrypt(data, key, iv, mode=DES.MODE_CBC):
    """DES-encrypt `data` (CBC by default) and return the hex-encoded result.

    `data` is UTF-8 encoded and padded to the 8-byte DES block size first.
    Python 2 only: relies on `str.encode('hex')`.

    SECURITY NOTE: single DES is cryptographically weak; this is presumably
    required by the remote API's protocol rather than chosen freely.
    """
    cipher = DES.new(key, mode=mode, IV=iv)
    padded_text = pad(utf8(data), cipher.block_size)
    return cipher.encrypt(padded_text).encode('hex')
def des_decrypt(data, key, iv, mode=DES.MODE_CBC):
    """Inverse of des_encrypt: hex-decode, DES-decrypt, then strip padding."""
    cipher = DES.new(key, mode=mode, IV=iv)
    decrypted = cipher.decrypt(data.decode('hex'))  # Python 2 hex codec
    return unpad(decrypted, cipher.block_size)
| [
"[email protected]"
] | |
6bd6fad061c49013fb3591bf4140d4620eeaea35 | 719831dddbafba08f8351da6d6d2bb6c1a41fbd6 | /back-end/app/__main__.py | 44c8d7862fa6bbdda94345e91bf5bba219fd7dbd | [] | no_license | nautilis/zhku.scm | 757266a7231efaf7d0ad8bbc0a0a26d6530c48fc | 3a84a522264e376a1a51756f82fa3dd2d458aa93 | refs/heads/master | 2021-09-14T00:02:40.469833 | 2018-05-06T09:55:19 | 2018-05-06T09:55:19 | 118,904,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | from app.main import main
main() | [
"[email protected]"
] | |
e7a298e50aeb5196887f2bee9769dc01e2752184 | 781196348bca7e0061bf5408fd766306c45e67c0 | /pre.py | a945ad85745b22a559d66b2e6586e515b2126972 | [] | no_license | szhhan/Chinese-poetry-generation- | 814828734ef73375158cbbf2e34023cccc6d2384 | f603b89ee88dbc50698c7e2abf88f93423ee1028 | refs/heads/master | 2020-12-27T21:26:18.413695 | 2020-02-03T21:14:20 | 2020-02-03T21:14:20 | 238,063,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,496 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 16:53:33 2020
@author: sizhenhan
"""
def preprocess(poets,number_to_train):
    """Read a poem corpus and build character-level vocabulary tables.

    `poets` is a text file with one "title:body" poem per line; only the
    first `number_to_train` lines are used. Poems are split into those with
    5-character lines and those with 7-character lines (detected by the
    position of the first comma), each concatenated into one string with
    '|' marking poem boundaries.

    Returns (poems_with_5_words, poems_with_7_words, num_to_word,
    word_to_num_get, words) where `word_to_num_get` maps out-of-vocabulary
    characters to a default id.
    """
    p_list = []
    with open(poets, 'r',encoding='UTF-8') as f:
        for line in f:
            p_list.append(line)
    p_list = p_list[:number_to_train]
    poems_with_5_words = ""
    poems_with_7_words = ""
    for poem in p_list:
        # Drop the title (before the full-width colon) and mark the poem end.
        x = poem.split(':')[1].strip() + "|"
        if len(x) >5 and x[5] == ',':
            poems_with_5_words += x
        # NOTE(review): this comma is ASCII while the branch above uses the
        # full-width ',' -- if the corpus only uses full-width punctuation,
        # the 7-character branch never matches; confirm against the data.
        elif len(x) >7 and x[7] == ",":
            poems_with_7_words += x
    all_poems = poems_with_5_words + poems_with_7_words
    chars = list(all_poems)
    # Count character frequencies over the whole corpus.
    word_count = {}
    for char in chars:
        word_count[char] = word_count.get(char,0) + 1
    # Drop rare characters (frequency <= 5) from the vocabulary.
    low_frequency_words = []
    for word,freq in word_count.items():
        if freq <= 5:
            low_frequency_words.append(word)
    for word in low_frequency_words:
        del word_count[word]
    words = sorted(word_count.items(), key=lambda x: -x[1])  # (char, count), most frequent first
    word_list = sorted(word_count, key=word_count.get, reverse=True)
    word_list.append(" ")  # extra trailing entry -- presumably a blank/padding token; confirm
    word_to_num = {}
    num_to_word = {}
    for i, w in enumerate(word_list):
        word_to_num[w] = i
        num_to_word[i] = w
    # NOTE(review): the OOV default id is len(words) - 1, which is the last
    # *counted* character, not the appended " " (that one has id len(words));
    # verify this off-by-one is intentional.
    word_to_num_get = lambda x: word_to_num.get(x, len(words) - 1)
    return poems_with_5_words, poems_with_7_words, num_to_word, word_to_num_get, words
def clean(poems, length):
    """Keep only poems whose punctuation falls exactly on the line boundaries.

    A valid poem has ',' or '。' at position line_len, then every
    (line_len + 1) characters after that, and nowhere else. `length == 5`
    selects 5-character lines; any other value selects 7-character lines
    (preserving the original if/else behaviour, where the else branch
    hard-coded 7).

    This collapses the original's two copy-pasted branches -- identical
    except for the constants 5/6 vs 7/8 -- into one parameterized loop.
    """
    line_len = 5 if length == 5 else 7
    cleaned = []
    for poem in poems:
        boundary = line_len  # index where the next punctuation mark must sit
        valid = True
        for i, ch in enumerate(poem):
            is_punct = ch == ',' or ch == '。'
            if i == boundary:
                if not is_punct:
                    valid = False
                    break
                boundary += line_len + 1  # next punctuation slot
            elif is_punct:
                # Punctuation anywhere else invalidates the poem.
                valid = False
                break
        if valid:
            cleaned.append(poem)
    return cleaned
"[email protected]"
] | |
c7e589bf961b28ca4c2f7458c7012b1037eaf31e | 5923a870f661255f63faa3b3b5b243f7a7c69059 | /interact_dbdc.py | f7e9c306bf48afbcdbe0d341b22d0e823ff9edb1 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | SeolhwaLee/DialoGPT | 74237648728776a59409343823da195f60c6549f | 45220d493e8d267d703a7abca0497753cc4cda6c | refs/heads/master | 2022-12-14T15:40:23.460077 | 2020-09-22T15:34:49 | 2020-09-22T15:34:49 | 269,487,480 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,107 | py | import json
from os.path import abspath, dirname, exists, join
import argparse
import logging
from tqdm import trange
import tqdm
import torch
import torch.nn.functional as F
import numpy as np
import socket
import os, sys
import re
import logging
from functools import partial
from demo_utils import download_model_folder
import argparse
import subprocess as sp
import time
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
from gpt2_training.train_utils import get_eval_list_same_length, load_model, boolean_string, fix_state_dict_namespace
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
EOS_ID = 50256  # GPT-2 <|endoftext|> token id


def cut_seq_to_eos(sentence, remove_id=(-1,)):
    """Return the prefix of `sentence` up to (not including) the first EOS_ID.

    Token ids listed in `remove_id` (padding is -1 here) are dropped before
    the EOS check. The default is now an immutable tuple instead of the
    original shared mutable list (`remove_id=[-1]`), which is the classic
    mutable-default-argument pitfall; membership tests behave identically.
    """
    sent = []
    for tok in sentence:
        if tok in remove_id:
            continue  # drop padding / ignored ids
        if tok == EOS_ID:
            break
        sent.append(tok)
    return sent
### FROM HUGGING FACE REPO
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
            top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
                whose total probability mass is greater than or equal to the threshold top_p.
                In practice, we select the highest probability tokens whose cumulative probability mass exceeds
                the threshold top_p.
            threshold: a minimal threshold to keep logits

        Note: modifies `logits` in place (filtered entries are set to
        `filter_value`) and also returns the same tensor.
    """
    assert logits.dim() == 1  # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
    top_k = min(top_k, logits.size(-1))
    if top_k > 0:
        # Remove all tokens with a probability less than the last token in the top-k tokens
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p > 0.0:
        # Compute cumulative probabilities of sorted tokens
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probabilities > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        # Back to unsorted indices and set them to -infinity
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value

    # Finally, drop anything below the absolute threshold (no-op by default).
    indices_to_remove = logits < threshold
    logits[indices_to_remove] = filter_value

    return logits
def generate_next_token(model, input_ids, position_ids=None, token_type_ids=None, prev=None, temperature=1, top_k=0,
                        top_p=0, past=None):
    """Sample one next token from the model.

    On the first call (`past` falsy) the full `prev` context plus position /
    token-type ids are fed through the transformer; on later calls only the
    newest token is fed and `past` carries the cached key/value states.
    Note: `input_ids` is accepted but unused -- generation reads `prev`.

    Returns (sampled_token, probability_of_that_token, updated_past).
    """
    with torch.no_grad():
        if not past:
            hidden_states, past = model.transformer(prev, position_ids, token_type_ids, past=past)
        else:
            hidden_states, past = model.transformer(prev, past=past)
        logits = model.lm_head(hidden_states)
        logits = logits[0, -1, :] / temperature  # keep only the last position's logits
        logits = top_filtering(logits, top_k=top_k, top_p=top_p)
        probs = F.softmax(logits.unsqueeze(0), dim=-1)
        prev = torch.multinomial(probs, num_samples=1)  # sample from the filtered distribution
        return prev, probs[0][prev], past
def generate_sequence(model, input_ids, position_ids=None, token_type_ids=None, temperature=1, top_k=0, top_p=0,
                      length=20, past=None, device='cuda'):
    """Autoregressively sample `length` tokens following `input_ids`.

    Returns only the newly generated tokens (the prompt is not included).
    Generation does not stop early at EOS -- callers trim the result with
    cut_seq_to_eos afterwards.
    """
    output = input_ids.new_zeros([input_ids.size(0), 0])  # empty (batch, 0) accumulator
    prev = input_ids
    for i in range(length):
        prev, probs, past = generate_next_token(model, input_ids, position_ids, token_type_ids, prev, temperature,
                                                top_k, top_p, past)
        output = torch.cat((output, prev), dim=1)
    return output
def run_model():
    """CLI entry point: load DialoGPT and either chat interactively or
    batch-respond to an evaluation script file.

    With --conversation=True (the default) user turns are read from stdin
    and model replies printed; otherwise --eval_input is read line by line
    and replies are written as "[ground]\\t<reply>" lines to a timestamped
    file under --eval_output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='',
                        help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--load_checkpoint", '-c', type=str, default='')
    parser.add_argument("--fp16", type=boolean_string, default=False)

    parser.add_argument("--max_seq_length", type=int, default=128)

    parser.add_argument("--generation_length", type=int, default=20)
    parser.add_argument("--max_history", type=int, default=2)
    parser.add_argument("--chateval_multi", type=boolean_string, default=False)

    parser.add_argument("--temperature", type=float, default=1)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)

    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument("--gpu", type=int, default=0)
    parser.add_argument("--conversation", type=boolean_string, default=True,
                        help='This is for the interactive conversation or save the history for the script')
    parser.add_argument("--eval_input", type=str, default='', help='evaluation data input path')
    parser.add_argument("--eval_output", type=str, default='', help='evaluation data output path')

    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    device = torch.device("cuda" if torch.cuda.is_available() and args.use_gpu else "cpu")
    n_gpu = torch.cuda.device_count()
    args.device, args.n_gpu = device, n_gpu

    # Seed every RNG involved so sampling is reproducible.
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    #### load the GPT-2 model
    config = GPT2Config.from_json_file(os.path.join(args.model_name_or_path, './config.json'))
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = load_model(GPT2LMHeadModel(config), args.load_checkpoint, args, verbose=True)
    model.to(device)
    model.eval()

    history = []

    '''This is for the interactive conversation'''
    if args.conversation:
        while True:
            raw_text = input("USR >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("USR >>> ")

            history.append(raw_text)
            # Flatten the history into one token sequence, EOS-separated.
            context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history], [])  # + [EOS_ID]
            context_tokens = torch.tensor(context_tokens, device=device, dtype=torch.long).unsqueeze(0)
            position_ids = torch.arange(0, context_tokens.size(-1), dtype=torch.long, device=context_tokens.device)

            out = generate_sequence(model, context_tokens, position_ids=position_ids,
                                    length=args.generation_length, temperature=args.temperature,
                                    top_k=args.top_k, top_p=args.top_p)

            out = out.tolist()
            # Trim at EOS and strip non-ASCII characters from the reply.
            text = enc.decode(cut_seq_to_eos(out[0])).encode('ascii', 'ignore').decode('ascii')
            print("SYS >>> ", text)
            history.append(text)
            # Keep only the last max_history exchanges (plus the newest turn).
            history = history[-(2 * args.max_history + 1):]
    else:
        script_input_path = str(args.eval_input)
        script_file = open(script_input_path, 'r', encoding='utf-8')
        script_out_path = str(args.eval_output)
        timestr = time.strftime("%Y%m%d-%H%M%S")
        file_name = str(args.eval_input).split('/')[-1].split('.')[0]
        script_response = open(script_out_path + '/' + file_name + '_dialoGPT_medium_' + timestr + '.txt', 'w')

        for raw_text in script_file:
            print("input:", raw_text)
            if args.chateval_multi == False:
                # Single-turn evaluation: each line is one context turn.
                history.append(raw_text.replace('\n', ''))
            elif args.chateval_multi == True:
                # if args.max_history == 2:
                #     turn1 = raw_text.split('</s>')[0].lstrip().replace('’', '')
                #     turn2 = raw_text.split('</s>')[1].lstrip().replace('’', '')
                #     history.append(turn1.replace('\n', ''))
                #     history.append(turn2.replace('\n', ''))
                # elif args.max_history == 3:
                #     turn1 = raw_text.split('</s>')[0].lstrip().replace('’', '')
                #     turn2 = raw_text.split('</s>')[1].lstrip().replace('’', '')
                #     turn3 = raw_text.split('</s>')[2].lstrip().replace('’', '')
                #     history.append(turn1.replace('\n', ''))
                #     history.append(turn2.replace('\n', ''))
                #     history.append(turn3.replace('\n', ''))
                if len(raw_text) > 1:
                    # Multi-turn format: first token is the turn number,
                    # the rest is the utterance; accumulate until a blank line.
                    # n_turn = raw_text.split()[0]
                    utter = raw_text.split()[1:]
                    input_utter = ' '.join(utter)
                    print("utter", input_utter)
                    script_response.write("%s" % (raw_text))
                    history.append(input_utter)
                    continue
                # elif len(raw_text) < 1:
                #     n_turn = 0  # turn count reset
                #     history = []  # history reset
                #     continue

            context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history], [])  # + [EOS_ID]
            context_tokens = torch.tensor(context_tokens, device=device, dtype=torch.long).unsqueeze(0)
            position_ids = torch.arange(0, context_tokens.size(-1), dtype=torch.long, device=context_tokens.device)

            out = generate_sequence(model, context_tokens, position_ids=position_ids,
                                    length=args.generation_length, temperature=args.temperature,
                                    top_k=args.top_k, top_p=args.top_p)

            out = out.tolist()
            text = enc.decode(cut_seq_to_eos(out[0])).encode('ascii', 'ignore').decode('ascii')
            print("SYS >>> ", text)
            # history = history[-(2 * args.max_history + 1):]
            # print(history)
            print("history test:", history)
            # script_response.write("%s\n" % (raw_text))
            script_response.write("[ground]\t%s\n" % (text))
            script_response.write("\n")
            if args.chateval_multi == True:
                history = []  # multi-turn contexts do not carry across examples
            else:
                history.append(text)
                history = history[-(2 * args.max_history + 1):]

        script_response.close()
        print("script response complete!")
print("script response complete!")
if __name__ == '__main__':
    PYTHON_EXE = 'python'
    MODEL_FOLDER = './models'
    DATA_FOLDER = './data'

    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
    )
    logger = logging.getLogger(__name__)

    if os.path.exists(MODEL_FOLDER):
        print('Found existing ./models folder, skip creating a new one!')
        # NOTE(review): this makedirs is a no-op in this branch (the folder
        # already exists and exist_ok=True); the if/else could collapse to one
        # os.makedirs(MODEL_FOLDER, exist_ok=True).
        os.makedirs(MODEL_FOLDER, exist_ok=True)
    else:
        os.makedirs(MODEL_FOLDER)

    #########################################################################
    # Download Model
    #########################################################################
    logger.info('Downloading models...')
    download_model = partial(download_model_folder, DATA_FOLDER=MODEL_FOLDER)

    # model size: could be one of 'small' (GPT2 with 117M), 'medium'(345M) or 'large' (1542M)
    # dataset: one of 'multiref' or 'dstc'
    # from_scratch: True : load model trained from scratch or False: load model trained from fine-tuning the GPT-2
    target_folder = download_model(model_size='medium', dataset='multiref', from_scratch=False)
    logger.info('Done!\n')

    run_model()
| [
"[email protected]"
] | |
adc3d8c1979d8475dc672be640ed7cb55d85ed49 | a34447b5ae3709653fb99d0d2f3e604e3ce83b18 | /libros/migrations/0002_score.py | d33c0353d22a674b58f85bb9113d3165bf9f72b3 | [] | no_license | juandr89/SRIW_trabajo2 | 2e2d46116e935a0f3752a863065d9795ac8a4477 | 53ce5e374e613afaa72642cd3309ecc5da269198 | refs/heads/master | 2021-03-06T07:39:41.880331 | 2020-05-08T19:24:36 | 2020-05-08T19:24:36 | 246,189,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # Generated by Django 3.0.4 on 2020-03-31 00:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the ``Score`` model: one integer rating per (usuario, libro)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('libros', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Score',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('valor', models.IntegerField(blank=True, null=True)),
                # PROTECT: referenced Libro/user rows cannot be deleted
                # while a Score still points at them.
                ('libro', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='libros.Libro')),
                ('usuario', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
4fc47125db5b2ede8cab94898f1f3a42b6b57a4f | 046cd4b68c8bdfb99fbba085fdf5f922be1191b4 | /src/dao/model_information_dao.py | 4303aac4bcb5f60fdd27a590c66f7619add6faf5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | LiuYuWei/service-ml-model-training-evaluation | f51fc9a5f5b2f821c1e2f69c5b346512c58954aa | 5eb297922935047738769c3a7fde4f3e18783fd7 | refs/heads/master | 2022-12-02T14:14:09.149893 | 2020-08-22T08:49:34 | 2020-08-22T08:49:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | """Record confusion matrix dao"""
# coding=utf-8
# import relation package.
import datetime
import pandas as pd
import sqlite3
from sqlalchemy import create_engine
# import project package.
from config.config_setting import ConfigSetting
class ModelInformationDao:
    """DAO that records model-training information in a SQLite database."""

    def __init__(self):
        """Set up the logger, configuration and an (initially empty) engine."""
        config_setting = ConfigSetting()
        self.log = config_setting.set_logger("[model_information_dao]")
        self.config = config_setting.yaml_parser()
        # Created lazily by create_connection().
        self.sqlite_engine = None

    def create_connection(self):
        """Create a database connection to the SQLite database.

        Returns:
            bool: True when the engine was created successfully.
        """
        result = False
        try:
            self.sqlite_engine = create_engine(
                'sqlite:///{}'.format(self.config['sqlite']['file_path']),
                echo=False)
            result = True
        except Exception as e:
            self.log.error("Create connection error: {}".format(e))
        return result

    def execute_table(self, execute_sql):
        """Execute a SQL statement (e.g. a CREATE TABLE).

        Arguments:
            execute_sql: str, sql string.

        Returns:
            bool: True only when the statement executed without error.
        """
        result = False
        try:
            self.sqlite_engine.execute(execute_sql)
            # Bug fix: report success only *after* execution.  The previous
            # version set the flag before executing, so failures were still
            # returned as True.
            result = True
        except Exception as e:
            self.log.error("Executor error: {}".format(e))
        return result

    def save_data(self, data_dict, table_name):
        """Append one row built from *data_dict* to *table_name*."""
        # orient='index' + .T yields a single-row frame whose columns are
        # already data_dict's keys, so no explicit column assignment needed.
        df = pd.DataFrame.from_dict(data_dict, orient='index').T
        df.to_sql(table_name, con=self.sqlite_engine, if_exists='append',
                  index=False)

    def setting_model_database(self, model_name):
        """Ensure the per-model table (timestamp, number_data, model_path) exists."""
        self.create_connection()
        create_model_sql = """ CREATE TABLE IF NOT EXISTS {} (
                                            timestamp datetime PRIMARY KEY NOT NULL,
                                            number_data integer NOT NULL,
                                            model_path string NOT NULL);
                                        """.format(model_name)
        self.execute_table(
            create_model_sql)
"[email protected]"
] | |
598f8cc6475b5a9f493ac90153d53f1413bc1295 | 03117d77dfd87cfbdc77b3449dd8ad67d069b15e | /Programing Basics March2020/Conditional Statements - Lab/Number 1...9 to Text.py | 3f5093e8051b536ae022796afeb3b11e3b165f76 | [] | no_license | zdravkob98/Programming-Basics-with-Python-March-2020 | ba8bed19aa7f1df2d14c18b3ec18d0046811faed | a4e54d313f16ea5a59be6f0b08f3ef1572ce6cd6 | refs/heads/main | 2022-12-24T13:36:53.142471 | 2020-10-06T17:00:12 | 2020-10-06T17:00:12 | 301,796,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | number = int(input())
# Map each supported digit (1-9) to its English word; anything outside the
# table falls back to "number too big".
digit_words = {
    1: 'one',
    2: 'two',
    3: 'three',
    4: 'four',
    5: 'five',
    6: 'six',
    7: 'seven',
    8: 'eight',
    9: 'nine',
}
print(digit_words.get(number, 'number too big'))
"[email protected]"
] | |
705fe577e926d2ea35468bf612f0a3057927b39d | 40f4aa5b646a6c61065dc5c59554ff1588214f0f | /page96c.py | 3ed8c00251b945518d68c7cbd6a90cbf734d0c32 | [] | no_license | barath99/TN-CS-ErrorCorrections | 18e54e4b36240536c4842682ab0898c68dfa1514 | 3d89eccd0a9a6760ad5de91a10a5e058f5840823 | refs/heads/master | 2020-11-27T12:49:05.105602 | 2020-04-12T03:51:37 | 2020-04-12T03:51:37 | 229,447,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def printnos (*nos):
    # Print each positional argument on its own line; returns None.
    for n in nos:
        print(n)
    return
# now invoking the printnos() function with different argument counts
print ('Printing two values')
printnos (1,2)
print ('Printing three values')
printnos (10,20,30)
| [
"[email protected]"
] | |
7c8b548a761ba51df29c4acfa73865b7172c7dd6 | 2f76cb07039049c2f592d57093993d3f56373dfd | /esp/boot.py | 624f5b5c420a3629aa8428ec919781741709528c | [] | no_license | Ignisor/ESP-01-DHT-11 | 22b2a517f30a8b6cb6570fc71827b6206a176863 | 79087b55736584b7dfecc5b87ff0f3a7e1c7f7c9 | refs/heads/master | 2020-04-03T11:17:27.234216 | 2018-10-29T13:54:41 | 2018-10-29T13:54:41 | 155,217,181 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | import esp; esp.osdebug(None)
import gc
from utils import wifi
from utils.pins import LED
# Power up the WiFi radio, then try to join the configured network
# (indicate=True blinks while connecting).
wifi.toggle_wifi(True)
is_connected = wifi.connect(indicate=True)
LED.value(not is_connected)  # 'not' because 0 - is enable for led (active-low LED)
# Reclaim memory used during startup.
gc.collect()
| [
"[email protected]"
] | |
430ff13a3c0ccba5650202397d6d90e358121f45 | 0031467185e50077934785d2b52818e968e2bce8 | /setup.py | 70ec28b92c1fec1f5d55cc61b584ee567a2ade24 | [] | no_license | Crosse/ZenPacks.crosse.OpenBSD | f9a7b9f36f45c7e1a713761161dcf6749dc545b8 | 39673c5618034a380cbf9c39d383c4047cf45d88 | refs/heads/master | 2020-12-24T18:41:46.530703 | 2016-06-07T20:59:01 | 2016-06-07T20:59:01 | 59,491,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,404 | py | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.crosse.OpenBSD"
VERSION = "1.0.0dev"
AUTHOR = "Your Name Here"
LICENSE = ""
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.crosse']
PACKAGES = ['ZenPacks', 'ZenPacks.crosse', 'ZenPacks.crosse.OpenBSD']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ""
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages

# Standard setuptools packaging call for a Zenoss ZenPack egg.  The metadata
# values passed in come from the Zenoss-managed constants defined above.
setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name=NAME,
    version=VERSION,
    author=AUTHOR,
    license=LICENSE,
    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers=COMPAT_ZENOSS_VERS,
    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName=PREV_ZENPACK_NAME,
    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages=NAMESPACE_PACKAGES,
    # Tell setuptools what packages this zenpack provides.
    packages=find_packages(),
    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data=True,
    # Indicate dependencies on other python modules or ZenPacks. This line
    # is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires=INSTALL_REQUIRES,
    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points={
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },
    # All ZenPack eggs must be installed in unzipped form.
    zip_safe=False,
)
| [
"[email protected]"
] | |
651cd1268e961e0cd9720d09e6bf2f654de81f96 | 07f0cb11f5fc85e4d441fe5a3c53ddf1b4d8001e | /PLBART/bleu.py | d23dce0c852e5f6427f633f12d9714a7f99a3cbc | [
"MIT"
] | permissive | saikat107/MODIT | 1e9362d53d3295ec9f133d3b518bdcc75629402c | 91c4b7aecb0ed8e69fc9ccfe47c955db0db32109 | refs/heads/main | 2023-04-14T03:56:34.995332 | 2021-04-24T02:58:08 | 2021-04-24T02:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,115 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python implementation of BLEU and smooth-BLEU.
This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""
import collections
import math
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 smooth=False):
    """Computes BLEU score of translated segments against one or more references.
    Args:
      reference_corpus: list of lists of references for each translation. Each
          reference should be tokenized into a list of tokens.
      translation_corpus: list of translations to score. Each translation
          should be tokenized into a list of tokens.
      max_order: Maximum n-gram order to use when computing BLEU score.
      smooth: Whether or not to apply Lin et al. 2004 smoothing.
    Returns:
      3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
      precisions and brevity penalty.

    NOTE(review): raises ZeroDivisionError when the translation corpus (or
    the references) has zero total length -- confirm callers never pass
    empty corpora.
    """
    matches_by_order = [0] * max_order
    possible_matches_by_order = [0] * max_order
    reference_length = 0
    translation_length = 0
    for (references, translation) in zip(reference_corpus,
                                         translation_corpus):
        # Brevity penalty uses the *shortest* reference per segment.
        reference_length += min(len(r) for r in references)
        translation_length += len(translation)
        # Union (max count) of n-grams over all references for this segment.
        merged_ref_ngram_counts = collections.Counter()
        for reference in references:
            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
        translation_ngram_counts = _get_ngrams(translation, max_order)
        # Clipped matches: candidate n-grams capped by reference counts.
        overlap = translation_ngram_counts & merged_ref_ngram_counts
        for ngram in overlap:
            matches_by_order[len(ngram) - 1] += overlap[ngram]
        for order in range(1, max_order + 1):
            possible_matches = len(translation) - order + 1
            if possible_matches > 0:
                possible_matches_by_order[order - 1] += possible_matches
    precisions = [0] * max_order
    for i in range(0, max_order):
        if smooth:
            # Add-one smoothing (Lin & Och, 2004).
            precisions[i] = ((matches_by_order[i] + 1.) /
                             (possible_matches_by_order[i] + 1.))
        else:
            if possible_matches_by_order[i] > 0:
                precisions[i] = (float(matches_by_order[i]) /
                                 possible_matches_by_order[i])
            else:
                precisions[i] = 0.0
    if min(precisions) > 0:
        # Geometric mean of the n-gram precisions.
        p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
        geo_mean = math.exp(p_log_sum)
    else:
        geo_mean = 0
    ratio = float(translation_length) / reference_length
    if ratio > 1.0:
        bp = 1.
    else:
        # Brevity penalty for translations shorter than the references.
        bp = math.exp(1 - 1. / ratio)
    bleu = geo_mean * bp
    return (bleu, precisions, bp, ratio, translation_length, reference_length)
def _bleu(ref_file, trans_file, subword_option=None):
    """Return smoothed corpus BLEU (scaled to 100, 2 decimals) for two files.

    Each line of *trans_file* is scored against the corresponding line of
    *ref_file*; lines are whitespace-tokenized.  *subword_option* is accepted
    for API compatibility but unused.
    """
    max_order = 4
    smooth = True
    # Single reference per segment, read line-aligned with the hypotheses.
    per_segment_references = []
    with open(ref_file) as ref_handle:
        for line in ref_handle:
            per_segment_references.append([line.strip().split()])
    translations = []
    with open(trans_file) as trans_handle:
        for line in trans_handle:
            translations.append(line.strip().split())
    bleu_score, _, _, _, _, _ = compute_bleu(
        per_segment_references, translations, max_order, smooth)
    return round(100 * bleu_score, 2)
"[email protected]"
] | |
b8da12905fc2b25296b56d88f50eae18c8b6ab40 | eef81f38a81aa5934ac386f6b8de71eb7eee1a05 | /cuenta_listas.py | 2bc06b98a120c6d364d65238db9ad36b84298547 | [] | no_license | valentinomaretto/valentin | 3297f155c90265bfd4f7daab44af3c87556c29cb | ad1cffccb491da6402d5755ffd2ec2b0f8e09b09 | refs/heads/master | 2020-09-04T01:35:52.801198 | 2019-11-09T18:15:05 | 2019-11-09T18:15:05 | 219,630,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | print("CUENTA LISTAS")
# Collect digit-only entries until the user types anything non-numeric
# (e.g. "end").  The accumulator is renamed: the original shadowed the
# built-in `list`, and `isdigit() == True` is redundant.
numeros = []
lista_usuario = "0"
while lista_usuario.isdigit():
    lista_usuario = input("Llena tu lista: [end para terminar]")
    if lista_usuario.isdigit():
        numeros.append(lista_usuario)
        print("Numero añadido {}".format(lista_usuario))
print(numeros)
# len() replaces the original hand-written counting loop.
objetos_en_lista = len(numeros)
print("Objetos en lista: {}".format(objetos_en_lista))
| [
"[email protected]"
] | |
d31549d5bb51a355ccf603ccb38e7dd30adc2d68 | 708bc013e833de18a935b5daacd44c21a94b1cdb | /IX.프로젝트/open_메모장.py | f754a9760bebb4a4ca4739936c6c95db5cf59be5 | [] | no_license | kkyoung28/Programming-Python- | c1f0d6e9d9c1aaa0e1b99b9bd0ca0c45fc6070fc | 536bb936bea79879b4be4b0dd11b54e966239db9 | refs/heads/master | 2020-07-10T20:41:28.084078 | 2019-12-09T00:54:52 | 2019-12-09T00:54:52 | 204,366,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | import pyautogui as pag
import time
if __name__ == '__main__':
    # NOTE(review): all screen coordinates assume one specific desktop
    # layout/resolution -- verify before running elsewhere.
    # Launch Notepad via taskbar/menu clicks.
    pag.moveTo(73, 1057, 2)
    pag.click()
    pag.moveTo(439,543, 2)
    pag.click()
    pag.moveTo(24, 69, 2)
    pag.click()
    # Type "Hello world", then two newlines.
    pag.typewrite("Hello world")
    pag.press("enter")
    pag.press("enter")
    # Switch to the Korean IME; the following keystrokes produce Korean text.
    pag.press("hangul")
    pag.typewrite("qksrkqrnsk tptkddk")
    # Save dialog: double-click the filename field and type the file name
    # (keystrokes for a Korean name while the IME is active).
    pag.hotkey('ctrl', 's')
    pag.moveTo(170, 488)
    pag.doubleClick()
    # pag.typewrite("C:\\Users\\kjhh9\\Downloads")
    pag.typewrite("vkdlTjs dnjfem", interval=0.5)
    pag.moveTo(805, 564, 2)
    pag.click()
    # Plan (translated from Korean):
    # - launch the Notepad program
    # - type "hello world"
    # - press Enter twice
    # - type a Korean greeting ("nice to meet you, world")
    # - save the file
    # - file name: "Python World"
"[email protected]"
] | |
b421e770c7dc315f37572472ec17277b6ee88e2d | c9d705bb6837cfd13ce66bd7d050a033d03f4e7b | /backend/core/urls.py | 074936639489e2c60551fb9a7b0381ba6f79dcb6 | [] | no_license | cesarMtorres/rets_blog_vue | 6574484dce7126881b2f8132d12bfd0453600855 | 927cfe657eb3e3657a1f1ab11e4f8f48da667d6b | refs/heads/master | 2023-09-05T03:59:47.623970 | 2021-11-10T19:45:24 | 2021-11-10T19:45:24 | 428,383,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"[email protected]"
] | |
ec0113c1b2831cfa47b0fd3958bf7d1c62107098 | 27a480568c3dc3d61f9f850ea394fcaf4cbe3037 | /stackbasic.py | 7845abd683fdacfc5c5f98ea971b232fe0e833d5 | [] | no_license | sirisatwika/263698 | 77eeffb8d94f06c9989dd009476367c4843136de | a06192ab7c1d113ec373bb8c1da20027d55e4ea9 | refs/heads/main | 2023-04-15T18:32:57.644792 | 2021-04-28T04:30:34 | 2021-04-28T04:30:34 | 359,067,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | stack = []
# Shared LIFO storage used by push()/pop().
stack = []

def push(val):
    """Put *val* on top of the stack."""
    stack.append(val)

def pop():
    """Remove and return the most recently pushed value."""
    return stack.pop()

# LIFO demo: values come back out in reverse order (prints 1, 2, 3).
push(3)
push(2)
push(1)
print(pop())
print(pop())
print(pop())
| [
"sirisatwikakotha29.com"
] | sirisatwikakotha29.com |
b676944f48f0ef7ddef893a5f59e1a39082303b4 | 0857913ae5cde7481c5bca63ed5081e11f217581 | /feiji/14_爆炸效果.py | c8b53095b3e0aca6e847f85db0bfe07b24d7884a | [] | no_license | nmww/p1804_ceshi | 48c2d93849018d8601f0732c5005395c81490ef1 | 72bf25cc7767371594b41f8919454e46fe178023 | refs/heads/master | 2020-03-15T23:25:43.345763 | 2018-06-27T03:19:55 | 2018-06-27T03:19:55 | 132,393,611 | 53 | 7 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | #-*- coding=utf-8 -*-
import pygame
import time, random
class Base(object):
    """Common state for every drawable object: screen, position, sprite."""
    def __init__(self, screen, x, y, image_name):
        self.screen = screen  # surface all sprites are blitted onto
        self.x = x
        self.y = y
        # Load this object's sprite from disk.
        self.image = pygame.image.load(image_name)
class BasePlane(Base):
    """Shared plane behaviour: bullets, drawing, explosion animation."""
    def __init__(self, screen, x, y, image_name):
        Base.__init__(self, screen, x, y, image_name)
        # 4. Initial position/extent of the plane (sprite is 102x126 px).
        self.rect = pygame.Rect(self.x, self.y, 102, 126)
        self.bullet_list = []  # live bullets fired by this plane
        self.bullet_remove = []  # bullets scheduled for removal
        self.hit = False  # False: not hit yet, so no explosion
        self.bomb_list = []  # frames of the explosion animation
        self.__create_image()  # load the explosion frames
        self.image_num = 0  # display-call counter used to slow the animation
        self.image_index = 0  # index of the current explosion frame
    def __create_image(self):
        # Explosion animation frames, in playback order.
        self.bomb_list.append(pygame.image.load('./images/hero_blowup_n1.png'))
        self.bomb_list.append(pygame.image.load('./images/hero_blowup_n2.png'))
        self.bomb_list.append(pygame.image.load('./images/hero_blowup_n3.png'))
        self.bomb_list.append(pygame.image.load('./images/hero_blowup_n4.png'))
    def display(self):
        if self.hit == True:
            # Play the explosion: advance one frame every 7 display calls.
            self.screen.blit(self.bomb_list[self.image_index],self.rect)
            self.image_num += 1
            if self.image_num == 7:
                self.image_index += 1
                self.image_num = 0
            if self.image_index > 3:
                # Animation finished: pause briefly, then quit the game.
                time.sleep(1)
                exit()
        else:
            self.screen.blit(self.image,self.rect)
        if len(self.bullet_list) > 0:
            for bullet in self.bullet_list:
                bullet.display()
                bullet.move()
                if bullet.judge():
                    # Bullet left the screen: schedule it for removal.
                    self.bullet_remove.append(bullet)
        if len(self.bullet_remove) > 0:
            # Remove off-screen bullets, then empty the pending list.
            for i in self.bullet_remove:
                self.bullet_list.remove(i)
            # self.bullet_remove.clear()
            del self.bullet_remove[:]
    def bomb(self):
        # Mark this plane as hit; display() will play the explosion.
        self.hit = True
class HeroPlane(BasePlane):
    """Creates and displays the player's (hero) plane."""
    def __init__(self, screen):
        super(HeroPlane, self).__init__( screen, 150, 500, './images/hero1.png')
    def fire(self):
        # Spawn a bullet at the plane's current position.
        self.bullet_list.append(Bullet(self.screen, self.rect.x, self.rect.y))
class EnemyPlane(BasePlane):
    """Creates and displays an enemy plane."""
    def __init__(self, screen):
        super(EnemyPlane, self).__init__(screen, 0, 0, './images/enemy0.png')
        self.direction = 'right'  # current horizontal movement direction
    def move(self):
        # Patrol horizontally, bouncing off both edges of the 480px screen.
        if self.direction == 'right':
            self.rect.x += 5
        elif self.direction == 'left':
            self.rect.x -= 5
        if self.rect.x > 480-self.rect.width:
            self.direction = 'left'
        elif self.rect.x < 0:
            self.direction = 'right'
    def fire(self):
        # Fire at random: roughly once every 100 calls.
        if random.randint(1,100) == 78:
            self.bullet_list.append(EnemyBullet(self.screen, self.rect.x, self.rect.y))
class BaseBullet(Base):
    """Common behaviour shared by hero and enemy bullets."""
    def __init__(self, screen ,x ,y ,image_name):
        Base.__init__(self, screen, x, y, image_name)
    def display(self):
        # Draw the bullet at its current position.
        self.screen.blit(self.image,(self.x, self.y))
class Bullet(BaseBullet):
    """A bullet fired upwards by the hero plane."""
    def __init__(self, screen, x, y):
        # Offset so the bullet appears at the plane's nose.
        BaseBullet.__init__(self, screen, x + 40, y + 20, './images/bullet.png')

    def move(self):
        # Travel upwards 4 pixels per frame.
        self.y -= 4

    def judge(self):
        # True once the bullet has left the top of the screen.
        return self.y < 0
class EnemyBullet(BaseBullet):
    """A bullet fired downwards by an enemy plane."""
    def __init__(self, screen, x, y):
        # Offset so the bullet appears below the enemy plane.
        BaseBullet.__init__(self, screen, x + 25, y + 40, './images/bullet1.png')

    def move(self):
        # Travel downwards 4 pixels per frame.
        self.y += 4

    def judge(self):
        # True once the bullet has left the bottom of the 600px screen.
        return self.y > 600
def key_control(hero):
    '''Keyboard listener: moves the hero, fires bullets, triggers the bomb.'''
    move_step = 5
    move_x, move_y = 0, 0
    # ===== event handling
    for event in pygame.event.get():
        # Did the user click the window close button?
        if event.type == pygame.QUIT:
            print("退出游戏...")
            pygame.quit()
            # Exit the process immediately.
            exit()
        elif event.type == pygame.KEYDOWN:
            # A key was pressed?
            if event.key == pygame.K_LEFT:
                # Left arrow: move left by one step.
                move_x = -move_step
            elif event.key == pygame.K_RIGHT:
                # Right arrow: move right by one step.
                move_x = move_step
            elif event.key == pygame.K_UP:
                # Same idea for the vertical axis.
                move_y = -move_step
            elif event.key == pygame.K_DOWN:
                move_y = move_step
            elif event.key == pygame.K_SPACE:
                hero.fire()
            elif event.key == pygame.K_b:
                hero.bomb()
        elif event.type == pygame.KEYUP:
            # Key released: stop moving.
            move_x = 0
            move_y = 0
    # Apply the accumulated movement to the hero's position.
    hero.rect.x += move_x
    hero.rect.y += move_y
def main():  # entry point: wires the game objects together and runs the loop
    # 1. Game window (480x600, 32-bit colour).
    screen = pygame.display.set_mode((480,600),0,32)
    # 2. Background image.
    background = pygame.image.load('./images/background.png')
    # 3. The hero plane.
    hero = HeroPlane(screen)
    # 4.1. Game clock object used to cap the frame rate.
    clock = pygame.time.Clock()
    # 5. Create one enemy plane.
    enemy = EnemyPlane(screen)
    while True:
        screen.blit(background,(0,0))
        hero.display()
        enemy.display()
        enemy.move() # let the enemy plane patrol
        enemy.fire()
        # Cap the refresh rate at 60 frames per second.
        clock.tick(60)
        if hero.rect.y <= 0:
            hero.rect.y = 500
        key_control(hero)
        pygame.display.update()
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
b1e2b0be35c1a98f39800fefa049b1950031588e | a67852f501752583eec252de9a5e29da4a5d2aa2 | /more_sel_topics/windows_tabs.py | cb9624fa6fbdbc308acf7b1e4dc9946a1c7126a1 | [] | no_license | zGeorgi/Selenium_py | c7c0e77123ff4768be3a549b667cf92e24516e32 | 5712e903e0e00abe92a6f3630de4abf506e96873 | refs/heads/master | 2022-12-02T18:30:17.298162 | 2020-08-16T10:14:14 | 2020-08-16T10:14:14 | 271,631,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | from selenium import webdriver
# Demo: handling multiple browser windows with Selenium.
driver = webdriver.Chrome(executable_path="/home/georgi/chromedriver/chromedriver")
driver.get("https://the-internet.herokuapp.com/windows")
# Opens a second window.
driver.find_element_by_link_text("Click Here").click()
#------ window_handles returns the list of open windows -------------
child_windows = driver.window_handles
driver.switch_to.window(child_windows[1])
print(driver.find_element_by_tag_name("h3").text)
#------ switch back to the parent window ---------
driver.switch_to.window(driver.window_handles[0])
print(driver.find_element_by_tag_name("h3").text)
driver.close()
"[email protected]"
] | |
42272ffcb536d23cb7a011ae634d6f4df37f7308 | b59e7e26f9413f989c399d2816ac7a6f919427da | /steps/resources/geometry.py | fdcc96125ea55a10c0743748fbe7de5bb32aa3a0 | [] | no_license | NinfaS/simulation_scripts | 9b48448085632303dde48770882e3f118c9c849a | 806f75c0d8c340be9d6fd655525afc8eacef8a9d | refs/heads/master | 2020-04-01T01:48:28.245826 | 2018-11-08T11:31:22 | 2018-11-08T11:31:22 | 152,753,208 | 0 | 0 | null | 2018-10-12T13:16:52 | 2018-10-12T13:16:52 | null | UTF-8 | Python | false | false | 13,609 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Helper functions for geometry calculations.
'''
import numpy as np
def ray_triangle_intersection(ray_near, ray_dir, triangle):
    """
    Möller–Trumbore ray/triangle intersection in pure python.
    Based on http://en.wikipedia.org/wiki/
    M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm

    Finds t such that ray_near + t * ray_dir lies inside the triangle,
    considering only the forward half of the ray (positive t).

    Parameters
    ----------
    ray_near : array-like shape=(3,)
        Starting point of the ray.
    ray_dir : array-like shape=(3,)
        Directional vector of the ray.
    triangle : array-like shape=(3, 3)
        Vertices of the triangle to intersect with.

    Returns
    -------
    t : float or np.nan
        Scaling of ray_dir to the intersection point, or np.nan when the
        ray misses the triangle (or is parallel to its plane).
    """
    eps = 0.000001
    corner_a, corner_b, corner_c = triangle
    edge_ab = corner_b - corner_a
    edge_ac = corner_c - corner_a
    # Parallel/degenerate case: determinant near zero means the ray
    # direction lies in the triangle's plane.
    p_vec = np.cross(ray_dir, edge_ac)
    determinant = edge_ab.dot(p_vec)
    if abs(determinant) < eps:
        return np.nan
    inv_det = 1. / determinant
    # First barycentric coordinate must lie in [0, 1].
    t_vec = ray_near - corner_a
    u = t_vec.dot(p_vec) * inv_det
    if u < 0. or u > 1.:
        return np.nan
    # Second barycentric coordinate; u + v may not exceed 1.
    q_vec = np.cross(t_vec, edge_ab)
    v = ray_dir.dot(q_vec) * inv_det
    if v < 0. or u + v > 1.:
        return np.nan
    # Distance along the ray; only hits in front of ray_near count.
    t = edge_ac.dot(q_vec) * inv_det
    if t < eps:
        return np.nan
    return t
def get_intersections(convex_hull, v_pos, v_dir, eps=1e-4):
    '''Function to get the intersection points of an infinite line and the
    convex hull. The returned t's are the scaling factors for v_dir to
    get the intersection points. If t < 0 the intersection is 'behind'
    v_pos. This can be used decide whether a track is a starting track.
    Parameters
    ----------
    convex_hull : scipy.spatial.ConvexHull
        defining the desired convex volume
    v_pos : array-like shape=(3,)
        A point of the line.
    v_dir : array-like shape=(3,)
        Directional vector of the line.
    eps : float or None
        Min distance between intersection points to be treated as
        different points.
    Returns
    -------
    t : array-like shape=(n_intersections)
        Scaling factors for v_dir to get the intersection points.
        Actual intersection points are v_pos + t * v_dir.
    '''
    if not isinstance(v_pos, np.ndarray):
        v_pos = np.array(v_pos)
    if not isinstance(v_dir, np.ndarray):
        v_dir = np.array(v_dir)
    # Intersect the forward ray with every triangular facet of the hull.
    t_s = [ray_triangle_intersection(v_pos,
                                     v_dir,
                                     convex_hull.points[simp])
           for simp in convex_hull.simplices]
    t_s = np.array(t_s)
    t_s = t_s[np.isfinite(t_s)]
    # A line should have at max 2 intersection points with a convex hull.
    # If fewer were found on the forward ray, trace the backward ray too
    # and flip the sign of its hits so all t's refer to +v_dir.
    if len(t_s) != 2:
        t_s_back = [ray_triangle_intersection(v_pos,
                                              -v_dir,
                                              convex_hull.points[simp])
                    for simp in convex_hull.simplices]
        t_s_back = np.array(t_s_back)
        t_s_back = t_s_back[np.isfinite(t_s_back)]
        t_s = np.hstack((t_s, t_s_back * (-1.)))
    # Remove similar intersections (e.g. hits on shared facet edges)
    # closer than eps to an already accepted point.
    if isinstance(eps, float):
        if eps >= 0.:
            t_selected = []
            intersections = []
            for t_i in t_s:
                intersection_i = v_pos + t_i * v_dir
                distances = [np.linalg.norm(intersection_i - intersection_j)
                             for intersection_j in intersections]
                if not (np.array(distances) < eps).any():
                    t_selected.append(t_i)
                    intersections.append(intersection_i)
            t_s = np.array(t_selected)
    return t_s
def point_is_inside(convex_hull,
                    v_pos,
                    default_v_dir=np.array([0., 0., 1.]),
                    eps=1e-4):
    '''Function to determine if a point is inside the convex hull.
    A default directional vector is asumend. If this track has an intersection
    in front and behind v_pos, then must v_pos be inside the hull.
    The rare case of a point inside the hull surface is treated as
    being inside the hull.
    Parameters
    ----------
    convex_hull : scipy.spatial.ConvexHull
        defining the desired convex volume
    v_pos : array-like shape=(3,)
        Position.
    default_v_dir : array-like shape=(3,), optional (default=[0, 0, 1])
        See get_intersections()
    eps : float or None
        See get_intersections()
    Returns
    -------
    is_inside : boolean
        True if the point is inside the detector.
        False if the point is outside the detector
    '''
    t_s = get_intersections(convex_hull, v_pos, default_v_dir, eps)
    # Inside <=> the probe line hits the hull exactly twice, once in front
    # (t >= 0) and once behind (t <= 0) of v_pos.
    return len(t_s) == 2 and (t_s >= 0).any() and (t_s <= 0).any()
def distance_to_convex_hull(convex_hull, v_pos):
    '''Closest distance between a point and the convex hull.

    Not implemented yet; a future version could return a signed distance
    (negative inside, positive outside) or be combined with
    point_is_inside().

    Parameters
    ----------
    convex_hull : scipy.spatial.ConvexHull
        defining the desired convex volume
    v_pos : array-like shape=(3,)
        Position.

    Raises
    ------
    NotImplementedError
        Always; the computation has not been implemented.
    '''
    raise NotImplementedError
def get_closest_point_on_edge(edge_point1,
                              edge_point2, point):
    '''Find the point on the segment [edge_point1, edge_point2] that is
    closest to *point*.

    The orthogonal projection of *point* onto the supporting line is
    clamped to [0, 1] along the segment, so the result always lies between
    (or on) the two edge points.

    Parameters
    ----------
    edge_point1 : array-like shape=(3,)
        First edge point.
    edge_point2 : array-like shape=(3,)
        Second edge point.
    point : array-like shape=(3,)
        Point for which the closest point on the edge is sought.

    Returns
    -------
    closest_point : np.ndarray shape=(3,)
        Closest point on the edge.

    Raises
    ------
    ValueError
        If the two edge points coincide (they do not define a line).
    '''
    A = np.asarray(edge_point1, dtype=float)
    B = np.asarray(edge_point2, dtype=float)
    P = np.asarray(point, dtype=float)
    # Bug fix: the original *returned* a ValueError instance instead of
    # raising it.  np.array_equal also makes the degeneracy test safe for
    # array inputs, where `==` would produce an elementwise result.
    if np.array_equal(A, B):
        raise ValueError('Points do not define line.')
    vec_edge = B - A
    vec_point = P - A
    norm_edge = np.linalg.norm(vec_edge)
    # Scalar projection of vec_point onto the edge, normalised so that 0
    # corresponds to A and 1 to B, then clamped onto the segment.
    t_projection = np.dot(vec_edge, vec_point) / (norm_edge**2)
    t_clipped = min(1, max(t_projection, 0))
    closest_point = A + t_clipped * vec_edge
    return closest_point
def get_distance_to_edge(edge_point1, edge_point2,
                         point):
    '''Shortest distance from *point* to the segment defined by
    edge_point1 and edge_point2.

    Parameters
    ----------
    edge_point1 : array-like shape=(3,)
        First edge point.
    edge_point2 : array-like shape=(3,)
        Second edge point.
    point : array-like shape=(3,)
        Point whose distance to the edge is sought.

    Returns
    -------
    distance : float
        Euclidean distance to the closest point on the segment.
    '''
    nearest = get_closest_point_on_edge(edge_point1, edge_point2, point)
    return np.linalg.norm(nearest - point)
def get_edge_intersection(edge_point1,
                          edge_point2, point):
    '''Solve  edge_point1 + u*(edge_point2 - edge_point1)
            = point + t*(0, 1, 0)
    and return t when the edge parameter u lies within [0, 1] (with a
    small tolerance), otherwise nan.

    [Helper function to find out whether a point is inside the IceCube
    2D polygon.]

    Parameters
    ----------
    edge_point1 : array-like shape=(3,)
        First edge point.
    edge_point2 : array-like shape=(3,)
        Second edge point.
    point : array-like shape=(3,)
        Point through which the vertical test line runs.

    Returns
    -------
    t : float
        Offset along (0, 1, 0); nan when the line misses the edge.

    Raises
    ------
    ValueError
        If the two edge points coincide (they do not define a line).
    '''
    A = np.asarray(edge_point1, dtype=float)
    B = np.asarray(edge_point2, dtype=float)
    P = np.asarray(point, dtype=float)
    # Bug fix: the original *returned* a ValueError instance instead of
    # raising it; np.array_equal also works for array inputs.
    if np.array_equal(A, B):
        raise ValueError('Points do not define line.')
    vec_edge = B - A
    vec_point = P - A
    # NOTE(review): a vertical edge (vec_edge[0] == 0) yields inf/nan here
    # via numpy division, matching the original behaviour -- confirm
    # callers only pass edges with distinct x coordinates.
    u = vec_point[0] / vec_edge[0]
    t = u * vec_edge[1] - vec_point[1]
    if u > -1e-8 and u < 1 + 1e-8:
        return t
    return float('nan')
def distance_to_axis_aligned_Volume(pos, points, z_min, z_max):
    '''Function to determine the closest distance of a point
    to the edge of a Volume defined by z_zmin,z_max and a
    2D-Polygon described through a List of counterclockwise
    points.
    Parameters
    ----------
    pos :I3Position
        Position.
    points : array-like shape=(?,3)
        List of counterclockwise points
        describing the polygon of the volume
        in the x-y-plane
    z_max : float
        Top layer of IceCube-Doms
    z_min : float
        Bottom layer of IceCube-Doms
    Returns
    -------
    distance: float
        closest distance from the point
        to the edge of the volume
        negativ if point is inside,
        positiv if point is outside
    '''
    # Build the closed list of polygon edges (last point connects to first).
    no_of_points = len(points)
    edges = [ (points[i], points[ (i+1 )% (no_of_points)])
              for i in range(no_of_points) ]
    xy_distance = float('inf')
    list_of_ts = []
    for edge in edges:
        # NOTE(review): x and y are computed but never used.
        x = (edge[0][0],edge[1][0])
        y = (edge[0][1],edge[1][1])
        # Distance in the x-y plane from pos to this edge, and the offset t
        # where the vertical line through pos crosses the edge (nan = miss).
        distance = get_distance_to_edge(edge[0],edge[1],
                                        [pos[0],pos[1],0])
        t = get_edge_intersection(edge[0],edge[1],
                                  [pos[0],pos[1],0])
        if not np.isnan(t):
            list_of_ts.append(t)
        if distance < xy_distance:
            xy_distance = distance
    # pos is inside the polygon if the vertical line crosses the boundary
    # once above and once below it.
    is_inside_xy = False
    if len(list_of_ts) == 2:
        # u's are pos and negativ
        if list_of_ts[0]*list_of_ts[1] < 0:
            is_inside_xy = True
        # point is exactly on border
        elif len([t for t in list_of_ts if t == 0])==1:
            is_inside_xy = True
    #---- Calculate z_distance
    is_inside_z = False
    if pos[2] < z_min:
        # underneath detector
        z_distance = z_min - pos[2]
    elif pos[2] < z_max:
        # same height: distance to the nearer of top/bottom cap
        is_inside_z = True
        z_distance = min(pos[2] - z_min, z_max - pos[2])
    else:
        # above detector
        z_distance = pos[2] - z_max
    #---- Combine distances
    if is_inside_z:
        if is_inside_xy:
            # inside detector: signed negative distance to the closest face
            distance = - min(xy_distance,z_distance)
        else:
            distance = xy_distance
    else:
        if is_inside_xy:
            distance = z_distance
        else:
            # outside both: diagonal distance to the nearest edge/corner
            distance = np.sqrt(z_distance**2 + xy_distance**2)
    return distance
def distance_to_icecube_hull(pos, z_min=-502, z_max=501):
    '''Function to determine the closest distance of a point
    to the icecube hull. This is only
    an approximate distance.
    Parameters
    ----------
    pos :I3Position
        Position.
    z_max : float
        Top layer of IceCube-Doms
    z_min : float
        Bottom layer of IceCube-Doms
    Returns
    -------
    distance: float
        closest distance from the point
        to the icecube hull
        negativ if point is inside,
        positiv if point is outside
    '''
    # Counterclockwise x-y outline of the IceCube detector, using the
    # positions of the outermost strings.
    points = [
        [-570.90002441, -125.13999939, 0], # string 31
        [-256.14001465, -521.08001709, 0], # string 1
        [ 361. , -422.82998657, 0], # string 6
        [ 576.36999512, 170.91999817, 0], # string 50
        [ 338.44000244, 463.72000122, 0], # string 74
        [ 101.04000092, 412.79000854, 0], # string 72
        [ 22.11000061, 509.5 , 0], # string 78
        [-347.88000488, 451.51998901, 0], # string 75
    ]
    return distance_to_axis_aligned_Volume(pos, points, z_min, z_max)
def distance_to_deepcore_hull(pos, z_min=-502, z_max=188):
    """Approximate signed distance from a point to the DeepCore hull.

    Parameters
    ----------
    pos : I3Position
        Position to evaluate.
    z_min : float
        z-coordinate of the bottom DOM layer.
    z_max : float
        z-coordinate of the top DOM layer.

    Returns
    -------
    float
        Closest (approximate) distance to the hull: negative when the
        point is inside the DeepCore volume, positive when outside.
    """
    # Perimeter strings of the DeepCore sub-array as (x, y, 0) corners.
    boundary_points = [
        [-77.80000305175781, -54.33000183105469, 0],   # string 35
        [1.7100000381469727, -150.6300048828125, 0],   # string 26
        [124.97000122070312, -131.25, 0],              # string 27
        [194.33999633789062, -30.920000076293945, 0],  # string 37
        [90.48999786376953, 82.3499984741211, 0],      # string 46
        [-32.959999084472656, 62.439998626708984, 0],  # string 45
    ]
    return distance_to_axis_aligned_Volume(pos, boundary_points, z_min, z_max)
def is_in_detector_bounds(pos, extend_boundary=60):
    """Return True when *pos* lies inside the (padded) IceCube hull.

    Parameters
    ----------
    pos : I3Position
        Position to be checked.
    extend_boundary : float
        Pad the hull outward by this many metres before testing.

    Returns
    -------
    bool
        True if the signed hull distance is within the padded boundary.
    """
    # distance - extend_boundary <= 0  <=>  distance <= extend_boundary
    return distance_to_icecube_hull(pos) <= extend_boundary
"[email protected]"
] | |
class WrongArtistError(Exception):
    """Raised when a song's artist does not match its album's artist."""
class Artist:
    """A musical artist together with the songs/albums credited to them."""

    def __init__(self, name: str, country: str):
        """Create an artist with empty song and album catalogues."""
        self.name = name
        self.country = country
        self.songs = []
        self.albums = []

    def __repr__(self):
        # The artist is displayed by name only.
        return self.name

    @property
    def songs_number(self):
        """Number of songs credited to this artist."""
        return len(self.songs)

    @property
    def album_number(self):
        """Number of albums released by this artist."""
        return len(self.albums)
class Album:
    # An album owned by exactly one Artist; registers itself with the
    # artist on construction.
    def __init__(self, name: str, year: int, genre: str, artist: Artist):
        """Create an album and append it to artist.albums.

        Raises ValueError when the artist already has an album with the
        same name.
        """
        self.name = name
        self.year = year
        self.genre = genre
        self.artist = artist
        self.songs = []
        if any(album.name == self.name for album in self.artist.albums):
            raise ValueError(f"Album '{self.name}' already in "
                             f"{self.artist.name} albums list")
        else:
            self.artist.albums.append(self)
    def __repr__(self):
        return f"{self.name}"
    @property
    def songs_number(self):
        """Number of songs currently on this album."""
        return len(self.songs)
    @property
    def duration(self):
        # Song durations appear to be encoded as MIN.SEC floats
        # (e.g. 3.45 == 3 min 45 s), inferred from the *100 conversion
        # below — TODO confirm.
        duration = 0
        if len(self.songs) == 0:
            print('Songs list is empty')
            return 0
        for song in self.songs:
            if len(self.songs) == 1:
                # NOTE(review): a single-song album returns the raw
                # MIN.SEC value, while the multi-song branch returns
                # decimal minutes — the two paths use different units.
                return song.duration
            else:
                min_ = int(song.duration) * 60
                sec = (song.duration - int(song.duration)) * 100
                time = min_ + sec
                duration += time
        # Total seconds converted to (decimal) minutes.
        return duration / 60
class Song:
    # A song by one Artist, optionally attached to one of that artist's
    # albums.  Construction registers the song with the artist (and the
    # album, when consistent).
    def __init__(self, name: str, artist: Artist,
                 features: list, year: int,
                 duration: float, album=None):
        """Create a song and register it with *artist* (and *album*).

        Raises ValueError when the artist already has a song of this
        name, and WrongArtistError when *album* belongs to a different
        artist.
        """
        self.name = name
        self.artist = artist
        self.year = year
        self.duration = duration
        self.album = album
        self.features = features
        # NOTE(review): the message says "albums list" but the check is
        # against the artist's *songs* list.
        if any(song.name == self.name for song in self.artist.songs):
            raise ValueError(f"Song '{self.name}' already in "
                             f"{self.artist.name} albums list")
        else:
            self.artist.songs.append(self)
        # NOTE(review): the song is appended to artist.songs *before*
        # the album/artist consistency check, so on WrongArtistError the
        # song has already been registered with the artist.
        if self.album is None:
            print("Song cannot be added to album as this is a single")
        elif self.artist is not self.album.artist:
            raise WrongArtistError("Song's artist name don't match with "
                                   "album's artist name. Make sure the song's "
                                   "artist or album is correct")
        else:
            self.album.songs.append(self)
    def __repr__(self):
        return f"{self.name}"
| [
"[email protected]"
] | |
01a6dc6845e9d96f56b1449a3dd629955ae6dd8c | 38d2841ec3ef1a69a0a698f4796cbe886cd8b226 | /main_func.py | db88c1e3c3aa560aac77b4301b96eb045cacacfb | [] | no_license | takuyakt/data_maker | 0de653b5361ec26bd00052031e609b4697b7367b | 230ad64cc87c768bb0111037a183e8c8c42d6f46 | refs/heads/master | 2021-04-11T01:49:44.283760 | 2020-03-21T14:30:01 | 2020-03-21T14:30:01 | 248,983,726 | 0 | 0 | null | 2020-03-21T14:30:02 | 2020-03-21T13:40:07 | null | UTF-8 | Python | false | false | 282 | py | import csv
import pprint
import os

# Demo script: prints a greeting and writes two sample rows into
# out_testdata/testdata1.csv located next to this file.  (csv is
# imported at the top of the file; pprint is currently unused.)
test = "hello world"
print(test)
fpath = os.path.dirname(__file__)
# NOTE(review): open() without an explicit encoding relies on the
# platform default for the non-ASCII row below — confirm.
with open(fpath + '/out_testdata/testdata1.csv', 'w', newline="") as f:
    writer = csv.writer(f)
    writer.writerow([1, 'test user', 'aaa'])
    writer.writerow([2, '山田太郎', 'bbb'])
"[email protected]"
] | |
# Lê 5 valores e mantém a lista sempre em ordem crescente, inserindo
# cada novo valor na posição correta.
lista = []
for _ in range(5):
    n = int(input('Digite um valor: '))
    # Lista vazia ou valor maior que o último: vai direto para o final.
    if not lista or n > lista[-1]:
        lista.append(n)
        print('Adicionado no final da lista!')
    else:
        # Procura a primeira posição cujo elemento é >= n e insere ali.
        pos = 0
        while pos < len(lista):
            if n <= lista[pos]:
                lista.insert(pos, n)
                print(f'Adicionado na posição {pos}!')
                break
            pos += 1
print('-=' * 30)
print(f'Os valores digitados em ordem foram {lista}')
"[email protected]"
] | |
e5addaa7e9e7f5e96e50c097199d033ba1508071 | f74a1366b6897204978136da3f5324c50ea1fe7f | /perapp/apps.py | 07490c2ea563a769c4805972b56de26594240311 | [
"BSD-2-Clause"
] | permissive | fukexue/persite | 96ae80266c4ba3939e760da0ff7188fdd01f3f1c | 79c806d6354f4d5cf27769568fb152b8af89dc74 | refs/heads/master | 2021-07-18T13:50:24.380238 | 2017-10-26T05:02:18 | 2017-10-26T05:02:18 | 108,362,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class PerappConfig(AppConfig):
    """Django application configuration for the "perapp" app."""
    name = 'perapp'
| [
"[email protected]"
] | |
a4d356d11a28339d7d7d62ec80b076f3d0daef9b | 4a6403da9bb730abae3cc443f3641399e77c7058 | /config/settings.py | a9293c1c01b9d5665b8c7a0ae2fd5e77d4e8020b | [] | no_license | zkeshtkar/Food_Delivery_Backend | d55ba17509711e2eb511771aaf7ea45e1c8a0781 | 209c62fb62384934b15ecdec6d343f5ad83ba405 | refs/heads/main | 2023-06-05T02:59:11.693379 | 2021-06-26T08:23:53 | 2021-06-26T08:23:53 | 375,336,511 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd8+lpdyupy-+snp_@iwh3bz)a0@2xf&==4x$v&bqw*axn@2c$d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Custom User Model
AUTH_USER_MODEL = 'accounts.User'
# Application definition
DJANGO_DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
]
LOCAL_APPS = [
'accounts.apps.AccountsConfig',
'customers.apps.CustomersConfig',
'about_us.apps.AboutUsConfig',
'managers.apps.ManagersConfig',
]
INSTALLED_APPS = DJANGO_DEFAULT_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'snapfood',
'USER': 'postgres',
'PASSWORD': '97243057',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
}
CORS_ORIGIN_ALLOW_ALL = True
| [
"[email protected]"
] | |
700c350838cf0a297be8d230bbdf29716773117e | 89cf3437d037b4592ffdc8f92b0919b59c021e88 | /file_serverA.py | 7c2ab8eb2ab293a26d5fcc16c34ff50aeca91979 | [] | no_license | zeus7678/Distributed-File-System-Project-NFS-Protocal- | 6f5461a0319e6a4b0ff7c73a4f996d194b70edc6 | f8311fa28729ca792fad20775cd8620c41da3af0 | refs/heads/master | 2022-12-04T09:24:09.244892 | 2020-08-22T09:18:24 | 2020-08-22T09:18:24 | 289,354,014 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | # file server
from socket import *
# Listening address/port for this file server (replica A).
server_addr = "localhost"
server_port = 12001
# TCP listening socket, created and bound at import time.
server_socket = socket(AF_INET,SOCK_STREAM)
server_socket.bind((server_addr, server_port))
server_socket.listen(10)
print ('FILE SERVER is ready to receive...')
# Maps filename -> latest version number written on this server.
file_version_map = {}
def replicate(filename):
    """Push the current contents and version of *filename* to the two
    replica file servers (B on port 12002, C on port 12003).

    The wire format is "REPLICATE|<filename>|<contents>|<version>",
    where the version comes from the module-level file_version_map.
    """
    # Read inside a context manager so the handle is closed even if
    # read() raises (the original only closed on the happy path).
    with open(filename, 'r') as f:
        text = f.read()
    msg = "REPLICATE|" + filename + "|" + text + "|" + str(file_version_map[filename])
    server_ip = 'localhost'
    # The same payload goes to every replica; loop instead of
    # duplicating the connect/send/close sequence per server.
    for label, port in (("B", 12002), ("C", 12003)):
        print("Replicating to fileserver " + label)
        replicate_socket = socket(AF_INET, SOCK_STREAM)
        replicate_socket.connect((server_ip, port))
        replicate_socket.send(msg.encode())
        replicate_socket.close()
def read_write(filename, RW, text, file_version_map):
    """Serve a single read ('r') or append-write ('a+') request.

    Parameters
    ----------
    filename : str
        Path of the file to operate on.
    RW : str
        File mode: "r" for read, "a+" for append-write.
    text : str
        Text to append (ignored for reads).
    file_version_map : dict
        Mutable filename -> version map; updated in place on writes.

    Returns
    -------
    tuple
        ("Success", version) after a write, (contents, version) after a
        successful read, or (IOError, -1) when a read targets a missing
        file.
    """
    if RW == "r":  # read request
        try:
            # Context manager closes the handle (the original leaked it).
            with open(filename, RW) as file:
                text_in_file = file.read()
            if filename not in file_version_map:
                file_version_map[filename] = 0
            return (text_in_file, file_version_map[filename])
        except IOError:
            # open() raises IOError/OSError when the file is missing.
            print(filename + " does not exist in directory\n")
            return (IOError, -1)
    elif RW == "a+":  # write request
        if filename not in file_version_map:
            file_version_map[filename] = 0  # new file starts at version 0
        else:
            file_version_map[filename] = file_version_map[filename] + 1
        print("Updated " + filename + " to version " + str(file_version_map[filename]))
        with open(filename, RW) as file:
            file.write(text)
        print("FILE_VERSION: " + str(file_version_map[filename]))
        return ("Success", file_version_map[filename])
def send_client_reply(response, RW, connection_socket):
    """Send the outcome of a read/write request back to the client.

    Parameters
    ----------
    response : tuple
        Result pair from read_write(): ("Success", version) for writes,
        (text, version) for successful reads, or (IOError, -1) when the
        requested file does not exist.
    RW : str
        The requested mode ("r" or "a+").
    connection_socket : socket
        Connected client socket to reply on.
    """
    if response[0] == "Success":
        # Write succeeded: echo the new version number back.
        reply = "File successfully written to..." + str(response[1])
        print("Sending file version " + str(response[1]))
        connection_socket.send(reply.encode())
    elif response[0] is not IOError and RW == "r":
        # Successful read: send the file contents.  BUGFIX: the
        # original called response.encode() on the (text, version)
        # tuple, which raised AttributeError.
        connection_socket.send(response[0].encode())
    elif response[0] is IOError:
        reply = "File does not exist\n"
        connection_socket.send(reply.encode())
def main():
    """Accept-loop of the file server.

    Handles one request per connection: either a read/write message
    "<filename>|<mode>|<text>" or a version query
    "CHECK_VERSION|<filename>".  Writes trigger replication to the
    other servers.  Runs forever.
    """
    while 1:
        response = ""
        connection_socket, addr = server_socket.accept()
        recv_msg = connection_socket.recv(1024)
        recv_msg = recv_msg.decode()
        #print("RECEIVED: " + recv_msg)
        if recv_msg != "" and "CHECK_VERSION" not in recv_msg:
            # parse the message
            filename = recv_msg.split("|")[0] # file path to perform read/write on
            print ("Filename: " + filename)
            RW = recv_msg.split("|")[1] # whether its a read or write
            print ("RW: " + RW)
            text = recv_msg.split("|")[2] # the text to be written (this text is "READ" for a read and is ignored)
            print ("TEXT: " + text)
            response = read_write(filename, RW, text, file_version_map) # perform the read/write and check if successful
            send_client_reply(response, RW, connection_socket) # send back write successful message or send back text for client to read
            if RW == 'a+':
                replicate(filename)
        elif "CHECK_VERSION" in recv_msg:
            client_filename = recv_msg.split("|")[1] # parse the version number to check
            print("Version check on " + client_filename)
            # NOTE(review): raises KeyError (and drops the connection
            # unanswered) when the filename has never been written.
            file_version = str(file_version_map[client_filename])
            connection_socket.send(file_version.encode())
        connection_socket.close()
# Run the accept loop only when executed directly (not on import).
if __name__ == "__main__":
    main()
"[email protected]"
] | |
52ec80830c3a884b41526bdcc2a033914c9186fd | 7d697d6417261ab0547fb9dc930ccdc8cef284a8 | /shop/shop/settings.py | 93e38e875aa330220e66fd946b3028460cc1ddde | [] | no_license | JulyYSun/Shopping_web | 9f723d4206f6ce43c18ad940f1baccabad3568d0 | 2e3207dd7a7c7ab32695d891567a622424aec060 | refs/heads/master | 2022-12-24T09:44:03.549637 | 2018-06-30T13:03:59 | 2018-06-30T13:03:59 | 139,249,852 | 0 | 1 | null | 2022-12-15T10:54:31 | 2018-06-30T13:01:07 | Python | UTF-8 | Python | false | false | 3,173 | py | """
Django settings for shop project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control — rotate
# it and load from the environment before deploying.
SECRET_KEY = '$wh+f0i7hq^bx62+n%t927*%v(u6gkk9y5wqfid7md_+^zdc1y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'shopping',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shop.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'shop.wsgi.application'
# Database (development-only SQLite file next to the project).
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# User-uploaded media files are served from MEDIA_ROOT under /media/.
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
MEDIA_URL='/media/'
| [
"[email protected]"
] | |
da745ca909e3ec7d10cc7fe8020724b7d99795fa | 6b85cf0643f746430878e3c6722a2786a366ecb2 | /MODUL-2/4.py | 3e1680fcb7c635b98f11004349e167063246d793 | [] | no_license | L200183147/algostruk_x | a7fff0187bfaf1e8eb1c88f57071800d50ce727e | 5ba6de6a190bab2310a6bef311fc6fc6eaa84e50 | refs/heads/master | 2021-02-09T22:53:41.848312 | 2020-04-27T13:02:15 | 2020-04-27T13:02:15 | 244,331,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | class Mahasiswa(object):
"""Class Mahasiswa yang dibangun dari class Manusia"""
def __init__(self, nama, NIM, kota, us):
"""Metode inisiasi ini menutupi inisiasi di class manusia."""
self.nama = nama
self.NIM = NIM
self.kotaTinggal = kota
self.uangSaku = us
self.listKuliah = []
def __str__(self):
s = self.nama + ', NIM ' + str(self.NIM) \
+ '. Tinggal di ' + self.kotaTinggal \
+ '. Uang saku Rp. ' + str(self.uangSaku) \
+ ' tiap bulannya.'
return s
def ambilNama(self):
return self.nama
def ambilNIM(self):
return self.NIM
def ambilUangSaku (self):
return self.uangSaku
def makan(self, s):
"""Metode ini menutupi metode 'makan'-nya class Manusia.
Mahasiswa kalau makan sambil belajar,"""
print("Saya baru saja makan",s,"sambil belajar.")
self.keadaan = 'kenyang'
def ambilKotaTinggal(self):
return self.kotaTinggal
def perbaruiKotaTinggal (self, kotaBaru):
self.kota = kotaBaru
def tambahUangSaku (self, uangBaru):
self.uangSaku += uangBaru
def listKuliah(self):
return self.listKuliah
def ambilKuliah(self, matkul):
self.listKuliah.append(matkul)
| [
"[email protected]"
] | |
fe546eece8e5657586198652daac8bcc846a5b7a | c55da74730b22e529b2704494736ca8c256a3bcc | /user/decorator.py | cf65b68bccc58948665cc93c084d8bdb961a235b | [] | no_license | anurag2050doit/Flask-liveyoursport | d268293d0bb09c02d0915360a0468ad98952c23b | a11051a2362ebdee769b9a97870a09240f15060e | refs/heads/master | 2021-01-18T20:38:28.024543 | 2017-04-02T11:27:13 | 2017-04-02T11:27:13 | 86,979,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from functools import wraps
from flask import session, request, redirect, url_for, abort
def login_required(f):
    """Flask view decorator: require a logged-in user.

    Anonymous visitors (no 'username' in the session) are redirected to
    the login page with the original URL passed as ?next=.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if session.get('username') is not None:
            return f(*args, **kwargs)
        return redirect(url_for('login', next=request.url))
    return wrapper
"[email protected]"
] | |
527b7463f9eb3882f940fa76797915142466b8bb | d6a61039d69eb9f7f352b98500d8e0e021b98203 | /Connectors/osxAddressBook/addressBook.py | 3825d5457e42f07434eb5d101e3ef5b1dc91f5f3 | [] | no_license | arfrank/Locker | 0796294ef1d8968828df3ddb4e31c19043ec860b | e3e7b90214d9e7ec0114477bd5eb9403865ae0ad | refs/heads/master | 2021-01-18T08:47:28.806759 | 2011-03-24T14:26:40 | 2011-03-24T14:26:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | from AddressBook import *
import json
import os
def dataOrRaise(x):
    """Return *x* unchanged when it is truthy; raise otherwise.

    Used by the property extractors below to turn an absent Address
    Book value into an exception that the callers can swallow.
    """
    if x:
        return x
    raise Exception("None")
def defaultOp(container, containerKey):
    # Default extractor: look the property up on the Address Book
    # record; dataOrRaise converts an absent/falsy value into an
    # exception for the best-effort callers to catch.
    return dataOrRaise(container.valueForProperty_(containerKey))
def addDataIfValid(targetObject, targetKey, container, containerKey, op=None):
    """Best-effort copy of one property into *targetObject*.

    Extracts container's *containerKey* via *op* (defaultOp when not
    given) and stores it under *targetKey*.  Any failure is swallowed
    by design: missing properties simply leave the target untouched.
    """
    extract = op or defaultOp
    try:
        targetObject[targetKey] = extract(container, containerKey)
    except Exception:
        pass
def addListIfValid(targetObject, targetKey, multivalue, op=None):
    """Store an Address Book multi-value as a list of entry dicts.

    Each entry is {"type": <label with 4 chars stripped from each end>,
    "value": op(value)}.  When *multivalue* is falsy nothing is stored.
    *op* defaults to the identity function.
    """
    if not op:
        op = lambda data: data
    if multivalue:
        # Assign the list first so a partially-built list survives an
        # exception mid-iteration, matching the original behaviour.
        entries = targetObject[targetKey] = []
        for idx in range(multivalue.count()):
            entries.append({
                "type": multivalue.labelAtIndex_(idx)[4:-4],
                "value": op(multivalue.valueAtIndex_(idx)),
            })
def formatAdressRecord(record):
    """Render an Address Book address record as a single line.

    Format: "street city, state zip country" with a missing country
    replaced by the empty string; trailing whitespace is stripped.
    (The function name's typo is kept for interface compatibility.)
    """
    street = record.valueForKey_(kABAddressStreetKey)
    city = record.valueForKey_(kABAddressCityKey)
    state = record.valueForKey_(kABAddressStateKey)
    zipcode = record.valueForKey_(kABAddressZIPKey)
    country = record.valueForKey_(kABAddressCountryKey) or ""
    return "{0} {1}, {2} {3} {4}".format(street, city, state, zipcode,
                                         country).rstrip()
def gatherContacts():
    """Export every Address Book person as one JSON object per line
    into my/contacts.json, saving avatar images as my/<id>.jpg.

    NOTE(review): the output file handle `fd` is never closed, and the
    "avatar" path is recorded even when the person has no image data.
    """
    ab = ABAddressBook.sharedAddressBook()
    allPeople = ab.people()
    try:
        os.mkdir("my")
    except OSError:
        # Directory already exists — fine.
        pass
    fd = open("my/contacts.json", "w")
    for person in allPeople:
        jsonData = {}
        # [:-9] strips a fixed-length suffix from the record UID —
        # presumably the ":ABPerson" tag; TODO confirm.
        recordID = person.valueForProperty_(kABUIDProperty)[:-9]
        # Dropped middle name for now
        jsonData["id"] = recordID
        jsonData["name"] = u"{0} {1}".format(person.valueForProperty_("First"), person.valueForProperty_("Last") or "").strip()
        addDataIfValid(jsonData, "nickname", person, kABNicknameProperty)
        addDataIfValid(jsonData, "birthday", person, kABBirthdayProperty, lambda x,y:str(dataOrRaise(defaultOp(x, y))))
        addListIfValid(jsonData, "phone", person.valueForProperty_(kABPhoneProperty))
        addListIfValid(jsonData, "email", person.valueForProperty_(kABEmailProperty))
        addListIfValid(jsonData, "address", person.valueForProperty_(kABAddressProperty), formatAdressRecord)
        # Gross, there's no single aggregate property with all of the IM networks in one
        ims = []
        for key in (("aim",kABAIMInstantProperty), ("icq",kABICQInstantProperty), ("jabber",kABJabberInstantProperty), ("msn",kABMSNInstantProperty), ("yahoo",kABYahooInstantProperty)):
            val = person.valueForProperty_(key[1])
            if val:
                for x in range(val.count()):
                    ims.append({"type":key[0], "value":val.valueAtIndex_(x)})
        if len(ims): jsonData["im"] = ims
        # We'll save out a copy of the image data for easier access
        image = person.imageData();
        path = "my/{0}.jpg".format(recordID)
        if image: image.writeToFile_atomically_(path, False)
        jsonData["avatar"] = [path]
        # Collect the IDs of every group this person belongs to.
        groups = []
        for group in ab.groups():
            if group.members().containsObject_(person): groups.append(group.valueForProperty_(kABUIDProperty)[:-9])
        if len(groups): jsonData["groups"] = groups
        json.dump(jsonData, fd)
        fd.write("\n")
def gatherGroups():
    """Export top-level Address Book groups to my/groups.json.

    Each line is one JSON object {"id": <UID with a 9-char suffix
    stripped>, "name": <group name>}.  Only top-level groups are
    exported; nesting is ignored.  No file is written when there are
    no groups.
    """
    ab = ABAddressBook.sharedAddressBook()
    groups = ab.groups()
    if not groups.count():
        return
    # Context manager guarantees the file is flushed and closed (the
    # original leaked the handle).
    with open("my/groups.json", "w") as fd:
        for group in groups:
            json.dump({"id": group.valueForProperty_(kABUIDProperty)[:-9],
                       "name": group.name()}, fd)
            fd.write("\n")
# Export contacts and groups when run as a script.
if __name__ == "__main__":
    gatherContacts()
    gatherGroups()
| [
"[email protected]"
] | |
2315c61056762406d746e77b9628fd75f28bbf16 | 3514e1935723cfa228d4dda1c7ea7429c54f8660 | /petmail/test/test_invitation.py | de04c7835cc24176388774d2c64d9b05c4be6bde | [
"MIT"
] | permissive | ggozad/petmail | 5e5ab8796cb52091a42f9ba509b9ddca1e4b32a0 | d097816d48049b8142bd6e2f8261ab2b55a7c81b | refs/heads/master | 2020-12-24T22:20:41.856401 | 2013-09-04T06:54:13 | 2013-09-04T06:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,846 | py | import os, collections, json
from twisted.trial import unittest
from .common import BasedirMixin, NodeRunnerMixin, TwoNodeMixin, fake_transport
from ..eventual import flushEventualQueue
from ..errors import CommandError
from ..invitation import splitMessages
# Row of the "invitations" table: our sent messages, the peer's
# messages, and the next expected message number.
MROW = collections.namedtuple("Row", ["my", "theirs", "next"])
# Row of the "addressbook" table as used by these tests.
AddressbookRow = collections.namedtuple("AddressbookEntry",
                                        ["petname", "their_verfkey", "acked",
                                         "my_CID_key",
                                         "their_channel_record",
                                        ])
class Invite(BasedirMixin, NodeRunnerMixin, unittest.TestCase):
    """Exercise the two-node invitation protocol end to end.

    Drives two manually-polled nodes through the M1/M2/M3-ACK/M4-destroy
    message exchange and checks the database state after each step.
    (Python 2-era code: note code.encode("hex") and .keys()[0].)
    """
    def checkCounts(self, node, code, my, theirs, next, exists=True):
        """Assert the invitation row for *code* has the given message
        counts, or that no row exists when exists=False.

        (The parameter name `next` shadows the builtin; kept for
        interface stability.)
        """
        c = node.db.execute("SELECT"
                            " myMessages, theirMessages, nextExpectedMessage"
                            " FROM invitations WHERE code=?",
                            (code.encode("hex"),))
        rows = [ MROW(splitMessages(row[0]), splitMessages(row[1]), row[2])
                 for row in c.fetchall() ]
        if not exists:
            self.failUnlessEqual(len(rows), 0)
            return
        self.failUnlessEqual(len(rows), 1)
        self.failUnlessEqual(len(rows[0].my), my)
        self.failUnlessEqual(len(rows[0].theirs), theirs)
        self.failUnlessEqual(rows[0].next, next)
        return rows
    def fetchAddressBook(self, node):
        """Return every addressbook row of *node* as AddressbookRow
        tuples, with the channel record JSON already decoded."""
        c = node.db.execute("SELECT petname, their_verfkey, acked,"
                            " my_CID_key, their_channel_record_json"
                            " FROM addressbook")
        rows = [ AddressbookRow(row[0], str(row[1]), bool(row[2]),
                                str(row[3]), json.loads(row[4]))
                 for row in c.fetchall() ]
        return rows
    def test_one(self):
        """Full happy-path invitation between two nodes sharing a code."""
        code = "code"
        basedir1 = os.path.join(self.make_basedir(), "node1")
        self.createNode(basedir1)
        n1 = self.startNode(basedir1, beforeStart=self.disable_polling)
        rclient1 = list(n1.client.im)[0]
        tport1 = fake_transport()
        tports1 = {0: tport1[1]}
        nA_notices = []
        n1.client.subscribe("addressbook", nA_notices.append)
        n1.client.command_invite(u"petname-from-1", code,
                                 override_transports=tports1)
        inviteID = rclient1.subscriptions.keys()[0]
        rdir = os.path.join(rclient1.basedir, inviteID)
        self.failUnless(os.path.exists(rdir))
        # messages: node1-M1
        # at this point, node1 should have sent a single message (M1), and
        # should be waiting for the peer's first message (M1)
        self.checkCounts(n1, code, 1, 0, 1)
        # polling again should ignore the previously-sent message
        rclient1.poll()
        self.checkCounts(n1, code, 1, 0, 1)
        # now we add a peer (node2) for them to talk to
        basedir2 = os.path.join(self.make_basedir(), "node2")
        self.createNode(basedir2)
        n2 = self.startNode(basedir2, beforeStart=self.disable_polling)
        tport2 = fake_transport()
        tports2 = {0: tport2[1]}
        n2.client.command_invite(u"petname-from-2", code,
                                 override_transports=tports2)
        rclient2 = list(n2.client.im)[0]
        # messages: node1-M1, node2-M1
        # node2 should have sent one message. node1 should not have noticed
        # yet, because we only poll manually here.
        self.checkCounts(n2, code, 1, 0, 1)
        self.checkCounts(n1, code, 1, 0, 1)
        # allow node2 to poll. It should see the node1's first message, and
        # create its own second message. node1 should not notice yet.
        rclient2.poll()
        # messages: node1-M1, node2-M1, node2-M2
        self.checkCounts(n1, code, 1, 0, 1)
        self.checkCounts(n2, code, 2, 1, 2)
        # node2 polling again should not change anything
        rclient2.poll()
        self.checkCounts(n1, code, 1, 0, 1)
        self.checkCounts(n2, code, 2, 1, 2)
        # let node1 poll. It will see both of node2's messages, add its
        # addressbook entry, send it's second and third messages, and be
        # waiting for an ACK
        #print "== first client polling to get M2"
        rclient1.poll()
        # messages: node1-M1, node2-M1, node2-M2, node1-M2, node1-M3-ACK
        self.checkCounts(n2, code, 2, 1, 2)
        self.checkCounts(n1, code, 3, 2, 3)
        a1 = self.fetchAddressBook(n1)
        self.failUnlessEqual(len(a1), 1)
        self.failUnlessEqual(a1[0].petname, "petname-from-1")
        self.failUnlessEqual(a1[0].acked, False)
        #print a1[0].their_verfkey
        #print a1[0].their_channel_record
        # re-polling should not do anything
        rclient1.poll()
        # TODO: the Invitation is incorrectly given new messages here
        self.checkCounts(n2, code, 2, 1, 2)
        self.checkCounts(n1, code, 3, 2, 3)
        # let node2 poll. It will see node1's M2 message, add its addressbook
        # entry, send its ACK, will see node1's ACK, update its addressbook
        # entry, send its destroy-channel message, and delete the invitation.
        #print " == second client polling to get M2"
        rclient2.poll()
        # messages: node1-M1, node2-M1, node2-M2, node1-M2, node1-M3-ACK,
        # node2-M3-ACK, node2-M4-destroy
        self.checkCounts(n2, code, None, None, None, exists=False)
        self.checkCounts(n1, code, 3, 2, 3)
        a2 = self.fetchAddressBook(n2)
        self.failUnlessEqual(len(a2), 1)
        self.failUnlessEqual(a2[0].petname, "petname-from-2")
        self.failUnlessEqual(a2[0].acked, True)
        # finally, let node1 poll one last time. It will see the ACK and send
        # the second destroy-channel message.
        rclient1.poll()
        # messages: node1-M1, node2-M1, node2-M2, node1-M2, node1-M3-ACK,
        # node2-M3-ACK, node2-M4-destroy, node1-M4-destroy
        self.checkCounts(n2, code, None, None, None, exists=False)
        self.checkCounts(n1, code, None, None, None, exists=False)
        a1 = self.fetchAddressBook(n1)
        self.failUnlessEqual(len(a1), 1)
        self.failUnlessEqual(a1[0].acked, True)
        self.failUnlessEqual(a1[0].their_channel_record["CID_key"],
                             a2[0].my_CID_key)
        self.failUnlessEqual(a1[0].my_CID_key,
                             a2[0].their_channel_record["CID_key"])
        # finally check that the channel has been destroyed
        self.failIf(os.path.exists(rdir))
        # look at some client command handlers too
        a1 = n1.client.command_list_addressbook()
        self.failUnlessEqual(len(a1), 1)
        a2 = n2.client.command_list_addressbook()
        self.failUnlessEqual(len(a2), 1)
        self.failUnlessEqual(a1[0]["my_verfkey"], a2[0]["their_verfkey"])
        self.failUnlessEqual(a2[0]["my_verfkey"], a1[0]["their_verfkey"])
        self.failUnlessEqual(a1[0]["acked"], True)
        self.failUnlessEqual(a1[0]["petname"], "petname-from-1")
        self.failUnlessEqual(a2[0]["acked"], True)
        self.failUnlessEqual(a2[0]["petname"], "petname-from-2")
        self.failUnlessEqual(nA_notices, [])
        d = flushEventualQueue()
        def _then(_):
            # Subscription notices fire eventually: one insert (unacked)
            # then one update (acked).
            self.failUnlessEqual(len(nA_notices), 2)
            self.failUnlessEqual(nA_notices[0].action, "insert")
            self.failUnlessEqual(nA_notices[0].new_value["acked"], 0)
            self.failUnlessEqual(nA_notices[0].new_value["petname"],
                                 "petname-from-1")
            self.failUnlessEqual(nA_notices[1].action, "update")
            self.failUnlessEqual(nA_notices[1].new_value["acked"], 1)
            self.failUnlessEqual(nA_notices[1].new_value["petname"],
                                 "petname-from-1")
            n1.client.unsubscribe("addressbook", nA_notices.append)
        d.addCallback(_then)
        return d
    def test_duplicate_code(self):
        """Re-using an invitation code on the same node must raise
        CommandError."""
        basedir1 = os.path.join(self.make_basedir(), "node1")
        self.createNode(basedir1)
        n1 = self.startNode(basedir1, beforeStart=self.disable_polling)
        code = "code"
        tport = fake_transport()
        tports = {0: tport[1]}
        n1.client.command_invite(u"petname-from-1", code,
                                 override_transports=tports)
        self.failUnlessRaises(CommandError,
                              n1.client.command_invite, u"new-petname", code)
class Two(TwoNodeMixin, unittest.TestCase):
    """Check that two paired nodes expose each other's contact id."""

    def test_two(self):
        node_a, node_b, entry_a, entry_b = self.make_nodes()
        for node, entry in ((node_a, entry_a), (node_b, entry_b)):
            contacts = node.client.command_list_addressbook()
            self.failUnlessEqual(contacts[0]["cid"], entry["id"])
| [
"[email protected]"
] | |
7e857079fd34b808ba6a450e41c12f817da22560 | bd04d6c138665d01349f62d912514c5a7343a6d0 | /algorithm/SW_expert_academy/1970.py | ad95e2d0315e75c142820bd871f67112240dd930 | [] | no_license | gvg4991/TIL | fe6209c21a228d3500ca64615df2f1066a5d0f11 | ada8fbdc88555d327beae2ae8eee16b4739240aa | refs/heads/master | 2021-06-08T06:44:37.554473 | 2021-06-06T09:06:47 | 2021-06-06T09:06:47 | 162,512,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,571 | py | # 우리나라 화폐 ‘원’은 금액이 높은 돈을 우선적으로 계산할 때 돈의 개수가 가장 최소가 된다.
# S마켓에서 사용하는 돈의 종류는 다음과 같다.
# 50,000 원
# 10,000 원
# 5,000 원
# 1,000 원
# 500 원
# 100 원
# 50 원
# 10 원
# S마켓에서 손님에게 거슬러 주어야 할 금액 N이 입력되면 돈의 최소 개수로 거슬러 주기 위하여 각 종류의 돈이 몇 개씩 필요한지 출력하라.
# 예제]
# N이 32850일 경우,
# 50,000 원 : 0개
# 10,000 원 : 3개
# 5,000 원 : 0개
# 1,000 원 : 2개
# 500 원 : 1개
# 100 원 : 3개
# 50 원 : 1개
# 10 원 : 0개
# [제약 사항]
# 1. N은 10이상 1,000,000이하의 정수이다. (10 ≤ N ≤ 1,000,000)
# 2. N의 마지막 자릿수는 항상 0이다. (ex : 32850)
# [입력]
# 가장 첫 줄에는 테스트 케이스의 개수 T가 주어지고, 그 아래로 각 테스트 케이스가 주어진다.
# 각 테스트 케이스에는 N이 주어진다.
# [출력]
# 각 줄은 '#t'로 시작하고, 다음줄에 각 돈의 종류마다 필요한 개수를 빈칸을 사이에 두고 출력한다.
# (t는 테스트 케이스의 번호를 의미하며 1부터 시작한다.)
# SWEA 1970: greedy change-making with Korean won denominations.
# For each test case, read an amount (always a multiple of 10) and
# print how many of each bill/coin are needed, largest first.
test = int(input())
money = [50000, 10000, 5000, 1000, 500, 100, 50, 10]
for tc in range(test):
    amount = int(input())
    counts = []
    for coin in money:
        quotient, amount = divmod(amount, coin)
        counts.append(quotient)
    print(f'#{tc+1}')
    print(' '.join(map(str, counts)))
| [
"[email protected]"
] | |
3bd0d7c243a87f3364c34c0aeb30eab85d2046ab | 022c3ca134fb125e70ed84c215f14da5eee3020d | /first_app/models.py | 249d6f01f06ffad24a21b12eb2287a4f0b345d96 | [] | no_license | akibul-sharif/django_practice_1 | da755444ad8d7368df9c2330ff61879bbdbf0fe6 | 7041dbf3b724760c66a70246f4cacc366a3e9c49 | refs/heads/master | 2023-04-16T01:01:01.433070 | 2021-04-22T13:41:13 | 2021-04-22T13:41:13 | 360,533,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.db import models
# Create your models here.
class Profile(models.Model):
name=models.CharField(max_length=20)
age=models.IntegerField()
address=models.TextField()
images=models.ImageField() | [
"[email protected]"
] | |
56aac45f3cf4f0fb95b0b0e4578bb423478fa7ab | 68049b03dbbd9a3d778571794472e07c05fb00ad | /python/courses/corey_shafer/regular_expressions/recompile.py | 61f6cf043a694f33b1271c322e5e7d1a181336df | [] | no_license | tjkhara/notes | c9e96ecea6efed860c521eb7df562c5715091aea | 5602a25ba23104e4154700108f1b8a3a0144f712 | refs/heads/master | 2023-01-20T07:42:47.129359 | 2020-11-24T06:43:24 | 2020-11-24T06:43:24 | 285,811,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | import re
# This is the text we are searching
text_to_search = '''
abcdefghijklmnopqurtuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
Ha HaHa
MetaCharacters (Need to be escaped):
. ^ $ * + ? { } [ ] \ | ( )
coreyms.com
321-555-4321
123.555.1234
123*555*1234
800-555-1234
900-555-1234
900-555-1234
Mr. Schafer
Mr Smith
Ms Davis
Mrs. Robinson
Mr. T
cat
mat
hat
bat
'''
# Searching for the characters abc
pattern = re.compile(r'M(r|s|rs)\.?\s[A-Z]\w*')
matches = pattern.finditer(text_to_search)
for match in matches:
print(match)
| [
"[email protected]"
] | |
c55d5264bdbe43e511bdbb95e17d1012118c0221 | bbb5edc5d431bc9db6b31659b03b98dbef11411a | /python_code/picturing.py | 31929ef2b57d31d80bf82073e2b9b004be80e143 | [
"MIT"
] | permissive | tabhitmy/CMLTF | ade3b5727e982514002b7a4e3f3cb4183699e0e3 | 63f297f5f5d980da5599815ccec837578f977317 | refs/heads/master | 2021-01-02T17:16:55.573193 | 2017-08-04T07:10:22 | 2017-08-04T07:10:22 | 99,308,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,292 | py | # picturing
import sys
import os
import numpy as np
import glob
import math
import matplotlib as mpl
from matplotlib.font_manager import FontProperties
zhfont = FontProperties(fname="/usr/share/fonts/cjkuni-ukai/ukai.ttc") # 图片显示中文字体
mpl.use('Agg')
import matplotlib.pyplot as plt
import pickle
from toolkitJ import cell2dmatlab_jsp
with open('res.pickle', 'rb') as f:
res = pickle.load(f)
print(res)
print(len(res))
#
L = len(res)
ft_size = 24
xlbl = cell2dmatlab_jsp([L], 1, [])
y = np.zeros((6, L))
for i in range(L):
xlbl[i] = res[i][1]
for j in range(6):
y[j][i] = res[i][3][j]
xlbl = ['LSVM', 'LDA', 'QDA', 'NB', 'ADAB', 'LRC', 'DT', 'RF']
ylbl = ['P(Precision)', 'A(Accuracy)', 'R(Recall)', 'MA(Missing Alert)', 'FA(False Alert)', 'F1(F1 score)']
x = np.arange(1, 9)
h = plt.figure(num=str(j), figsize=(17, 9.3))
ax = plt.gca()
port = 0.1
ytick = np.arange(0, 1, 0.2)
colorlist = ['blue', 'green', 'yellow', 'yellowgreen', 'purple', 'red']
for j in range(6):
# plt.subplot(3, 2, j + 1)
delt = port * j + 0.01 * j
plt.bar(x - 0.3 + delt, y[j], width=port, facecolor=colorlist[j], label=ylbl[j])
plt.legend(mode="expand", loc=2, fontsize=ft_size)
ax.set_xticks(x)
ax.set_xticklabels(xlbl, fontproperties=zhfont, fontsize=ft_size)
ax.set_yticklabels(ytick, fontsize=ft_size)
# plt.xlabel('Classifiers')
plt.ylabel('scores', fontsize=ft_size)
# plt.title(ylbl[j])
plt.ylim((0, 1))
plt.show()
plt.savefig('/home/GaoMY/EXECUTION/NFDA/code/python_backup/pic/e.png')
h2 = plt.figure(num=str(j), figsize=(17, 9.3))
for j in range(6):
plt.subplot(3, 2, j + 1)
ax = plt.gca()
plt.bar(x, y[j], label=ylbl[j])
plt.legend(loc='best')
ax.set_xticks(x)
if j > 3:
ax.set_xticklabels(xlbl, fontproperties=zhfont, fontsize=ft_size)
else:
ax.set_xticklabels([], fontproperties=zhfont, fontsize=ft_size)
ax.set_yticklabels(ytick, fontsize=ft_size)
# plt.xlabel('Classifiers')
plt.ylabel('scores', fontsize=ft_size)
plt.title(ylbl[j], fontsize=ft_size)
plt.ylim((0, 1))
plt.show()
plt.savefig('/home/GaoMY/EXECUTION/NFDA/code/python_backup/pic/SPR.png')
| [
"[email protected]"
] | |
3d7e6b19381428cbae047c99eb8fdb144a5a4680 | c94cb71443f71dcb17609e1f85b595f34dc6be66 | /carla_scripts/Utils/hud.py | 9359b754ea743a6420a718794ffb316b6f45c011 | [] | no_license | guygo12345/simulator | b453270026876d4f38d592cb3bd0d103ea99f825 | 4700cb7470be9fe739b6de4927a06f7d40e61468 | refs/heads/master | 2021-05-20T03:53:31.139891 | 2020-08-05T15:50:10 | 2020-08-05T15:50:10 | 252,173,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,128 | py | """
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
M : toggle manual transmission
,/. : gear up/down
TAB : change sensor position
` : next sensor
[1-9] : change to sensor [1-9]
G : toggle radar visualization
C : change weather (Shift+C reverse)
Backspace : change vehicle
R : toggle recording images to disk
CTRL + R : toggle recording of simulation (replacing any previous)
CTRL + P : start replaying last recorded simulation
CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds)
CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds)
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
import pygame
import os
from carla_scripts.Utils.sim_utils import *
import datetime
import math
import carla
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
def __init__(self, font, dim, pos):
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=(255, 255, 255), seconds=2.0):
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill((0, 0, 0, 0))
self.surface.blit(text_texture, (10, 11))
def tick(self, _, clock):
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
def __init__(self, font, width, height):
lines = __doc__.split('\n')
self.font = font
self.dim = (680, len(lines) * 22 + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill((0, 0, 0, 0))
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, (255, 255, 255))
self.surface.blit(text_texture, (22, n * 22))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
self._render = not self._render
def render(self, display):
if self._render:
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD(object):
def __init__(self, width, height):
self.dim = (width, height)
font = pygame.font.Font(pygame.font.get_default_font(), 20)
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.help = HelpText(pygame.font.Font(mono, 24), width, height)
self.server_fps = 0
self.frame = 0
self.simulation_time = 0
self._show_info = True
self._info_text = []
self._server_clock = pygame.time.Clock()
self.closest_vehicle_distance = None
def on_world_tick(self, timestamp):
self._server_clock.tick()
self.server_fps = self._server_clock.get_fps()
self.frame = timestamp.frame
self.simulation_time = timestamp.elapsed_seconds
def tick(self, world, clock):
self._notifications.tick(world, clock)
if not self._show_info:
return
t = world.player.get_transform()
v = world.player.get_velocity()
c = world.player.get_control()
compass = world.imu_sensor.compass
heading = 'N' if compass > 270.5 or compass < 89.5 else ''
heading += 'S' if 90.5 < compass < 269.5 else ''
heading += 'E' if 0.5 < compass < 179.5 else ''
heading += 'W' if 180.5 < compass < 359.5 else ''
colhist = world.collision_sensor.get_collision_history()
collision = [colhist[x + self.frame - 200] for x in range(0, 200)]
max_col = max(1.0, max(collision))
collision = [x / max_col for x in collision]
vehicles = world.world.get_actors().filter('vehicle.*')
# pedestrians = world.world.get_actors().filter('walker.pedestrian.*')
self._info_text = [
'Server: % 16.0f FPS' % self.server_fps,
'Client: % 16.0f FPS' % clock.get_fps(),
'',
'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),
'Map: % 20s' % world.map.name,
'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),
'',
'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
u'Compass:% 17.0f\N{DEGREE SIGN} % 2s' % (compass, heading),
'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),
'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),
'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),
'Height: % 18.0f m' % t.location.z,
'']
if isinstance(c, carla.VehicleControl):
self._info_text += [
('Throttle:', c.throttle, 0.0, 1.0),
('Steer:', c.steer, -1.0, 1.0),
('Brake:', c.brake, 0.0, 1.0),
('Reverse:', c.reverse),
('Hand brake:', c.hand_brake),
('Manual:', c.manual_gear_shift),
'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]
elif isinstance(c, carla.WalkerControl):
self._info_text += [
('Speed:', c.speed, 0.0, 5.556),
('Jump:', c.jump)]
self._info_text += [
'',
'Collision:',
collision,
'',
'Number of vehicles: % 8d' % len(vehicles)]
if len(vehicles) > 1:
self._info_text += ['Nearby vehicles:']
distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)
vehicles = sorted([(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id])
self.closest_vehicle_distance = vehicles[0][0]
for d, vehicle in vehicles:
if d > 200.0:
break
vehicle_type = get_actor_display_name(vehicle, truncate=22)
self._info_text.append('% 4dm %s' % (d, vehicle_type))
def toggle_info(self):
self._show_info = not self._show_info
def notification(self, text, seconds=2.0):
self._notifications.set_text(text, seconds=seconds)
def error(self, text):
self._notifications.set_text('Error: %s' % text, (255, 0, 0))
def render(self, display):
if self._show_info:
info_surface = pygame.Surface((220, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
for item in self._info_text:
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
v_offset += 18
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
else:
rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
f = (item[1] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))
else:
rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))
pygame.draw.rect(display, (255, 255, 255), rect)
item = item[0]
if item: # At this point has to be a str.
surface = self._font_mono.render(item, True, (255, 255, 255))
display.blit(surface, (8, v_offset))
v_offset += 18
self._notifications.render(display)
self.help.render(display) | [
"[email protected]"
] | |
31a122ec6c026fc494c752caf471d5d872b31f2f | d1bd97ee39d68c0947b1b20ecfb7acb2542c4863 | /CatServer/main.py | 8374b2aae2ec15a7a906d7874692ed91d1a2ecef | [] | no_license | LeonidMurashov/Python_Projects | 84a6e515c12ed1a5217283049dc6bbc507709202 | d1df4d1baf483f9025859f6a6e332b44cbb9fdf2 | refs/heads/master | 2021-08-20T05:07:49.990224 | 2020-07-27T10:41:20 | 2020-07-27T10:41:20 | 73,642,642 | 12 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import random
from tornado.options import define, options
import urllib.request as urllib2
define("port", default=8888, help="run on the given port", type=int)
from pymongo import MongoClient
client = MongoClient("mongodb://l.murashov:[email protected]:55841/hotdogs1576")
database = client["hotdogs1576"]
collection = database["hotdogs"]
hotdogs = list(collection.find())
class MainHandler(tornado.web.RequestHandler):
def get(self):
hotdog = random.choice(hotdogs)
good = False
while(good):
hotdog = random.choice(hotdogs)
try:
src = urllib2.urlopen(hotdog["link"])
print(src)
good = True
except:
pass
self.render("index.html", hotdog=hotdog)
def main():
tornado.options.parse_command_line()
application = tornado.web.Application([
(r"/", MainHandler),
], debug=True)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main() | [
"="
] | = |
6fdc01621b1df1b18c3bb641942f1e82a75be909 | 09abafe939ae356b7a9c17d2fe6498e710f1cbc5 | /Parte II/ladder/ladder2/pub/controllerDoor.py | dcbc61f11b3c3967b283bb1dc05b0f166e714350 | [] | no_license | brunacordeiro/SCAPS | ab2b83f1fede6f53220193c2fcc5c490c739bee2 | 3c2db3ef9e8796fc977509b753f204a9043c17fb | refs/heads/master | 2022-12-20T08:51:48.057197 | 2020-10-20T22:43:17 | 2020-10-20T22:43:17 | 277,899,565 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | import time
import random
lotacaoShopping = 5000
taxaPermitidaShopping = 0.3
capMaxShopping = lotacaoShopping * taxaPermitidaShopping
amountPeople = 50
amountEntry = 0
amountExit = 0
result = list()
porta = 2
def contagemPessoasDoor(vetor): # [1,0] ... [0,1]
global amountPeople
global amountEntry
global amountExit
while amountPeople < capMaxShopping:
if vetor[0] == 1 and vetor[1] == 0: # entrada de pessoas
print("\nEntrada detectada!")
amountEntry = amountEntry + 1
amountPeople = amountPeople + 1
time.sleep(3)
result = list()
result.append((amountPeople, amountEntry, amountExit, porta))
if vetor[0] == 0 and vetor[1] == 1: # saida de pessoas
print("\nSaida detectada! ")
amountExit = amountExit + 1
amountPeople = amountPeople - 1
time.sleep(3)
result = list()
result.append((amountPeople, amountEntry, amountExit, porta))
print(result)
return str(result)
exit()
| [
"[email protected]"
] | |
875e0d0c989a74f1b08d8b931176422888425e8d | 03f30b0cf13beb6c80c26af3c40b9334ce5b063c | /프로그래머스/level1/콜라츠 추측.py | 423ccdc1c8c3d0a013fc0308541ce7cc824689c1 | [] | no_license | yunakim2/Algorithm_Python | 41887147dd412596cd5813283e96f68113a4c43d | e8fde2bd7206903b6b4f3c321c3ed5651b484cc7 | refs/heads/master | 2023-08-25T23:13:58.544439 | 2021-11-13T09:08:17 | 2021-11-13T09:08:17 | 295,153,209 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | def solution(num):
answer = 0
while (num!=1) :
if num%2 == 0 :
num = num/2
else :
num = num*3+1
answer= answer+1
if(answer>500) :
answer = -1
break
return answer
print(solution(626331)) | [
"[email protected]"
] | |
1160903dc116ef10682357928602c084c50c18f8 | 4860af57dcde0e65e9ed359e416c75d9f984910c | /DataProcess/data2_preprocessing.py | 2f985bd959de18749462efe3c4868f35c4887013 | [
"Apache-2.0"
] | permissive | zouning68/ner-demo | 61cc26c676cb2cd99010a7f6f2b5597f6a680f14 | ffdbf95fd0354766bd2f882ecb02d55a9b14b74d | refs/heads/master | 2020-11-30T08:34:21.325248 | 2019-12-27T02:30:00 | 2019-12-27T02:30:00 | 230,357,325 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | """
data2 数据预处理
数据从all 文件夹中分割出两粉数据
"""
from Public.path import path_data2_dir
import os
# data2 数据预处理
def data2_preprocessing(split_rate: float = 0.8,
ignore_exist: bool = False) -> None:
"""
data2数据预处理
:param split_rate: 训练集和测试集切分比例
:param ignore_exist: 是否忽略已经存在的文件(如果忽略,处理完一遍后不会再进行第二遍处理)
:return: None
"""
path = os.path.join(path_data2_dir, "all.txt")
path_train = os.path.join(path_data2_dir, "train.txt")
path_test = os.path.join(path_data2_dir, "test.txt")
if not ignore_exist and os.path.exists(path_train) and os.path.exists(path_test):
return
texts = []
with open(path, 'r', encoding='utf8') as f:
line_t = []
for l in f:
if l != '\n':
line_t.append(l)
else:
texts.append(line_t)
line_t = []
if split_rate >= 1.0:
split_rate = 0.8
split_index = int(len(texts) * split_rate)
train_texts = texts[:split_index]
test_texts = texts[split_index:]
# 分割和存数文本
def split_save(texts: [str], save_path: str) -> None:
data = []
for line in texts:
for item in line:
data.append(item)
data.append("\n")
with open(save_path, 'w', encoding='utf8') as f:
f.write("".join(data))
split_save(texts=train_texts, save_path=path_train)
split_save(texts=test_texts, save_path=path_test)
if __name__ == '__main__':
data2_preprocessing()
| [
"[email protected]"
] | |
885130676b7af1c3d220da12ca057033b6a743ad | 24bd62bd716a25e203bf05bf1f74e964e250f020 | /bsmv_experiments.py | 2e68eabbe3eaac11254d7fb6c87b527c0d173b09 | [] | no_license | sreejithkvenugopal/GRRT_Experiments | 32cf2186dea62b2c11be8e43d2ce86e0d46d1334 | e7588353facef59372553a2411a8f5a96e778c2d | refs/heads/main | 2023-01-23T06:11:10.883988 | 2020-12-08T02:10:18 | 2020-12-08T02:10:18 | 318,921,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,782 | py | import numpy as np
#import matplotlib.pyplot as plt
import os,sys,json
import scipy as sci
from rrt_codes.block_single_measurement_vector import block_single_measurement_vector
from scipy.linalg import hadamard
from scipy.stats import norm
from sklearn import linear_model
from sklearn.linear_model import LassoCV
import argparse
def generate_full_support_from_block_support(block_support,block_size=4):
ind = []
for i in block_support:
ind=ind+[j for j in range(i * block_size, (i + 1) * block_size)]
return ind
# BOMP with a priori known block sparsity
def BOMP_prior_sparsity(X,y,k_block,l_block):
n,p=X.shape;n_blocks=np.int(p/l_block)
indices_per_block={}
for k in np.arange(n_blocks):
indices_per_block[k]=[j for j in range(k * l_block, (k + 1) *l_block)]
res=y
block_support=[]
full_support=[]
for k in np.arange(k_block):
corr=np.matmul(X.T,res).flatten()
corr_norm_per_block=np.array([np.linalg.norm(corr[indices_per_block[k]]) for k in np.arange(n_blocks)])
block_ind=np.argmax(corr_norm_per_block)
block_support.append(block_ind)
full_support=full_support+indices_per_block[block_ind]
X_new=X[:,full_support];
try:
beta_est=np.matmul(np.linalg.pinv(X_new),y).flatten()
except:
break
Beta_est=np.zeros(p)
Beta_est[full_support]=beta_est
Beta_est=Beta_est.reshape((p,1))
res=y-np.matmul(X,Beta_est)
return Beta_est,full_support,block_support
# BOMP which stops once residual drops below a user provide threshold. threshold could be noise l2_norm or a noise variance based upper bound
def BOMP_prior_variance(X,y,threshold,l_block):
n,p=X.shape;n_blocks=np.int(p/l_block)
indices_per_block={}
for k in np.arange(n_blocks):
indices_per_block[k]=[j for j in range(k * l_block, (k + 1) *l_block)]
res=y
block_support=[]
full_support=[]
for k in np.arange(n_blocks):
corr=np.matmul(X.T,res).flatten()
corr_norm_per_block=np.array([np.linalg.norm(corr[indices_per_block[k]]) for k in np.arange(n_blocks)])
block_ind=np.argmax(corr_norm_per_block)
block_support.append(block_ind)
full_support=full_support+indices_per_block[block_ind]
X_new=X[:,full_support];
try:
beta_est=np.matmul(np.linalg.pinv(X_new),y).flatten()
except:
break
Beta_est=np.zeros(p)
Beta_est[full_support]=beta_est
Beta_est=Beta_est.reshape((p,1))
res=y-np.matmul(X,Beta_est)
if np.linalg.norm(res)<threshold:
break
return Beta_est,full_support,block_support
# BOMP using the validation scheme proposed in "On the theoretical
# analysis of cross validation in compressive sensing" in Proc. ICASSP
## 2014. IEEE, 2014, pp. 3370–3374.
def BOMP_CV(X,y,l_block,cv_fraction):
#cv_fraction: fraction of measurments for validation.
n,p=X.shape;n_blocks=np.int(p/l_block)
indices_per_block={}
for k in np.arange(n_blocks):
indices_per_block[k]=[j for j in range(k * l_block, (k + 1) *l_block)]
n_cv=np.int(n*cv_fraction)
indices=np.random.choice(n,n,False).tolist()
ind_cv=indices[:n_cv]
ind_train=indices[n_cv:]
n_train=len(ind_train)
y_train=y[ind_train].reshape((n_train,1)); y_cv=y[ind_cv].reshape((n_cv,1));
X_train=X[ind_train,:]; X_cv=X[ind_cv,:]
train_col_norms=np.linalg.norm(X_train,axis=0)+1e-8
X_train=X_train/train_col_norms
train_col_norms=train_col_norms.reshape((p,1))
max_iter=np.int(np.floor(n_train/(l_block)))
cv_error_list=[np.linalg.norm(y_cv)**2/(n_cv)]
train_error_list=[np.linalg.norm(y_train)**2/(n_train)]
min_cv_error=cv_error_list[0]
res=y_train
block_support=[]
full_support=[]
best_full_support=full_support
best_block_support=block_support
best_est=np.zeros((p,1))
for k in np.arange(max_iter):
corr=np.matmul(X_train.T,res).flatten()
corr_norm_per_block=np.array([np.linalg.norm(corr[indices_per_block[k]]) for k in np.arange(n_blocks)])
block_ind=np.argmax(corr_norm_per_block)
block_support.append(block_ind)
full_support=full_support+indices_per_block[block_ind]
X_new=X_train[:,full_support];
try:
beta_est=np.matmul(np.linalg.pinv(X_new),y_train).flatten()
except:
break
Beta_est=np.zeros(p)
Beta_est[full_support]=beta_est
Beta_est=Beta_est.reshape((p,1))
res=y_train-np.matmul(X_train,Beta_est)
train_error_list.append(np.linalg.norm(res)**2/n_train)
Beta_scaled=Beta_est*train_col_norms #rescaling to accomodate scaling in X_train
res_cv=y_cv-np.matmul(X_cv,Beta_scaled)
cv_error=np.linalg.norm(res_cv)**2/n_cv
#print(cv_error_list)
cv_error_list.append(cv_error)
if cv_error<min_cv_error:
best_full_support=full_support
best_block_support=block_support
best_est=Beta_scaled
min_cv_error=cv_error
CV_dict={}
CV_dict['train_error_list']=train_error_list
CV_dict['cv_error_list']=cv_error_list
return best_est,best_full_support,best_block_support,CV_dict
# Block sparse version of SPICE from T. Kronvall, S. I. Adalbj¨ornsson, S. Nadig, and A. Jakobsson, “Groupsparse
#regression using the covariance fitting criterion,” Signal Processing,
# vol. 139, pp. 116–130, 2017.
def group_spice(X,y,l_block):
#implemented with r=infty. homoscedastic version.
n,p=X.shape; n_blocks=np.int(p/l_block)
r=1e8; s=1;
max_iter=1000;tol=1e-4
current_iter=0;
indices_per_block={}
for k in np.arange(n_blocks):
indices_per_block[k]=[j for j in range(k * l_block, (k + 1) *l_block)]
col_norm=np.linalg.norm(X,axis=0)
v=np.zeros(n_blocks)
for k in np.arange(n_blocks):
ind=indices_per_block[k]
v[k]=np.linalg.norm(col_norm[ind]**2,s)
A=np.hstack([X,np.eye(n)])
sigma_current=np.sqrt(np.matmul(y.T,y)/n)
p_current=np.zeros(p)
for k in np.arange(p):
p_current[k]=np.matmul(X[:,k].T,y)**2/np.linalg.norm(X[:,k])**4
while current_iter<max_iter:
current_iter+=1
R=np.matmul(np.matmul(X,np.diag(p_current)),X.T)+sigma_current*np.eye(n)
z=np.matmul(np.linalg.inv(R),y)
sigma_next=sigma_current*np.sqrt(np.linalg.norm(z)**2/n)
p_next=np.zeros(p)
for k in np.arange(n_blocks):
ind=indices_per_block[k]
block_r_p=np.zeros(len(ind))
r_z=np.abs(np.matmul(X[:,ind].T,z)).flatten()
p_z=p_current[ind].flatten()
p_next_block=np.linalg.norm([r_z[j]*p_z[j] for j in np.arange(l_block)])/np.sqrt(v[k])
p_next[ind]=p_next_block
if np.linalg.norm(p_next-p_current)<tol:
break;
p_current=p_next
sigma_current=sigma_next
Beta_est=np.zeros(p)
for k in np.arange(p):
Beta_est[k]=p_current[k]*np.matmul(X[:,k].T,z)
return Beta_est.reshape((p,1)),p_current,sigma_current
def compute_error(support_true,support_estimate,Beta_true,Beta_estimate):
Beta_true=np.squeeze(Beta_true); Beta_estimate=np.squeeze(Beta_estimate);
l2_error=np.linalg.norm(Beta_true-Beta_estimate)**2/np.linalg.norm(Beta_true)**2
if len(support_estimate)==0:
support_error=1;
recall=0
precision=1;
pmd=1
pfd=0
else:
support_true=set(support_true); support_estimate=set(support_estimate)
if support_true==support_estimate:
support_error=0;
else:
support_error=1;
recall=len(support_true.intersection(support_estimate))/len(support_true)
precision=len(support_estimate.intersection(support_true))/len(support_estimate)
if len(support_true.difference(support_estimate))>0:
pmd=1;
else:
pmd=0;
if len(support_estimate.difference(support_true))>0:
pfd=1
else:
pfd=0;
return support_error,l2_error,recall,precision,pmd,pfd
def run_experiment(num_iter=100,matrix_type='two_ortho'):
n,p,k_block,l_block=64,128,3,4
n_blocks = np.int(p /l_block)
indices_per_block={}
for k in np.arange(n_blocks):
indices_per_block[k]=[j for j in range(k * l_block, (k + 1) *l_block)]
SNR=np.linspace(-10,20,10)
snr=10**(SNR/10)# SNR in real scale
num_iter=num_iter
MSE_cv=np.zeros(10);MSE_sparsity=np.zeros(10);MSE_alpha1=np.zeros(10);MSE_alpha2=np.zeros(10);MSE_variance=np.zeros(10);
MSE_spice=np.zeros(10);MSE_spice_ls=np.zeros(10)
MSE_variance2=np.zeros(10);
PE_cv=np.zeros(10);PE_sparsity=np.zeros(10);PE_alpha1=np.zeros(10);PE_alpha2=np.zeros(10);PE_variance=np.zeros(10);
PE_spice=np.zeros(10);PE_spice_ls=np.zeros(10);
PE_variance2=np.zeros(10);
PFD_cv=np.zeros(10);PFD_sparsity=np.zeros(10);PFD_alpha1=np.zeros(10);PFD_alpha2=np.zeros(10);PFD_variance=np.zeros(10);
PFD_spice=np.zeros(10);PFD_spice_ls=np.zeros(10);
PFD_variance2=np.zeros(10);
PMD_cv=np.zeros(10);PMD_sparsity=np.zeros(10);PMD_alpha1=np.zeros(10);PMD_alpha2=np.zeros(10);PMD_variance=np.zeros(10);
PMD_spice=np.zeros(10);PMD_spice_ls=np.zeros(10);
PMD_variance2=np.zeros(10);
Recall_cv=np.zeros(10);Recall_sparsity=np.zeros(10);Recall_alpha1=np.zeros(10);Recall_alpha2=np.zeros(10);
Recall_variance=np.zeros(10);Recall_spice=np.zeros(10);Recall_spice_ls=np.zeros(10);
Recall_variance2=np.zeros(10);
Precision_cv=np.zeros(10);Precision_sparsity=np.zeros(10);Precision_alpha1=np.zeros(10);
Precision_alpha2=np.zeros(10);Precision_variance=np.zeros(10);Precision_spice=np.zeros(10);
Precision_variance2=np.zeros(10);Precision_spice_ls=np.zeros(10);
bsmv=block_single_measurement_vector();
for snr_iter in np.arange(10):
print(SNR[snr_iter])
mse_cv=0;mse_sparsity=0;mse_alpha1=0;mse_alpha2=0;mse_variance=0;mse_variance2=0;mse_spice=0;mse_spice_ls=0;
pe_cv=0;pe_sparsity=0;pe_alpha1=0;pe_alpha2=0;pe_variance=0;pe_variance2=0;pe_spice=0;pe_spice_ls=0;
pfd_cv=0;pfd_sparsity=0;pfd_alpha1=0;pfd_alpha2=0;pfd_variance=0;pfd_variance2=0;pfd_spice=0;pfd_spice_ls=0;
pmd_cv=0;pmd_sparsity=0;pmd_alpha1=0;pmd_alpha2=0;pmd_variance=0;pmd_variance2=0;pmd_spice=0;pmd_spice_ls=0;
recall_cv=0;recall_sparsity=0;recall_alpha1=0;recall_alpha2=0;recall_variance=0;recall_variance2=0;
recall_spice=0;recall_spice_ls=0;
precision_cv=0;precision_sparsity=0;precision_alpha1=0;precision_alpha2=0;precision_variance=0;
precision_spice=0;precision_spice_ls=0;precision_variance2=0;
for num in np.arange(num_iter):
#print(num)
#signal model
if matrix_type=='two_ortho':
X=np.hstack([np.eye(n),hadamard(n)/np.sqrt(n)])
elif matrix_type=='normal':
X=np.random.randn(n,p)/np.sqrt(n)
else:
raise Exception('Invalid matrix type. Give one of normal or two_ortho')
Beta_true=np.zeros((p,1))
if p % l_block != 0:
raise Exception(' nfeatures should be a multiple of block_size')
block_support= np.random.choice(np.arange(n_blocks), size=k_block, replace=False).tolist()
support_true=generate_full_support_from_block_support(block_support=block_support,block_size=l_block)
Beta_true[support_true] = np.sign(np.random.randn(len(support_true), 1))
signal_power=len(support_true)
# noise_power=nsamples*noisevar. snr=signal_power/noise_power
noise_var = signal_power/ (n * snr[snr_iter])
noise = np.random.randn(n, 1) * np.sqrt(noise_var)
y= np.matmul(X, Beta_true) + noise
#GRRT
rrt_bomp_dict=bsmv.compute_signal_and_support(X,Y=y,block_size=l_block,alpha_list=[0.1,0.01])
block_support,Beta_est_alpha1=rrt_bomp_dict[0.1]['support_estimate'],rrt_bomp_dict[0.1]['signal_estimate']
support_est=generate_full_support_from_block_support(block_support=block_support,block_size=l_block)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est_alpha1)
mse_alpha1+=l2_error;pe_alpha1+=support_error;recall_alpha1+=recall;precision_alpha1+=precision;
pmd_alpha1+=pmd;pfd_alpha1+=pfd
block_support,Beta_est_alpha2=rrt_bomp_dict[0.01]['support_estimate'],rrt_bomp_dict[0.01]['signal_estimate']
support_est=generate_full_support_from_block_support(block_support=block_support,block_size=l_block)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est_alpha2)
mse_alpha2+=l2_error;pe_alpha2+=support_error;recall_alpha2+=recall;precision_alpha2+=precision;
pmd_alpha2+=pmd;pfd_alpha2+=pfd
# Groupd SPICE
Beta_est,p_current,sigma_est=group_spice(X,y,l_block)
power_per_block=np.zeros(n_blocks)
for k in np.arange(n_blocks):
ind=indices_per_block[k]
power_per_block[k]=(np.linalg.norm(Beta_est.flatten()[ind],2)**2)/len(ind)
block_support=np.where(power_per_block>1e-2)[0]
support_est=generate_full_support_from_block_support(block_support=block_support,block_size=l_block)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est)
mse_spice+=l2_error;pe_spice+=support_error;recall_spice+=recall;precision_spice+=precision;
pmd_spice+=pmd;pfd_spice+=pfd
max_power=np.max(power_per_block)
block_support=np.where(power_per_block>0.2*max_power)[0]
support_est=generate_full_support_from_block_support(block_support=block_support,block_size=l_block)
beta_est=np.matmul(np.linalg.pinv(X[:,support_est]),y)
Beta_est_ls=np.zeros(p);Beta_est_ls[support_est]=beta_est.flatten()
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est_ls)
mse_spice_ls+=l2_error;pe_spice_ls+=support_error;recall_spice_ls+=recall;precision_spice_ls+=precision;
pmd_spice_ls+=pmd;pfd_spice_ls+=pfd
Beta_est,support_est,block_support=BOMP_prior_sparsity(X,y,k_block,l_block)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est)
mse_sparsity+=l2_error;pe_sparsity+=support_error;recall_sparsity+=recall;precision_sparsity+=precision;
pmd_sparsity+=pmd;pfd_sparsity+=pfd;
threshold=np.sqrt(noise_var)*np.sqrt(n+2*np.sqrt(n*np.log(n)))
Beta_est,support_est,block_support=BOMP_prior_variance(X,y,threshold,l_block)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est)
mse_variance+=l2_error;pe_variance+=support_error;recall_variance+=recall;precision_variance+=precision;
pmd_variance+=pmd;pfd_variance+=pfd;
threshold=np.linalg.norm(noise)
Beta_est,support_est,block_support=BOMP_prior_variance(X,y,threshold,l_block)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est)
mse_variance2+=l2_error;pe_variance2+=support_error;recall_variance2+=recall;precision_variance2+=precision;
pmd_variance2+=pmd;pfd_variance2+=pfd;
Beta_est,support_est,best_block_support,CV_dict=BOMP_CV(X,y,l_block,cv_fraction=0.1)
support_error,l2_error,recall,precision,pmd,pfd=compute_error(support_true,support_est,
Beta_true,Beta_est)
mse_cv+=l2_error;pe_cv+=support_error;recall_cv+=recall;precision_cv+=precision;
pmd_cv+=pmd;pfd_cv+=pfd;
MSE_cv[snr_iter]=mse_cv/num_iter;MSE_sparsity[snr_iter]=mse_sparsity/num_iter;
MSE_alpha1[snr_iter]=mse_alpha1/num_iter;MSE_alpha2[snr_iter]=mse_alpha2/num_iter;
MSE_variance[snr_iter]=mse_variance/num_iter;MSE_spice[snr_iter]=mse_spice/num_iter;MSE_spice_ls[snr_iter]=mse_spice_ls/num_iter;
MSE_variance2[snr_iter]=mse_variance2/num_iter;
PE_cv[snr_iter]=pe_cv/num_iter;PE_sparsity[snr_iter]=pe_sparsity/num_iter;
PE_alpha1[snr_iter]=pe_alpha1/num_iter;PE_alpha2[snr_iter]=pe_alpha2/num_iter;
PE_variance[snr_iter]=pe_variance/num_iter; PE_variance2[snr_iter]=pe_variance2/num_iter;
PE_spice[snr_iter]=pe_spice/num_iter;PE_spice_ls[snr_iter]=pe_spice_ls/num_iter;
PFD_cv[snr_iter]=pfd_cv/num_iter;PFD_sparsity[snr_iter]=pfd_sparsity/num_iter;
PFD_alpha1[snr_iter]=pfd_alpha1/num_iter;PFD_alpha2[snr_iter]=pfd_alpha2/num_iter;
PFD_variance[snr_iter]=pfd_variance/num_iter; PFD_variance2[snr_iter]=pfd_variance2/num_iter;
PFD_spice[snr_iter]=pfd_spice/num_iter;PFD_spice_ls[snr_iter]=pfd_spice_ls/num_iter;
PMD_cv[snr_iter]=pmd_cv/num_iter;PMD_sparsity[snr_iter]=pmd_sparsity/num_iter;
PMD_alpha1[snr_iter]=pmd_alpha1/num_iter;PMD_alpha2[snr_iter]=pmd_alpha2/num_iter;
PMD_variance[snr_iter]=pmd_variance/num_iter; PMD_variance2[snr_iter]=pmd_variance2/num_iter;
PMD_spice[snr_iter]=pmd_spice/num_iter;PMD_spice_ls[snr_iter]=pmd_spice_ls/num_iter;
Recall_cv[snr_iter]=recall_cv/num_iter;Recall_sparsity[snr_iter]=recall_sparsity/num_iter;
Recall_alpha1[snr_iter]=recall_alpha1/num_iter;Recall_alpha2[snr_iter]=recall_alpha2/num_iter;
Recall_variance[snr_iter]=recall_variance/num_iter;Recall_spice[snr_iter]=recall_spice/num_iter;
Recall_variance2[snr_iter]=recall_variance2/num_iter;Recall_spice_ls[snr_iter]=recall_spice_ls/num_iter;
Precision_cv[snr_iter]=precision_cv/num_iter;Precision_sparsity[snr_iter]=precision_sparsity/num_iter;
Precision_alpha1[snr_iter]=precision_alpha1/num_iter;Precision_alpha2[snr_iter]=precision_alpha2/num_iter;
Precision_variance[snr_iter]=precision_variance/num_iter;Precision_variance2[snr_iter]=precision_variance2/num_iter;
Precision_spice[snr_iter]=precision_spice/num_iter;Precision_spice_ls[snr_iter]=precision_spice_ls/num_iter;
print('over')
print(' experiment over')
print('saving results')
results={}
results['algo']='SOMP'
results['experiment_type']='SNR_sweep'
results['num_iter']=num_iter
results['n']=n
results['p']=p
results['l_block']=l_block
results['k_block']=k_block
results['SNR']=SNR.tolist()
results['matrix_type']=matrix_type
results['sigmal_type']='pm1' #plus or minus 1.
results['MSE_cv']=MSE_cv.tolist();results['MSE_variance']=MSE_variance.tolist();results['MSE_variance2']=MSE_variance2.tolist();
results['MSE_spice']=MSE_spice.tolist();results['MSE_spice_ls']=MSE_spice_ls.tolist();
results['MSE_sparsity']=MSE_sparsity.tolist();results['MSE_alpha1']=MSE_alpha1.tolist(); results['MSE_alpha2']=MSE_alpha2.tolist();
results['PE_cv']=PE_cv.tolist();results['PE_variance']=PE_variance.tolist();
results['PE_variance2']=PE_variance2.tolist();results['PE_spice']=PE_spice.tolist();results['PE_spice_ls']=PE_spice_ls.tolist();
results['PE_sparsity']=PE_sparsity.tolist();results['PE_alpha1']=PE_alpha1.tolist(); results['PE_alpha2']=PE_alpha2.tolist();
results['Recall_cv']=Recall_cv.tolist();results['Recall_variance']=Recall_variance.tolist();
results['Recall_variance2']=Recall_variance2.tolist();results['Recall_spice']=Recall_spice.tolist();
results['Recall_sparsity']=Recall_sparsity.tolist();results['Recall_alpha1']=Recall_alpha1.tolist();
results['Recall_alpha2']=Recall_alpha2.tolist(); results['Recall_spice_ls']=Recall_spice_ls.tolist();
results['Precision_cv']=Precision_cv.tolist();results['Precision_variance']=Precision_variance.tolist();
results['Precision_variance2']=Precision_variance2.tolist();results['Precision_spice']=Precision_spice.tolist();
results['Precision_sparsity']=Precision_sparsity.tolist();results['Precision_alpha1']=Precision_alpha1.tolist();
results['Precision_alpha2']=Precision_alpha2.tolist(); results['Precision_spice_ls']=Precision_spice_ls.tolist();
results['PMD_cv']=PMD_cv.tolist();results['PMD_variance']=PMD_variance.tolist();
results['PMD_variance2']=PMD_variance2.tolist();results['PMD_spice']=PMD_spice.tolist();results['PMD_spice_ls']=PMD_spice_ls.tolist();
results['PMD_sparsity']=PMD_sparsity.tolist();results['PMD_alpha1']=PMD_alpha1.tolist(); results['PMD_alpha2']=PMD_alpha2.tolist();
results['PFD_cv']=PFD_cv.tolist();results['PFD_variance']=PFD_variance.tolist();
results['PFD_variance2']=PFD_variance2.tolist();results['PFD_spice']=PFD_spice.tolist();results['PFD_spice_ls']=PFD_spice_ls.tolist();
results['PFD_sparsity']=PFD_sparsity.tolist();results['PFD_alpha1']=PFD_alpha1.tolist(); results['PFD_alpha2']=PFD_alpha2.tolist();
file_name='BOMP_SNR_sweep_'+matrix_type+'.json'
print(file_name)
with open(file_name,'w') as f:
json.dump(results,f)
print('results dumped to {}'.format(file_name))
if __name__ == '__main__':
    # Command-line entry point: parse the sweep options and run the experiment.
    cli = argparse.ArgumentParser()
    cli.add_argument('--num_iter', type=int, default=100,
                     help='how many monte carlo iterations?')
    cli.add_argument('--matrix_type', type=str, default='two_ortho',
                     help='matrix type: normal or two_ortho?')
    options = cli.parse_args()
    print(options)
    run_experiment(num_iter=options.num_iter, matrix_type=options.matrix_type)
| [
"[email protected]"
] | |
0cb178233d4cfcf9f34a0954b8cef8c6702b83b0 | 78caf1980448dabc4b0ea7a6177281bec085431d | /color.py | 7c4dda25bfeeafd31c3c74c437be6a04e780beed | [] | no_license | kohanyirobert/led-display-simulator | 69b08106c77dd2cb6379b65de2b53cbccd6fe8de | 7b7f1dd08773b74bdd0e1c8e0ba300e52c381c1c | refs/heads/master | 2021-05-16T00:51:08.216921 | 2017-10-15T07:52:02 | 2017-10-15T12:48:22 | 107,011,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | class Color():
	def __init__(self, red, green, blue):
		# Store the three RGB channel values (0-255 by the convention of the
		# BLACK/WHITE constants defined in this module).
		self.red = red
		self.green = green
		self.blue = blue
def __eq__(self, other):
return self.red == other.red \
and self.green == other.green \
and self.blue == other.blue
BLACK = Color(0, 0, 0)
WHITE = Color(255, 255, 255)
| [
"[email protected]"
] | |
6736b424599b92948dc2ae5aac6558d1bfdef98d | d5d01ca52800526ac467186aba7241b5fa728001 | /teste_coloring.py | 690896fb5ca1be5eb3de0106c636078d1a68c201 | [
"MIT"
] | permissive | kewitz/master | 1d4703064c26a4bb9e1e72e03271aa3c9c0af50c | fd07c7ec40bc72e8870b2bdff770390487977a2f | refs/heads/master | 2021-01-22T17:58:47.863942 | 2015-09-18T17:22:19 | 2015-09-18T17:22:19 | 32,545,570 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | # -*- coding: utf-8 -*-
from numpy import *
from ctypes import *
import NScheme as solver
# Load the test mesh and pin the boundary nodes: Dirichlet lines 1-6 at 0 V,
# marked calc=False so the solver skips them.
path = """./res/"""
m = solver.Mesh(file=path + "teste1_1.msh", verbose=True)
bound = {1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 0.0, 6: 0.0}
for n in m.nodesOnLine(bound.keys()):
    n.calc = False
# Partition the free nodes into groups for the solver.
nodes = solver.split([n for n in m.nodes if n.calc], 1E7, 2)
colors = []
groups = []
# Greedy graph colouring per group: nodes placed in the same colour class must
# not share a mesh element.
for g, dofs in enumerate(nodes):
    groups.append([])
    c = 0
    while len(dofs) > 0:
        groups[g].append([])
        # Seed the new colour class with an arbitrary remaining node.
        groups[g][c] = [dofs.pop()]
        # NOTE(review): this iterates m.elements (ALL mesh elements), while the
        # update below uses n.elements — looks like a bug; confirm intent.
        elements = [e for n in groups[g][c] for e in m.elements]
        for i, n in enumerate(dofs):
            # NOTE(review): `is 0` is an identity check on an int — should be
            # `== 0` (works only by CPython small-int caching).
            if len(set(n.elements).intersection(elements)) is 0:
                # NOTE(review): dofs.pop(i) mutates the list while enumerate()
                # iterates it, so some nodes are skipped — verify this is
                # acceptable for the colouring.
                groups[g][c].append(dofs.pop(i))
                elements = [e for n in groups[g][c] for e in n.elements]
        print "Color: %i, %i nodes." % (c, len(groups[g][c]))
        c += 1
| [
"[email protected]"
] | |
d05e491c484faf8626ee6e542c29ccb98cd0a7bd | 7c4edb6e475bc0b3aaf43736a111019640a5863c | /python/ISpyElectron_cfi.py | c9cb10e4f76feebf6b3421ab0154d7abe3aaa771 | [] | no_license | cms-outreach/ispy-analyzers | da52c44c18fa7640b7bb6d9091286a2f69f35c26 | 086143c71cbbc930ad58daf405b93f48320d3867 | refs/heads/master | 2023-05-25T13:53:22.963617 | 2022-03-07T18:15:23 | 2022-03-07T18:15:23 | 10,282,224 | 6 | 11 | null | 2023-05-18T14:22:36 | 2013-05-25T09:39:40 | C++ | UTF-8 | Python | false | false | 212 | py | import FWCore.ParameterSet.Config as cms
ISpyElectron = cms.EDAnalyzer('ISpyElectron',
iSpyElectronTag = cms.InputTag('hltPixelMatchElectronsL1Iso')
)
| [
"[email protected]"
] | |
11a0af5cc3c789157b5ab87ff783c6d0b93ee717 | d4f198892448cf0197614a1586892f82e52c02f8 | /pages/base_page.py | 62a564dd905f0270b5d7da5f792f61d89afdd5c7 | [] | no_license | Serg-Klimenko/Stepik_Selenium_final_task | 8c6a0945a6236af21665ba38cfa93c5bea735de8 | 9a9ee9f6eac1fbfd10d723c6fcc7d305b0e7ae2a | refs/heads/main | 2023-02-28T00:32:33.232340 | 2021-01-31T19:25:57 | 2021-01-31T19:25:57 | 334,411,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,624 | py | from .locators import BasePageLocators
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import math
class BasePage():
    """Base class for all page objects: wraps common browser interactions."""

    def __init__(self, browser, url, timeout=10):
        """Remember the driver and target URL; apply an implicit wait."""
        self.browser = browser
        self.url = url
        self.browser.implicitly_wait(timeout)

    def go_to_basket_page(self):
        """Follow the basket link in the page header."""
        basket_link = self.browser.find_element(*BasePageLocators.BASKET_LINK)
        basket_link.click()

    def go_to_login_page(self):
        """Follow the login link in the page header."""
        login_link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
        login_link.click()

    def open(self):
        """Navigate the browser to this page's URL."""
        self.browser.get(self.url)

    def is_element_present(self, how, what):
        """Return True if the locator matches an element, False otherwise."""
        try:
            self.browser.find_element(how, what)
            return True
        except NoSuchElementException:
            return False

    def is_not_element_present(self, how, what, timeout=4):
        """Return True if the element does not appear within `timeout` seconds."""
        waiter = WebDriverWait(self.browser, timeout)
        try:
            waiter.until(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return True
        return False

    def is_disappeared(self, how, what, timeout=4):
        """Return True if the element vanishes from the page within `timeout` seconds."""
        waiter = WebDriverWait(self.browser, timeout, 1, TimeoutException)
        try:
            waiter.until_not(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return False
        return True

    def should_be_login_link(self):
        """Assert that the login link is present on the page."""
        assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"

    def solve_quiz_and_get_code(self):
        """Answer the math alert, then print the promo code from the follow-up alert."""
        quiz_alert = self.browser.switch_to.alert
        x_value = float(quiz_alert.text.split(" ")[2])
        quiz_alert.send_keys(str(math.log(abs(12 * math.sin(x_value)))))
        quiz_alert.accept()
        try:
            code_alert = self.browser.switch_to.alert
            print(f"Your code: {code_alert.text}")
            code_alert.accept()
        except NoAlertPresentException:
            print("No second alert presented")

    def should_be_authorized_user(self):
        """Assert that a logged-in user's icon is visible in the header."""
        assert self.is_element_present(*BasePageLocators.USER_ICON), ("User icon is not presented,"
                                                                     " probably unauthorised user")
| [
"[email protected]"
] | |
6867439e9c0b0b68b52993a561f5a92529a58c1f | 8ec2f4d20a88a209b6fdd3b7c2519dfb0f909206 | /KM_KBQA/BertEntityRelationClassification/optimization.py | dfc1a07decc67db696c5c351ec2f2bffe434c8ea | [] | no_license | actnlp/aerospace_kbqa | 25e05bbd94df76595cbc47471c4d1278cce6326d | fc8ac1e159e25333a303739247be510991fb2c0d | refs/heads/master | 2023-01-08T23:54:16.492238 | 2020-11-13T02:28:38 | 2020-11-13T02:28:38 | 286,701,724 | 1 | 6 | null | null | null | null | UTF-8 | Python | false | false | 12,272 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import abc
import logging
import math
import sys
import torch
from torch.nn.utils import clip_grad_norm_
from torch.optim import Optimizer
from torch.optim.optimizer import required
logger = logging.getLogger(__name__)
# abc.ABC was added in Python 3.4; on older interpreters synthesise an
# equivalent empty base class directly from the metaclass.
if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta('ABC', (), {})
class _LRSchedule(ABC):
    """ Parent of all LRSchedules here.

    Subclasses implement get_lr_(), mapping a training-progress fraction to a
    learning-rate multiplier; get_lr() converts steps to progress and handles
    the beyond-t_total warning.
    """
    warn_t_total = False  # is set to True for schedules where progressing beyond t_total steps doesn't make sense
    def __init__(self, warmup=0.002, t_total=-1, **kw):
        """
        :param warmup: what fraction of t_total steps will be used for linear warmup
        :param t_total: how many training steps (updates) are planned
        :param kw:
        """
        super(_LRSchedule, self).__init__(**kw)
        if t_total < 0:
            logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        # warmup == -1 means "no warmup"; clamp it to 0 for the math below.
        warmup = max(warmup, 0.)
        self.warmup, self.t_total = float(warmup), float(t_total)
        # Last progress value at which the beyond-t_total warning fired, so the
        # same warning is not logged on every subsequent step.
        self.warned_for_t_total_at_progress = -1
    def get_lr(self, step, nowarn=False):
        """
        :param step: which of t_total steps we're on
        :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
        :return: learning rate multiplier for current update
        """
        if self.t_total < 0:
            # No schedule configured: constant multiplier.
            return 1.
        progress = float(step) / self.t_total
        ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active for schedules with warn_t_total set)
        if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
            logger.warning(
                "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
                    .format(ret, self.__class__.__name__))
            self.warned_for_t_total_at_progress = progress
        # end warning
        return ret
    @abc.abstractmethod
    def get_lr_(self, progress):
        """
        :param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
        :return: learning rate multiplier for current update
        """
        return 1.
class ConstantLR(_LRSchedule):
    """Constant schedule: the LR multiplier is always 1 (no warmup, no decay)."""
    def get_lr_(self, progress):
        return 1.
class WarmupCosineSchedule(_LRSchedule):
    """
    Linear warmup followed by a cosine decay (no restarts).
    """
    warn_t_total = True

    def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
        """
        :param warmup: see LRSchedule
        :param t_total: see LRSchedule
        :param cycles: number of cosine cycles after warmup. The default 0.5
            decays from 1. at progress==warmup down to 0. at progress==1.
        :param kw:
        """
        super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
        self.cycles = cycles

    def get_lr_(self, progress):
        if progress < self.warmup:
            # Linear ramp from 0 to 1 over the warmup fraction.
            return progress / self.warmup
        # Re-normalise progress to [0, 1] over the post-warmup phase.
        decay_progress = (progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * decay_progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
    """
    Linear warmup followed by a cosine decay that restarts abruptly ("hard")
    at the start of each of `cycles` cycles (visible only when cycles > 1).
    """
    def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
        # A fractional cycle count would end training mid-decay.
        assert(cycles >= 1.)

    def get_lr_(self, progress):
        if progress < self.warmup:
            # Linear ramp-up phase.
            return progress / self.warmup
        # Position inside the post-warmup phase, then inside the current cycle.
        decay_progress = (progress - self.warmup) / (1 - self.warmup)
        cycle_position = (self.cycles * decay_progress) % 1
        return 0.5 * (1. + math.cos(math.pi * cycle_position))
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
    """
    Cosine schedule where every cycle begins with its own linear warmup.
    Each per-cycle warmup uses the same rate as the initial one, so the total
    effective fraction of warmup steps over all cycles is warmup * cycles.
    """
    def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
        # The combined warmup across all cycles must leave room for decay.
        assert(warmup * cycles < 1.)
        warmup = warmup * cycles if warmup >= 0 else warmup
        super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)

    def get_lr_(self, progress):
        # Fold overall progress into the current cycle's [0, 1) range.
        cycle_progress = progress * self.cycles % 1.
        if cycle_progress < self.warmup:
            return cycle_progress / self.warmup
        decay_progress = (cycle_progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1. + math.cos(math.pi * decay_progress))
class WarmupConstantSchedule(_LRSchedule):
    """
    Ramps the multiplier linearly from 0 to 1 during warmup, then holds it at 1.
    """
    def get_lr_(self, progress):
        # Flat after warmup.
        return progress / self.warmup if progress < self.warmup else 1.
class WarmupLinearSchedule(_LRSchedule):
    """
    Ramps up linearly during warmup, then decays linearly to 0 at t_total.
    """
    warn_t_total = True

    def get_lr_(self, progress):
        if progress < self.warmup:
            # Warmup: 0 -> 1 over the first `warmup` fraction of training.
            return progress / self.warmup
        # Decay: 1 at progress==warmup down to 0 at progress==1, clamped at 0
        # when training runs past t_total.
        return max((progress - 1.) / (self.warmup - 1.), 0.)
# String aliases accepted by BertAdam's `schedule` argument, mapped to the
# schedule classes above. None / "none" disable scheduling entirely.
SCHEDULES = {
    None: ConstantLR,
    "none": ConstantLR,
    "warmup_cosine": WarmupCosineSchedule,
    "warmup_constant": WarmupConstantSchedule,
    "warmup_linear": WarmupLinearSchedule
}
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Unlike stock Adam this variant applies no bias correction and decouples
    the weight decay from the adaptive moment estimates (see step()).

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
        schedule: schedule to use for the warmup (see above).
            Can be 'warmup_linear', 'warmup_constant', 'warmup_cosine', or a LRSchedule object.
            Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        # initialize schedule object: string names are resolved via SCHEDULES,
        # pre-built schedule objects are used as-is (their own warmup/t_total win).
        if not isinstance(schedule, _LRSchedule):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        else:
            if warmup != -1 or t_total != -1:
                logger.warning("Non-default warmup and t_total are ineffective when LRSchedule object is provided. "
                               "Please specify custom warmup and t_total in LRSchedule object.")
        defaults = dict(lr=lr, schedule=schedule,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
    def get_lr(self):
        """Return the current scheduled learning rate for every parameter.

        Returns [0] if the optimizer has not taken a step yet (no state).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']
                # Add grad clipping (per-parameter, applied before the moment update)
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time.
                # NOTE(review): add_(scalar, tensor) / addcmul_(scalar, t1, t2) is
                # the legacy positional-alpha signature; newer torch prefers
                # add_(grad, alpha=1 - beta1) — confirm against the torch version in use.
                next_m.mul_(beta1).add_(1 - beta1, grad)
                next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                update = next_m / (next_v.sqrt() + group['e'])
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
                # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
                # No bias correction
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
        return loss
| [
"[email protected]"
] | |
f916b3145cdf32893f9de457fb27cf474867551c | 71f0e3697c7bc70b14708acb293945ed661255c4 | /Code_LR/Data_frame.py | 4b9e3ec413ccad54142b454b6188f060ffeae4c7 | [] | no_license | mrtrunghieu1/Graduate_Admission_LR | ccb254a1228bf9ee3b7ba766e1e5d6d20251f9c5 | ae9d561488fa1209fe365de7c0325f784ba8c327 | refs/heads/main | 2023-01-22T14:44:06.836524 | 2020-11-13T15:54:11 | 2020-11-13T15:54:11 | 309,565,542 | 0 | 0 | null | 2020-11-13T15:54:12 | 2020-11-03T03:47:21 | null | UTF-8 | Python | false | false | 3,229 | py | from __future__ import division, print_function, unicode_literals
from sklearn import preprocessing
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn import datasets,linear_model
# Đọc file data(csv)
data_frame = pd.read_csv('data/data_train.csv').values
data = data_frame[:, 1:9]
# print(data)
scaler = StandardScaler()
scaler.fit(data)
data_norm = scaler.fit_transform(data)
# print(data_norm)
chance_of_admit = data_norm[:, -1].reshape(-1, 1)
# print(chance_of_admit)
data_train = data_norm[:, 0:7]
# print(data_train)
# Đếm số các case
N = data_frame.shape[0]
# add_data = np.ones((N,1))
data1 = np.hstack((np.ones((N, 1)), data_train))
# print(data1)
# print(data1[0])
# print(chance_of_admit)
# # Chọn cột và reshape lại
# gre_score = data_frame[:, 1].reshape(-1, 1)
# # mean = np.mean(gre_score)
# # std = np.std(gre_score)
# # gre_score_nor = (gre_score-mean)/std
# gre = gre_score/np.linalg.norm(gre_score)
# gre_score_nor = preprocessing.scale(gre_score)
# print(gre_score_nor)
# tofel_score = data_frame[:, 2].reshape(-1, 1)
# tofel_score = preprocessing.scale(tofel_score)
# university_rating = data_frame[:, 3].reshape(-1, 1)
# university_rating = preprocessing.scale(university_rating)
# sop = data_frame[:, 4].reshape(-1, 1)
# sop = preprocessing.scale(sop)
# lor = data_frame[:, -4].reshape(-1, 1)
# lor = preprocessing.scale(lor)
# cgpa = data_frame[:, -3].reshape(-1, 1)
# cgpa = preprocessing.scale(cgpa)
# research = data_frame[:, -2].reshape(-1, 1)
# research = preprocessing.scale(research)
# chance_of_admit = data_frame[:, -1].reshape(-1, 1)
# chance_of_admit = preprocessing.scale(chance_of_admit)
# code of me
w = np.random.randn(8)
number_of_iteration = 10000
learning_rate = 0.0001
cost = np.zeros((number_of_iteration,1))
# print(w.T)
# print(w.reshape(-1,1))
# y_train = np.dot(data1, w.reshape(-1,1))
# r = chance_of_admit - y_train
# print(y_train)
# A = (1./N)*np.sum(np.multiply(r, data1[:,1].reshape(-1,1)))
# print(A)
# print(data1, data1[:,1].reshape(-1,1))
for i in range(1, number_of_iteration):
y_train = np.dot(data1, w.reshape(-1,1))
r = y_train - chance_of_admit
cost[i] = (0.5/N)*np.sum(r*r)
w[0] -= learning_rate*(1./N)*np.sum(r)
w[1] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,1].reshape(-1,1)))
w[2] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,2].reshape(-1,1)))
w[3] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,3].reshape(-1,1)))
w[4] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,4].reshape(-1,1)))
w[5] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,5].reshape(-1,1)))
w[6] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,6].reshape(-1,1)))
w[7] -= learning_rate*(1./N)*np.sum(np.multiply(r, data1[:,7].reshape(-1,1)))
print(cost[i])
print(w)
# Normal Equation
# A = np.dot(data1.T, data1)
# b = np.dot(data1.T, chance_of_admit)
# w1 = np.dot(np.linalg.pinv(A), b)
# print(w1)
# using Sklearn library
regr = linear_model.LinearRegression(fit_intercept=False)
regr.fit(data1,chance_of_admit)
print('Solution found by scikit-learn: ',regr.coef_)
| [
"[email protected]"
] | |
383267361bf252c0830c3a10c4898ee8eb75fcba | 8e71f1b8ee620170c44f10637d5746b69a48667b | /examples/add_command.py | 82273e1dad30850b830b05f1635bf2e4f4306d70 | [
"MIT"
] | permissive | arkocal/pycmdmenu | f95ba0feec7efeabdcc898a81517dff64bb3c7a9 | 0cb3f55e90f9f64f50ab6ca784a5329439cca2c2 | refs/heads/master | 2020-03-30T06:21:09.798765 | 2018-10-05T12:56:38 | 2018-10-05T12:56:38 | 150,854,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import argparse
import cmdmenu
@cmdmenu.cmdmenu_function("Echo to terminal", "Longer description of echo")
def echo(message: "Message to echo"):
    # The string annotation doubles as the CLI help text for `message`.
    print(message)
@cmdmenu.cmdmenu_function("Echo reversed", "Longer description of mirror-echo")
def mirror_echo(message: "Message to echo reversed"):
    """Print *message* back to front."""
    print("".join(reversed(message)))
@cmdmenu.cmdmenu_function("Print a hello world message")
def hello(name=None):
    """Greet *name*, falling back to the classic 'World'."""
    target = "World" if name is None else name
    print("Hello, {}!".format(target))
@cmdmenu.cmdmenu_function("Print sum of given numbers")
def add_numbers(numbers: {"help": "Numbers to sum up",
                          "nargs": "+", "type":int}):
    # The dict annotation is forwarded to argparse's add_argument by cmdmenu.
    print(sum(numbers))
if __name__=="__main__":
    # Build the CLI menu: one sub-command per registered function, in the
    # same order the original registered them.
    main_parser = argparse.ArgumentParser("An example application")
    sub = main_parser.add_subparsers()
    for command in (echo, mirror_echo, add_numbers, hello):
        cmdmenu.add_command(sub, command)
    cmdmenu.parse_and_run_with(main_parser)
| [
"[email protected]"
] | |
36ab13a75baf8d404625533f5380751a6cb1f201 | e0259f0145cb0b4c6596f896157a02ab6c51df86 | /Algorithm/Python/175/0189_Rotate_Array.py | a014e45c0331be60d5d25f46a79c53f031c876ec | [] | no_license | fantasylsc/LeetCode | accaa9e13c0893db150efed4307ad5f777662e0d | f4a53f0ee4b2a53c1bf22f11d9d9eb9f53ff061a | refs/heads/master | 2022-10-10T12:04:48.537826 | 2022-08-24T02:23:46 | 2022-08-24T02:23:46 | 214,951,540 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | # Given an array, rotate the array to the right by k steps, where k is non-negative.
# Example 1:
# Input: [1,2,3,4,5,6,7] and k = 3
# Output: [5,6,7,1,2,3,4]
# Explanation:
# rotate 1 steps to the right: [7,1,2,3,4,5,6]
# rotate 2 steps to the right: [6,7,1,2,3,4,5]
# rotate 3 steps to the right: [5,6,7,1,2,3,4]
# Example 2:
# Input: [-1,-100,3,99] and k = 2
# Output: [3,99,-1,-100]
# Explanation:
# rotate 1 steps to the right: [99,-1,-100,3]
# rotate 2 steps to the right: [3,99,-1,-100]
# Note:
# Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
# Could you do it in-place with O(1) extra space?
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """
        Rotate `nums` k steps to the right, in place, with O(1) extra space.

        Uses the three-reversal trick: reverse the first n-k elements,
        reverse the last k, then reverse the whole array.
        Do not return anything, modify nums in-place instead.
        """
        n = len(nums)
        if n == 0:
            # Guard: `k % 0` below would raise ZeroDivisionError.
            return
        k = k % n  # rotating by a multiple of n is a no-op
        self.reverse_list(nums, 0, n - k - 1)
        self.reverse_list(nums, n - k, n - 1)
        self.reverse_list(nums, 0, n - 1)

    def reverse_list(self, nums, start, end):
        """Reverse nums[start:end + 1] in place by swapping from both ends."""
        while start < end:
            nums[start], nums[end] = nums[end], nums[start]
            start += 1
            end -= 1
| [
"[email protected]"
] | |
51899d337f116527f3cb9a7cbe1797cc2cf9131d | c4bcb851c00d2830267b1997fa91d41e243b64c2 | /utils/paths/dirs.py | 44e7aa6a061355a5976a779c5c64e4004a06388d | [] | no_license | tjacek/cluster_images | 5d6a41114a4039b3bdedc34d872be4e6db3ba066 | 8c660c69658c64c6b9de66d6faa41c92486c24c5 | refs/heads/master | 2021-01-23T21:01:17.078036 | 2018-06-07T14:33:50 | 2018-06-07T14:33:50 | 44,881,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,959 | py | import sys,os
sys.path.append(os.path.abspath('../cluster_images'))
import numpy as np
import os
import os.path as io
import utils.paths
from natsort import natsorted
from shutil import copyfile
from sets import Set
import utils.paths
class ApplyToFiles(object):
    """Decorator factory: apply a two-path function to every entry of a directory.

    The decorated function is called once per entry found in ``in_dir``;
    ``dir_arg`` selects whether directory entries (True) or plain files
    (False) are enumerated.
    """
    def __init__(self,dir_arg=False):
        # dir_arg is forwarded to get_files() as its `dirs` flag.
        self.dir_arg=dir_arg
    def __call__(self, func):
        @utils.paths.path_args
        def inner_func(in_dir,out_dir):
            print(str(in_dir))
            in_paths=get_files(in_dir,dirs=self.dir_arg)
            make_dir(out_dir)
            # out_dir.replace(...) maps each input path onto out_dir
            # (project path-object API — see utils.paths).
            out_paths=[ out_dir.replace(in_i) for in_i in in_paths]
            for in_i,out_i in zip(in_paths,out_paths):
                func(in_i,out_i)
        return inner_func
def dir_arg(func):
    """Decorator: invoke *func* with the list of plain files under dir_path."""
    def wrapped(dir_path):
        file_entries = get_files(dir_path, dirs=False)
        return func(file_entries)
    return wrapped
def apply_to_dirs( func):
    """Decorator factory: apply *func* to every bottom-level directory.

    The wrapped call receives (old_path, new_path, *extra); each leaf
    directory below old_path is mirrored under new_path and *func* is run
    on each (input_dir, output_dir) pair.
    """
    @utils.paths.path_args
    def inner_func(*args):
        old_path=str(args[0])
        new_path=str(args[1])
        other_args=args[2:]
        # All leaf directories of the source tree.
        in_paths=bottom_dirs(old_path)
        # exchange() re-roots each path from old_path to new_path
        # (project path-object API).
        out_paths=[path_i.exchange(old_path,new_path)
            for path_i in in_paths]
        make_dirs(new_path,out_paths)
        for in_i,out_i in zip(in_paths,out_paths):
            func(in_i,out_i,*other_args)
    return inner_func
@utils.paths.path_args
def copy_dir(in_path,out_path):
    """Mirror the first directory level of in_path under out_path,
    flattening each subdirectory's contents via unify_dirs()."""
    in_files=get_files(in_path,dirs=True)
    make_dir(str(out_path))
    for in_file_i in in_files:
        # Map the source subdirectory onto the destination root.
        out_file_i=out_path.replace(in_file_i)
        make_dir(str(out_file_i))
        print(str(in_file_i))
        print(str(out_file_i))
        unify_dirs(str(in_file_i),str(out_file_i))
@utils.paths.path_args
def unify_dirs(in_path,out_path):
    """Copy every file found one level below in_path's subdirectories
    into the single flat directory out_path."""
    dirs_paths=get_files(in_path)
    make_dir(str(out_path))
    files_paths=[]
    # Collect the plain files of each immediate subdirectory.
    for dir_i in dirs_paths:
        files_paths+=get_files(dir_i,dirs=False)
    for in_file_i in files_paths:
        out_file_i=out_path.replace(in_file_i)
        copyfile(str(in_file_i),str(out_file_i))
def get_files(dir_path,dirs=True,append_path=True):
    """List the entries of dir_path in natural sort order.

    dirs=True returns subdirectories, dirs=False returns plain files;
    append_path=True wraps each name into a project path object rooted
    at dir_path.
    """
    d_path=str(dir_path)
    all_in_dir=os.listdir(d_path)
    if(dirs):
        files= [f for f in all_in_dir
            if (not is_file(f,dir_path))]
    else:
        files= [f for f in all_in_dir
            if is_file(f,dir_path)]
    # natsorted: "file2" sorts before "file10" (natsort package).
    files=natsorted(files)
    if(append_path):
        files=[utils.paths.get_paths(dir_path,file_i) for file_i in files]
    return files
def is_file(f,path):
    """Return True when entry *f* inside directory *path* is a regular file."""
    candidate = str(path) + "/" + f
    return io.isfile(candidate)
@utils.paths.str_arg
def make_dir(path):
    """Create directory *path* if it does not already exist (non-recursive)."""
    if(not os.path.isdir(path)):
        os.mkdir(path)
def all_files(in_path,append_path=True):
    """Recursively collect every plain file below in_path (depth-first)."""
    dirs_i=get_files(in_path,dirs=True,append_path=append_path)
    files_i=get_files(in_path,dirs=False,append_path=append_path)
    if(dirs_i):
        for dirs_ij in dirs_i:
            # NOTE: the recursive call does not forward append_path; nested
            # levels always use the default True.
            files_i+=all_files(dirs_ij)
    return files_i
@utils.paths.path_args
def bottom_dirs(in_path):
    """Return the leaf directories of the tree rooted at in_path.

    A directory with no subdirectories is a leaf; in_path itself is
    returned when it has none.
    """
    dirs_i=get_files(in_path,dirs=True)
    bottom=[]
    if(dirs_i):
        for dirs_ij in dirs_i:
            bottom+=bottom_dirs(dirs_ij)
    else:
        bottom.append(in_path)
    return bottom
def make_dirs(out_path,dirs):
    """Create the intermediate directories needed to hold *dirs* under out_path.

    Leaf paths themselves (the entries of *dirs*) are deliberately skipped;
    only their ancestors are created here.
    """
    all_dirs=Set()
    bottom_dirs=Set([str(dir_i) for dir_i in dirs])
    for dir_i in dirs:
        postfix=str(dir_i).replace(out_path,'')
        postfix=postfix.split('/')
        # NOTE(review): `postfix` is computed but unused; sub_paths() is
        # called with dir_i (a path object) instead — looks like `postfix`
        # was the intended argument. Confirm before relying on this.
        paths_i=sub_paths(out_path,dir_i)
        all_dirs.update(paths_i)
    make_dir(out_path)
    for dir_i in all_dirs:
        if(not dir_i in bottom_dirs):
            make_dir(dir_i)
def sub_paths(out_path,dirs):
    """Return the cumulative '/'-joined prefixes of the sequence *dirs*.

    e.g. ["a", "b", "c"] -> ["a", "a/b", "a/b/c"].  The *out_path*
    argument is accepted for call-site compatibility but is not used.
    """
    prefixes = []
    accumulated = []
    for component in dirs:
        accumulated.append(component)
        prefixes.append('/'.join(accumulated))
    return prefixes
if __name__ == "__main__":
    # Ad-hoc driver: mirror dataset9/cats2 into dataset9/actions.
    path="../../dataset9/"
    copy_dir(path+"cats2/",path+"actions/")
"[email protected]"
] | |
0e1c4e9c5b3ec55e52f896f330853ae4b11b161b | d3076892caaebfd9bab6540aaf045742765a6399 | /facts/current_hour.py | 2f3666537398158524a8d42e8dbf3bef4035cfe5 | [] | no_license | poundbangbash/munki-facts-conditions | ff3340fcd5b95f9d70e74b0365dd92ce00f5de80 | 1f73db0eba706f1c833107973ea0dacf1022dd45 | refs/heads/master | 2022-05-28T23:04:07.854006 | 2022-05-16T20:08:56 | 2022-05-16T20:08:56 | 117,352,958 | 3 | 3 | null | 2018-01-16T18:53:06 | 2018-01-13T15:06:57 | Python | UTF-8 | Python | false | false | 300 | py |
import datetime
def fact():
    """Return {'current_hour': 'HH'} — the current local hour, zero-padded."""
    try:
        hour_text = datetime.datetime.now().strftime("%H")
    except (IOError, OSError):
        # Defensive fallback: report an empty value rather than fail.
        hour_text = ""
    return {'current_hour': hour_text}

if __name__ == '__main__':
    print(fact())
| [
"(none)"
] | (none) |
0335735939c2dc7ee5586f49a615b1b8d5bca9d9 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/legacy_test/test_tensor_data_ptr.py | eeaa91df0dcc17dc0127c30c132a1bde8bca8703 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 1,069 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
class TestTensorDataPtr(unittest.TestCase):
    """Checks Tensor.data_ptr() for two tensors sharing one buffer."""
    def test_tensor_data_ptr(self):
        # Random float64 host data converted to a Paddle tensor.
        np_src = np.random.random((3, 8, 8))
        src = paddle.to_tensor(np_src, dtype="float64")
        dst = paddle.Tensor()
        # _share_buffer_to makes dst alias src's storage (private API).
        src._share_buffer_to(dst)
        self.assertTrue(src.data_ptr() is not None)
        # Aliased tensors must report the same underlying data address.
        self.assertEqual(src.data_ptr(), dst.data_ptr())
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
6b0faaf8c2c9bc9d8d6d1973f7859558e1d8afd2 | dee62bd47b1d2ce7363bc8f4dc84ba24a25ff6c6 | /2. Условия/Ход ферзя.py | 1f2cb2b056f554b377e63ac97c268f81bc2092b1 | [] | no_license | hellion86/pythontutor-lessons | c0f5c8a1ee84bfd861f224ca10f9154e35ac82c6 | 2d6b721db2f8ddabb530f5898317118d77db5d47 | refs/heads/master | 2020-03-12T10:45:02.444438 | 2018-12-21T18:24:59 | 2018-12-21T18:24:59 | 130,580,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | '''
Условие
Шахматный ферзь ходит по диагонали, горизонтали или вертикали. Даны две различные клетки шахматной доски, определите, может ли ферзь попасть с первой клетки на вторую одним ходом.
'''
# Read start square (x1, y1) and target square (x2, y2), one number per line.
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
# A queen reaches the target in one move when the squares share a row or a
# column, or lie on a common diagonal (equal absolute coordinate offsets).
same_line = x1 == x2 or y1 == y2
same_diagonal = abs(x2 - x1) == abs(y2 - y1)
print('YES' if same_line or same_diagonal else 'NO')
| [
"[email protected]"
] | |
32ea4732b4a4406667275c7fab12cb6a8edb2740 | 5e52eec531f45e05d1e9433c46b77ae4afb8a0e5 | /leetcode/practice/strings/plus_one.py | e2b66b0a35fc79ab1e9fe73d1845ad0d8abe4bb7 | [] | no_license | markosolopenko/python | b63c671d4b52dd571f4ca0fab224c2d0eca848e8 | 49008edea3db5102cc9fb621ffff05e26aa82bed | refs/heads/master | 2023-01-22T04:34:55.501311 | 2020-12-04T09:42:39 | 2020-12-04T09:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from typing import List
class Solution:
    """LeetCode 66 — add one to a number stored as a list of decimal digits."""

    def plusOne(self, digits: List[int]) -> List[int]:
        """Return the digits of (digits + 1), most-significant digit first.

        Mutates `digits` in place where possible and returns the result.
        Fixes: the original raised IndexError on an empty list and used a
        convoluted zero-fill/append/reverse dance for the all-nines case.
        """
        # Walk from the least-significant digit, propagating the carry.
        for i in range(len(digits) - 1, -1, -1):
            if digits[i] < 9:
                digits[i] += 1
                return digits
            digits[i] = 0  # 9 + 1 -> 0, carry continues leftward
        # Every digit was 9 (or the list was empty): prepend the final carry.
        return [1] + digits
if __name__ == '__main__':
    # Smoke test. Fixes: the original called plusOne() with no argument,
    # which raised TypeError (the required `digits` parameter was missing).
    solver = Solution()
    print(solver.plusOne([1, 2, 3]))
"[email protected]"
] | |
2d19f71fd8206b7ef0efaeaa83accb0403a78789 | 9908401be2088363d56b314dca406365b666047e | /bot.py | 04aef0641d641a671a6148604ca27320ae9ffa19 | [] | no_license | itailitai/Reddit-songs-to-youtube-bot | ff4ffec7caf53eadd86ce4074cc31632790298d9 | 87dfd15e00e17412a9529744150e272d2aa9be26 | refs/heads/master | 2021-01-10T10:22:46.266145 | 2016-03-21T22:32:58 | 2016-03-21T22:32:58 | 53,138,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,431 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#a bot that replies with youtube songs that were mentioned in the comments
import re
import socket
import sqlite3
import sys
import time
import traceback
import urllib
import urllib2
import praw
from bs4 import BeautifulSoup
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = "Python automatic youtube linkerbot"
# This is a short description of what the bot does.
# For example "Python automatic replybot v2.0 (by /u/GoldenSights)"
SUBREDDIT = "90sHipHop+altrap+asianrap+backspin+ChapHop+Gfunk+HipHopHeads+Rap+rapverses+trapmuzik+Turntablists+80sHardcorePunk+90sAlternative+90sRock+AlternativeRock+AltCountry+AORMelodic+ausmetal+BlackMetal+bluegrass+Blues+bluesrock+CanadianClassicRock+CanadianMusic+ClassicRock+country+Christcore+crunkcore+deathcore+deathmetal+Djent+DoomMetal+Emo+EmoScreamo+folk+folkmetal+folkpunk+folkrock+GaragePunk+GothicMetal+Grunge+hardcore+HardRock+horrorpunk+indie_rock+jrock+krautrock+MathRock+melodicdeathmetal+MelodicMetal+MetalNews+metal+metalcore+monsterfuzz+neopsychedelia+NewWave+noiserock+numetal+pianorock+poppunkers+PostHardcore+PostRock+powermetal+powerpop+ProgMetal+progrockmusic+PsychedelicRock+punk+Punkskahardcore+Punk_Rock+Rock+shoegaze+stonerrock+symphonicblackmetal+symphonicmetal+synthrock+truethrash+Truemetal+OutlawCountry+WomenRock+90sHipHop+altrap+asianrap+backspin+ChapHop+Gfunk+HipHopHeads+Rap+rapverses+trapmuzik+Turntablists+scottishmusic+danktunes+albumaday+albumoftheday+Albums+albumlisteners+bassheavy+Catchysongs+CircleMusic+CoverSongs+DutchMusic+EarlyMusic+earlymusicalnotation+FemaleVocalists+findaband+freemusic+jazz+Frisson+gameofbands+GayMusic+germusic+HeadNodders+heady+HeyThatWasIn+HighFidelity+ifyoulikeblank+indie+Instrumentals+IndieWok+ipm+IsolatedVocals+japanesemusic+LetsTalkMusic+listentoconcerts+listentomusic+ListenToThis+ListenToUs+livemusic+Lyrics+mainstreammusic+MiddleEasternMusic+Music+MusicAlbums+musicsuggestions+MusicToSleepTo+musicvideos+NewAlbums+newmusic+onealbumaweek+partymusic+RedditOriginals+RepublicOfMusic+RoyaltyFreeMusic+SlavicMusicVideos+SpotifyMusic+ThemVoices+unheardof+WhatIListenTo+WTFMusicVideos+music+tipofmytongue+namethattune+whatsthatsong+whatsthesong+whatsthissong+NameThatSong+kqly+311+ADTR+AliciaKeys+ArcadeFire+ArethaFranklin+APerfectCircle+TheAvettBrothers+BaysideIsACult+TheBeachBoys+Beatles+billytalent+Blink182+BMSR+BoBurnham+boniver+brandnew+BruceSpringsteen+Burial+ChristinaAguilera+cityandcolour+Coldplay+CutCopy+TheCure
+DaftPunk+DavidBowie+Deadmau5+DeathCabforCutie+DeathGrips+DeepPurple+Deftones+DieAntwoord+DMB+elliegoulding+Eminem+empireofthesun+EnterShikari+Evanescence+feedme+FirstAidKit+flaminglips+franzferdinand+Gorillaz+gratefuldead+Greenday+GunsNRoses+Incubus+JackJohnson+JackWhite+JanetJackson+John_frusciante+kings_of_leon+Korn+ladygaga+lanadelrey+lennykravitz+Led_Zeppelin+lorde+Macklemore+Madonna+Manowar+MariahCarey+MattAndKim+Megadeth+Metallica+MGMT+MichaelJackson+MinusTheBear+ModestMouse+Morrissey+mrbungle+MyChemicalRomance+Muse+NeilYoung+NIN+Nirvana+NOFX+oasis+Opeth+OFWGKTA+OutKast+panicatthedisco+PearlJam+phish+Pinback+PinkFloyd+porcupinetree+prettylights+Puscifer+Queen+Radiohead+RATM+RedHotChiliPeppers+The_Residents+RiseAgainst+Rush+SigurRos+Slayer+slipknot+SmashingPumpkins+SparksFTW+TeganAndSara+TheKillers+TheOffspring+TheStrokes+TheMagneticZeros+tragicallyhip+ToolBand+U2Band+Umphreys+UnicornsMusic+velvetunderground+Ween+weezer+WeirdAl+yesband+Zappa"
# This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
DO_SUBMISSIONS = False
DO_COMMENTS = True
# Look for submissions, comments, or both.
KEYWORDS = [""]
# These are the words you are looking for
KEYAUTHORS = []
# These are the names of the authors you are looking for
# The bot will only reply to authors on this list
# Keep it empty to allow anybody.
#REPLYSTRING = "**Hi, I'm a bot.**"
# This is the word you want to put in reply
MAXPOSTS = 100
# This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
# This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
STOPWORDS=[" By "," by ","-"]
CLEANCYCLES = 10
# After this many cycles, the bot will clean its database
# Keeping only the latest (2*MAXPOSTS) items
'''All done!'''
print('Opening SQL Database')
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)')
print('Logging in...')
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def replybot():
  """Scan the configured subreddits and reply to song-name comments.

  Short comments containing a separator (see STOPWORDS) are parsed as
  "artist - title" / "title by artist"; if the song exists on
  songlyrics.com, a YouTube link reply is posted.  Processed post ids are
  recorded in the sqlite `oldposts` table to avoid double replies.
  Python 2 only (iteritems, praw 3 API).
  """
  # Per-cycle counters, reported at the end of the scan.
  allcom=0
  link=0
  doesnotmatch=0
  longcom=0
  print('Searching %s.' % SUBREDDIT)
  subreddit = r.get_subreddit(SUBREDDIT)
  posts = []
  if DO_SUBMISSIONS:
    posts += list(subreddit.get_new(limit=MAXPOSTS))
  if DO_COMMENTS:
    posts += list(subreddit.get_comments(limit=MAXPOSTS))
  posts.reverse()
  for post in posts:
    psub=post.subreddit.display_name
    # Only short comments (< 20 words) are candidate song mentions.
    if len(post.body.split()) < 20:
      if any(x in post.body for x in STOPWORDS):
        if post.body.count('-')<2:
          if "http" not in post.body:
            #print ("Searching for another the next comment")
            # Anything that needs to happen every loop goes here.
            pid = post.id
            try:
              pauthor = post.author.name
            except AttributeError:
              # Author is deleted. We don't care about this post.
              continue
            if pauthor.lower() == r.user.name.lower():
              # Don't reply to yourself, robot!
              print('Will not reply to myself.')
              continue
            if KEYAUTHORS != [] and all(auth.lower() != pauthor for auth in KEYAUTHORS):
              # This post was not made by a keyauthor
              continue
            cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
            if cur.fetchone():
              # Post is already in the database
              continue
            if isinstance(post, praw.objects.Comment):
              pbody = post.body
              # Strip parentheses so they do not break the markdown link.
              for ch in ['(',')']:
                if ch in pbody:
                  pbody=pbody.replace(ch,"")
            else:
              pbody = '%s %s' % (post.title, post.selftext)
            pbody = pbody.lower()
            if not any(key.lower() in pbody for key in KEYWORDS):
              # Does not contain our keyword
              continue
            # Mark as handled before replying so a crash cannot double-post.
            cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
            sql.commit()
            print('Replying to %s by %s in /r/%s' % (pid, pauthor,psub))
            try:
              # Only the first line of the comment is treated as the song.
              if "\n" in pbody:
                pbody=pbody[:pbody.index("\n")]
              res = search_for_song(pbody)
              if res:
                # pbody=pbody[8:]
                # pbody=pbody.replace("\n", "")
                # Pretty-print the song title for the reply markdown.
                temp=pbody.lstrip()
                temp=temp.rstrip()
                temp=" ".join(temp.split())
                temp=temp.title()
                temp=temp.replace("?", "")
                temp=temp.replace('"',"")
                temp=temp.replace("#","")
                temp=temp.replace(">","")
                author, song_name = song_string_generator(pbody)
                url = 'https://songlyrics.com/'+author+'/'+song_name+'-lyrics/'
                post.reply("[**"+temp+"**]("+res+") \n ---- \n [**^^[Song ^^Lyrics]**] ("+url+") ^^| [**^^[Contact ^^me]**](https://www.reddit.com/message/compose?to=itailitai) ^^| ^^[**^^[Github]**](https://github.com/itailitai/Reddit-songs-to-youtube-bot) \n\n ^^Parent ^^commenter ^^can ^^reply ^^with ^^'delete'. ^^Will ^^also ^^delete ^^if ^^comment ^^score ^^is ^^-1 ^^or ^^less.")
                # r.send_message('itailitai', 'SOME SUBJECT', 'Your bot just commented, check his profile : /u/youtubesong')
            except praw.errors.Forbidden:
              print('403 FORBIDDEN - is the bot banned from %s?' % post.subreddit.display_name)
          else:
            link+=1
            allcom+=1
        else:
          doesnotmatch+=1
          allcom+=1
      else:
        doesnotmatch+=1
        allcom+=1
    else:
      longcom+=1
      allcom+=1
  print ("%s comments were completely ignored in this cycle:" % allcom)
  print("Too long comments: %s" % longcom)
  print ("Irrelevant comments: %s" % doesnotmatch)
  print ("Comments including a link: %s" % link)
def search_for_song(pbody):
  """Try to resolve a comment body to a YouTube URL, or return False.

  Parses the text into (author, song) and checks songlyrics.com via
  songexist(); when the text used '-' (no explicit "By"), the reversed
  (song, author) order is tried as a fallback.  Returns the YouTube URL
  string on success, False on failure (and implicitly None for empty
  input, which callers treat as falsy).
  """
  #print("in search_for_song")
  song=pbody
  if len(song)>0:
    song=song
    if song.isspace()==True or song=='':
      return False
    else:
      #HEADERS = {'User-Agent': 'Song checker - check if songs exists by searching this website, part of a bot for reddit'}
      author, song_name = song_string_generator(song)
      if author=='' or song_name=='':
        return False
      else:
        x=songexist(author,song_name,song)
        if "By" in song:
          # Explicit "By": the author/title order is unambiguous.
          return x
        else:
          if x:
            return x
          else:
            # Ambiguous "a - b": retry with the fields swapped.
            return songexist(song_name,author,song)
def songexist(author,song_name,song):
  """Check songlyrics.com for author/song; return a YouTube URL or False.

  Fetches https://songlyrics.com/<author>/<song_name>-lyrics/ and looks
  for the site's "not found" phrases in the page text.  Transient
  socket errors are retried indefinitely; any other fetch error aborts
  with False.  Python 2 only (urllib2).
  """
  url = 'https://songlyrics.com/'+author+'/'+song_name+'-lyrics/'
  #page = requests.get(url, HEADERS)
  check=1
  while check==1:
    try:
      # Browser-like UA: the site rejects default urllib2 requests.
      headers = { 'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; rv:40.0) Gecko/20100101 Firefox/40.0' }
      req = urllib2.Request(url, None, headers)
      page= urllib2.urlopen(req)
      check=2
    except socket.error as error:
      # Network hiccup: loop and retry the same request.
      pass
    except Exception:
      print('An error occured while trying to verify song existence')
      return False
  soup = BeautifulSoup(page.read(), "lxml")
  # Phrases that appear on songlyrics.com "no such song" pages.
  checklist=["Please check the spelling and try again","The page you're looking for cannot be found.","Do you like this album? Leave a review."]
  if any(x in soup.get_text() for x in checklist):
    print ("Song was not found in the database!")
    return False
  else:
    print ("Song was found in the database!")
    result=first_youtube(song)
    return result
def song_string_generator(song):
    """Split a comment into songlyrics.com URL slugs.

    Returns (author, song_name) with spaces and punctuation replaced by
    '-', or ('', '') whenever the text cannot be turned into a usable
    pair (brackets present, author == title, single characters, ...).
    """
    # Bracketed comments ([request], [meta], ...) are never song names.
    if "[" in song or "]" in song:
        return '', ''
    # Drop trailing punctuation, one character class at a time (order
    # matters: 'a./' must become 'a.', not 'a').
    song = song.rstrip('.')
    song = song.rstrip(':')
    song = song.rstrip('/')
    author, song_name = '', ''
    if "-" in song:
        # "Artist - Title" form.
        author, song_name = song.split('-', 1)
    elif "by" in song:
        # "Title by Artist" form (first occurrence of "by" wins).
        pieces = song.split('by', 1)
        author, song_name = pieces[1], pieces[0]
    else:
        print("song name invalid")
    # Collapse runs of whitespace and trim the ends.
    author = " ".join(author.split())
    song_name = " ".join(song_name.split())
    # Special-case: songlyrics.com files the band under its real name.
    if author == 'guns and roses':
        author = "guns n' roses"
    if author == song_name:
        return '', ''
    if author == "!" or song_name == "!":
        return '', ''
    # Slugify: newlines vanish, other separators become hyphens.
    author = author.replace("\n", "")
    song_name = song_name.replace("\n", "")
    for ch in (" ", "'", "?", "!"):
        author = author.replace(ch, "-")
        song_name = song_name.replace(ch, "-")
    # Reject single-character fields (no spaces remain at this point,
    # so the original `len - count(' ') == 1` test reduces to len == 1).
    if len(song_name) == 1 or len(author) == 1:
        return '', ''
    return author, song_name
def first_youtube(textToSearch):
  """Return the URL of the first YouTube search result for the text.

  Scrapes the results page HTML for the first /watch?v= 11-character id.
  Returns None on IOError (falls through) and False on any other error.
  Python 2 only (reload/setdefaultencoding, urllib.urlopen).
  """
  # Force UTF-8 default encoding so the query string concatenates cleanly
  # with unicode input (Python 2 workaround).
  reload(sys)
  sys.setdefaultencoding('UTF-8')
  query_string = textToSearch
  try:
    html_content = urllib.urlopen("http://www.youtube.com/results?search_query=" + query_string)
    # Video ids are exactly 11 characters after /watch?v= in the markup.
    search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
    result="http://www.youtube.com/watch?v=" + search_results[0]
    return result
  except IOError:
    print ("IOError Occured while contacting Youtube!")
  except Exception:
    print ("A non IOError Occured while contacting Youtube!")
    return False
def deleteowncomments():
  """Delete all of /u/youtubesong's comments whose score is negative."""
  print ("Comments deleting procedure has started")
  user=r.get_redditor("youtubesong")
  for c in user.get_comments(limit=None):
    # Downvoted below zero -> the community did not want the reply.
    if c.score < 0 :
      c.delete()
def removebyreply():
  """Honor 'delete' replies: remove the bot comment the user replied to.

  Scans unread messages; when a reply whose body is 'delete' comes from
  the author of the comment the bot originally answered (or from the bot
  owner), the bot's comment is deleted and a confirmation is sent.  The
  numbered prints are debug breadcrumbs.
  """
  try:
    print(0)
    unread = r.get_unread(limit=None)
    print(1)
    for msg in unread:
      if msg.body.lower() == 'delete':
        try:
          print(3)
          # Walk up the thread: msg -> bot comment -> original comment.
          id = msg.id
          id = 't1_' + id
          comment = r.get_info(thing_id=id)
          comment= r.get_info(thing_id=comment.parent_id)
          comment_parent = r.get_info(thing_id=comment.parent_id)
          if msg.author.name == comment_parent.author.name or msg.author.name == 'itailitai':
            print(4)
            # Don't delete a confirmation message by mistake.
            if "which was a reply to" not in comment.body:
              comment.delete()
              msg.reply('I have deleted [my comment](' + comment.permalink + '), which was a reply to [your comment](' + comment_parent.permalink + ').\n\nHave an amazing day, **' + str(msg.author.name) + '**!\n\n----- \n\n [**^^[Contact ^^me]**](https://www.reddit.com/message/compose?to=itailitai) ^^| ^^[**[Github]**](https://github.com/itailitai/Reddit-songs-to-youtube-bot)')
              msg.mark_as_read()
            else:
              msg.mark_as_read()
          else:
            print(5)
            msg.mark_as_read()
        except Exception as e:
          # Parent author deleted their account: allow the deletion anyway.
          if (str(e) == "'NoneType' object has no attribute 'name'"):
            print(6)
            comment.delete()
            msg.reply('I have deleted [my comment](' + comment.permalink + '), which was a reply to this [comment](' + comment_parent.permalink + ').\n\nHave an amazing day, **' + str(msg.author.name) + '**!\n\n----- \n\n [**^^[Contact ^^me]**](https://www.reddit.com/message/compose?to=itailitai) ^^| ^^[**[Github]**](https://github.com/itailitai/Reddit-songs-to-youtube-bot)')
          else:
            print(7)
            continue
          msg.mark_as_read()
          continue
  except Exception:
    print(8)
    None
# Main service loop: scan-and-reply, process 'delete' requests, and every
# CLEANCYCLES cycles prune the seen-posts table and self-moderate.
cycles = 0
while True:
  try:
    replybot()
    cycles += 1
    removebyreply()
  except Exception as e:
    traceback.print_exc()
  if cycles >= CLEANCYCLES:
    print('Cleaning database')
    # Keep only the newest 2*MAXPOSTS ids so the table stays bounded.
    cur.execute('DELETE FROM oldposts WHERE id NOT IN (SELECT id FROM oldposts ORDER BY id DESC LIMIT ?)', [MAXPOSTS * 2])
    sql.commit()
    deleteowncomments()
    cycles = 0
  print('Running again in %d seconds \n' % WAIT)
  time.sleep(WAIT)
"[email protected]"
] | |
89724807ba5a9c2f49cd0001c7b1988977246290 | ffb4d47a69b04026af0a2b8201bd7e52328eb9ec | /server/map_file.py | d6867521b3e11ea34e7f855a27d241231609ab95 | [] | no_license | a-jain/rappor-js | eb4303ad10a77753a05fb1a4e06f5f0f0bc07233 | d7f6bcab78af5db693569050ef74da19f95cb3af | refs/heads/master | 2021-01-18T21:22:22.015781 | 2016-05-21T02:37:55 | 2016-05-21T02:37:55 | 48,339,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | import hashlib
import csv
import sys
import os
import struct
import json
testing = False
if not testing:
subDir = os.path.join(os.getcwd(), "outputs/" + sys.argv[1])
paramsFile = subDir + "/params.csv"
mapFile = subDir + "/map.csv"
def getParams():
  """Read the paramsFile CSV (one header row) into a dict.

  Maps each header column name to the corresponding value; when several
  data rows exist, later rows overwrite earlier ones, so the last row
  wins.  Fixes: the original never closed the file handle.
  """
  params = {}
  with open(paramsFile, "r") as f:
    reader = csv.reader(f)
    header = []
    for rownum, row in enumerate(reader):
      if rownum == 0:
        header = row
      else:
        for colnum, col in enumerate(row):
          params[header[colnum]] = col
  return params
# generate test strings
def generateCandidates(n):
  """Return the two real values plus *n* synthetic 'zzfake<i>' candidates."""
  return ["true", "false"] + ["zzfake" + str(i) for i in range(n)]
def to_big_endian(i):
  """Serialize integer *i* as 4 bytes, most-significant byte first.

  Network byte order keeps the hash input identical across platforms;
  '>L' is a 4-byte unsigned value in the struct format language.
  """
  return struct.pack('>L', i)
# format is:
# "true", [1..k], [1..k], ..., [1..k] {m times} and must start with a 1!
# "false", [1..k], [1..k], ..., [1..k] {m times}
# ...
# "test3", [1..k], [1..k], ..., [1..k] {m times}
def constructMap(candidates, m, h, k):
  """Build the RAPPOR candidate map: candidate -> sorted bit positions.

  For each of the m cohorts, each candidate is hashed (md5, salted with
  the big-endian cohort index) into h bit positions within that cohort's
  k-bit block; positions are 1-based and offset by i*k per cohort.
  Python 2 only: relies on xrange and on bytes+str concatenation.
  """
  X = {}
  for c in candidates:
    candidateOnes = []
    for i in range(m):
      # Salt the candidate with the cohort index so cohorts hash differently.
      val = to_big_endian(i) + c
      md5 = hashlib.md5(val)
      digest = md5.digest()
      # One digest byte per hash function, reduced mod k, mapped into
      # this cohort's block of the design matrix (1..k, offset i*k).
      ones = [k-((ord(digest[j]) % k))+i*k for j in xrange(h)]
      candidateOnes.extend(sorted(ones))
    X[c] = candidateOnes
  return X
# print X into appropriate design matrix format
def writeToFile(X):
  """Write map X as CSV rows: "candidate",pos1,pos2,... to mapFile.

  Python 2 only (dict.iteritems).
  """
  fo = open(mapFile, "w")
  for key, val in X.iteritems():
    # Candidate name is quoted; bit positions follow unquoted.
    fo.write("\"{}\"".format(key))
    for v in val:
      fo.write(",{}".format(v))
    fo.write("\n")
  fo.close()
def unitTest():
  """Print the map for a minimal configuration (manual smoke test)."""
  candidates = ["true", "false"]
  m = 1
  h = 2
  k = 32
  # p = 0.1
  # q = 0.8
  # f = 0.81
  print constructMap(candidates, m, h, k)
def main():
  """Entry point: smoke-test when `testing`, else build the map file.

  In normal mode the candidate strings come from the JSON document in
  sys.argv[2] (key "strs") and the RAPPOR parameters from params.csv.
  """
  if testing:
    unitTest()
  else:
    params = getParams()
    # candidates = generateCandidates(6)
    candidates = json.loads(sys.argv[2])["strs"]
    # print candidates
    X = constructMap(candidates, int(params["m"]), int(params["h"]), int(params["k"]))
    writeToFile(X)
# Run immediately on import/execution (script-style module).
main()
| [
"[email protected]"
] | |
568ce6d78e1527fd2a9466d9d7781b09f5da72f8 | 67864887cd8b63263797717078752cbdef304f9b | /r21SwiftNew/r21StatisticalAnalysis/install/InstallArea/x86_64-slc6-gcc62-opt/src/source/Run_gjjSearchPhase.py | 5c3c7319f2e8d514e48d6b2a475e702354df4e19 | [] | no_license | Yvonne-Ng/swiftLimitSetting | 5da59cf5f755b80a9485477241ef0c5d1dbbc76d | 5d92728c5aaab4bbd8acd1d015d6b2ad96bf88e3 | refs/heads/master | 2020-03-11T08:14:36.494021 | 2018-06-09T21:24:25 | 2018-06-09T21:24:25 | 129,878,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,376 | py | #!/usr/bin/env python
import subprocess # So can use shell scripting in python
import os
import ROOT
from ROOT import *
import re
# *****************
# Lydia Beresford
# April 2015
# Script to run Step 1:
# - SearchPhase.cxx
# - plotSearchPhase_gjj.py to plot results of SearchPhase.cxx
# Note:To change fit parameters, mjj cut (minXForFit) and other things, do this in Step1_SearchPhase.config!
# All log files stored 1 directory up in LogFiles directory, unless otherwise specified
# *****************
statspath = os.getcwd() # path used in outputFileName in config
headdir = statspath.split("/Bayesian")[0] # directory for whole package
logsdir = headdir # Log Files Sent Here
batchdir = logsdir+"/StatisticalAnalysis" #headdir # Folder to be copyied to batch, by default headdir unless otherwise specified
UserfileHistDict = {}
# *****************
#---------------------------
# ***** User specifies *****
#---------------------------
#---------------------------
# Files and directories
doSearch =True# Set to True to run SearchPhase.cxx
doPlotting =False# Set to True to run plotSearchPhase_gjj.py
#folderextension = "dijetgamma_data_hist_20160727_15p45fb_4Par_169_1493"
#
#inputFileDir = "./inputs/hist_20160727/OUT_dijetgamma_data/"
#
#UserfileHistDict[inputFileDir+"datalike.root"] = ["Zprime_mjj_var_DataLike_15p45fb"] # Dictionary given as example, key is inputFileDir+ file name
#
#HistDir = "dijetgamma_g150_2j25_nomjj"
#folderextension = "swiftCodeTestingWithScaleTrijet"
folderextension = "swiftCodeGlobalFittrijetABCDinclusive"
#inputFileDir = "./inputs/"
inputFileDir="/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/r21Rebuild/input/btagged/"
#inputFileDir="/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/r21Rebuild/input/btagged/"
#inputFileDir="/lustre/SCRATCH/atlas/ywng/WorkSpace/r21/r21Rebuild/input/"
UserfileHistDict[inputFileDir+"reweighted_hist-background_ABCD_trijet.root"] = ["Zprime_mjj_var"] # Dictionary given as example, key is inputFileDir+ file name
#UserfileHistDict[inputFileDir+"trijet_HLT_j380_inclusive.root"] = ["background_mjj_var"] # Dictionary given as example, key is inputFileDir+ file name
#UserfileHistDict[inputFileDir+"dijetgamma_g85_2j65_2btag.root"] = ["background"] # Dictionary given as example, key is inputFileDir+ file name
#HistDir = "dijetgamma_g85_2j65"
#HistDir = ""
HistDir = ""
# Turn to true if doing Spurious Signal test!
useScaled = True # Set to true if using scaled histograms instead of Datalike histograms
#---------------------------
# Analysis quantities
Ecm = 13000.0 # Centre of mass energy in GeV
##---------------------------
# Run controls
config = "./configurations/Step1_SearchPhase.config" # Path and name of config file
useBatch = False # Set to True to run SearchPhase.cxx on the batch, or set to False to run locally. runs code in batchdir
atOx = False # Set to True to use Oxford batch rather than lxbatch for running!
#----------------------------------
# ***** End of User specifies *****
#----------------------------------
#----------------------
# Preliminary steps
#----------------------
# Check inputs
# inputFileDir must end with '/' because histogram paths are built by
# plain string concatenation below.
if not inputFileDir.endswith("/"):
  raise SystemExit("Error: inputFileDir specified by user in Run_SearchPhase.py must end with /")
# Make directories to store outputs if they don't exist already!
directories = ["%s/LogFiles/%s/Step1_SearchPhase/CodeOutput"%(logsdir,folderextension),"%s/LogFiles/%s/Step1_SearchPhase/ConfigArchive"%(logsdir,folderextension),"./results/Step1_SearchPhase/%s"%folderextension,"%s/LogFiles/%s/Step1_SearchPhase/ScriptArchive"%(logsdir,folderextension)]
for directory in directories:
  if not os.path.exists(directory):
    os.makedirs(directory)
Step1_ScriptArchive = "%s/LogFiles/%s/Step1_SearchPhase/ScriptArchive"%(logsdir,folderextension)
# Work from a copy of the user-specified file -> histogram mapping.
fileHistDict = {}
fileHistDict = UserfileHistDict
print "fileHistDict"
print fileHistDict
#-------------------------------------
# Performing Step 1: Search Phase for files histogram combinations in fileHistDict using SearchPhase.cxx
#-------------------------------------
# For every (input ROOT file, histogram) pair: generate a per-histogram
# SearchPhase config from the template, run SearchPhase (locally or on a
# batch system), then optionally run the plotting script on the result.
for File, HistList in fileHistDict.iteritems():
  for Hist in HistList:
    if doSearch:
      # open modified config file (fout) for writing
      fout = open("%s/LogFiles/%s/Step1_SearchPhase/ConfigArchive/Step1_%s.config"%(logsdir,folderextension,Hist), 'w')
      # read in config file as fin and replace relevant fields with user input specified at top of this file
      with open('%s'%config, 'r') as fin:
        for line in fin:
          if (line.startswith("inputFileName") or line.startswith("dataHist") or line.startswith("outputFileName") or line.startswith("Ecm")):
            if line.startswith("inputFileName"):
              line = "inputFileName %s\n"%File
              fout.write(line)
            if line.startswith("dataHist"):
              line = "dataHist %s/%s\n"%(HistDir,Hist)
              fout.write(line)
            if line.startswith("outputFileName"):
              line = "outputFileName %s/results/Step1_SearchPhase/%s/Step1_SearchPhase_%s.root\n"%(statspath,folderextension,Hist)
              fout.write(line)
            if line.startswith("Ecm"):
              line = "Ecm %d"%Ecm
              fout.write(line)
          else:
            fout.write(line)
      fin.close()
      fout.close()
      # Perform search phase locally (use tee to direct output to screen and to log file)
      if (useScaled):
        #command = "SearchPhase --noDE --useScaled --config %s/LogFiles/%s/Step1_SearchPhase/ConfigArchive/Step1_%s.config |& tee %s/LogFiles/%s/Step1_SearchPhase/CodeOutput/Step1_%s.txt"%(logsdir,folderextension,Hist,logsdir,folderextension,Hist) # noDE option means no DataErr, uses only MCErr
        command="SearchPhase --useScaled --config %s/LogFiles/%s/Step1_SearchPhase/ConfigArchive/Step1_%s.config |& tee %s/LogFiles/%s/Step1_SearchPhase/CodeOutput/Step1_%s.txt"%(logsdir,folderextension,Hist,logsdir,folderextension,Hist) # noDE option means no DataErr, uses only MCErr
      else:
        #command = "SearchPhase --noDE --config %s/LogFiles/%s/Step1_SearchPhase/ConfigArchive/Step1_%s.config |& tee %s/LogFiles/%s/Step1_SearchPhase/CodeOutput/Step1_%s.txt"%(logsdir,folderextension,Hist,logsdir,folderextension,Hist) # noDE option means no DataErr, uses only MCErr
        command ="SearchPhase --config %s/LogFiles/%s/Step1_SearchPhase/ConfigArchive/Step1_%s.config |& tee %s/LogFiles/%s/Step1_SearchPhase/CodeOutput/Step1_%s.txt"%(logsdir,folderextension,Hist,logsdir,folderextension,Hist) # noDE option means no DataErr, uses only MCErr
      print command
      # Perform setLimitsOneMassPoint locally
      if not useBatch:
        subprocess.call(command, shell=True)
      # Use batch i.e. perform setLimitsOneMassPoint on the batch
      if useBatch:
        if atOx:
          # Perform setLimitsOneMassPoint on Oxford batch
          print "Ox Batch!!"
          # Strip the tee pipeline: the batch wrapper redirects streams itself.
          batchcommand = command.split("|&")[0]
          CodeOutputName = (command.split("|& tee ")[1]).split(".txt")[0] # Name of files for code output to be stored as
          print batchcommand
          # Open batch script as fbatchin
          fbatchin = open('./scripts/OxfordBatch/Step1_BatchScript_Template_Oxford.sh', 'r')
          fbatchindata = fbatchin.read()
          fbatchin.close()
          # open modified batch script (fbatchout) for writing
          fbatchout = open('%s/Step1_BatchScript_Template_%s.sh'%(Step1_ScriptArchive,Hist),'w')
          fbatchoutdata = fbatchindata.replace("YYY",batchdir) # In batch script replace YYY for path for whole package
          fbatchoutdata = fbatchoutdata.replace("ZZZ",batchcommand) # In batch script replace ZZZ for submit command
          fbatchoutdata = fbatchoutdata.replace("OOO",CodeOutputName) # In batch script replace OOO (i.e. std output stream) to CodeOutput directory
          fbatchoutdata = fbatchoutdata.replace("EEE",CodeOutputName) # In batch script replace EEE (i.e. output error stream) to CodeOutput directory
          fbatchout.write(fbatchoutdata)
          fbatchout.close()
          subprocess.call("qsub < %s/Step1_BatchScript_Template_%s.sh"%(Step1_ScriptArchive,Hist), shell=True)
        else:
          # Perform setLimitsOneMassPoint on batch
          print "Batch!!"
          batchcommand = command.split("|&")[0]
          CodeOutputName = (command.split("|& tee ")[1]).split(".txt")[0] # Name of files for code output to be stored as
          print batchcommand
          # Open batch script as fbatchin
          fbatchin = open('./scripts/Step1_BatchScript_Template.sh', 'r')
          fbatchindata = fbatchin.read()
          fbatchin.close()
          # open modified batch script (fbatchout) for writing
          fbatchout = open('%s/Step1_BatchScript_Template_%s.sh'%(Step1_ScriptArchive,Hist),'w')
          fbatchoutdata = fbatchindata.replace("YYY",batchdir) # In batch script replace YYY for path for whole package
          fbatchoutdata = fbatchoutdata.replace("ZZZ",batchcommand) # In batch script replace ZZZ for submit command
          fbatchoutdata = fbatchoutdata.replace("OOO",CodeOutputName) # In batch script replace OOO (i.e. std output stream) to CodeOutput directory
          fbatchoutdata = fbatchoutdata.replace("EEE",CodeOutputName) # In batch script replace EEE (i.e. output error stream) to CodeOutput directory
          fbatchout.write(fbatchoutdata)
          # lxbatch scripts must be executable before submission.
          modcommand = 'chmod 744 %s/Step1_BatchScript_Template_%s.sh'%(Step1_ScriptArchive,Hist)
          print modcommand
          subprocess.call(modcommand, shell=True)
          subprocess.call("ls -l {0}".format(Step1_ScriptArchive), shell=True)
          fbatchout.close()
          command = "bsub -q 1nh %s/Step1_BatchScript_Template_%s.sh"%(Step1_ScriptArchive,Hist)
          print command
          subprocess.call(command, shell=True)
    #-------------------------------------
    # Plotting for Hists in HistList using plotSearchPhase_gjj.py
    #-------------------------------------
    if doPlotting:
      # Use regex to find lumi of hist
      # Histogram names encode luminosity as e.g. "_15fb" or "_15p45fb".
      lumi = 0
      if (re.search('_[0-9]+fb',Hist) is not None):
        lumi = re.search('_[0-9]+fb',Hist).group()
        lumi = lumi.strip("_")
        lumi = lumi.strip("fb")
        lumi = float(lumi)*1000
      if (re.search('_[0-9]+p[0-9]+fb',Hist) is not None):
        lumi = re.search('_[0-9]+p[0-9]+fb',Hist).group()
        lumi = lumi.replace("p",".")
        lumi = lumi.strip("_")
        lumi = lumi.strip("fb")
        lumi = float(lumi)*1000
      #Yvonne hard set of the luminosity
      # NOTE: this overrides whatever the regex extracted above.
      lumi = 35.09
      #if lumi == 0: raise SystemExit('\n***Zero lumi*** regex issue')
      # open modified plotSearchPhase_gjj.py (fout) for writing
      fout = open('plotting/SearchPhase/plotSearchPhase_gjj_%s.py'%Hist, 'w')
      # read in plotSearchPhase_gjj as fin and replace relevant fields
      #with open('./plotting/SearchPhase/plotSearchPhase_gjj.py', 'r') as fin:
      if os.path.isfile('plotting/SearchPhase/plotSearchPhase_gjj_%s.py'%Hist) :
        print "file exist"
      else:
        print "file doesn't exist"
      #with open('./plotting/SearchPhase/plotSearchPhase_gjj_Zprime_mjj_var.py', 'r') as fin:
      with open('./plotting/SearchPhase/plotSearchPhase.py', 'r') as fin:
        for line in fin:
          if (line.startswith("searchInputFile") or line.startswith("folderextension") or line.startswith("luminosity") or line.startswith("Ecm")):
            if line.startswith("searchInputFile"):
              line = "searchInputFile = ROOT.TFile('./results/Step1_SearchPhase/%s/Step1_SearchPhase_%s.root')\n"%(folderextension,Hist)
              fout.write(line)
            if line.startswith("folderextension"):
              line = "folderextension = './plotting/SearchPhase/plots/%s/%s/'\n"%(folderextension,Hist)
              fout.write(line)
            if line.startswith("luminosity"):
              line = "luminosity = %s\n"%str(lumi)
              fout.write(line)
            if line.startswith("Ecm"):
              line = "Ecm = %d\n"%(Ecm/1000)
              fout.write(line)
          else:
            fout.write(line)
      print "done"
      fin.close()
      fout.close()
      # Run the generated plotting script, then clean it up.
      subprocess.call("python plotting/SearchPhase/plotSearchPhase_gjj_%s.py -b"%Hist, shell=True)
      print "done"
      os.remove("./plotting/SearchPhase/plotSearchPhase_gjj_%s.py"%Hist)
"[email protected]"
] | |
67c3b6015b3b12fd261856b9673f67abc4797a7f | a51a1ba62ea5acbc38a0266ea2a775a4d98dd193 | /navya/views.py | 7570fb8f0084d69bf373cdd6cad96e86d980c350 | [] | no_license | sathish-ku-mar/navya_python | 3c900c19d0b7987108f430c1d34f5676a99cd6db | 35a39547d7a6d8ddc9b949fe7dd4c44e294e4304 | refs/heads/master | 2022-12-09T14:12:23.825532 | 2019-11-20T19:21:09 | 2019-11-20T19:21:09 | 223,008,307 | 0 | 0 | null | 2022-12-08T06:54:59 | 2019-11-20T19:09:05 | Python | UTF-8 | Python | false | false | 3,264 | py | # Create your views here.
from rest_framework import status, viewsets
from rest_framework.response import Response
from django.http.request import QueryDict
from .settings import global_file_path
import json
def read_file(path, mode='r'):
with open(path, mode) as json_file:
data = json.load(json_file)
return data
def write_file(path, data, mode='w'):
with open(path, mode) as json_file:
json.dump(data, json_file)
return True
def get_user(id):
path = global_file_path + '/data_store/user.json'
data = read_file(path)
return [i for i in data if str(i['id']) == str(id)][0]
def get_role(data):
path = global_file_path + '/data_store/roles.json'
file_data = read_file(path)
return [i for i in file_data if i['id'] in data]
def get_permissions(data):
path = global_file_path + '/data_store/permissions.json'
file_data = read_file(path)
return [i for i in file_data if i['id'] in data]
def modify_role(id, per):
path = global_file_path + '/data_store/roles.json'
data = read_file(path)
for key, value in enumerate(data):
if value['id'] == id:
data[key]['permissions'] = per
return write_file(path, data, mode='w')
def delete_permissions(id):
path = global_file_path + '/data_store/permissions.json'
data = read_file(path)
for key, value in enumerate(data):
if value['id'] == id:
data.pop(key)
return write_file(path, data, mode='w')
class UserViewSet(viewsets.ModelViewSet):
"""
A simple ViewSet for the User's.
"""
def get_user_permissions(self, request, id):
"""
To get the User permissions list
URL Structure: /user/user1/
Required Fields: id
"""
user = get_user(id)
role = get_role(user['roles'])
flat_list = [item for sublist in role for item in sublist['permissions']]
permissions = get_permissions(flat_list)
return Response(permissions)
def get_checkpermission(self, request):
"""
To check user has permissions based on the permission id
URL Structure: /checkpermission/?userid=user1&permissionid=perm6
Required Fields: userid, permissionid
"""
user_id = request.GET['userid']
permissionid = request.GET['permissionid']
user = get_user(user_id)
role = get_role(user['roles'])
flat_list = [item for sublist in role for item in sublist['permissions'] if permissionid == item]
permissions = True if flat_list else False
return Response(permissions)
def modify_permissions_of_role(self, request, roleid):
"""
To modify the permissions of the Role
URL Structure: /roles/role3/
Required Fields: roleid
"""
res = modify_role(roleid, request.data['permissions'])
return Response(res)
def delete_permission (self, request, permission_id):
"""
To delete the permission based on the permission id
URL Structure: /permissions/perm6/
Required Fields: permission_id
"""
res = delete_permissions(permission_id)
return Response(res) | [
"[email protected]"
] | |
9757dc544e5546368dabdf8bf1927b9d01db5cfe | c8bc580a5b930d9e06533afbe36e5703b6bf4536 | /3/project_accessment/api/serializers.py | 51a89249b4a73328d009643a8c1dbad30badb5cd | [] | no_license | Austyns/pyclass | b5093e05cc268017d02f230d9da06c63711fb9aa | e8690bf52a04a2b674b5e09c29c349df0439df05 | refs/heads/master | 2020-03-27T19:37:41.349459 | 2018-09-08T10:21:54 | 2018-09-08T10:21:54 | 147,001,852 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | from rest_framework import serializers
from api.models import System_users, Subject, Accessment
# System Users
class System_usersSerializer(serializers.ModelSerializer):
class Meta:
model = System_users
fields = ('id', 'ad_name', 'ad_email', 'ad_role', 'ad_password', 'ad_phone', 'ad_status', 'ad_reg_at')
# Subject
class SubjectSerializer(serializers.ModelSerializer):
class Meta:
model = Subject
fields = ('id', 'name', 'discrip', 'cat_status', 'cat_slug', 'cat_reg_at', 'cat_reg_by')
# Accessments
class AccessmentSerializer(serializers.ModelSerializer):
class Meta:
model = Accessment
fields = ('id', 'user', 'subject', 'score', 'registered_at' )
| [
"[email protected]"
] | |
cd98ac8c0c8181f075c9c2d079bd0db1e9c5eee6 | 10794c9a6b410094cef87afd2af08b10ddb96120 | /products/tests.py | 7b76230fc72a400174668068bdd0d312e95dce74 | [] | no_license | selinaerhabor/keep-learning-first-aid | 03a4316168a0bfc883198d806791f3f928d4faf6 | 8cbef179bbe3b57380b74e961c11359ffa96255b | refs/heads/master | 2023-07-21T14:40:17.584293 | 2023-07-08T11:53:43 | 2023-07-08T11:53:43 | 179,534,573 | 0 | 0 | null | 2022-12-26T20:15:35 | 2019-04-04T16:21:37 | HTML | UTF-8 | Python | false | false | 301 | py | from django.test import TestCase
from .models import Product
# Create your tests here.
class ProductTests(TestCase):
# Tests run against Product model created
def test_str(self):
test_name = Product(name='CPR Manikin')
self.assertEqual(str(test_name), 'CPR Manikin') | [
"[email protected]"
] | |
d57161b04a99d8d7a915746775fc7c9eab09f30b | 36ebb72ddc930e2b73bb02e01384335c706bd037 | /problem_11.py | c61eab1ac4efaf42495cc90fd1505a7dc606bc50 | [] | no_license | sunshinee24/leetcode | 8870374e860cb9931eadfc0b74fde3b92700afe1 | cd50f0d6702feadb6d5c83f64243c7b2f4736d5a | refs/heads/master | 2020-04-29T23:46:15.343212 | 2019-03-19T12:22:13 | 2019-03-19T12:22:13 | 176,486,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | class Solution:
def maxArea(self, height):
"""
:type height: List[int]
:rtype: int
"""
# area=min(height[right], height[left])*(right-left)
if (height == [] or len(height) < 2):
return 0
left = 0
right = len(height) - 1
max_area = 0
while (left < right):
max_area = max(max_area, min(height[right], height[left]) * (right - left))
#import pdb;pdb.set_trace()
#print(max_area)
if height[left] < height[right]:
left = left + 1
else:
right = right - 1
return max_area
if __name__ == "__main__":
input = [1,8,6,2,5,4,8,3,7]
result = Solution().maxArea(input)
print(result) | [
"[email protected]"
] | |
84b6b4f3ab5b6ee8f3cf6dac2754cb1b3820c24f | bf45c05e88f80002d07474ede90a38fe562eba85 | /PythonExercicios/Mundo_1/ex017.py | fe51c35d6803b3e28f2238878870466cbbb052f8 | [] | no_license | Patryck1999/Curso_em_Video | 262018d6ce7fb21932712110090c3b17d816a46b | 99946be44adca5ff94c4c82c432d6ab6a72cdfa9 | refs/heads/master | 2023-02-08T01:36:17.020755 | 2020-12-28T23:52:56 | 2020-12-28T23:52:56 | 325,139,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | """
Faça um programa que leia o comprimento do cateto oposto e do cateto adjacente de um triângulo retângulo, calcule
e mostre o comprimento da hipotesusa.
"""
"""
co = float(input("Comprimento do cateto oposto: "))
ca = float(input("Comprimento do cateto adjacente: "))
hi = ((co ** 2) + (ca ** 2)) ** (1/2)
print('A hipotenusa vai medir {}'.format(hi))
"""
from math import hypot
cateto_oposto = float(input("Digite o cateto oposto do triangulo retângulo: "))
cateto_adjacente = float(input("Digite o cateto adjacente do triângulo retângulo: "))
hipotenusa = hypot(cateto_oposto, cateto_adjacente)
print("A hipotenusa vai medir {:.2f}".format(hipotenusa))
| [
"[email protected]"
] | |
0ec61f3e8aa8071d599f9313f322b2894444add6 | 32648c442744ea9c49672d8383c1ea6644634e1c | /SI/hw/SI-5.py | ab88e79e53b111a83703203f4db3f60f282b3d3a | [] | no_license | NRL-Plasma-Physics-Division/turbopy-training-public | 2a411671d68491e4b34e74becda62fae458e1a5b | 411a9e07898277af84cd7cc1fdae89545d5b893a | refs/heads/main | 2023-04-22T20:12:43.310354 | 2020-11-09T21:52:52 | 2020-11-09T21:52:52 | 311,473,164 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,237 | py | import matplotlib.pyplot as plt
import numpy as np
import sys
# command line: python tang.py {alpha} {dt (step size)}
if not sys.argv[1:]:
print('please input an alpha and dt value')
exit()
ALPHA = float(sys.argv[1])
DT = float(sys.argv[2])
def firstorder(y0, n, dt):
"""
Calculates a list of first-order approximations
Parameters
----------
y0 : float
Initial y value
n : int
Number of days calculated for
dt : float
Step size of t
Returns
--------
f : list of float
List of first-order approximations from t = 0 to t = n
"""
i = dt
f = [y0]
while i <= n:
f += [(1 + (ALPHA * dt)) * f[-1]]
i += dt
return f
def secondorder(y0, n, dt):
"""
Calculates a list of second-order approximations
Parameters
----------
y0 : float
Initial y value
n : int
Number of days calculated for
dt : float
Step size of t
Returns
--------
s : list of float
List of second-order approximations from t = 0 to t = n
float
final value for t
"""
i = dt
s = [y0]
while i <= n:
s += [s[-1] * (1 + ALPHA * dt / 2) / (1 - ALPHA * dt / 2)]
i += dt
return s, i-dt
dt = np.arange(0.0, 5.0 + DT, DT)
t = np.arange(0.0, 5.01, 0.01)
control = np.exp(t * ALPHA)
fig1, one = plt.subplots()
one.plot(dt, firstorder(1, 5, DT), label='first order')
one.plot(dt, secondorder(1, 5, DT)[0], label='second order')
one.plot(t, control, label='exact')
one.set(xlabel='time (days)', ylabel='covid cases', title='first & second approximations')
one.grid()
one.legend()
ALPHA, DT = 1, 1
dt = np.arange(0.0, 5.0 + DT, DT)
control = np.exp(dt * ALPHA)
first = firstorder(1, 5, DT)
second = secondorder(1, 5, DT)[0]
fig2, two = plt.subplots()
two.plot(dt, np.abs(control - first), label='first order error')
two.plot(dt, np.abs(control - second), label='second order error')
two.set(xlabel='time (days)', ylabel='error', title='first & second approximation errors')
two.grid()
two.legend()
DT = 0.01
dt_values = np.logspace(-3, 0, 100)
control = np.exp(5)
first = [firstorder(1, 5, d)[-1] - control for d in dt_values]
second = []
for d in dt_values:
sec, i = secondorder(1, 5, d)
second += [sec[-1] * (1 + ALPHA * (5 - i) / 2) / (1 - ALPHA * (5 - i) / 2) - control]
fig3, three = plt.subplots()
three.set_xscale('log')
three.set_yscale('log')
three.plot(dt_values, np.abs(first), label='first order error')
three.plot(dt_values, np.abs(second), label='second order error')
three.set(xlabel='time (days)', ylabel='error', title='first & second approximation errors log log')
three.grid()
three.legend()
fig4, four = plt.subplots()
four.plot(np.arange(0.0, 2*np.pi, 1/2), np.cos(np.arange(0.0, 2*np.pi, 1/2)), label='delta = 1')
four.plot(np.arange(0.0, 2*np.pi, 1/4), np.cos(np.arange(0.0, 2*np.pi, 1/4)), label='delta = 1/2')
four.plot(np.arange(0.0, 2*np.pi, 1/6), np.cos(np.arange(0.0, 2*np.pi, 1/6)), label='delta = 1/3')
four.plot(np.arange(0.0, 2*np.pi, 0.01), np.cos(np.arange(0.0, 2*np.pi, 0.01)), label='cosine')
four.set(xlabel='x', ylabel='y', title='cosine approximations')
four.legend()
plt.show()
| [
"[email protected]"
] | |
6f813e0c622f6c3d96bca69e68f773713d3d2170 | 4a072ea49069c6aa2748b86dce4d59b94c5ffd61 | /bin/pip-3.7 | 2c5e6a1833edec68ea2100996b15935112363805 | [] | no_license | dahymond/tweetyou | 443f56e0227d13e4a2082e29e1c821e6c0678aea | 013b04a73ad9d6f4b831c4f7095256a12296fd2c | refs/heads/master | 2022-12-04T16:05:17.665843 | 2020-08-21T08:09:17 | 2020-08-21T08:09:17 | 285,932,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | 7 | #!/Users/technicalsuccessmanager/dev/tweetyou/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
3df1f8ee46dc0cf88bdeb39ac0d96a5490084917 | 13336e7a6c00f624d1bde319993a3842d50b21ca | /writeDCM.py | fa8d1f6043321e44d73a27f6deb07da10e859717 | [] | no_license | zenglilililili/WriteDCM | b82c691157bfa05708aa42f26503ddca7d67668b | 17521d27eb531e6bb20a292b26567dc8b5de212a | refs/heads/master | 2022-10-29T15:16:05.509458 | 2020-06-15T09:07:29 | 2020-06-15T09:07:29 | 271,925,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | import os
from getData import getData
from wDcm import writeDCM
root_Path = 'Data'
Doc_list = os.listdir(root_Path)
for doc in Doc_list:#对医生循环
pat_list = os.listdir(root_Path+'/'+doc)
for pat in pat_list:#对病人循环
data = getData(root_Path,doc,pat)
writeDCM(root_Path, doc, pat,data) | [
"[email protected]"
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.