max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
generateFormula.py | auto-staging/homebrew-stagectl | 0 | 12792451 | #!/usr/bin/env python
from jinja2 import Environment, FileSystemLoader
import os
envVersion = os.environ['VERSION']
envFileHash = os.environ['FILE_HASH']
file_loader = FileSystemLoader('templates')
env = Environment(loader=file_loader)
template = env.get_template('stagectl.rb.j2')
output = template.stream(version=envVersion, sha256=envFileHash).dump('stagectl.rb')
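# For reference, a minimal sketch of what 'templates/stagectl.rb.j2' might look like.
# The real template is not part of this script, so the class name, description and
# download URL below are illustrative assumptions; only the 'version' and 'sha256'
# variables come from the render call above.
#
#   class Stagectl < Formula
#     desc "stagectl command line tool"
#     url "https://example.com/stagectl-{{ version }}.tar.gz"
#     sha256 "{{ sha256 }}"
#     version "{{ version }}"
#   end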
| 2.203125 | 2 |
djhelpers/tests.py | trunneml/djhelpers | 0 | 12792452 | # Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from djhelpers.adminhelpers import ActionDecorator
from djhelpers.modelhelpers import short_description
class ShortDescriptionDecoratorTest(unittest.TestCase):
def test_decorator(self):
# Arrange
description = 'description'
# Act
@short_description(description)
def tfunc(x):
return x
result = tfunc(mock.sentinel.func_test_parameter)
# Assert
self.assertEqual(result, mock.sentinel.func_test_parameter)
self.assertEqual(tfunc.short_description, description)
self.assertFalse(hasattr(tfunc, 'boolean'))
def test_decorator_with_kwargs(self):
# Arrange
description = 'description'
# Act
@short_description(description, boolean=mock.sentinel.kwarg)
def tfunc(x):
return x
result = tfunc(mock.sentinel.func_test_parameter)
# Assert
self.assertEqual(result, mock.sentinel.func_test_parameter)
self.assertEqual(tfunc.short_description, description)
self.assertEqual(tfunc.boolean, mock.sentinel.kwarg)
class ActionDecoratorTest(unittest.TestCase):
def test_admin_action(self):
# Arrange
actions = ActionDecorator()
# Act
desc = 'test description'
@actions.action(desc)
def _t(x):
return x
# Assert
self.assertIsInstance(actions, list)
self.assertIn(_t, actions)
self.assertEqual(len(actions), 1)
self.assertEqual(_t.short_description, desc)
if __name__ == '__main__':
suite = unittest.TestLoader().discover('.')
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.25 | 2 |
programmers/blind_phone_number.py | schio/algorithm_test | 0 | 12792453 | # https://programmers.co.kr/learn/courses/30/lessons/12948
def solution(phone_number):
phone_number = list(phone_number)
phone_number[:-4] = ["*"] * (len(phone_number) - 4)
return "".join(phone_number)
| 3.390625 | 3 |
src/filesystem/transformation/transformer.py | pgecsenyi/fst | 1 | 12792454 | import os
import re
class Transformer:
def __init__(self):
self._cache = {}
self._full_path_cache = {}
def add_to_cache(self, directory_lister, transformations):
pattern_pairs = [
(re.compile(transformation.from_path), transformation.to_path)
for transformation
in transformations
]
paths = directory_lister.list_directory()
transformed_paths = self._transform_paths(paths, pattern_pairs)
self._build_cache(transformed_paths)
def get_directory_contents(self, path):
if path == os.sep:
return list(self._cache.keys())
parts = path.split(os.sep)
current_cache = self._cache
for part in parts:
if part == '':
continue
if part not in current_cache:
return list(current_cache.keys())
current_cache = current_cache[part]
return list(current_cache.keys())
def get_source_path(self, path):
if path.startswith(os.sep):
path = path[1:]
if path in self._full_path_cache:
return self._full_path_cache[path]
return ''
def _transform_paths(self, paths, patterns):
for path in paths:
for pattern_pair in patterns:
source_regexp = pattern_pair[0]
target_pattern = pattern_pair[1]
if source_regexp.match(path):
yield source_regexp.sub(target_pattern, path), path
break
def _build_cache(self, paths):
for path in paths:
source = path[1]
target = path[0]
parts = target.split(os.sep)
current_cache = self._cache
current_level = 0
deepest_level = len(parts) - 1
for part in parts:
if current_level < deepest_level:
current_cache.setdefault(part, {})
current_cache = current_cache[part]
current_level = current_level + 1
else:
current_cache[part] = source
self._full_path_cache[target] = source
break
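# Illustrative usage sketch. The directory lister and transformation objects are
# assumptions inferred from the interface used above, not classes defined here:
# 'directory_lister' must expose list_directory(), and each transformation must
# carry 'from_path' (a regex) and 'to_path' (its replacement pattern).
#
#   transformer = Transformer()
#   transformer.add_to_cache(directory_lister, transformations)
#   transformer.get_directory_contents(os.sep)        # top-level virtual entries
#   transformer.get_source_path("/virtual/file.txt")  # original path, or '' if unknown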
| 2.71875 | 3 |
TestClient/TestClient.py | SHI3DO/prushka-web | 1 | 12792455 | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import requests
apiserver = ""
class ListView(QWidget):
def __init__(self, parent=None):
super(ListView, self).__init__(parent)
self.setWindowTitle('Asphodel Downloader Test Client')
self.resize(400, 100)
self.initUI()
def append(self):
"""
        Append a chat message via the API.
"""
res = requests.put(f"http://192.168.127.12:7474/publicchat?content={self.appendChatInput.text()}").json()
def get(self):
"""
        If the client has no stored hash, request without one and receive the latest
        10 messages; if a hash is stored, send it so only the newer part is returned.
"""
if self.currentLastHashViewer.text() == '':
res = requests.get("http://192.168.127.12:7474/publicchat").json()
self.addItemList(res)
else:
res = requests.get(f"http://192.168.127.12:7474/publicchat?hash={self.currentLastHashViewer.text()}").json()
self.addItemList(res)
def addItemList(self, res):
"""
        Insert a list of [hash, text] pairs into the list view,
        and put the last hash into the hash viewer.
"""
print(res)
for n, r in enumerate(res):
print(n, r)
self.chatting.addItem(str(r))
            if n + 1 == len(res):
self.currentLastHashViewer.setText(str(r[0]))
def save(self):
"""
        Ask the API to persist the current data.
"""
requests.get("http://192.168.127.12:7474/savedb")
def initUI(self):
self.mainLayout = QVBoxLayout()
self.chatting = QListWidget()
self.currentLastHashTitle = QLabel('currentLastHash : ')
self.currentLastHashViewer = QLineEdit()
self.hashes = QHBoxLayout()
self.hashes.addWidget(self.currentLastHashTitle)
self.hashes.addWidget(self.currentLastHashViewer)
self.getChat = QPushButton('getChat')
self.getChat.clicked.connect(self.get)
self.appendChatInput = QLineEdit()
self.appendChatInput.setPlaceholderText('Input text to send')
self.appendChat = QPushButton('appendChat')
self.appendChat.clicked.connect(self.append)
self.saveBTN = QPushButton('SAVE')
self.saveBTN.clicked.connect(self.save)
self.mainLayout.addWidget(self.chatting)
self.mainLayout.addWidget(self.getChat)
self.mainLayout.addLayout(self.hashes)
self.mainLayout.addWidget(self.appendChatInput)
self.mainLayout.addWidget(self.appendChat)
self.mainLayout.addWidget(self.saveBTN)
self.setLayout(self.mainLayout)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = ListView()
window.show()
sys.exit(app.exec()) | 2.640625 | 3 |
src/plot.py | ekholabs/nltk_tutorial | 0 | 12792456 | """
Organisation: ekholabs
Author: <EMAIL>
"""
from nltk.book import text1, text2, text4
def plot_changes_in_use_of_words(book, words):
# Dispersion plot of the use of natural language in different contexts or situations. For example,
# the use of certain words used by Presidents over the years.
book.dispersion_plot(words)
if __name__ == '__main__':
# Inaugural Address Corpus
plot_changes_in_use_of_words(text4, ['citizen', 'democracy', 'freedom', 'duties', 'America'])
# <NAME> - <NAME>
plot_changes_in_use_of_words(text1, ['happy', 'sad'])
# <NAME> - Sense and Sensibility
plot_changes_in_use_of_words(text2, ['happy', 'sad']) | 3.140625 | 3 |
app/admin.py | iam-feysal/awwwwwwwards | 0 | 12792457 | from django.contrib import admin
from .models import Project,Profile,Review,Comment
# Register your models here.
class ReviewAdmin(admin.ModelAdmin):
model = Review
list_display = ('wine', 'rating', 'user_name', 'comment', 'pub_date')
list_filter = ['pub_date', 'user_name']
search_fields = ['comment']
admin.site.register(Project)
admin.site.register(Profile)
admin.site.register(Review, ReviewAdmin)
admin.site.register(Comment)
| 1.632813 | 2 |
mrc/localization/color/nn/models.py | Lukasz1928/mobile-robots-control | 2 | 12792458 | from abc import ABC
import chainer
import chainer.functions as F
import chainer.links as L
class AbstractModel(ABC):
def predict(self, blob):
pass
class DefaultModel:
def __init__(self, n_units=100, n_out=6, colors=('red', 'blue', 'green', 'cyan', 'magenta', 'yellow')):
self.n_units = n_units
self.n_out = n_out
self.model = self._net_model()
self.colors = colors
def _net_model(self):
layer = chainer.Sequential(L.Linear(self.n_units), F.relu)
model = layer.repeat(1)
model.append(L.Linear(self.n_out))
        return L.Classifier(
            model, lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy)
def _normalize_data(self, image):
return image.reshape(100, -1, 3)
def predict(self, blob):
image = self._normalize_data(blob)
return self.model.predictor(image[None]).data
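# Illustrative usage sketch. The blob shape is an assumption: _normalize_data reshapes
# the input to (100, -1, 3), so the blob must contain a multiple of 300 values.
#
#   import numpy as np
#   model = DefaultModel()
#   blob = np.zeros((100, 4, 3), dtype=np.float32)
#   scores = model.predict(blob)  # raw scores for the six colour classes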
| 3.125 | 3 |
backend.py | insertcustomname/EmailTrackExtension | 0 | 12792459 | from collections import Counter
from notify_run import Notify
import os
import time
import dropbox
import json
dropboxkey=""
notify = Notify()
notifyendpoint=""
notify.endpoint=notifyendpoint
notify.write_config()
from flask import Flask,request
maindictionary={}
dbx = dropbox.Dropbox(dropboxkey)
dbx.files_download_to_file("bannedpixel.txt","/bannedpixel.txt")
dbx.files_download_to_file("logpixel.txt","/logpixel.txt")
dbx.files_download_to_file("dictionary.txt","/dictionary.txt")
app = Flask(__name__)
@app.route("/pixel")
def home():
maindictionary=json.load(open("dictionary.txt"))
if (request.args.get("id")) in open("bannedpixel.txt").read():
pass
elif open("logpixel.txt").read().count(request.args.get("id")) == 0:
with open("logpixel.txt","a+") as f:
f.writelines(request.args.get("id") +"\n")
dbx = dropbox.Dropbox("")
dbx.files_upload(open("logpixel.txt","rb").read(),"/logpixel.txt",mode=dropbox.files.WriteMode.overwrite)
maindictionary[request.args.get("id")] = time.time()
with open('dictionary.txt', 'w+') as file:
            file.write(json.dumps(maindictionary))
dbx = dropbox.Dropbox(dropboxkey)
dbx.files_upload(open("dictionary.txt","rb").read(),"/dictionary.txt",mode=dropbox.files.WriteMode.overwrite)
elif open("logpixel.txt").read().count(request.args.get("id")) == 1:
if time.time() - maindictionary[request.args.get("id")] > 20:
            notify.send('Your email to ' + request.args.get("email") + " with subject: " + request.args.get("subject") + " has been opened")
text=open("logpixel.txt","a+").read()
text=text.replace((request.args.get("id")),"")
with open("logpixel.txt","w+") as f:
f.write(text)
with open("bannedpixel.txt","a+") as f:
f.writelines((request.args.get("id")) + "\n")
dbx = dropbox.Dropbox(dropboxkey)
dbx.files_upload(open("logpixel.txt","rb").read(),"/logpixel.txt",mode=dropbox.files.WriteMode.overwrite)
dbx.files_upload(open("bannedpixel.txt","rb").read(),"/bannedpixel.txt",mode=dropbox.files.WriteMode.overwrite)
return " NONE TEST VIEW "
if __name__ == "__main__":
app.run(port= int(os.environ.get('PORT', 5000)),host="0.0.0.0")
| 2.53125 | 3 |
Aula02/exercise1.py | GabiDeutner/Python_exercises | 4 | 12792460 | '''
1. Write a program that reads 2 values (assume the two values will not be equal)
and prints the larger one.
'''
print('Enter number 1:')
numero1 = float(input())
print('Enter number 2:')
numero2 = float(input())
if(numero1>numero2):
print(numero1)
else:
print(numero2) | 3.9375 | 4 |
web/ctf_gameserver/web/scoring/decorators.py | exokortex/kaindorfctf-2018-ctf-gameserver | 0 | 12792461 | from functools import wraps
from django.shortcuts import redirect
from django.conf import settings
from django.utils.translation import ugettext as _
from django.contrib import messages
from .models import GameControl
def registration_open_required(view):
"""
    View decorator which prohibits access to the decorated view if registration is closed in the GameControl
object.
"""
@wraps(view)
def func(request, *args, **kwargs):
if not GameControl.objects.get().registration_open:
messages.error(request, _('Sorry, registration is currently closed.'))
return redirect(settings.HOME_URL)
return view(request, *args, **kwargs)
return func
def competition_started_required(view):
"""
    View decorator which prohibits access to the decorated view if the competition has not yet started (i.e.
it must be running or over).
"""
@wraps(view)
def func(request, *args, **kwargs):
game_control = GameControl.objects.get()
if not game_control.competition_running() and not game_control.competition_over():
messages.error(request, _('Sorry, the scoreboard is not available yet.'))
return redirect(settings.HOME_URL)
return view(request, *args, **kwargs)
return func
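# Illustrative usage sketch; the view functions below are assumptions, any Django view
# can be wrapped the same way:
#
#   @registration_open_required
#   def register_team(request):
#       ...
#
#   @competition_started_required
#   def scoreboard(request):
#       ...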
| 2.328125 | 2 |
pygraph/min_spanning_tree.py | jysh1214/pygraph | 0 | 12792462 | from .get_imformation import GI
from .disjoint_set import DS
import math
class MST():
def __init__(self, adj_matrix, ins_matrix):
"""
Parameters:
adj_matrix(list):
The adjacency matrix of the graph.
ins_matrix(list):
The incidence matrix of the graph.
                Automatically created if the graph is undirected.
Returns:
Min spanning tree.
Attention:
            Undirected graph definition.
Raises:
ValueError, TypeError
"""
self.Adjacency_Matrix = adj_matrix
self.N = len(self.Adjacency_Matrix)
self.Insidence_Matrix = ins_matrix
self.con_ver = [] # contain vertices list now
self.mst = [] # min spanning tree now
def kruskal_algo(self):
"""
Returns: Min spanning tree.
Raises:
ValueError, TypeError
"""
# from get_imformation
gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix)
E = [(i, gi.get_weight(i)) for i in range(len(self.Insidence_Matrix[0]))]
# edges sorted by weight
E = sorted(E, key = lambda x: x[1])
for i in range(len(E)):
E[i] = E[i][0]
for i in range(len(E)):
(v_a, v_b) = gi.edge_term(E[i])
# from disjoint_set
ds = DS(self.Insidence_Matrix, self.con_ver, self.mst, v_a, v_b)
if ds.same_set(v_a, v_b):
pass # the edge could make circle
else:
self.mst.append(E[i])
if not (v_a in self.con_ver):
self.con_ver.append(v_a)
if not (v_b in self.con_ver):
self.con_ver.append(v_b)
return self.mst
def prims_algo(self, root):
"""
Parameters:
root(list):
The root of the min spanning tree.
Plays mst in interation.
Input [root] first generally.
Returns:
Min spanning tree.
Raises:
ValueError, TypeError
"""
# from get_imformation
gi = GI(self.Adjacency_Matrix, self.Insidence_Matrix)
# all neighbors of mst
min_ = math.inf
for i in range(len(root)):
root_nb = gi.get_nb(root[i])
# find which edges weight is min
for j in range(len(root_nb)):
if not(root_nb[j] in self.con_ver):
e = gi.get_edge(root[i], root_nb[j])
if gi.get_weight(e) < min_:
min_ = gi.get_weight(e)
min_term = root_nb[j]
min_edge = e
if len(root) == self.N:
return self.mst
root.append(min_term)
self.con_ver = root
self.mst.append(min_edge)
### unnecessary check loop ###
return self.prims_algo(root)
def put_all(a, b):
for i in a:
if not(i in b):
b.append(i)
return b
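# Illustrative usage sketch. The exact adjacency/incidence matrix conventions are
# defined by the GI and DS helpers in this package, so the arguments below are
# placeholders rather than verified values.
#
#   mst = MST(adjacency_matrix, incidence_matrix)
#   tree_edges = mst.kruskal_algo()   # edge indices of the minimum spanning tree
#
#   mst = MST(adjacency_matrix, incidence_matrix)
#   tree_edges = mst.prims_algo([0])  # grow the tree starting from vertex 0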
| 3.0625 | 3 |
tests/x-custom_tests.py | ivoupton/sheet2dict | 208 | 12792463 | import sys
from pathlib import Path
sys.path.append(str(Path(".").absolute().parent))
from sheet2dict import Worksheet
from io import BytesIO
ws = Worksheet()
ws.xlsx_to_dict(path="inventory.xlsx")
print(">>", ws.header)
print("ALL:", ws.sheet_items)
print("SANITIZED:", ws.sanitize_sheet_items)
path = "inventory.xlsx"
xlsx_file = open(path, "rb")
xlsx_file = BytesIO(xlsx_file.read())
ws = Worksheet()
ws.xlsx_to_dict(path=xlsx_file)
print(">>", ws.header)
ws = Worksheet()
path = "inventory.csv"
csv_file = open(path, "r", encoding="utf-8-sig")
ws.csv_to_dict(csv_file=csv_file, delimiter=";")
print("ALL:", ws.sheet_items)
print("SANITIZED:", ws.sanitize_sheet_items)
| 2.78125 | 3 |
python/code_troopers/Runner.py | tigeral/polygon | 0 | 12792464 | import sys
from MyStrategy import MyStrategy
from RemoteProcessClient import RemoteProcessClient
from model.Move import Move
from time import sleep
class Runner:
def __init__(self):
sleep(4)
if sys.argv.__len__() == 4:
self.remote_process_client = RemoteProcessClient(sys.argv[1], int(sys.argv[2]))
self.token = sys.argv[3]
else:
self.remote_process_client = RemoteProcessClient("localhost", 31001)
self.token = "0<PASSWORD>"
#next line enables my custom debugger window
debuggerEnabled = True
def run(self):
try:
self.remote_process_client.write_token(self.token)
team_size = self.remote_process_client.read_team_size()
self.remote_process_client.write_protocol_version()
game = self.remote_process_client.read_game_context()
strategies = []
for strategy_index in xrange(team_size):
strategies.append(MyStrategy())
while True:
player_context = self.remote_process_client.read_player_context()
if player_context is None:
break
player_trooper = player_context.trooper
move = Move()
strategies[player_trooper.teammate_index].move(player_trooper, player_context.world, game, move)
self.remote_process_client.write_move(move)
finally:
self.remote_process_client.close()
Runner().run()
| 2.34375 | 2 |
xldlib/controllers/bindings/table.py | Alexhuszagh/XLDiscoverer | 0 | 12792465 | '''
Controllers/Bindings/table
__________________________
Class with designed inheritance for copy/paste methods.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules/submodules
from PySide import QtCore
from xldlib.definitions import partial
from xldlib.onstart.main import APP
from xldlib.qt.objects import base, threads
from xldlib.qt import resources as qt
from . import copier, decorators, paster
# MUTEX
# -----
MUTEX = threads.ContextMutex(QtCore.QMutex.Recursive)
# SLOTS
# -----
def nullslot(result):
pass
def copyslot(result):
APP.clipboard().setText(result)
# OBJECTS
# -------
class TableBindings(base.BaseObject):
'''Provides methods to bind to QKeyShortcuts for facilitated table use'''
def __init__(self, table):
super(TableBindings, self).__init__(table)
self.table = table
self.copier = copier.HandleCopy(self.table)
self.paster = paster.HandlePaste(self.table)
self.set_shortcuts()
# PUBLIC FUNCTIONS
@decorators.newspinner(nullslot)
def delete(self):
self._delete()
def _delete(self, blank=""):
'''
Excel-like delete function. Deletes contents in all selected cells.
delete() -> void
(Row, Column) [Value] Selection: -->
(1,2) ["AA"], (1,3) ["BB"], (2,2) ["CC"], (2,3) ["DD"]
--> (1,2) [], (1,3) [], (2,2) [], (2,3) []
'''
selected_indexes = self.table.get_selected_indexes()
items = (self.table.item(i.row, i.column) for i in selected_indexes)
filtered = [i for i in items if i is not None]
for item in filtered:
item.setText(blank)
self.table.model().delete(selected_indexes)
if filtered and hasattr(self.table, "changed"):
self.table.changed = True
@decorators.newspinner(copyslot)
def cut(self):
'''Combines copy and delete operations for cut functionality'''
result = self.copier.copy()
self._delete()
return result
@decorators.newspinner(copyslot)
def copy(self):
return self.copier.copy()
def paste(self):
clipboard_text = self.app.clipboard().text()
self._paste(clipboard_text)
@decorators.newspinner(nullslot)
def _paste(self, clipboard_text):
self.paster.paste(clipboard_text)
def select_all(self):
'''Selects all item in the table'''
with MUTEX:
model = self.table.selectionModel()
mode = self.table.selectionMode()
self.table.setSelectionMode(qt.SELECTION_MODE['Extended'])
# clear selection
model.clearSelection()
selection = model.selection()
# reset the selection mode for all items
for column in range(self.table.columnCount()):
# only select visible items
if not self.table.isColumnHidden(column):
self.table.selectColumn(column)
selection.merge(model.selection(), qt.SELECTION_MODEL['Select'])
self.table.setSelectionMode(mode)
def select_mode(self, mode=None):
'''
Changes the QTableSelectionMode between the list options.
mode -- QtGui.QAbstractItemView.<atribute>
ExtendedSelection
SingleSelection
MultiSelection
select_mode(QtGui.QAbstractItemView.ExtendedSelection)
'''
mode = mode or qt.SELECTION_MODE['Extended']
self.table.setSelectionMode(mode)
self.table.blockSignals(mode != qt.SELECTION_MODE['Single'])
# GETTERS
def set_shortcuts(self):
self.shortcuts = {
'Ctrl+f': self.table.finder.show,
'Ctrl+b': self.table.block,
'Ctrl+c': self.copy,
'Ctrl+x': self.cut,
'Ctrl+v': self.paste,
'Del': self.delete,
'Ctrl+a': self.select_all
}
modes = {
'Ctrl+Shift+s': 'Single',
'Ctrl+Shift+m': 'Multi',
'Ctrl+Shift+e': 'Extended'
}
for keysequence, mode in modes.items():
fun = partial(self.select_mode, qt.SELECTION_MODE[mode])
self.shortcuts[keysequence] = fun
| 2.140625 | 2 |
01. Variable/021.py | MaksonViini/Aprendendo-Python | 1 | 12792466 | # Playing an MP3
from pygame import mixer
mixer.init()
mixer.music.load('EX021.mp3')  # Add the name of the music file here
mixer.music.play() | 2.5 | 2 |
wp.py | zamoose/wp-ansible | 0 | 12792467 | #!/usr/bin/python
# -*- coding: utf-8 -*-
def main():
module = AnsibleModule(
argument_spec = dict(
path=dict(required=True),
state=dict(),
plugin=dict(),
theme=dict(),
user=dict(),
version=dict(),
executable=dict(default=None),
),
supports_check_mode=True
)
path = module.params['path']
state = module.params['state']
before = None
after = None
changed = False
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
main()
| 1.765625 | 2 |
tweets/migrations/0004_auto_20201201_2002.py | bubaic/twitt | 0 | 12792468 | # Generated by Django 3.1.3 on 2020-12-01 14:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tweets', '0003_auto_20201201_0211'),
]
operations = [
migrations.AlterField(
model_name='tweet',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.CreateModel(
name='TweetLikes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('tweet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tweets.tweet')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='tweet',
name='likes',
field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLikes', to=settings.AUTH_USER_MODEL),
),
]
| 1.71875 | 2 |
leaderboard.py | cclauss/repo-tools | 0 | 12792469 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import collections
import datetime
import sys
from helpers import date_arg, make_timezone_aware
from repos import Repo
from webhookdb import get_pulls
def get_external_pulls(repo):
"""Produce a stream of external pull requests."""
for issue in get_pulls(repo, state="all", org=True):
if issue.intext == 'external':
yield issue
def get_all_external_pulls():
repos = [ r.name for r in Repo.from_yaml() if r.track_pulls ]
for repo in repos:
for pull in get_external_pulls(repo):
yield pull
def get_pulls_in_window(start, end):
for pull in get_all_external_pulls():
if start < make_timezone_aware(pull.created_at) < end:
yield pull
def get_contributor_counts(pulls):
board = collections.Counter()
for pull in pulls:
board[pull.user_login] += 1
return board
def main(argv):
parser = argparse.ArgumentParser(description="Count external pull requests opened by person")
parser.add_argument(
"--since", metavar="DAYS", type=int,
help="Use a start date DAYS ago"
)
parser.add_argument(
"--start", type=date_arg,
help="Date to start collecting, format is flexible: "
"20141225, Dec/25/2014, 2014-12-25, etc"
)
parser.add_argument(
"--end", type=date_arg,
help="Date to end collecting, format is flexible: "
"20141225, Dec/25/2014, 2014-12-25, etc"
)
args = parser.parse_args(argv[1:])
if args.start is None:
if args.since is None:
# Simplify the logic by always having a start date, but one so far back
# that it is like having no start date.
args.start = make_timezone_aware(datetime.datetime(2000, 1, 1))
else:
args.start = make_timezone_aware(datetime.datetime.now() - datetime.timedelta(days=args.since))
if args.end is None:
# Simplify the logic by always having an end date, but one so far ahead
# that it is like having no end date.
args.end = make_timezone_aware(datetime.datetime(2040, 1, 1))
pulls = get_pulls_in_window(args.start, args.end)
board = get_contributor_counts(pulls)
board = sorted(((v, k) for k,v in board.items()), reverse=True)
for i, (count, user_login) in enumerate(board, start=1):
print("{:4d}: {:4d} {}".format(i, count, user_login))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 2.625 | 3 |
python/setup.py | SamChill/drunkardswalk | 3 | 12792470 | #!/usr/bin/env python
from setuptools import setup
from os.path import dirname, abspath, join
setup(name='drunkardswalk',
packages=['drunkardswalk'],
)
| 1.320313 | 1 |
examples/docs_snippets/docs_snippets_tests/concepts_tests/io_management_tests/test_subselection.py | kstennettlull/dagster | 0 | 12792471 | from docs_snippets.concepts.io_management.subselection import (
execute_full,
execute_subselection,
)
def test_execute_job():
execute_full()
def test_execute_subselection():
execute_subselection()
| 1.179688 | 1 |
src/Scapy2Library/keywords/__init__.py | wywincl/Scapy2Library | 2 | 12792472 | from _corekeywords import _ScapyKeywords
from _runonfailure import _RunOnFailureKeywords
from _logging import _LoggingKeywords
__all__ = ["_ScapyKeywords",
"_RunOnFailureKeywords",
"_LoggingKeywords"]
| 1.234375 | 1 |
1701-1800/1712-Binary Subarrays With Sum/1712-Binary Subarrays With Sum.py | jiadaizhao/LintCode | 77 | 12792473 | class Solution:
"""
@param A: an array
@param S: the sum
@return: the number of non-empty subarrays
"""
def numSubarraysWithSum(self, A, S):
# Write your code here.
table = [0] * (len(A) + 1)
table[0] = 1
curr = count = 0
for a in A:
curr += a
if curr >= S:
count += table[curr - S]
table[curr] += 1
return count
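# Example: Solution().numSubarraysWithSum([1, 0, 1, 0, 1], 2) returns 4, counting
# [1,0,1], [1,0,1,0], [0,1,0,1] and the trailing [1,0,1].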
| 3.328125 | 3 |
app/enquiries/common/consent.py | uktrade/enquiry-mgmt-tool | 3 | 12792474 | from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from requests import HTTPError
from rest_framework import status
from app.enquiries.common.client import APIClient
from app.enquiries.common.hawk import HawkAuth
CONSENT_SERVICE_PATH_PERSON = "/api/v1/person/"
def request(url, method, **kwargs):
if not all([
settings.CONSENT_SERVICE_HAWK_ID,
settings.CONSENT_SERVICE_HAWK_KEY,
settings.CONSENT_SERVICE_BASE_URL,
]):
raise ImproperlyConfigured("CONSENT_SERVICE_* environment variables must be set")
client = APIClient(
api_url=settings.CONSENT_SERVICE_BASE_URL,
auth=HawkAuth(
api_id=settings.CONSENT_SERVICE_HAWK_ID,
api_key=settings.CONSENT_SERVICE_HAWK_KEY,
verify_response=settings.CONSENT_SERVICE_VERIFY_RESPONSE,
),
default_timeout=(
settings.CONSENT_SERVICE_CONNECT_TIMEOUT,
settings.CONSENT_SERVICE_READ_TIMEOUT,
),
)
return client.request(path=url, method=method, **kwargs)
def check_consent(key):
if not settings.FEATURE_FLAGS["ENFORCE_CONSENT_SERVICE"]:
return None
key = key.lower().replace(" ", "")
url = f"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}{key}/"
try:
response = request(url=url, method="GET")
return bool(len(response.json()["consents"]))
except HTTPError as e:
if e.response and e.response.status_code == status.HTTP_404_NOT_FOUND:
return False
return False
def set_consent(key, value=True):
if not settings.FEATURE_FLAGS["ENFORCE_CONSENT_SERVICE"]:
return None
key = key.lower()
key_type = "email" if "@" in key else "phone"
data = {
"consents": [f"{key_type}_marketing"] if value else [],
key_type: key,
"modified_at": datetime.now().isoformat(),
}
try:
url = f"{settings.CONSENT_SERVICE_BASE_URL}{CONSENT_SERVICE_PATH_PERSON}"
request(url=url, method="POST", json=data)
return True
except Exception:
return None
| 2.03125 | 2 |
models/det/__init__.py | BruceHan98/OCHTPS | 0 | 12792475 | from .pannet import PANNet
| 1.101563 | 1 |
pcep/prac_2.py | gliverm/devnet-study-group | 1 | 12792476 | def fun(inp=2, out=3):
return inp * out
print(fun(out=2)) | 2.6875 | 3 |
pgweb/util/admin.py | ChristophBerg/pgweb | 1 | 12792477 | from django.contrib import admin
from django.conf import settings
from pgweb.core.models import ModerationNotification
from mailqueue.util import send_simple_mail
class PgwebAdmin(admin.ModelAdmin):
"""
ModelAdmin wrapper that will enable a few pg specific things:
* Markdown preview for markdown capable textfields (specified by
    including them in a class variable named markdown_fields that is a tuple
of field names)
* Add an admin field for "notification", that can be sent to the submitter
of an item to inform them of moderation issues.
"""
change_form_template = 'admin/change_form_pgweb.html'
def formfield_for_dbfield(self, db_field, **kwargs):
fld = admin.ModelAdmin.formfield_for_dbfield(self, db_field, **kwargs)
if hasattr(self.model, 'markdown_fields'):
if db_field.name in self.model.markdown_fields:
fld.widget.attrs['class'] = fld.widget.attrs['class'] + ' markdown_preview'
return fld
def change_view(self, request, object_id, form_url='', extra_context=None):
if self.model.send_notification:
# Anything that sends notification supports manual notifications
            if extra_context is None:
extra_context = dict()
extra_context['notifications'] = ModerationNotification.objects.filter(objecttype=self.model.__name__, objectid=object_id).order_by('date')
return super(PgwebAdmin, self).change_view(request, object_id, form_url, extra_context)
# Remove the builtin delete_selected action, so it doesn't
# conflict with the custom one.
def get_actions(self, request):
actions = super(PgwebAdmin, self).get_actions(request)
del actions['delete_selected']
return actions
# Define a custom delete_selected action. This is required because the
# default one uses the delete functionality in QuerySet, which bypasses
# the delete() operation on the model, and thus won't send out our
# notifications. Manually calling delete() on each one will be slightly
# slower, but will send proper notifications - and it's not like this
# is something that happens often enough that we care about performance.
def custom_delete_selected(self, request, queryset):
for x in queryset:
x.delete()
custom_delete_selected.short_description = "Delete selected items"
actions=['custom_delete_selected']
def save_model(self, request, obj, form, change):
if change and self.model.send_notification:
# We only do processing if something changed, not when adding
# a new object.
if request.POST.has_key('new_notification') and request.POST['new_notification']:
# Need to send off a new notification. We'll also store
# it in the database for future reference, of course.
if not obj.org.email:
# Should not happen because we remove the form field. Thus
# a hard exception is ok.
raise Exception("Organization does not have an email, canot send notification!")
n = ModerationNotification()
n.objecttype = obj.__class__.__name__
n.objectid = obj.id
n.text = request.POST['new_notification']
n.author = request.user.username
n.save()
# Now send an email too
msgstr = _get_notification_text(request.POST.has_key('remove_after_notify'),
obj,
request.POST['new_notification'])
send_simple_mail(settings.NOTIFICATION_FROM,
obj.org.email,
"postgresql.org moderation notification",
msgstr)
# Also generate a mail to the moderators
send_simple_mail(settings.NOTIFICATION_FROM,
settings.NOTIFICATION_EMAIL,
"Moderation comment on %s %s" % (obj.__class__._meta.verbose_name, obj.id),
_get_moderator_notification_text(request.POST.has_key('remove_after_notify'),
obj,
request.POST['new_notification'],
request.user.username
))
if request.POST.has_key('remove_after_notify'):
# Object should not be saved, it should be deleted
obj.delete()
return
# Either no notifications, or done with notifications
super(PgwebAdmin, self).save_model(request, obj, form, change)
def register_pgwebadmin(model):
admin.site.register(model, PgwebAdmin)
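# Illustrative usage sketch; the model below is an assumption. Any registered model can
# opt in to markdown previews and moderation notifications through class attributes:
#
#   class NewsArticle(models.Model):
#       markdown_fields = ('content',)
#       send_notification = True
#       ...
#
#   register_pgwebadmin(NewsArticle)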
def _get_notification_text(remove, obj, txt):
objtype = obj.__class__._meta.verbose_name
if remove:
return """You recently submitted a %s to postgresql.org.
This submission has been rejected by a moderator, with the following comment:
%s
""" % (objtype, txt)
else:
return """You recently submitted a %s to postgresql.org.
During moderation, this item has received comments that need to be
addressed before it can be approved. The comment given by the moderator is:
%s
Please go to https://www.postgresql.org/account/ and make any changes
requested, and your submission will be re-moderated.
""" % (objtype, txt)
def _get_moderator_notification_text(remove, obj, txt, moderator):
return """Moderator %s made a comment to a pending object:
Object type: %s
Object id: %s
Comment: %s
Delete after comment: %s
""" % (moderator,
obj.__class__._meta.verbose_name,
obj.id,
txt,
remove and "Yes" or "No",
)
| 2.171875 | 2 |
new_venv/Lib/site-packages/cardio/core/utils.py | Shlyankin/cardio | 250 | 12792478 | """Miscellaneous ECG Batch utils."""
import functools
import pint
import numpy as np
from sklearn.preprocessing import LabelBinarizer as LB
UNIT_REGISTRY = pint.UnitRegistry()
def get_units_conversion_factor(old_units, new_units):
"""Return a multiplicative factor to convert a measured quantity from old
to new units.
Parameters
----------
old_units : str
Current units in SI format.
new_units : str
Target units in SI format.
Returns
-------
factor : float
A factor to convert quantities between units.
"""
try: # pint exceptions are wrapped with ValueError exceptions because they don't implement __repr__ method
factor = UNIT_REGISTRY(old_units).to(new_units).magnitude
except Exception as error:
raise ValueError(error.__class__.__name__ + ": " + str(error))
return factor
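# Example: converting millivolts to volts.
#
#   get_units_conversion_factor("mV", "V")             # -> 0.001
#   get_units_conversion_factor("millivolt", "volt")   # -> 0.001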
def partialmethod(func, *frozen_args, **frozen_kwargs):
"""Wrap a method with partial application of given positional and keyword
arguments.
Parameters
----------
func : callable
A method to wrap.
frozen_args : misc
Fixed positional arguments.
frozen_kwargs : misc
Fixed keyword arguments.
Returns
-------
method : callable
Wrapped method.
"""
@functools.wraps(func)
def method(self, *args, **kwargs):
"""Wrapped method."""
return func(self, *frozen_args, *args, **frozen_kwargs, **kwargs)
return method
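# Example: freezing the first positional argument of a method with this helper.
#
#   class Scaler:
#       def scale(self, factor, value):
#           return factor * value
#       double = partialmethod(scale, 2)
#
#   Scaler().double(21)  # -> 42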
class LabelBinarizer(LB):
"""Encode categorical features using a one-hot scheme.
Unlike ``sklearn.preprocessing.LabelBinarizer``, each label will be
encoded using ``n_classes`` numbers even for binary problems.
"""
# pylint: disable=invalid-name
def transform(self, y):
"""Transform ``y`` using one-hot encoding.
Parameters
----------
y : 1-D ndarray of shape ``[n_samples,]``
Class labels.
Returns
-------
Y : 2-D ndarray of shape ``[n_samples, n_classes]``
One-hot encoded labels.
"""
Y = super().transform(y)
if len(self.classes_) == 1:
Y = 1 - Y
if len(self.classes_) == 2:
Y = np.hstack((1 - Y, Y))
return Y
def inverse_transform(self, Y, threshold=None):
"""Transform one-hot encoded labels back to class labels.
Parameters
----------
Y : 2-D ndarray of shape ``[n_samples, n_classes]``
One-hot encoded labels.
threshold : float, optional
The threshold used in the binary and multi-label cases. If
``None``, it is assumed to be half way between ``neg_label`` and
``pos_label``.
Returns
-------
y : 1-D ndarray of shape ``[n_samples,]``
Class labels.
"""
if len(self.classes_) == 1:
y = super().inverse_transform(1 - Y, threshold)
elif len(self.classes_) == 2:
y = super().inverse_transform(Y[:, 1], threshold)
else:
y = super().inverse_transform(Y, threshold)
return y
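# Example: unlike sklearn's LabelBinarizer, binary problems still get two columns.
#
#   lb = LabelBinarizer().fit([0, 1])
#   lb.transform([1, 0, 1])
#   # -> array([[0, 1],
#   #           [1, 0],
#   #           [0, 1]])
#   lb.inverse_transform(lb.transform([1, 0, 1]))  # -> array([1, 0, 1])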
| 3.0625 | 3 |
test/unit/parser_test.py | jartigag/pydometer | 2 | 12792479 | import pytest
from models.parser import Parser
def test_new():
pass #TODO
#data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;'
#parser = Parser(data)
#assert parser.parsed_data==None
# --- Creation Tests ---
def test_create_combined_data():
data = '0.123,-0.123,5;0.456,-0.789,0.111;-0.212,0.001,1;'
parser = Parser(data)
assert parser.parsed_data==[ [[0.123, -0.123, 5.0], [0, 0, 0]],
[[0.456, -0.789, 0.111], [0, 0, 0]],
[[-0.2120710948533322, 0.0011468544965549535, 0.9994625125426089],
[7.109485333219216e-05, -0.00014685449655495343, 0.0005374874573911294]]]
def test_create_separated_data():
data = '0.028,-0.072,5|0.129,-0.945,-5;0,-0.07,0.06|0.123,-0.947,5;0.2,-1,2|0.1,-0.9,3;'
parser = Parser(data)
assert parser.parsed_data==[[[0.028, -0.072, 5.0], [0.129, -0.945, -5.0]],
[[0.0, -0.07, 0.06], [0.123, -0.947, 5.0]],
[[0.2, -1.0, 2.0], [0.1, -0.9, 3.0]]]
def test_create_string_values_parses_to_0s():
data = "1,2,foo;"
parser = Parser(data)
assert parser.parsed_data==[[[1.0, 2.0, 0.0], [0, 0, 0]]]
data = "1,2,foo|4,bar,6;"
parser = Parser(data)
assert parser.parsed_data==[[[1.0, 2.0, 0.0], [4.0, 0.0, 6.0]]]
# --- Creation Failure Tests ---
#def test_create_none():
# pass #TODO
#
#def test_create_empty():
# pass #TODO
#
#def test_create_bad_input_too_many_values():
# pass #TODO
#
#def test_create_bad_input_too_few_values():
# pass #TODO
#
#def test_create_bad_input_delimiters():
# pass #TODO
| 2.65625 | 3 |
models/stock_model.py | satyam93sinha/SuperSimpleStockMarket | 0 | 12792480 | class StockModel:
def __init__(self):
self.stock_symbol = None
self.stock_type = None
self.last_dividend = 0
self.fixed_dividend = 0
self.par_value = 0
def set_stock_symbol(self, symbol_of_stock: str) -> None:
self.stock_symbol = symbol_of_stock
def get_stock_symbol(self):
return self.stock_symbol
def set_stock_type(self, type_of_stock):
self.stock_type = type_of_stock
def get_stock_type(self):
return self.stock_type
def set_last_dividend(self, dividend):
self.last_dividend = dividend
def get_last_dividend(self):
return self.last_dividend
def set_fixed_dividend(self, dividend_fix):
self.fixed_dividend = dividend_fix
def get_fixed_dividend(self):
return self.fixed_dividend
def set_par_value(self, new_par_value):
self.par_value = new_par_value
def get_par_value(self):
return self.par_value | 3.09375 | 3 |
yowsup/demos/contacts/stack.py | zulu494/Anoa-Bot- | 1 | 12792481 | from .layer import SyncLayer
from yowsup.stacks import YowStackBuilder
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowAuthenticationProtocolLayer
from yowsup.layers.network import YowNetworkLayer
class YowsupSyncStack(object):
def __init__(self, profile, contacts):
"""
:param profile:
:param contacts: list of [jid ]
:return:
"""
stackBuilder = YowStackBuilder()
self._stack = stackBuilder \
.pushDefaultLayers() \
.push(SyncLayer) \
.build()
self._stack.setProp(SyncLayer.PROP_CONTACTS, contacts)
self._stack.setProp(YowAuthenticationProtocolLayer.PROP_PASSIVE, True)
self._stack.setProfile(profile)
def set_prop(self, key, val):
self._stack.setProp(key, val)
def start(self):
self._stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
self._stack.loop()
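# Illustrative usage sketch; profile construction and the jid value are assumptions
# that depend on the yowsup version in use:
#
#   stack = YowsupSyncStack(profile, ["491234567890@s.whatsapp.net"])
#   stack.start()  # connects, runs the sync layer, then enters the event loop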
| 2.015625 | 2 |
1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/02_Conditional-Statements/00.Book-Exercise-3.1-08-Metric-Converter.py | karolinanikolova/SoftUni-Software-Engineering | 0 | 12792482 | # Unit converter
# Write a program that converts a distance between the following 8 units:
# m, mm, cm, mi, in, km, ft, yd. Use the conversion factors from the table below.
amount = float(input())
unit_input = input().lower()
unit_output = input().lower()
if unit_input == 'mm':
amount = amount / 1000
elif unit_input == 'cm':
amount = amount / 100
elif unit_input == 'mi':
amount = amount / 0.000621371192
elif unit_input == 'in':
amount = amount / 39.3700787
elif unit_input == 'km':
amount = amount / 0.001
elif unit_input == 'ft':
amount = amount / 3.2808399
elif unit_input == 'yd':
amount = amount / 1.0936133
if unit_output == 'mm':
amount = amount * 1000
elif unit_output == 'cm':
amount = amount * 100
elif unit_output == 'mi':
amount = amount * 0.000621371192
elif unit_output == 'in':
amount = amount * 39.3700787
elif unit_output == 'km':
amount = amount * 0.001
elif unit_output == 'ft':
amount = amount * 3.2808399
elif unit_output == 'yd':
amount = amount * 1.0936133
print(amount)
# # Other method
# amount = float(input())
#
# unit_input = input().lower()
# unit_output = input().lower()
#
# dict = {'m':1, 'mm':1000, 'cm':100, 'mi':0.000621371192, 'in':39.3700787, 'km':0.001, 'ft':3.2808399, 'yd':1.0936133}
#
# amount = amount * dict[unit_output] / dict[unit_input]
#
# print(amount)
| 3.703125 | 4 |
Model/StaffModel.py | izazdhiya/E-Library-Desktop | 2 | 12792483 |
from .BaseModel import *
class StaffModel(BaseModel):
def __init__(self):
super().__init__()
def validStaff(self,usr,passwd):
query = f"SELECT * FROM staff WHERE email='{usr}' AND pass='{passwd}'"
try:
return self.database.fetchall(query)[0][0]
except Exception as e:
return False | 2.6875 | 3 |
setup.py | SeabornGames/File | 0 | 12792484 | from setuptools import setup
import os
try:
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
long_description = f.read()
except Exception:
long_description = ''
setup(
name='seaborn-file',
version='1.1.1',
description='Seaborn-File enables the manipulation of the'
'directories of a computer within a program.',
    long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/SeabornGames/File',
download_url='https://github.com/SeabornGames/File'
'/tarball/download',
keywords=['os'],
install_requires=[
],
extras_require={},
packages=['seaborn_file'],
license='MIT License',
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: Other/Proprietary License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6'],
)
| 1.65625 | 2 |
iot/led/light-sensor/main.py | Kevin181/geektime | 0 | 12792485 | from ble_lightsensor import BLELightSensor
from lightsensor import LightSensor
import time
import bluetooth
def main():
ble = bluetooth.BLE()
ble.active(True)
ble_light = BLELightSensor(ble)
light = LightSensor(36)
light_density = light.value()
i = 0
while True:
# Write every second, notify every 10 seconds.
i = (i + 1) % 10
ble_light.set_light(light_density, notify=i == 0)
print("Light Lux:", light_density)
light_density = light.value()
time.sleep_ms(1000)
if __name__ == "__main__":
main() | 3.140625 | 3 |
scripts/summarize_word_frequency_by_corpus.py | codebyzeb/Zorro | 2 | 12792486 | """
How often do words in test sentences occur in each target corpus?
"""
import numpy as np
from zorro import configs
from zorro.vocab import load_vocab_df
vocab_df = load_vocab_df(return_excluded_words=True)
column_names = [f'{corpus_name}-frequency' for corpus_name in configs.Data.corpus_names]
f_df = vocab_df[column_names]
vw2fs = {w: np.array([fs[k] for k in column_names]) for w, fs in f_df.iterrows()}
stop_words = set((configs.Dirs.external_words / "stopwords.txt").open().read().split())
# collect types used in test sentences
cn2f = {cn: 0 for cn in column_names}
fs_sum_total = 0
for paradigm_path in (configs.Dirs.sentences / 'babyberta').glob('*.txt'):
words_in_test_sentences = set()
for w in paradigm_path.read_text().split():
if w not in stop_words:
words_in_test_sentences.add(w.lower())
fs_sum = np.zeros(len(column_names))
for w in words_in_test_sentences:
fs = vw2fs[w]
fs_sum += fs
# collect
fs_sum_total += fs_sum.sum()
print(fs_sum_total)
print(paradigm_path.name)
for cn, f in zip(column_names, fs_sum):
print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum.sum():.2f}')
# collect
cn2f[cn] += f
# summary
print()
print('Summary')
for cn, f in cn2f.items():
print(f'{cn:.<32} {f:>12,} proportion={f/ fs_sum_total:.3f}') | 3.09375 | 3 |
labpack/records/ip.py | collectiveacuity/labPack | 2 | 12792487 | __author__ = 'rcj1492'
__created__ = '2017.06'
__licence__ = 'MIT'
def get_ip(source='aws'):
''' a method to get current public ip address of machine '''
if source == 'aws':
source_url = 'http://checkip.amazonaws.com/'
else:
raise Exception('get_ip currently only supports queries to aws')
import requests
try:
response = requests.get(url=source_url)
except Exception as err:
from labpack.handlers.requests import handle_requests
from requests import Request
request_object = Request(method='GET', url=source_url)
request_details = handle_requests(request_object)
raise Exception(request_details['error'])
current_ip = response.content.decode()
current_ip = current_ip.strip()
return current_ip
def describe_ip(ip_address, source='whatismyip'):
''' a method to get the details associated with an ip address '''
# determine url
if source == 'nekudo':
source_url = 'https://geoip.nekudo.com/api/%s' % ip_address
elif source == 'geoip':
source_url = 'https://freegeoip.net/json/%s' % ip_address
elif source == 'whatismyip':
# http://whatismyipaddress.com/ip-lookup
source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address
else:
raise Exception('describe_ip currently only supports queries to nekudo')
# TODO incorporate geoip module and c dependencies with local database
# http://tech.marksblogg.com/ip-address-lookups-in-python.html
# send request
ip_details = {
'accuracy_radius': 0,
'asn': '',
'assignment': '',
'city': '',
'continent': '',
'country': '',
'hostname': '',
'ip': '',
'isp': '',
'latitude': 0.0,
'longitude': 0.0,
'organization': '',
'postal_code': '',
'region': '',
'timezone': '',
'type': ''
}
import requests
try:
response = requests.get(url=source_url)
except Exception as err:
from labpack.handlers.requests import handle_requests
from requests import Request
request_object = Request(method='GET', url=source_url)
request_details = handle_requests(request_object)
raise Exception(request_details['error'])
# extract response
if source == 'whatismyip':
import re
response_text = response.content.decode()
table_regex = re.compile('<table>\n<tr><th>IP.*?</table>\n<span\sstyle', re.S)
table_search = table_regex.findall(response_text)
if table_search:
table_text = table_search[0]
field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code']
for field in field_list:
field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S)
field_search = field_regex.findall(table_text)
if field_search:
ip_details[field.lower().replace(' ','_')] = field_search[0]
for field in ('longitude', 'latitude'):
if field in ip_details.keys():
coord_regex = re.compile('\-?\d+\.\d+')
coord_search = coord_regex.findall(ip_details[field])
if coord_search:
ip_details[field] = float(coord_search[0])
if 'country' in ip_details.keys():
country_regex = re.compile('([\w\s]+?)($|\s<img)')
country_search = country_regex.findall(ip_details['country'])
if country_search:
ip_details['country'] = country_search[0][0]
for field in ('type', 'assignment'):
if field in ip_details.keys():
link_regex = re.compile('>(.*?)<')
link_search = link_regex.findall(ip_details[field])
if link_search:
ip_details[field] = link_search[0]
if 'state/region' in ip_details.keys():
ip_details['region'] = ip_details['state/region']
del ip_details['state/region']
elif source == 'nekudo':
response_details = response.json()
ip_details['country'] = response_details['country']['name']
ip_details['latitude'] = response_details['location']['latitude']
ip_details['longitude'] = response_details['location']['longitude']
ip_details['accuracy_radius'] = response_details['location']['accuracy_radius']
if response_details['city']:
ip_details['city'] = response_details['city']
ip_details['ip'] = response_details['ip']
for key in response_details.keys():
if key not in ip_details.keys() and key != 'location':
ip_details[key] = response_details[key]
else:
response_details = response.json()
for field in ('city', 'ip', 'latitude', 'longitude'):
ip_details[field] = response_details[field]
ip_details['country'] = response_details['country_name']
ip_details['region'] = response_details['region_name']
ip_details['postal_code'] = response_details['zip_code']
ip_details['timezone'] = response_details['time_zone']
return ip_details
if __name__ == '__main__':
from pprint import pprint
ip_address = get_ip()
ip_details = describe_ip(ip_address)
pprint(ip_details)
pprint(describe_ip(ip_address, 'nekudo'))
pprint(describe_ip(ip_address, 'geoip')) | 2.84375 | 3 |
autobuyfast/cars/migrations/0045_auto_20210908_0458.py | dark-codr/autouyfast | 0 | 12792488 | # Generated by Django 3.1.13 on 2021-09-08 03:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cars', '0044_carcompare'),
]
operations = [
migrations.AlterField(
model_name='carcompare',
name='car_one',
field=models.ForeignKey(default=4, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carone', to='cars.autosearch'),
),
migrations.AlterField(
model_name='carcompare',
name='car_three',
field=models.ForeignKey(default=3, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='carthree', to='cars.autosearch'),
),
migrations.AlterField(
model_name='carcompare',
name='car_two',
field=models.ForeignKey(default=9, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='cartwo', to='cars.autosearch'),
),
]
| 1.359375 | 1 |
tests/test_list.py | zheng-gao/ez_code | 0 | 12792489 | from ezcode.list.linked_list import SinglyLinkedList
from ezcode.list.stack import Stack, MinStack, MaxStack
from ezcode.list.queue import Queue, MonotonicQueue
from ezcode.list.lru_cache import LRUCache
from fixture.utils import equal_list
class Node:
def __init__(self, v=None, n=None):
self.v = v
self.n = n
def __repr__(self):
return f"Node({self.v})"
def test_singly_linked_list_basics():
list_0 = SinglyLinkedList(head=None, data_name="v", next_name="n")
list_0_copy = SinglyLinkedList(head=None, data_name="v", next_name="n")
list_0_reverse = SinglyLinkedList(head=None, data_name="v", next_name="n")
list_1 = SinglyLinkedList(head=Node(1), data_name="v", next_name="n")
list_1_copy = SinglyLinkedList(head=Node(1), data_name="v", next_name="n")
list_1_reverse = SinglyLinkedList(head=Node(1), data_name="v", next_name="n")
list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name="v", next_name="n")
list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name="v", next_name="n")
list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name="v", next_name="n")
list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name="v", next_name="n")
list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name="v", next_name="n")
list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name="v", next_name="n")
assert list_0_copy == list_0
assert list_1_copy == list_1
assert list_2_copy == list_2
assert list_3_copy == list_3
assert list_0.copy() == list_0_copy
assert list_1.copy() == list_1_copy
assert list_2.copy() == list_2_copy
assert list_3.copy() == list_3_copy
assert not list_0 == list_1
assert not list_1 == list_2
assert not list_2 == list_3
assert not list_3 == list_0
assert str(list_0) == "None"
assert str(list_1) == "1 ─> None"
assert str(list_2) == "1 ─> 2 ─> None"
assert str(list_3) == "1 ─> 2 ─> 3 ─> None"
assert equal_list(list_0.to_array(), [])
assert equal_list(list_1.to_array(), [1])
assert equal_list(list_2.to_array(), [1, 2])
assert equal_list(list_3.to_array(), [1, 2, 3])
list_0_reverse_copy = list_0_reverse.copy()
list_1_reverse_copy = list_1_reverse.copy()
list_2_reverse_copy = list_2_reverse.copy()
list_3_reverse_copy = list_3_reverse.copy()
list_0_reverse_copy.reverse()
list_1_reverse_copy.reverse()
list_2_reverse_copy.reverse()
list_3_reverse_copy.reverse()
assert list_0_copy == list_0_reverse_copy
assert list_1_copy == list_1_reverse_copy
assert list_2_copy == list_2_reverse_copy
assert list_3_copy == list_3_reverse_copy
list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head))
list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head))
list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head))
list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head))
assert list_0_copy == list_0_reverse
assert list_1_copy == list_1_reverse
assert list_2_copy == list_2_reverse
assert list_3_copy == list_3_reverse
try:
list_0.peek_head() == 0
except IndexError as e:
assert e.args[0] == "Peek head at an empty SinglyLinkedList"
else:
assert False
list_1.peek_head() == 1
list_2.peek_head() == 2
list_3.peek_head() == 3
list_3_copy.pop_head() == list_2_copy
list_2_copy.pop_head() == list_1_copy
list_1_copy.pop_head() == list_0_copy
try:
list_0.pop_head()
except IndexError as e:
assert e.args[0] == "Pop head from an empty SinglyLinkedList"
else:
assert False
list_3.delete(set([2, 3])) == list_1
list_2.delete(set([1, 2])) == list_0
def test_reverse_sublist():
lists = [
SinglyLinkedList(head=Node(0), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1)), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name="v", next_name="n"),
]
for list_orig in lists:
list_orig.print()
for i in range(len(list_orig)):
list_orig_copy = list_orig.copy()
list_orig_copy.reverse(start_index=i)
assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for x in range(len(list_orig) - 1, i - 1, -1)])
list_orig_copy = list_orig.copy()
list_orig_copy.reverse(end_index=i)
assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] + [x for x in range(i + 1, len(list_orig))])
sublist_length = len(list_orig) // 2
if sublist_length > 0 and i <= len(list_orig) - sublist_length:
start, end = i, i + sublist_length - 1
list_orig_copy = list_orig.copy()
list_orig_copy.reverse(start_index=start, end_index=end)
assert equal_list(
list_orig_copy.to_array(),
[x for x in range(start)] + [x for x in range(end, start - 1, -1)] + [x for x in range(end + 1, len(list_orig))]
)
def test_queue():
queue = Queue()
for i in range(3):
assert len(queue) == i
queue.push(i)
assert queue.peek() == 0
for i in range(3):
assert len(queue) == 3 - i
assert queue.peek() == i
assert queue.pop() == i
def test_stack():
stack = Stack()
for i in range(3):
assert len(stack) == i
stack.push(i)
assert stack.peek() == i
for i in range(3):
assert len(stack) == 3 - i
assert stack.peek() == 2 - i
assert stack.pop() == 2 - i
def test_lru_cache():
lru_cache = LRUCache(capacity=3)
assert lru_cache.get(1) is None
lru_cache.put(key=1, value=1)
lru_cache.put(key=2, value=2)
lru_cache.put(key=3, value=3)
assert lru_cache.get(1) == 1 # 1 3 2
lru_cache.put(key=4, value=4) # 4 1 3 (no 2)
assert lru_cache.get(2) is None
assert lru_cache.get(4) == 4 # 4 1 3
lru_cache.put(key=3, value=33) # 3 4 1
lru_cache.put(key=5, value=5) # 5 3 4 (no 1)
assert lru_cache.get(1) is None
assert lru_cache.get(3) == 33
assert lru_cache.get(5) == 5
def test_min_max_stack():
min_stack = MinStack()
max_stack = MaxStack()
for data, min_data, max_data in zip([2, 1, 3, 5, 4], [2, 1, 1, 1, 1], [2, 2, 3, 5, 5]):
min_stack.push(data)
max_stack.push(data)
assert min_stack.get_min() == min_data
assert max_stack.get_max() == max_data
for min_data, max_data in zip([1, 1, 1, 2], [5, 3, 2, 2]):
min_stack.pop()
max_stack.pop()
assert min_stack.get_min() == min_data
assert max_stack.get_max() == max_data
def test_monotonic_queue():
mq = MonotonicQueue(is_increasing=True)
for data, benchmark in zip([5, 3, 1, 2, 4], [5, 3, 1, 1, 1]):
mq.push(data)
assert mq.peek() == benchmark
mq = MonotonicQueue(is_increasing=False)
for data, benchmark in zip([5, 3, 1, 2, 4], [5, 5, 5, 5, 5]):
mq.push(data)
assert mq.peek() == benchmark
| 3.015625 | 3 |
examples/optimal_burst/bursts_ttk_simulation.py | spascou/ps2-analysis | 2 | 12792490 | import logging
import os
from typing import List, Optional
import altair
from ps2_census.enums import PlayerState
from ps2_analysis.enums import DamageLocation
from ps2_analysis.fire_groups.cone_of_fire import ConeOfFire
from ps2_analysis.fire_groups.data_files import (
update_data_files as update_fire_groups_data_files,
)
from ps2_analysis.fire_groups.fire_mode import FireMode
from ps2_analysis.utils import CodeTimer
from ps2_analysis.weapons.infantry.data_files import (
update_data_files as update_infantry_weapons_data_files,
)
from ps2_analysis.weapons.infantry.generate import generate_all_infantry_weapons
from ps2_analysis.weapons.infantry.infantry_weapon import InfantryWeapon
logging.basicConfig(level=logging.INFO)
SERVICE_ID: Optional[str] = os.environ.get("CENSUS_SERVICE_ID")
DATAFILES_DIRECTORY: str = "../datafiles"
if not SERVICE_ID:
raise ValueError("CENSUS_SERVICE_ID envvar not found")
update_fire_groups_data_files(
directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,
)
update_infantry_weapons_data_files(
directory=DATAFILES_DIRECTORY, service_id=SERVICE_ID,
)
infantry_weapons: List[InfantryWeapon] = list(
generate_all_infantry_weapons(data_files_directory=DATAFILES_DIRECTORY)
)
print(f"Generated {len(infantry_weapons)} infantry weapons")
wp: InfantryWeapon = next(x for x in infantry_weapons if x.item_id == 43)
fm: FireMode = wp.fire_groups[0].fire_modes[1]
cof: ConeOfFire = fm.player_state_cone_of_fire[PlayerState.STANDING]
rttks: List[dict] = []
distance: int = 30
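# sweep burst lengths and post-burst control times, simulating the real time-to-kill for each combination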
burst_length: int
for burst_length in range(0, int(round(fm.max_consecutive_shots / 4)) + 1, 1):
control_time: int
for control_time in range(
0, cof.recover_time(cof.min_cof_angle() + cof.bloom * burst_length * 2) + 10, 10
):
with CodeTimer(
f"{burst_length} length and {control_time}ms control time simulation"
):
ttk: int
timed_out_ratio: float
ttk, timed_out_ratio = fm.real_time_to_kill(
distance=distance,
runs=500,
control_time=control_time,
auto_burst_length=burst_length,
aim_location=DamageLocation.TORSO,
recoil_compensation=True,
)
rttks.append(
{
"distance": distance,
"control_time": control_time + fm.fire_timing.refire_time,
"burst_length": burst_length,
"ttk": ttk if timed_out_ratio < 0.20 else -1,
"timed_out_ratio": timed_out_ratio,
}
)
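# build an interactive heatmap of simulated TTK over burst length and control time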
dataset = altair.Data(values=rttks)
chart = (
altair.Chart(dataset)
.mark_rect()
.encode(
x="burst_length:O",
y=altair.Y(
"control_time:O",
sort=altair.EncodingSortField("control_time", order="descending"),
),
color=altair.Color(
"ttk:Q", scale=altair.Scale(scheme="plasma"), sort="descending"
),
tooltip=["ttk:Q", "timed_out_ratio:Q"],
)
.properties(
title=f"{wp.name} TTK by burst length and control time at {distance}m",
height=900,
width=900,
)
.interactive()
)
chart.save("bursts_ttk_simulation.html")
| 2.265625 | 2 |
trello/cards/views.py | copydataai/clon-trello | 0 | 12792491 |
# DRF
from rest_framework.viewsets import ModelViewSet
from rest_framework import permissions
from rest_framework.permissions import IsAuthenticated
# Serializer
from trello.cards.serializers import CardSerializer
# Model
from trello.cards.models import Card
class CardViewSet(ModelViewSet):
serializer_class = CardSerializer
    queryset = Card.objects.all()
permission_classes = [permissions.IsAuthenticated]
| 1.742188 | 2 |
python/caty/core/command/usage.py | hidaruma/caty | 0 | 12792492 | #coding: utf-8
class CommandUsage(object):
def __init__(self, profile_container):
self.pc = profile_container
def get_type_info(self):
r = []
for p in self.pc.profiles:
opts, args, input, output = self.profile_usage(p)
type_vars = ', '.join(map(lambda x:'<%s>' % x.var_name, self.pc.type_params))
if opts:
r.append('Usage: %s%s OPTION %s' % (self.pc.name, type_vars, args))
r.append('Option:\n%s' % self.indent(opts))
else:
if args == 'null':
r.append('Usage: %s%s' % (self.pc.name, type_vars))
else:
r.append('Usage: %s%s %s' % (self.pc.name, type_vars, args))
r.append('Input:\n%s' % self.indent(input))
r.append('Output:\n%s' % self.indent(output))
r.append('\n')
return u'\n'.join(r)
def get_usage(self):
return self.get_type_info() + 'Description:\n' + self.get_doc()
def get_doc(self):
return self.pc.doc
@property
def title(self):
return self.pc.doc.splitlines()[0].strip()
def indent(self, s):
r = []
for l in s.splitlines():
r.append(' ' + l)
return '\n'.join(r)
def profile_usage(self, prof):
opt = TreeDumper().visit(prof.opts_schema)
arg = ArgDumper().visit(prof.args_schema)
inp = MiniDumper().visit(prof.in_schema)
out = MiniDumper().visit(prof.out_schema)
return opt, arg, inp, out
from caty.core.casm.cursor.dump import TreeDumper
class ArgDumper(TreeDumper):
def _process_option(self, node, buff):
if node.options:
items = [(k, v) for k, v in node.options.items() if k not in ('subName', 'minCount', 'maxCount')]
if 'subName' in node.options:
buff.append(' ' + node.options['subName'])
class MiniDumper(TreeDumper):
def _visit_root(self, node):
return node.name
| 2.328125 | 2 |
mbq/client/tests/test_storage.py | managedbyq/mbq.client | 1 | 12792493 | import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from mbq.client.storage import FileStorage
class FileStorageTestCase(TestCase):
def setUp(self):
self.test_filename = NamedTemporaryFile(delete=False).name
self.storage = FileStorage(self.test_filename)
def tearDown(self):
os.remove(self.test_filename)
def test_storage(self):
# When the file is empty, we should receive None for any key.
self.assertIsNone(self.storage.get('key1'))
# We should be able to write a key/value,
self.storage.set('key1', 'value1')
# retrieve it,
self.assertEqual(self.storage.get('key1'), 'value1')
# and still receive None for missing keys.
self.assertIsNone(self.storage.get('key2'))
# We should be able to write a 2nd key,
self.storage.set('key2', 'value2')
# retrieve it,
self.assertEqual(self.storage.get('key2'), 'value2')
# still retrieve the earlier key we wrote,
self.assertEqual(self.storage.get('key1'), 'value1')
# and still receive None for missing keys.
self.assertIsNone(self.storage.get('key3'))
# We should be able to update an existing key,
self.storage.set('key2', 'some-new-value')
# see the value change when retrieving,
self.assertEqual(self.storage.get('key2'), 'some-new-value')
# the other values should remain unchanged,
self.assertEqual(self.storage.get('key1'), 'value1')
# and we should still receive None for missing keys.
self.assertIsNone(self.storage.get('key3'))
# If we re-init the storage object with the same file,
self.storage = FileStorage(self.test_filename)
# all keys should be persisted.
self.assertEqual(self.storage.get('key2'), 'some-new-value')
self.assertEqual(self.storage.get('key1'), 'value1')
self.assertIsNone(self.storage.get('key3'))
| 2.890625 | 3 |
rpkiclientweb/web.py | job/rpki-client-web | 0 | 12792494 | <gh_stars>0
import asyncio
import dataclasses
import json
import logging
import os
import random
from dataclasses import dataclass
from typing import Dict, List, Optional
from aiohttp import web
from prometheus_async import aio
from rpkiclientweb.rpki_client import ExecutionResult, RpkiClient
from rpkiclientweb.util import repeat
LOG = logging.getLogger(__name__)
OUTPUT_BUFFER_SIZE = 8_388_608
class RpkiClientWeb:
result: Optional[ExecutionResult] = None
conf: Dict
app: web.Application
host: str
port: int
interval: int
jitter: int
def __init__(self, conf: Dict) -> None:
self.app = web.Application()
self.interval = conf.pop("interval")
# default to the interval for jitter value
self.jitter = conf.pop("jitter")
self.host = conf.pop("host", "localhost")
self.port = conf.pop("port", 8080)
self.conf = conf
self.client = RpkiClient(**self.conf)
self.app.add_routes(
[
web.get("/", self.index),
web.get("/config", self.config_response),
web.get("/metrics", aio.web.server_stats),
web.get("/result", self.json_result),
web.get("/objects/validated", self.validated_objects),
web.static(
"/cache",
os.path.abspath(conf["cache_dir"]),
follow_symlinks=False,
show_index=True,
),
]
)
async def index(self, req) -> web.Response:
return web.Response(
text="""<html>
<head><title>rpki-client wrapper</title></head>
<body>
<h1>rpki-client wrapper</h1>
<p><a href="/cache">Cache directory</a></p>
<p><a href="/config">Configuration</a></p>
<p><a href="/metrics">Metrics</a></p>
<p><a href="/objects/validated">Validated objects</a></p>
<p><a href="/result">Result</a></p>
</body>
</html>""",
content_type="text/html",
)
async def config_response(self, req) -> web.Response:
return web.json_response(self.conf)
async def validated_objects(self, req) -> web.FileResponse:
path = os.path.join(os.path.abspath(self.conf["output_dir"]), "json")
return web.FileResponse(path)
async def call_client(self) -> None:
"""Run the rpki-client wrapper again."""
self.result = await self.client.run()
async def json_result(self, req) -> web.Response:
if self.result:
return web.json_response(dataclasses.asdict(self.result))
return web.json_response(None, status=500)
async def run(self):
LOG.info("starting webserver on %s:%d", self.host, self.port)
runner = web.AppRunner(self.app)
await runner.setup()
site = web.TCPSite(runner, self.host, self.port)
asyncio.create_task(site.start(), name="site")
if self.jitter:
jitter_delay = random.uniform(0, self.jitter)
LOG.info(
"delaying by random delay of [0, %d] seconds of %f seconds",
self.jitter,
jitter_delay,
)
await asyncio.sleep(jitter_delay)
return await repeat(self.interval, self.call_client)
| 2.140625 | 2 |
trivago2015/users/migrations/0002_userprofile.py | ephes/trivago2015 | 0 | 12792495 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
('image', models.ImageField(null=True, upload_to='image', default=None)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.84375 | 2 |
converge.py | adamorse/soccer-stats | 1 | 12792496 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 08:38:14 2018
@author: <NAME>
compute how quickly soccer league tables converge to the final distribution
"""
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.stats import entropy
from scipy.optimize import curve_fit
import seaborn as sns
sns.set()
# function to compute Jensen-Shannon divergence
def JSD(p, q):
r = 0.5 * (p + q)
return 0.5 * (entropy(p, r) + entropy(q, r))
# the data files have already been acquired and cleaned
# see get_football-data_data.py
# build a list of filenames
filenames = glob.glob('data/*.csv')
# initialize an array to hold JSD values
# each row will contain the JSD curve data for one season
jsds = np.zeros((len(filenames),500))
# initialize an array to hold final league tables
finals = np.zeros((len(filenames),25))
# initialize a season counter
season = 0
# list of columns needed from the data files
cols = ['Date','HomeTeam','AwayTeam','FTHG','FTAG']
for file in filenames:
# load the season data
df = pd.read_csv(file,index_col='Date',encoding = "ISO-8859-1",usecols=cols).dropna(axis=0,how='any')
# get the unique team names for that season
teams = list(df.HomeTeam.unique())
# set up array for league tables
# each column corresponds to a team
# each row corresponds to the league table after that number of games
tables = np.zeros((df.shape[0]+1,len(teams)))
# initialize game counter
num_games = 1
# loop through the season data game by game
for idx,row in df.iterrows():
# initialize the current league table to be the same as the last
tables[num_games,:] = tables[num_games-1,:]
        # get indices for the teams involved in this game
home_idx = teams.index(row['HomeTeam'])
away_idx = teams.index(row['AwayTeam'])
# compute home goals - away goals
goal_diff = row.FTHG - row.FTAG
# update the league table based on the result
if goal_diff > 0:
tables[num_games,home_idx] += 3
elif goal_diff < 0:
tables[num_games,away_idx] += 3
else:
tables[num_games,home_idx] += 1
tables[num_games,away_idx] += 1
# increment the game counter
num_games += 1
# delete first row of the table
tables = tables[1:,:]
# compute the probability distribution for the final league table
p = tables[-1,:]/np.sum(tables[-1,:])
# store p
for idx,team in enumerate(p):
finals[season,idx] = team
# for each of the running league tables, convert to a distribution
# and then compute the JSD
for i in range(len(tables[:,0])):
#if np.count_nonzero(tables[idx,:]) == len(tables[idx,:]):
q = tables[i,:]/np.sum(tables[i,:])
jsds[season,i] = JSD(p,q)
# increment the season counter
season += 1
# compute the average JSD curve
avg = np.sum(jsds, axis=0) / len(filenames)
# array of x values for the games
xs = np.array([i for i in range(len(avg))])
# define function for curve-fitting
def f(x, a, b, c):
return a * np.exp(-b * x) + c
# perform the curve fit
popt, pcov = curve_fit(f, xs, avg)
# plot the individual JSD curves
for i in range(jsds.shape[0]):
plt.plot(jsds[i,:],alpha=.3,color='gray')
# add title and axis labels
plt.title('Convergence of league tables over time')
plt.xlabel('Number of games played')
plt.ylabel('JSD with final table')
# set axis limits, 461 most games in an individual season
axes = plt.gca()
axes.set_xlim([0,461])
plt.savefig('allseasons.png')
# zoom in on the first 100 games
axes.set_xlim([0,100])
plt.savefig('convbegin.png')
# zoom out again
axes.set_xlim([0,380])
# plot the average curve
plt.plot(xs,avg,'b-',label='average JSD')
# add a legend
plt.legend()
plt.savefig('convwithavg.png')
# plot the best-fit curve
plt.plot(xs, f(xs, *popt), 'r-',
label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
# update the legend
plt.legend()
plt.savefig('conv.png')
plt.show()
plt.clf()
plt.cla()
plt.close()
# compute examples of final probability distributions
# spain 16-17
xd = [i for i in range(18)]
plt.bar(xd,np.sort(finals[5,:18]))
plt.title('La Liga 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('sp1617.png')
plt.clf()
plt.cla()
plt.close()
# italy 16-17
xd = [i for i in range(20)]
plt.bar(xd,np.sort(finals[27,:20]))
plt.title('Serie A 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('it1617.png')
plt.clf()
plt.cla()
plt.close()
# france 16-17
xd = [i for i in range(20)]
plt.bar(xd,np.sort(finals[49,:20]))
plt.title('Ligue 1 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('fr1617.png')
plt.clf()
plt.cla()
plt.close()
# england 16-17
xd = [i for i in range(20)]
plt.bar(xd,np.sort(finals[71,:20]))
plt.title('Premier League 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('en1617.png')
plt.clf()
plt.cla()
plt.close()
# germany 16-17
xd = [i for i in range(18)]
plt.bar(xd,np.sort(finals[93,:18]))
plt.title('Bundesliga 2016-2017')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Point distribution')
plt.savefig('ge1617.png')
plt.clf()
plt.cla()
plt.close()
# generate animation
# code below based on an example by <NAME>:
# email: <EMAIL>
# website: http://jakevdp.github.com
# license: BSD
# set up the figure
fig = plt.figure()
# set up the axes
ax = plt.axes(xlim=(-1, 20), ylim=(0, .12))
line, = ax.plot([], [],'o',linestyle='None')
# add title, legend, etc.
plt.title('\'99-\'00 Premier League points distribution over time')
plt.xticks([],'')
plt.xlabel('Ranked teams')
plt.ylabel('Proportion of total points')
# draw the background
def init():
line.set_data([],[])
plt.bar([i for i in range(20)],np.sort(tables[-1,:]/np.sum(tables[-1,:])),alpha=.3)
return line,
# animation function, each frame draws a distribution after one more game
def animate(i):
xd = [i for i in range(20)]
y = np.sort(tables[i+40,:]/np.sum(tables[i+40,:]))
line.set_data(xd, y)
return line,
# animate
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=340, interval=20, blit=True,repeat_delay=1000)
# save the animation
anim.save('basic_animation.mp4', fps=50, extra_args=['-vcodec', 'libx264'])
plt.show()
| 2.84375 | 3 |
main.py | Abhishek-P/py-hello-world-run-from-colab | 0 | 12792497 | print("Hello World! from Colab") | 1.117188 | 1 |
core/serializers/address.py | decosterkevin/foodtrack-back | 0 | 12792498 | <gh_stars>0
from rest_framework import serializers
from core.models import Exploitation, Address
from drf_extra_fields.geo_fields import PointField
class AddressSerializer(serializers.ModelSerializer):
# lat = PointSerializer(source='point.y', read_only=True)
lat = PointField(source='point.y', read_only=True)
lng = PointField(source='point.x', read_only=True)
class Meta:
model = Address
fields = ("street", "street_cp", "city", "province", "postal_code", "country", "lat", "lng")
class ExploitationSerializer(serializers.ModelSerializer):
class Meta:
model = Exploitation
fields = ("address", "pictures", "creator")
| 2.3125 | 2 |
soluciones/operaciones/operaciones.py | carlosviveros/Soluciones | 4 | 12792499 | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
"""
# from the AyudaEnPython group repository:
# https://github.com/AyudaEnPython/Soluciones/blob/main/soluciones/calculadora/operadores.py
def suma(a: float, b: float) -> float:
"""Suma dos números.
:param a: Primer número.
:a type: float
:param b: Segundo número.
:b type: float
:return: La suma de los dos números.
:rtype: float
"""
return a + b
def resta(a: float, b: float) -> float:
"""Resta dos números.
:param a: Primer número.
:a type: float
:param b: Segundo número.
:b type: float
:return: La resta de los dos números.
:rtype: float
"""
return a - b
def multiplicacion(a: float, b: float) -> float:
"""Multiplica dos números.
:param a: Primer número.
:a type: float
:param b: Segundo número.
:b type: float
:return: La multiplicación de los dos números.
:rtype: float
"""
return a * b
def division(a: float, b: float) -> float:
"""Divide dos números.
:param a: Primer número.
:a type: float
:param b: Segundo número.
:b type: float
:raises ZeroDivisionError: Si el segundo número es cero.
:return: La división de los dos números.
:rtype: float
"""
try:
return a / b
except ZeroDivisionError:
return "No se puede dividir entre cero" | 3.84375 | 4 |
io_scene_vrm/external/cats_blender_plugin_support.py | iCyP/VRM_IMPORTER_for_Blender2.8 | 26 | 12792500 | <reponame>iCyP/VRM_IMPORTER_for_Blender2.8
import traceback
from typing import Dict
import bpy
from ..common.human_bone import HumanBoneName
from .cats_blender_plugin.tools.armature import FixArmature
from .cats_blender_plugin_armature import CatsArmature
__cats_bone_name_to_human_bone_name = {
# Order by priority
# Required bones
"Hips": HumanBoneName.HIPS,
"Spine": HumanBoneName.SPINE,
"Chest": HumanBoneName.CHEST,
"Neck": HumanBoneName.NECK,
"Head": HumanBoneName.HEAD,
"Right arm": HumanBoneName.RIGHT_UPPER_ARM,
"Right elbow": HumanBoneName.RIGHT_LOWER_ARM,
"Right wrist": HumanBoneName.RIGHT_HAND,
"Left arm": HumanBoneName.LEFT_UPPER_ARM,
"Left elbow": HumanBoneName.LEFT_LOWER_ARM,
"Left wrist": HumanBoneName.LEFT_HAND,
"Right leg": HumanBoneName.RIGHT_UPPER_LEG,
"Right knee": HumanBoneName.RIGHT_LOWER_LEG,
"Right ankle": HumanBoneName.RIGHT_FOOT,
"Left leg": HumanBoneName.LEFT_UPPER_LEG,
"Left knee": HumanBoneName.LEFT_LOWER_LEG,
"Left ankle": HumanBoneName.LEFT_FOOT,
# Optional bones
"Eye_R": HumanBoneName.RIGHT_EYE,
"Eye_L": HumanBoneName.LEFT_EYE,
"Right shoulder": HumanBoneName.RIGHT_SHOULDER,
"Left shoulder": HumanBoneName.LEFT_SHOULDER,
"Thumb0_R": HumanBoneName.RIGHT_THUMB_PROXIMAL,
"Thumb1_R": HumanBoneName.RIGHT_THUMB_INTERMEDIATE,
"Thumb2_R": HumanBoneName.RIGHT_THUMB_DISTAL,
"Thumb0_L": HumanBoneName.LEFT_THUMB_PROXIMAL,
"Thumb1_L": HumanBoneName.LEFT_THUMB_INTERMEDIATE,
"Thumb2_L": HumanBoneName.LEFT_THUMB_DISTAL,
"IndexFinger1_R": HumanBoneName.RIGHT_INDEX_PROXIMAL,
"IndexFinger2_R": HumanBoneName.RIGHT_INDEX_INTERMEDIATE,
"IndexFinger3_R": HumanBoneName.RIGHT_INDEX_DISTAL,
"IndexFinger1_L": HumanBoneName.LEFT_INDEX_PROXIMAL,
"IndexFinger2_L": HumanBoneName.LEFT_INDEX_INTERMEDIATE,
"IndexFinger3_L": HumanBoneName.LEFT_INDEX_DISTAL,
"MiddleFinger1_R": HumanBoneName.RIGHT_MIDDLE_PROXIMAL,
"MiddleFinger2_R": HumanBoneName.RIGHT_MIDDLE_INTERMEDIATE,
"MiddleFinger3_R": HumanBoneName.RIGHT_MIDDLE_DISTAL,
"MiddleFinger1_L": HumanBoneName.LEFT_MIDDLE_PROXIMAL,
"MiddleFinger2_L": HumanBoneName.LEFT_MIDDLE_INTERMEDIATE,
"MiddleFinger3_L": HumanBoneName.LEFT_MIDDLE_DISTAL,
"RingFinger1_R": HumanBoneName.RIGHT_RING_PROXIMAL,
"RingFinger2_R": HumanBoneName.RIGHT_RING_INTERMEDIATE,
"RingFinger3_R": HumanBoneName.RIGHT_RING_DISTAL,
"RingFinger1_L": HumanBoneName.LEFT_RING_PROXIMAL,
"RingFinger2_L": HumanBoneName.LEFT_RING_INTERMEDIATE,
"RingFinger3_L": HumanBoneName.LEFT_RING_DISTAL,
"LittleFinger1_R": HumanBoneName.RIGHT_LITTLE_PROXIMAL,
"LittleFinger2_R": HumanBoneName.RIGHT_LITTLE_INTERMEDIATE,
"LittleFinger3_R": HumanBoneName.RIGHT_LITTLE_DISTAL,
"LittleFinger1_L": HumanBoneName.LEFT_LITTLE_PROXIMAL,
"LittleFinger2_L": HumanBoneName.LEFT_LITTLE_INTERMEDIATE,
"LittleFinger3_L": HumanBoneName.LEFT_LITTLE_DISTAL,
"Right toe": HumanBoneName.RIGHT_TOES,
"Left toe": HumanBoneName.LEFT_TOES,
}
def create_human_bone_mapping(armature: bpy.types.Armature) -> Dict[str, HumanBoneName]:
cats_armature = CatsArmature.create(armature)
try:
FixArmature.create_cats_bone_name_mapping(cats_armature)
except Exception as e:
traceback.print_exc()
print(f"Human Bone Name Auto Detection: {e}")
mapping = {}
cats_name_to_original_name = cats_armature.cats_name_to_original_name()
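    # keep only bones Cats recognized, mapping their original names to VRM human bone names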
for cats_name, human_name in __cats_bone_name_to_human_bone_name.items():
original_name = cats_name_to_original_name.get(cats_name)
if not original_name:
continue
mapping[original_name] = human_name
return mapping
| 1.710938 | 2 |
main.py | maajtga/python-pong | 0 | 12792501 | <gh_stars>0
import pygame
from data import paddle
from data import ball
pygame.init()
winsize = [900, 550]
win = pygame.display.set_mode(winsize)
pygame.display.set_caption('Pong')
icon = pygame.image.load('gfx/icon.png')
pygame.display.set_icon(icon)
running = True
Player1 = paddle.Paddle(70, 225)
Player2 = paddle.Paddle(800, 225)
Ball = ball.Ball(270, 400)
ballspeedx = 0.5
ballspeedy = 0.5
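# main loop: draw the paddles and ball, advance the ball, and handle input events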
while running:
win.fill(0)
Player1.render(win)
Player2.render(win)
Ball.render(win)
Ball.x += ballspeedx
Ball.y += ballspeedy
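    # bounce the ball when it moves outside the playfield bounds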
if Ball.x > 486 or Ball.x < 0:
ballspeedx *= -1
if Ball.y > 836 or Ball.y < 0:
ballspeedy *= -1
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
Player1.y -= 20
if event.key == pygame.K_s:
Player1.y += 20
if event.key == pygame.K_UP:
Player2.y -= 20
if event.key == pygame.K_DOWN:
Player2.y += 20
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
Player1.y -= 20
pygame.display.flip() | 2.9375 | 3 |
py/scrap_heroes/test/test_utils.py | BenjaminCbr/cinoisp-storm | 0 | 12792502 | <filename>py/scrap_heroes/test/test_utils.py<gh_stars>0
from __future__ import unicode_literals
from django.test import TestCase
from ..utils import partial_dict_equals
class DictUtilsTest(TestCase):
def test_partial_dict_equals__regular_case(self):
small_dict = {
"a": 1,
"b": [2, 3, 6],
"d": False
}
big_dict = {
"aa": 123,
"fdsg": 2,
"a": 1,
"b": [2, 6, 3],
}
self.assertTrue(partial_dict_equals(small_dict, big_dict))
def test_partial_dict_equals__irregular_case(self):
small_dict = {
"a": 1,
"b": [2, 6],
"d": False
}
big_dict = {
"aa": 123,
"fdsg": 2,
"a": 1,
"b": [2, 6, 3],
}
self.assertFalse(partial_dict_equals(small_dict, big_dict))
| 2.625 | 3 |
tests/test_klass.py | surroundaustralia/ndesgateway-testclient | 1 | 12792503 | <filename>tests/test_klass.py
from rdflib.namespace import RDF, OWL
from client.model import Klass
def test_basic_rdf():
r1 = Klass()
rdf = r1.to_graph()
assert (None, RDF.type, OWL.Class) in rdf
| 2.3125 | 2 |
flake8_pytest_style/visitors/fail.py | kianmeng/flake8-pytest-style | 125 | 12792504 | <reponame>kianmeng/flake8-pytest-style<gh_stars>100-1000
import ast
from flake8_plugin_utils import Visitor
from flake8_pytest_style.config import Config
from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage
from flake8_pytest_style.utils import (
get_simple_call_args,
is_empty_string,
is_fail_call,
is_falsy_constant,
)
class FailVisitor(Visitor[Config]):
def _check_fail_call(self, node: ast.Call) -> None:
"""Checks for PT016."""
args = get_simple_call_args(node)
msg_argument = args.get_argument('msg', 0)
if not msg_argument or is_empty_string(msg_argument):
self.error_from_node(FailWithoutMessage, node)
def visit_Assert(self, node: ast.Assert) -> None:
"""Checks for PT015."""
if is_falsy_constant(node.test):
self.error_from_node(AssertAlwaysFalse, node)
def visit_Call(self, node: ast.Call) -> None:
if is_fail_call(node):
self._check_fail_call(node)
| 2.21875 | 2 |
chat_app/management/commands/load_dummy_data.py | PS-Division-BITS/Chat | 1 | 12792505 | <reponame>PS-Division-BITS/Chat
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from chat_app.models import *
class Command(BaseCommand):
def _create(self):
# creating user and super-user
User.objects.all().delete()
print('Creating admin..')
admin = User.objects.create_superuser(username='admin', password='<PASSWORD>')
print('admin created.')
print('username: admin password: <PASSWORD>')
print('Creating Ghost user..')
g = User.objects.create_user(username='Ghost', password=get_random_string(8),)
print('done.')
print('Creating other users..')
u1 = User.objects.create_user(username='Ritik', password=get_random_string(8))
u2 = User.objects.create_user(username='Chetan', password=get_random_string(8))
u3 = User.objects.create_user(username='Random', password=get_random_string(8))
print('done.')
# create chats
Chat.objects.all().delete()
print('Creating chats..')
c1 = Chat.objects.create(uri='1', name='main', description='Main Chat Room')
c2 = Chat.objects.create(uri='2', name='chat2', description='Chat Room 2')
c3 = Chat.objects.create(uri='3', name='chat3', description='Chat Room 3')
print('done.')
# create msgs
Message.objects.all().delete()
print('Creating messages..')
m1 = Message.objects.create(sender=admin, content='Hello World!')
m2 = Message.objects.create(sender=admin, content='Foo Bar')
m3 = Message.objects.create(sender=admin, content='Wazz Buzz')
print('done.')
# add users to chats
print('winding up..')
c1.participants.add(admin, u1, u2, u3, g)
c2.participants.add(admin, u1, g)
c3.participants.add(admin, u2, u3, g)
# add msgs in chats
c1.messages.add(m1, m2)
c2.messages.add(m2)
c3.messages.add(m3)
print('done.')
def handle(self, *args, **kwargs):
self._create()
| 2.390625 | 2 |
tests/test_update_order.py | AltaPay/python-client-library | 0 | 12792506 | <reponame>AltaPay/python-client-library
from __future__ import absolute_import, unicode_literals
import responses
from altapay import API, UpdateOrder
from .test_cases import TestCase
class UpdateOrderTest(TestCase):
def setUp(self):
self.api = API(mode='test', auto_login=False)
@responses.activate
def test_update_order_invalid_order_line(self):
uo = UpdateOrder(api=self.api)
responses.add(
responses.POST,
self.get_api_url('API/updateOrder'),
body=self.load_xml_response(
'200_update_order_success.xml'),
status=200,
content_type='application/xml')
with self.assertRaisesRegexp(Exception, "order_lines must "
"contain 2 elements"):
uo.update("payment id", [])
@responses.activate
def test_update_order_success(self):
uo = UpdateOrder(api=self.api)
responses.add(
responses.POST,
self.get_api_url('API/updateOrder'),
body=self.load_xml_response(
'200_update_order_success.xml'),
status=200,
content_type='application/xml')
self.assertEqual(uo.update("payment id", [{}, {}]), True)
@responses.activate
def test_update_order_error(self):
uo = UpdateOrder(api=self.api)
responses.add(
responses.POST,
self.get_api_url('API/updateOrder'),
body=self.load_xml_response(
'200_update_order_error.xml'),
status=200,
content_type='application/xml')
self.assertEqual(uo.update("payment id", [{}, {}]), False)
self.assertEqual(uo.error_code, 10000001)
self.assertEqual(uo.error_message, "Number of original order lines "
"and updated ones does not match.")
| 2.296875 | 2 |
src/titanic/scripts/cross_validate_models.py | alvaromendoza/pytanic | 1 | 12792507 | <filename>src/titanic/scripts/cross_validate_models.py
"""Cross-validate machine learning models."""
import os
import time
import pprint
import random as rn
import numpy as np
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
from category_encoders.ordinal import OrdinalEncoder
import titanic.tools as tools
from titanic.modelling import SimpleDataFrameImputer, DataFrameDummifier, CategoricalToString
from titanic.modelling import ExtendedClassifier
from titanic.config import RANDOM_SEED
np.random.seed(RANDOM_SEED)
os.environ['PYTHONHASHSEED'] = '0'
rn.seed(RANDOM_SEED)
def cross_validate_logreg(X_train, y_train, pipes, grids, kfolds):
"""Cross-validate LogisticRegression pipeline."""
pipes['logreg'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'],
mode_cols=['Embarked']),
DataFrameDummifier(),
LogisticRegression(solver='liblinear'))
grids['logreg'] = {'logisticregression__C': [0.01, 0.1, 0.5, 0.8, 1, 1.2, 2, 5, 10]}
grids['logreg'] = {'logisticregression__C': [0.6, 0.75, 0.8, 0.85, 0.9]}
logreg = ExtendedClassifier.cross_validate(pipes['logreg'], X_train, y_train, grids['logreg'],
sklearn_gscv_kws={'cv': 3},
sklearn_cvs_kws={'cv': kfolds},
param_strategy='best',
logdir_path=r'logs/models/logreg',
serialize_to=r'models/logreg.pickle')
return logreg
def cross_validate_forest(X_train, y_train, pipes, grids, kfolds, random_search=False):
"""Cross-validate RandomForestClassifier pipeline."""
pipes['forest'] = make_pipeline(CategoricalToString(),
SimpleDataFrameImputer(median_cols=['Age', 'Fare'],
mode_cols=['Embarked']),
OrdinalEncoder(cols=['Title', 'Deck', 'Embarked'],
handle_unknown='impute'),
RandomForestClassifier(**{'bootstrap': True,
'max_depth': 70,
'max_features': 'auto',
'min_samples_leaf': 4,
'min_samples_split': 10,
'n_estimators': 64,
'random_state': RANDOM_SEED}))
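    # optionally explore hyperparameters with a randomized search before the final cross-validation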
if random_search:
n_estimators = [int(x) for x in np.linspace(start=10, stop=500, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'randomforestclassifier__n_estimators': n_estimators,
'randomforestclassifier__max_features': max_features,
'randomforestclassifier__max_depth': max_depth,
'randomforestclassifier__min_samples_split': min_samples_split,
'randomforestclassifier__min_samples_leaf': min_samples_leaf,
'randomforestclassifier__bootstrap': bootstrap}
pprint.pprint(random_grid)
randsearch = RandomizedSearchCV(pipes['forest'], random_grid, n_iter=50, cv=3,
verbose=0, random_state=42)
start = time.time()
randsearch.fit(X_train, y_train)
finish = time.time()
print('randsearch.fit execution time:', finish - start)
pprint.pprint(randsearch.best_params_)
forest = ExtendedClassifier.cross_validate(pipes['forest'], X_train, y_train,
sklearn_cvs_kws={'cv': kfolds},
param_strategy='init',
logdir_path=r'logs/models/forest',
serialize_to=r'models/forest.pickle')
return forest
def cross_validate_svc(X_train, y_train, pipes, grids, kfolds):
"""Cross-validate SVC pipeline."""
pipes['svc'] = make_pipeline(SimpleDataFrameImputer(median_cols=['Age', 'Fare'],
mode_cols=['Embarked']),
DataFrameDummifier(),
SVC(kernel='linear', C=0.1, probability=False))
C = [0.001, 0.01, 0.1, 1, 10]
gamma = [0.001, 0.01, 0.1, 1]
grids['svc'] = {'svc__C': C, 'svc__gamma': gamma}
svc = ExtendedClassifier.cross_validate(pipes['svc'], X_train, y_train,
sklearn_cvs_kws={'cv': kfolds},
param_strategy='init',
logdir_path=r'logs/models/svc',
serialize_to=r'models/svc.pickle')
return svc
def cross_validate_voting(X_train, y_train, pipes, grids, kfolds):
"""Cross-validate VotingClassifier."""
estimators = [('logreg', pipes['logreg']), ('forest', pipes['forest']), ('svc', pipes['svc'])]
voting = ExtendedClassifier.cross_validate(VotingClassifier(estimators, voting='hard'),
X_train, y_train,
sklearn_cvs_kws={'cv': kfolds},
param_strategy='init',
logdir_path=r'logs/models/voting',
serialize_to=r'models/voting.pickle')
return voting
def main():
X_train = tools.deserialize(r'data/processed/X_train.pickle')
y_train = tools.deserialize(r'data/processed/y_train.pickle')
pipes = dict()
grids = dict()
kfolds = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED)
cross_validate_logreg(X_train, y_train, pipes, grids, kfolds)
cross_validate_forest(X_train, y_train, pipes, grids, kfolds)
cross_validate_svc(X_train, y_train, pipes, grids, kfolds)
cross_validate_voting(X_train, y_train, pipes, grids, kfolds)
if __name__ == '__main__':
os.chdir(r'../../../')
main()
| 2.375 | 2 |
server/contests/auth/test_core.py | jauhararifin/ugrade | 15 | 12792508 | <gh_stars>10-100
import pytest
import jwt
import bcrypt
from mixer.backend.django import mixer
from django.test import TestCase
from django.core.exceptions import ValidationError
from ugrade import settings
from contests.models import User
from contests.exceptions import NoSuchUserError, \
NoSuchContestError, \
AuthenticationError, \
UserHaventSignedUpError, \
UserAlreadySignedUpError, \
UsernameAlreadyUsedError
from .core import get_all_permissions, \
get_all_users, \
get_user_by_id, \
get_user_by_username, \
get_user_by_email, \
get_contest_users, \
sign_in, \
sign_up, \
forgot_password, \
reset_password, \
get_user_from_token
@pytest.mark.django_db
def test_get_all_permissions():
mixer.cycle(5).blend('contests.Permission')
assert len(get_all_permissions()) == 5
@pytest.mark.django_db
def test_get_all_users():
mixer.cycle(5).blend('contests.User')
users = get_all_users()
assert len(users) == 5
@pytest.mark.django_db
def test_get_user_by_id():
perm1 = mixer.blend('contests.Permission', code='perm1')
perm2 = mixer.blend('contests.Permission', code='perm2')
mixer.cycle(5).blend('contests.User', name='<NAME>',
permissions=[perm1, perm2])
user1 = get_user_by_id(1)
assert user1.name == '<NAME>'
assert user1.has_permission('perm1') and user1.has_permission('perm2')
assert user1.permission_codes == ['perm1', 'perm2']
with pytest.raises(NoSuchUserError):
get_user_by_id(6)
@pytest.mark.django_db
def test_get_user_by_username():
contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1')
mixer.blend('contests.Contest', id=2, name='Contest 2')
mixer.blend('contests.User', name='Test 1',
username='username1', contest=contest1)
mixer.blend('contests.User', name='Test 2',
username='username2', contest=contest1)
assert get_user_by_username(1, 'username1').name == 'Test 1'
assert get_user_by_username(1, 'username2').name == 'Test 2'
with pytest.raises(NoSuchUserError):
get_user_by_username(1, 'nonexistent')
with pytest.raises(NoSuchUserError):
get_user_by_username(2, 'username2')
with pytest.raises(NoSuchContestError):
get_user_by_username(3, 'username1')
@pytest.mark.django_db
def test_get_user_by_email():
contest1 = mixer.blend('contests.Contest', id=1, name='Contest 1')
mixer.blend('contests.Contest', id=2, name='Contest 2')
mixer.blend('contests.User', name='Test 1',
email='email1', contest=contest1)
mixer.blend('contests.User', name='Test 2',
email='email2', contest=contest1)
assert get_user_by_email(1, 'email1').name == 'Test 1'
assert get_user_by_email(1, 'email2').name == 'Test 2'
with pytest.raises(NoSuchUserError):
get_user_by_email(1, 'nonexistent')
with pytest.raises(NoSuchUserError):
get_user_by_email(2, 'email2')
with pytest.raises(NoSuchContestError):
get_user_by_email(3, 'email1')
@pytest.mark.django_db
def test_get_contest_users():
contest1 = mixer.blend('contests.Contest', id=1)
mixer.cycle(5).blend('contests.User', name='Name', contest=contest1)
result = get_contest_users(1)
assert len(result) == 5
for user in result:
assert user.name == 'Name'
with pytest.raises(NoSuchContestError):
get_contest_users(2)
@pytest.mark.django_db
class SignInTest(TestCase):
@classmethod
def setUpTestData(cls):
mixer.cycle(5).blend('contests.User',
name=("User %d" % n for n in range(1, 6)),
email=("<EMAIL>" %
n for n in range(1, 6)),
username=("user%d" % n for n in range(1, 6)),
password=bcrypt.hashpw(b't<PASSWORD>', bcrypt.gensalt()).decode('utf-8'))
mixer.blend('contests.User', name='User 6', email='<EMAIL>')
def test_wrong_user_id(self):
with pytest.raises(AuthenticationError):
sign_in(7, 'pass')
def test_havent_signed_up(self):
with pytest.raises(UserHaventSignedUpError):
sign_in(6, 'pass')
def test_wrong_password(self):
with pytest.raises(AuthenticationError):
sign_in(1, '<PASSWORD>')
def test_success(self):
user, token = sign_in(2, 'testtest')
assert user.id == 2
assert token is not None and token != ''
token_data = jwt.decode(token, verify=False)
assert token_data['id'] == 2
@pytest.mark.django_db
class SignUpTest(TestCase):
@classmethod
def setUpTestData(cls):
contest1 = mixer.blend('contests.Contest', id=1)
contest2 = mixer.blend('contests.Contest', id=2)
mixer.blend('contests.User',
name='Some Name',
email='<EMAIL>',
username='username',
password=bcrypt.hashpw(
b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'),
contest=contest1)
mixer.blend('contests.User',
email='<EMAIL>',
contest=contest2,
signup_otc='12345678')
mixer.blend('contests.User',
name='<NAME>',
email='<EMAIL>',
username='jauhararifin',
password=bcrypt.hashpw(
b'userpass', bcrypt.gensalt()).decode('utf-8'),
contest=contest2)
def test_wrong_email(self):
with pytest.raises(NoSuchUserError):
sign_up(4, 'username', 'name', 'somepass', '00000000')
with pytest.raises(NoSuchUserError):
sign_up(4, 'username', 'name', 'somepass', '<PASSWORD>')
def test_already_signed_up(self):
with pytest.raises(UserAlreadySignedUpError):
sign_up(1, 'username',
'name', 'somepass', '00000000')
def test_wrong_otc(self):
with pytest.raises(AuthenticationError):
sign_up(2, 'username',
'name', 'somepass', '00000000')
def test_already_used_username(self):
with pytest.raises(UsernameAlreadyUsedError):
sign_up(2, 'jauhararifin',
'name', 'somepass', '12345678')
def test_invalid_input(self):
with pytest.raises(ValidationError) as error:
sign_up(2, 'u',
'name', 'password', '<PASSWORD>')
assert error.value.message_dict['username'] is not None
def test_success(self):
user, token = sign_up(2,
'username', 'My Name', '<PASSWORD>', '<PASSWORD>')
assert user.id == 2
assert token is not None and token != ''
token_data = jwt.decode(token, verify=False)
assert token_data['id'] == 2
user = User.objects.get(pk=2)
assert user.signup_otc is None
@pytest.mark.django_db
class ForgotPasswordTest(TestCase):
@classmethod
def setUpTestData(cls):
mixer.blend('contests.User',
name='Some Name 1',
email='<EMAIL>',
username='username1',
password=bcrypt.hashpw(b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'))
mixer.blend('contests.User',
email='<EMAIL>',
signup_otc='12345678')
def test_wrong_email(self):
with pytest.raises(NoSuchUserError):
forgot_password(3)
def test_havent_signed_up(self):
with pytest.raises(UserHaventSignedUpError):
forgot_password(2)
def test_success_and_create_new_otc(self):
forgot_password(1)
user = User.objects.get(pk=1)
assert user.reset_password_otc is not None
def test_success_and_use_old_otc(self):
user = User.objects.get(pk=1)
user.reset_password_otc = '<PASSWORD>'
user.save()
forgot_password(1)
user = User.objects.get(pk=1)
assert user.reset_password_otc == '00000000'
@pytest.mark.django_db
class ResetPasswordTest(TestCase):
@classmethod
def setUpTestData(cls):
contest1 = mixer.blend('contests.Contest', id=1)
mixer.blend('contests.User',
name='Some Name 1',
email='<EMAIL>',
username='username1',
password=bcrypt.hashpw(
b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'),
contest=contest1)
mixer.blend('contests.User',
email='<EMAIL>',
contest=contest1,
signup_otc='12345678')
def test_wrong_user_id(self):
with pytest.raises(NoSuchUserError):
reset_password(3, '<PASSWORD>', '<PASSWORD>')
def test_havent_signed_up(self):
with pytest.raises(UserHaventSignedUpError):
reset_password(2, '<PASSWORD>', '<PASSWORD>')
def test_wrong_code(self):
user = User.objects.get(pk=1)
user.reset_password_otc = '<PASSWORD>'
user.save()
with pytest.raises(AuthenticationError):
reset_password(1, '<PASSWORD>', '<PASSWORD>')
def test_success(self):
user = User.objects.get(pk=1)
user.reset_password_otc = '<PASSWORD>'
user.save()
reset_password(1, '<PASSWORD>', '<PASSWORD>')
user = User.objects.get(pk=1)
assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8'))
def test_with_forgot_password(self):
forgot_password(1)
user = User.objects.get(pk=1)
assert user.reset_password_otc is not None
reset_password(1, user.reset_password_otc, '<PASSWORD>')
user = User.objects.get(pk=1)
assert bcrypt.checkpw(b'<PASSWORD>', bytes(user.password, 'utf-8'))
@pytest.mark.django_db
class GetUserFromTokenTest(TestCase):
@classmethod
def setUpTestData(cls):
contest1 = mixer.blend('contests.Contest', id=1)
mixer.blend('contests.User',
name='Some Name 1',
email='<EMAIL>',
username='username1',
password=bcrypt.hashpw(
b'<PASSWORD>', bcrypt.gensalt()).decode('utf-8'),
contest=contest1)
mixer.blend('contests.User',
email='<EMAIL>',
contest=contest1,
signup_otc='12345678')
def test_authentication_error(self):
with pytest.raises(AuthenticationError):
get_user_from_token('some.invalid.token')
with pytest.raises(AuthenticationError):
get_user_from_token('')
# check wrong key
with pytest.raises(AuthenticationError):
get_user_from_token(
'<KEY>')
# check none algorithm
with pytest.raises(AuthenticationError):
get_user_from_token(
'<KEY>
with pytest.raises(AuthenticationError):
get_user_from_token(
'<KEY>')
# invalid payload
with pytest.raises(AuthenticationError):
get_user_from_token(
jwt.encode({'id': 'hehe'}, settings.SECRET_KEY,
algorithm='HS256')
)
# user not exists
with pytest.raises(AuthenticationError):
get_user_from_token(
jwt.encode({'id': 10}, settings.SECRET_KEY,
algorithm='HS256')
)
def test_success(self):
user = get_user_from_token(
jwt.encode({'id': 1}, settings.SECRET_KEY,
algorithm='HS256')
)
assert user.id == 1
| 2 | 2 |
RecoHI/HiEgammaAlgos/python/HiIsolationCommonParameters_cff.py | ckamtsikis/cmssw | 852 | 12792509 | import FWCore.ParameterSet.Config as cms
isolationInputParameters = cms.PSet(
barrelBasicCluster = cms.InputTag("islandBasicClusters","islandBarrelBasicClusters"),
endcapBasicCluster = cms.InputTag("islandBasicClusters","islandEndcapBasicClusters"),
horeco = cms.InputTag("horeco"),
hfreco = cms.InputTag("hfreco"),
hbhereco = cms.InputTag("hbhereco"),
track = cms.InputTag("hiGeneralTracks"),
photons = cms.InputTag("cleanPhotons")
)
| 1.132813 | 1 |
test/tcp/client.py | phpyii/workerman-test | 0 | 12792510 | <reponame>phpyii/workerman-test
#! /usr/bin/env python
#coding=utf-8
import socket
# Create a socket object
# First argument selects IPv4, second selects the TCP (stream) protocol
serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
HOST='127.0.0.1'
PORT=17000 # port of the TCP server to connect to
BUFSIZ=1024
ADDR=(HOST, PORT)
serverSocket.connect(ADDR)
while True:
    # Prompt the user for the data to send
send_data = input("请输入要发送的数据:")
serverSocket.send(send_data.encode("utf-8"))
    # Receive the peer's reply, up to 1024 bytes
recvData = serverSocket.recv(BUFSIZ)
print('接收到的数据为:', recvData.decode('utf-8')) | 2.703125 | 3 |
src/eventsHandler/on_message/on_message.py | gastbob40/discord-request-support-bot | 2 | 12792511 | import discord
from src.eventsHandler.on_message.commands.activate import disable, enable
from src.eventsHandler.on_message.commands.cancel import cancel_request
from src.eventsHandler.on_message.commands.end_request import end_request
from src.eventsHandler.on_message.commands.place import get_place
from src.eventsHandler.on_message.commands.request import make_request
class OnMessage:
@staticmethod
async def run(client: discord.Client, message: discord.Message):
if message.author.bot:
return
if message.content and message.content[0] != '!':
return
command = message.content.split()[0][1:]
args = message.content.split()[1:]
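        # dispatch to the handler that matches the command keyword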
if command == 'request':
await make_request(client, message, args)
elif command == 'cancel':
await cancel_request(client, message, args)
elif command == 'place':
await get_place(client, message, args)
elif command == 'close':
await end_request(client, message)
elif command == 'enable':
await enable(client, message, args)
elif command == 'disable':
await disable(client, message, args)
| 2.4375 | 2 |
app/utils/mail.py | YogeshUpdhyay/Stocklytic.io | 1 | 12792512 | <gh_stars>1-10
from .. import mail
from flask_mail import Message
from flask import url_for
def send_reset_email(user):
token = user.generate_reset_token()
msg = Message('Password Reset Request',
sender="<EMAIL>",
recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('user.reset_password', token=token, _external=True)}'''
mail.send(msg) | 2.4375 | 2 |
0x10-python-network_0/6-peak.py | BennettDixon/holbertonschool-higher_level_programming | 1 | 12792513 | <filename>0x10-python-network_0/6-peak.py
#!/usr/bin/python3
"""script for finding peak in list of ints, interview prep
"""
"""
THOUGHT PROCESS
it is not sorted, so sorting would take n(log(n))
-> not worth sorting
looping through and keeping track of max (brute force)
-> O(n)
possibly looping from each end reducing to 1/2 run time
-> still O(n)
"""
def find_peak(list_of_integers):
"""BRUTE force implementation for question
"""
max_i = None
for ele in list_of_integers:
if max_i is None or max_i < ele:
max_i = ele
return max_i
| 3.203125 | 3 |
imix/models/vqa_models/visdial_principles.py | linxi1158/iMIX | 23 | 12792514 | import torch.nn as nn
from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head
@VQA_MODELS.register_module()
class VISDIALPRINCIPLES(nn.Module):
def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head):
super().__init__()
self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0)
self.encoder_model = build_encoder(encoder)
self.backbone = build_backbone(backbone)
        self.head = build_head(head)  # includes the classification head and the generation head
def forward(self, data):
img = data['img_feat']
ques = data['ques']
his = data['hist']
batch_size, rnd, max_his_length = his.size()
cap = his[:, 0, :]
ques_len = data['ques_len']
hist_len = data['hist_len']
cap_len = hist_len[:, 0]
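        # embed question, caption and flattened history tokens before encoding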
ques_embed = self.embedding_model(ques)
cap_emb = self.embedding_model(cap.contiguous())
his = his.contiguous().view(-1, max_his_length)
his_embed = self.embedding_model(his)
q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len)
ques_location = ques_len.view(-1).cpu().numpy() - 1
ques_encoded = q_output[range(batch_size), ques_location, :]
cap_location = cap_len.view(-1).cpu().numpy() - 1
cap_encoded = c_output[range(batch_size), cap_location, :]
his_feat = his_feat.view(batch_size, rnd, -1)
fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len,
ques_embed, cap_emb, img, batch_size)
scores = self.head(fuse_feat, data)
return scores
| 2.09375 | 2 |
run_ppxf.py | cebarbosa/muse-maps | 0 | 12792515 | # -*- coding: utf-8 -*-
"""
Forked in Hydra IMF from Hydra/MUSE on Feb 19, 2018
@author: <NAME>
Run pPXF in data
"""
import os
import yaml
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import constants
from astropy.table import Table, vstack, hstack
from ppxf.ppxf import ppxf
from ppxf import ppxf_util
from spectres import spectres
import context
import misc
from der_snr import DER_SNR
def run_ppxf(specs, templates_file, outdir, velscale=None, redo=False, V0=None):
""" Running pPXF. """
velscale = context.velscale if velscale is None else velscale
V0 = context.V if V0 is None else V0
# Reading templates
ssp_templates = fits.getdata(templates_file, extname="SSPS").T
params = Table.read(templates_file, hdu=1)
nssps = ssp_templates.shape[1]
logwave_temp = Table.read(templates_file, hdu=2)["loglam"].data
wave_temp = np.exp(logwave_temp)
# Use first spectrum to set emission lines
start0 = [V0, 100., 0., 0.]
bounds0 = [[V0 - 2000., V0 + 2000], [velscale/10, 800.]]
for spec in specs:
print("Processing spectrum {}".format(spec))
name = spec.replace(".fits", "")
outyaml = os.path.join(outdir, "{}.yaml".format(name))
if os.path.exists(outyaml) and not redo:
continue
table = Table.read(spec)
wave_lin = table["wave"]
flux = table["flux"]
fluxerr = table["fluxerr"]
# Removing red part of the spectrum
idx = np.where(wave_lin < 7000)[0]
wave_lin = wave_lin[idx]
flux = flux[idx]
fluxerr = fluxerr[idx]
der_sn = misc.snr(flux)[2]
data_sn = np.nanmedian(flux / fluxerr)
###################################################################
# Rebinning the data to a logarithmic scale for ppxf
wave_range = [wave_lin[0], wave_lin[-1]]
logwave = ppxf_util.log_rebin(wave_range, flux, velscale=velscale)[1]
wave = np.exp(logwave)
wave = wave[(wave > wave_lin[0]) & (wave < wave_lin[-1])][1:-1]
flux, fluxerr = spectres(wave, wave_lin, flux, spec_errs=fluxerr)
####################################################################
# Setting up the gas templates
gas_templates, line_names, line_wave = \
ppxf_util.emission_lines(logwave_temp,
[wave_lin[0], wave_lin[-1]], 2.95)
ngas = gas_templates.shape[1]
####################################################################
# Masking bad pixels
skylines = np.array([4785, 5577, 5889, 6300, 6360, 6863])
goodpixels = np.arange(len(wave))
for line in skylines:
sky = np.argwhere((wave < line - 10) | (wave > line + 10)).ravel()
goodpixels = np.intersect1d(goodpixels, sky)
# Making goodpixels mask
goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(flux))[0])
goodpixels = np.intersect1d(goodpixels, np.where(np.isfinite(
fluxerr))[0])
# Cleaning input spectrum
fluxerr[~np.isfinite(fluxerr)] = np.nanmax(fluxerr)
flux[~np.isfinite(flux)] = 0.
########################################################################
# Preparing the fit
dv = (logwave_temp[0] - logwave[0]) * \
constants.c.to("km/s").value
templates = np.column_stack((ssp_templates, gas_templates))
components = np.hstack((np.zeros(nssps), np.arange(ngas)+1)).astype(
np.int)
gas_component = components > 0
start = [start0[:2]] * (ngas + 1)
bounds = [bounds0] * (ngas + 1)
moments = [2] * (ngas + 1)
########################################################################
# Fitting with two components
pp = ppxf(templates, flux, fluxerr, velscale=velscale,
plot=True, moments=moments, start=start, vsyst=dv,
lam=wave, component=components, mdegree=-1,
gas_component=gas_component, gas_names=line_names,
quiet=False, degree=15, bounds=bounds, goodpixels=goodpixels)
plt.savefig(os.path.join(outdir, "{}.png".format(name)), dpi=250)
plt.close()
pp.name = name
# Saving results and plot
save(pp, outdir)
def save(pp, outdir):
""" Save results from pPXF into files excluding fitting arrays. """
array_keys = ["lam", "galaxy", "noise", "bestfit", "gas_bestfit",
"mpoly", "apoly"]
array_keys = [_ for _ in array_keys if isinstance(getattr(pp, _),
np.ndarray)]
table = Table([getattr(pp, key) for key in array_keys], names=array_keys)
table.write(os.path.join(outdir, "{}_bestfit.fits".format(pp.name)),
overwrite=True)
ppdict = {}
save_keys = ["name", "regul", "degree", "mdegree", "reddening", "clean",
"ncomp", "chi2"]
# Chi2 is a astropy.unit.quantity object, we have to make it a scalar
pp.chi2 = float(pp.chi2)
for key in save_keys:
ppdict[key] = getattr(pp, key)
klist = ["V", "sigma"]
for j, sol in enumerate(pp.sol):
for i in range(len(sol)):
ppdict["{}_{}".format(klist[i], j)] = float(sol[i])
ppdict["{}err_{}".format(klist[i], j)] = float(pp.error[j][i])
with open(os.path.join(outdir, "{}.yaml".format(pp.name)), "w") as f:
yaml.dump(ppdict, f, default_flow_style=False)
# Saving table with emission lines
gas = pp.gas_component
emtable = []
for j, comp in enumerate(pp.component[gas]):
t = Table()
t["name"] = [ pp.gas_names[j]]
t["flux"] = [pp.gas_flux[j]]
t["fluxerr"] = [pp.gas_flux_error[j]]
t["V"] = [pp.sol[comp][0]]
t["Verr"] = [pp.error[comp][0]]
t["sigma"] = [pp.sol[comp][1]]
t["sigmaerr"] = [pp.error[comp][1]]
emtable.append(t)
emtable = vstack(emtable)
emtable.write(os.path.join(outdir, "{}_emission_lines.fits".format(
pp.name)), overwrite=True)
def make_table(direc, output):
""" Read all yaml files in a ppf directory to one make table for all
bins. """
filenames = sorted([_ for _ in os.listdir(direc) if _.endswith(".yaml")])
keys = ["name", "V_0", "Verr_0", "sigma_0", "sigmaerr_0", "der_sn"]
names = {"name": "spec", "V_0": "V", "Verr_0": "Verr",
"sigma_0": "sigma", "sigmaerr_0": "sigmaerr", "der_sn": "SNR"}
outtable = []
for fname in filenames:
with open(os.path.join(direc, fname)) as f:
            props = yaml.safe_load(f)
data = Table([[props[k]] for k in keys], names=[names[k] for k in keys])
outtable.append(data)
outtable = vstack(outtable)
outtable.write(output, format="fits", overwrite=True)
if __name__ == '__main__':
targetSN = 100
sample = "kinematics"
velscale = context.velscale
tempfile = os.path.join(context.data_dir, "templates",
"emiles_vel{}_{}_fwhm2.95.fits".format(int(velscale), sample))
wdir = os.path.join(context.data_dir, "MUSE/sn{}/sci".format(targetSN))
os.chdir(wdir)
outdir = os.path.join(os.path.split(wdir)[0], "ppxf")
if not os.path.exists(outdir):
os.mkdir(outdir)
specs = sorted([_ for _ in os.listdir(".") if _.endswith(".fits")])
run_ppxf(specs, tempfile, outdir, redo=False) | 2.140625 | 2 |
run-filter.py | NaN-xyz/Glyph-Filters | 69 | 12792516 | import glyphsLib
import importlib
import argparse
import sys
from glob import glob
parser = argparse.ArgumentParser(description='Filter a font file')
parser.add_argument('input', metavar='GLYPHS',
help='the Glyphs file')
parser.add_argument('filter',metavar='FILTER',
help='the filter to use')
args = parser.parse_args()
base_path = "NaNGlyphFilters"
sys.path.append(base_path)
glyphsLib.Glyphs.font = glyphsLib.GSFont(args.input)
filter_script = args.filter
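# make glyphsLib stand in for the GlyphsApp module so filter scripts written for Glyphs import it transparently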
sys.modules['GlyphsApp'] = glyphsLib
try:
i = importlib.import_module(filter_script)
except ModuleNotFoundError as e:
modules = [x[len(base_path)+1:-3] for x in sorted(glob(base_path+"/*.py")) if "/NaN" not in x]
print("Couldn't find filter '%s'.\nTry one of: %s" % (filter_script, ", ".join(modules)))
sys.exit(1)
save_file = args.input.replace(".glyphs", "-"+filter_script+".glyphs")
glyphsLib.Glyphs.font.save(save_file)
print("Saved on %s" % save_file)
| 2.59375 | 3 |
twiiterbot.py | mahabharathi/twitterbot | 0 | 12792517 | import tweepy
import time
import sys
auth = tweepy.OAuthHandler('FzQNofWMcCfK1ghaqpwM3sCJu', '<KEY>')
auth.set_access_token('<KEY>', '<KEY>')
api = tweepy.API(auth)
'''user=api.me()
print(user.name,user.screen_name,user.followers_count)
public_tweets = api.home_timeline()
for tweet in public_tweets:
print(tweet.text)
'''
def limit_handle(cursor):
try:
while True:
yield cursor.next()
except tweepy.RateLimitError:
print("Limit Handle Exceeded. Sleeping for 7 minutes.")
time.sleep(10)
except StopIteration:
return
#Generous bot
for follower in limit_handle(tweepy.Cursor(api.followers).items()):
print(follower.name,follower.followers_count)
# search for tweets matching a keyword and like them
numberOfTweets=2
search_str='indiaforsale'
for tweet in tweepy.Cursor(api.search,search_str).items(numberOfTweets):
try:
tweet.favorite()
print('I liked the tweet')
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break | 3.03125 | 3 |
Python Script Tools/42.0 Weather Status of a Geographical Location.py | juan1305/0.11-incremento_descremento | 1 | 12792518 | <reponame>juan1305/0.11-incremento_descremento
import openweather
from datetime import datetime
ow = openweather.openweather()
# Get the nearby weather stations
stations = ow.find_stations_near(
    7.0, # longitude
    50.0, # latitude
    100 # radius in km
)
# Current weather using the station ID
print(ow.get_weather(4885))
# dates
start_date = datetime(2013,9,10)
end_date = datetime(2013, 9, 15)
# Historic weather at daily intervals
print(ow.get_historic_weather(4885, start_date, end_date))
print(ow.get_historic_weather(4885, start_date, end_date, "day")) | 3.046875 | 3 |
1-code/editorialAssessment.py | gcabanac/editorial-assessment | 0 | 12792519 | <filename>1-code/editorialAssessment.py
# Harvest Crossref for editorial assessment dates for ISSN 1866-7538: Arabian Journal of Geosciences (https://www.springer.com/journal/12517)
# See also https://retractionwatch.com/2021/08/26/guest-editor-says-journal-will-retract-dozens-of-inappropriate-papers-after-his-email-was-hacked/
# See also https://retractionwatch.com/2021/09/28/springer-nature-slaps-more-than-400-papers-with-expressions-of-concern-all-at-once/
#
# Caveat: data come from Crossref and publishers might not have pushed comprehensive metadata (especially in the early years).
#
# @since 06-SEP-2021
# @version 29-SEP-2021
# Crossref (https://github.com/fabiobatalha/crossrefapi)
import crossref.restful as cr
import jmespath
# Please use the polite method to query the Crossref API (https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service)
# crossref = cr.Journals(etiquette=cr.Etiquette('editorial-assessment', '1.0', 'your URL', 'your email'))
# Public Crossref API with no etiquette
crossref = cr.Journals()
# https://github.com/CrossRef/rest-api-doc#multiple-filters
for pub in crossref.works('1866-7538').filter(from_pub_date='2018-01-01').select('author,assertion,DOI,title,volume').sort('published-print').order('desc'):
doi = pub.get('DOI')
title = pub.get('title')[0]
volume = pub.get('volume')
dateReceived = jmespath.search("assertion[?name == 'received'].value|[0]", pub)
dateAccepted = jmespath.search("assertion[?name == 'accepted'].value|[0]", pub)
dateOnline = jmespath.search("assertion[?name == 'first_online'].value|[0]", pub)
authors = '; '.join([a.get('family') +', '+a.get('given', 'NO-GIVEN-NAME') for a in pub.get('author')]) if pub.get('author') else 'NO-AUTHORS'
print(doi, volume, dateReceived, dateAccepted, dateOnline, title, authors, sep='\t') | 1.929688 | 2 |
get_cmap.py | mathDR/BP-AR-HMM | 11 | 12792520 | <gh_stars>10-100
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
def my_color_map(N):
from numpy import mod
colormap = ['r','g','b','k','c','m','y']
return colormap[mod(N,7)]
def get_cmap(N):
'''Returns a function that maps each index in 0, 1, ... N-1 to a distinct
RGB color.'''
color_norm = colors.Normalize(vmin=0, vmax=N-1)
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')
def map_index_to_rgb_color(index):
return scalar_map.to_rgba(index)
return map_index_to_rgb_color
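# Quick usage note (illustrative, not part of the original script): cmap = get_cmap(30); cmap(5)
# returns an RGBA tuple for index 5, evenly spaced along the 'hsv' colormap.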
def main():
N = 30
fig=plt.figure()
ax=fig.add_subplot(111)
plt.axis('scaled')
ax.set_xlim([ 0, N])
ax.set_ylim([-0.5, 0.5])
cmap = get_cmap(N)
for i in range(N):
col = cmap(i)
rect = plt.Rectangle((i, -0.5), 1, 1, facecolor=col)
ax.add_artist(rect)
ax.set_yticks([])
plt.show()
if __name__=='__main__':
main()
| 3.046875 | 3 |
test/routes/test_collections.py | tiredpixel/pikka-bird-server-py | 1 | 12792521 | import datetime
from flask import json
import msgpack
import pikka_bird_server
from pikka_bird_server.models.collection import Collection
from pikka_bird_server.models.machine import Machine
from pikka_bird_server.models.report import Report
from pikka_bird_server.models.service import Service
class TestCollections:
def assert_create_success(self, res, data):
assert res.status_code == 201
assert data == {}
assert Machine.query.count() == 1
machine = Machine.query.first()
assert isinstance(machine.created_at, datetime.datetime)
assert isinstance(machine.updated_at, datetime.datetime)
assert machine.address == '127.0.0.1'
assert machine.hostname == 'localhost'
assert Service.query.count() == 1
service = Service.query.first()
assert isinstance(service.created_at, datetime.datetime)
assert service.code == 'system'
assert Collection.query.count() == 1
collection = Collection.query.first()
assert isinstance(collection.created_at, datetime.datetime)
assert collection.collected_at == datetime.datetime(2015, 4, 4, 19, 32, 20, 616977)
assert collection.collecting_at == datetime.datetime(2015, 4, 4, 19, 33, 1, 424242)
assert collection.hostname == 'localhost'
assert collection.machine == machine
assert collection.pid == 42
assert collection.version_server == pikka_bird_server.__version__
assert collection.version_collector == '1.2.3'
assert Report.query.count() == 1
report = Report.query.first()
assert report.collection == collection
assert report.data == {'load': {'avg_15_min': 1.62939453125}}
assert report.service == service
def test_create_json(self, client, collection_valid):
res = client.post('/collections',
data=json.dumps(collection_valid),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
self.assert_create_success(res, data)
def test_create_binary(self, client, collection_valid):
res = client.post('/collections',
data=msgpack.packb(collection_valid),
headers={
'Content-Type': 'application/octet-stream'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
self.assert_create_success(res, data)
def test_create_no_content_type(self, client, collection_valid):
res = client.post('/collections',
data=json.dumps(collection_valid))
data = json.loads(res.data)
assert res.status_code == 415
assert data == {
'message': '415: Unsupported Media Type'}
assert Machine.query.count() == 0
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
def test_create_collection_empty(self, client):
res = client.post('/collections',
data=json.dumps({}),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
assert res.status_code == 422
assert data == {
'message': '422: Unprocessable Entity'}
assert Machine.query.count() == 1
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
def test_create_collection_partial(self, client, collection_valid):
collection_invalid = collection_valid.copy()
del collection_invalid['environment']['hostname']
res = client.post('/collections',
data=json.dumps(collection_invalid),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
assert res.status_code == 422
assert data == {
'message': '422: Unprocessable Entity'}
assert Machine.query.count() == 1
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
def test_create_collection_invalid_url(self, client, collection_valid):
res = client.post('/this-is-not-the-service-you-are-looking-for',
data=json.dumps(collection_valid),
headers={
'Content-Type': 'application/json'},
environ_base={
'REMOTE_ADDR': '127.0.0.1'})
data = json.loads(res.data)
assert res.status_code == 404
assert data == {
'message': '404: Not Found'}
assert Machine.query.count() == 0
assert Service.query.count() == 0
assert Collection.query.count() == 0
assert Report.query.count() == 0
| 2.171875 | 2 |
baseline_app.py | hatdropper1977/flask-recaptcha | 4 | 12792522 | #!/usr/bin/env python
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
from models import QuizForm
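# models.QuizForm is not shown in this file; a minimal sketch of what it might look like
# (an assumption, not the actual models.py of this project) is:
#
#   from flask_wtf import FlaskForm, RecaptchaField
#   from wtforms import RadioField, SubmitField
#
#   class QuizForm(FlaskForm):
#       answer = RadioField('Answer', choices=[('a', 'A'), ('b', 'B')])
#       recaptcha = RecaptchaField()
#       submit = SubmitField('Submit')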
class Config(object):
SECRET_KEY = '<KEY>'
application = Flask(__name__)
application.config.from_object(Config)
Bootstrap(application)
@application.route('/', methods=['GET', 'POST'])
def take_test():
form = QuizForm(request.form)
if not form.validate_on_submit():
return render_template('take_quiz_template.html', form=form)
if request.method == 'POST':
return 'Submitted!'
if __name__ == '__main__':
application.run(host='0.0.0.0', debug=True)
| 2.703125 | 3 |
polymorphism_and_magic_methods/exercise/wild_animals_04/test.py | BoyanPeychinov/object_oriented_programming | 0 | 12792523 | <filename>polymorphism_and_magic_methods/exercise/wild_animals_04/test.py<gh_stars>0
from wild_animals_04.animals.birds import Hen
from wild_animals_04.food import Meat, Vegetable, Fruit
# owl = Owl("Pip", 10, 10)
# print(owl)
# meat = Meat(4)
# print(owl.make_sound())
# owl.feed(meat)
# veg = Vegetable(1)
# print(owl.feed(veg))
# print(owl)
hen = Hen("Harry", 10, 10)
veg = Vegetable(3)
fruit = Fruit(5)
meat = Meat(1)
print(hen)
print(hen.make_sound())
hen.feed(veg)
hen.feed(fruit)
hen.feed(meat)
print(hen)
| 2.859375 | 3 |
utils/text_processing_utils.py | angelinaku/wsd_pipeline | 0 | 12792524 | <reponame>angelinaku/wsd_pipeline<gh_stars>0
from typing import List, Optional, Dict, Tuple
import numpy as np
import torch
from gensim.models import Word2Vec
from simple_elmo import ElmoModel
from torch import nn
def pad_sequences(
sequences: List,
maxlen: Optional[int],
dtype: str = 'int32',
padding: str = 'post',
truncating: str = 'post',
value: int = 0,
) -> np.array:
"""Pad sequences to the same length.
from Keras
This function transforms a list of
`num_samples` sequences (lists of integers)
into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence otherwise.
Sequences that are shorter than `num_timesteps`
are padded with `value` at the end.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding is the default.
# Arguments
sequences: List of lists, where each element is a sequence.
maxlen: Int, maximum length of all sequences.
dtype: Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, 'pre' or 'post':
pad either before or after each sequence.
truncating: String, 'pre' or 'post':
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value.
# Returns
x: Numpy array with shape `(len(sequences), maxlen)`
# Raises
ValueError: In case of invalid values for `truncating` or `padding`,
or in case of invalid shape for a `sequences` entry.
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
num_samples = len(sequences)
lengths = []
for x in sequences:
try:
lengths.append(len(x))
except TypeError:
raise ValueError('`sequences` must be a list of iterables. ' 'Found non-iterable: ' + str(x))
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = ()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" ' 'not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f'Shape of sample {trunc.shape[1:]} of sequence at position {idx}'
f'is different from expected shape {sample_shape}'
)
if padding == 'post':
x[idx, : len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
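# Illustrative usage (not part of the original module): with the defaults above, two
# variable-length sequences are post-padded with zeros to a common length.
#
#   pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
#   -> array([[1, 2, 3, 0],
#             [4, 5, 0, 0]], dtype=int32)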
def build_matrix(
word_idx: Dict,
embedding_path: str = '',
embeddings_type: str = 'word2vec',
max_features: int = 100000,
embed_size: int = 300,
) -> np.ndarray:
"""
Create embedding matrix
Args:
embedding_path: path to embeddings
embeddings_type: type of pretrained embeddings ('word2vec', 'glove'')
word_idx: mapping from words to their indexes
max_features: max features to use
embed_size: size of embeddings
Returns:
        the embedding matrix
"""
if embeddings_type not in ['word2vec', 'glove']:
raise ValueError('Unacceptable embedding type.\nPermissible values: word2vec, glove')
model = Word2Vec.load(embedding_path)
# Creating Embedding Index
embedding_index = {}
for word in model.wv.vocab:
coefs = np.asarray(model.wv[word])
embedding_index[word] = coefs
nb_words = min(max_features, len(word_idx))
if embeddings_type in ['word2vec', 'glove']:
embedding_size = embed_size if embed_size != 0 else len(list(embedding_index.values())[0])
all_embs = np.stack(embedding_index.values())
embed_mean, embed_std = all_embs.mean(), all_embs.std()
if '<unk>' not in embedding_index:
embedding_index['<unk>'] = np.random.normal(embed_mean, embed_std, (1, embedding_size))
        embedding_matrix = np.random.normal(embed_mean, embed_std, (nb_words + 1, embedding_size))  # use the resolved embedding_size, not the raw embed_size argument
for word, num in word_idx.items():
# possible variants of the word to be found in word to idx dictionary
variants_of_word = [word, word.lower(), word.capitalize(), word.upper()]
for variant in variants_of_word:
embedding_vector = embedding_index.get(variant)
if embedding_vector is not None:
embedding_matrix[num] = embedding_vector
break
return embedding_matrix
else:
raise ValueError('Unacceptable embedding type.\nPermissible values: word2vec, glove')
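# Minimal usage sketch (the file name and vocabulary below are assumptions, not values from this project):
#
#   word_to_idx = {"cat": 1, "dog": 2}
#   matrix = build_matrix(word_idx=word_to_idx, embedding_path="word2vec.model",
#                         embeddings_type="word2vec", max_features=2, embed_size=300)
#   matrix.shape  # -> (3, 300): nb_words + 1 rows, one per word index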
class Embedder(nn.Module):
"""
Transform tokens to embeddings
"""
def __init__(self, word_to_idx: Dict, embeddings_path: str, embeddings_type: str, embeddings_dim: int = 0):
super().__init__()
self.weights_matrix = build_matrix(
word_idx=word_to_idx, embedding_path=embeddings_path,
embeddings_type=embeddings_type, max_features=len(word_to_idx), embed_size=embeddings_dim
)
self.weights_matrix = torch.tensor(self.weights_matrix, dtype=torch.float32)
self.embedding = nn.Embedding.from_pretrained(self.weights_matrix)
self.embedding.weight.requires_grad = False
def forward(self, x: torch.LongTensor) -> torch.Tensor:
embed = self.embedding(x)
return embed
class ELMo_Embedder(nn.Module):
"""
Transform tokens to embeddings
"""
def __init__(self, embeddings_path: str):
super().__init__()
self.model = ElmoModel()
self.model.load(embeddings_path)
self.sess = self.model.get_elmo_session()
print('ELMo Embedding Model is Loaded')
def forward(self, x: List) -> torch.Tensor:
# embed = self.model.get_elmo_vectors(x)
embed = self.model.get_elmo_vectors_session(x, self.sess)
embed = torch.Tensor(embed)
return embed
| 3.03125 | 3 |
hackathon/urls.py | AlexiaDelorme/ci-hackathon-app | 0 | 12792525 | from django.urls import path
from .views import (
HackathonListView, create_hackathon, update_hackathon, delete_hackathon, judging
)
urlpatterns = [
path('', HackathonListView.as_view(), name="hackathon-list"),
path("<int:hack_id>/team/<int:team_id>/judging/", judging, name="judging"),
path("create_hackathon", create_hackathon, name='create_hackathon'),
path("<int:hackathon_id>/update_hackathon", update_hackathon, name="update_hackathon"),
path("<int:hackathon_id>/delete_hackathon", delete_hackathon, name="delete_hackathon"),
]
| 1.84375 | 2 |
rex/data/dataset.py | Spico197/REx | 4 | 12792526 | <reponame>Spico197/REx
from typing import Iterable, Optional
from torch.utils.data import Dataset
class CachedDataset(Dataset):
def __init__(self, data: Iterable) -> None:
super().__init__()
self.data = data
def __getitem__(self, index: int):
return self.data[index]
def __len__(self) -> int:
return len(self.data)
class CachedBagREDataset(Dataset):
def __init__(self, data_with_scopes) -> None:
super().__init__()
data, scopes = data_with_scopes
self.data = data
self.scopes = scopes
def __getitem__(self, index: int):
results = []
for idx in self.scopes[index]:
results.append(self.data[idx])
return results
def __len__(self) -> int:
return len(self.scopes)
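# Illustrative sketch (hypothetical data): if scopes == [[0, 1], [2]], then dataset[0] returns
# the two instances of the first bag and dataset[1] the single instance of the second bag.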
class StreamTransformDataset(Dataset):
def __init__(
self, data: Iterable, transform, debug: Optional[bool] = False
) -> None:
super().__init__()
if debug:
data = data[:128]
self.data = data
self.transform = transform
def __getitem__(self, index: int):
return self.transform(self.data[index])
def __len__(self) -> int:
return len(self.data)
| 2.5 | 2 |
Basic Programs/Day12.py | kv-95/pyStreak | 1 | 12792527 | # Python Program for n'th multiple of a number in Fibonacci Series
def findMultiple(n,k):
a = 0
b = 1
count = 1
while(True):
c = a+b
if c % k==0:
if count == n:
return(c)
count += 1
a,b = b,c
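# Example (illustrative): findMultiple(2, 3) returns 21, since the multiples of 3
# produced by this Fibonacci sequence are 3, 21, 144, ...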
if __name__ == "__main__":
k = int(input("Enter the number which is in the Fibonacci Series: "))
n = int(input("Enter the value of n : "))
print("{}th/nd/rd multiple of {} in Fibonacci Series is {}".format(n,k,findMultiple(n,k)))
| 4.21875 | 4 |
biosys/apps/main/tests/api/test_auth.py | florianm/biosys | 1 | 12792528 | from django.conf import settings
from django.urls import reverse
from django.test import override_settings
from rest_framework import status
from rest_framework.test import APIClient
from freezegun import freeze_time
from main.tests.api import helpers
class TestAuth(helpers.BaseUserTestCase):
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),
REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)
def test_token_auth_end_point(self):
"""
Test that when hitting the auth_token end point we receive a token
:return:
"""
client = APIClient()
# request token
url = reverse('api:auth-token')
user = self.readonly_user
self.assertTrue(user.check_password('password'))
data = {
'username': "readonly",
"password": "password"
}
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
# check that we have a token
self.assertTrue('token' in resp.data)
token = resp.data.get('token')
self.assertTrue(token)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.MD5PasswordHasher',),
REST_FRAMEWORK_TEST_SETTINGS=helpers.REST_FRAMEWORK_TEST_SETTINGS)
def test_token_valid(self):
"""
Test that the token received can be used for authentication
:return:
"""
client = APIClient()
user = self.readonly_user
self.assertTrue(user.check_password('password'))
url = reverse('api:auth-token')
data = {
'username': user.username,
"password": "password"
}
resp = client.post(url, data=data, format='json')
token = resp.data.get('token')
self.assertTrue(token)
# can't get dataset list without token
url = reverse('api:dataset-list')
resp = client.get(url)
self.assertIn(resp.status_code, [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN])
# set credential token
client.credentials(HTTP_AUTHORIZATION='Token ' + token)
resp = client.get(url)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
class TestUserAuthThrottling(helpers.BaseUserTestCase):
"""
    Use case: prevent brute-force authentication by stopping an API user from issuing too many auth-token requests
"""
def test_brute_force(self):
"""
        Test that an attacker sending auth requests with a wrong password is blocked after n attempts
:return:
"""
rate = '6/hour'
drf_settings = settings.REST_FRAMEWORK
drf_settings['DEFAULT_THROTTLE_RATES']['auth'] = rate
with override_settings(REST_FRAMEWORK=drf_settings):
max_attempt = 6
client = APIClient()
# request token
url = reverse('api:auth-token')
user = self.readonly_user
self.assertTrue(user.check_password('password'))
data = {
'username': "readonly",
"password": "<PASSWORD>"
}
# Hacking attempt should return HTTP_400_BAD_REQUEST while attempts < throttle rate
with freeze_time("2018-05-29 12:00:00", tick=True):
for attempt in range(max_attempt):
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
# next attempt should return a HTTP_429_TOO_MANY_REQUESTS
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
# let's simulate a 30 min jump in time. Should still return HTTP_429_TOO_MANY_REQUESTS
with freeze_time("2018-05-29 12:30:00", tick=True):
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_429_TOO_MANY_REQUESTS)
# let's jump more than one hour in time. Should be back at returning HTTP_400_BAD_REQUEST
with freeze_time("2018-05-29 13:00:05", tick=True):
resp = client.post(url, data=data, format='json')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
| 2.390625 | 2 |
cride/registros/migrations/0001_initial.py | albertoaldanar/serecsinAPI | 0 | 12792529 | <reponame>albertoaldanar/serecsinAPI
# Generated by Django 2.0.9 on 2019-07-28 03:39
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Egreso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')),
('fecha', models.DateField(default=datetime.date.today)),
('importe', models.PositiveIntegerField(default=0)),
('mes', models.CharField(blank=True, max_length=30, null=True)),
('año', models.PositiveIntegerField(default=2019)),
('cliente', models.CharField(blank=True, max_length=30, null=True)),
('concepto', models.CharField(blank=True, max_length=30, null=True)),
('genero', models.CharField(blank=True, max_length=30, null=True)),
('cantidad', models.FloatField(blank=True, default=None, null=True)),
('usuario', models.CharField(blank=True, max_length=30, null=True)),
('lugar', models.CharField(blank=True, max_length=30, null=True)),
('cuenta_origen', models.PositiveIntegerField(default=0, null=True)),
('metodo_pago', models.CharField(blank=True, max_length=30, null=True)),
('forma_pago', models.CharField(blank=True, max_length=30, null=True)),
('cfdi', models.CharField(blank=True, max_length=30, null=True)),
('folio', models.CharField(blank=True, max_length=30, null=True)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': ['created'],
'abstract': False,
},
),
migrations.CreateModel(
name='Ingreso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Date time for created instance', verbose_name='created_a')),
('modified', models.DateTimeField(auto_now=True, help_text='Date time for modified instance', verbose_name='modified_at')),
('cliente', models.CharField(blank=True, max_length=30, null=True)),
('mes', models.CharField(blank=True, max_length=30, null=True)),
('año', models.PositiveIntegerField(default=2019)),
('importe', models.PositiveIntegerField(default=0)),
('adeudo_mes', models.PositiveIntegerField(default=0)),
('importante', models.BooleanField(default=False)),
('adeudo_acumulado', models.PositiveIntegerField(default=0)),
],
options={
'ordering': ['-created', '-modified'],
'get_latest_by': ['created'],
'abstract': False,
},
),
]
| 1.890625 | 2 |
app/core/migrations/0003_auto_20210420_0656.py | sawamotokai/Project-Backend-Energy | 0 | 12792530 | <filename>app/core/migrations/0003_auto_20210420_0656.py
# Generated by Django 2.1.15 on 2021-04-20 06:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20210420_0653'),
]
operations = [
migrations.RemoveField(
model_name='device',
name='state',
),
migrations.AddField(
model_name='device',
name='stateAlgo',
field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True),
),
migrations.AddField(
model_name='device',
name='stateReal',
field=models.CharField(choices=[('On', 'On'), ('Off', 'Off')], max_length=255, null=True),
),
]
| 1.601563 | 2 |
grammar_induction/earley_parser/earley_parser.py | tdonca/OpenBottle | 0 | 12792531 | <reponame>tdonca/OpenBottle<filename>grammar_induction/earley_parser/earley_parser.py
import nltk
def read_induced_grammar(path):
with open(path) as f:
rules = [rule.strip() for rule in f.readlines()]
grammar = nltk.PCFG.fromstring(rules)
return grammar
def predict_next_symbols(grammar, tokens):
def get_production_prob(selected_edge):
# Find the corresponding production rule of the edge, and return its probability
for production in grammar.productions(lhs=selected_edge.lhs()):
if production.rhs() == selected_edge.rhs():
# print selected_edge, production.prob()
return production.prob()
def find_parent(selected_edge):
# Find the parent edges that lead to the selected edge
p_edges = list()
for p_edge in e_chart.edges():
if p_edge.end() == selected_edge.start() and p_edge.nextsym() == selected_edge.lhs():
p_edges.append(p_edge)
return p_edges
def get_edge_prob(selected_edge):
# Compute the probability of the edge by recursion
prob = get_production_prob(selected_edge)
if selected_edge.start() != 0:
parent_prob = 0
for parent_edge in find_parent(selected_edge):
parent_prob += get_edge_prob(parent_edge)
prob *= parent_prob
return prob
symbols = list()
earley_parser = nltk.EarleyChartParser(grammar, trace=0)
e_chart = earley_parser.chart_parse(tokens)
end_edges = list()
for edge in e_chart.edges():
# print edge
if edge.end() == len(tokens):
# Only add terminal nodes
if isinstance(edge.nextsym(), unicode):
symbols.append(edge.nextsym())
end_edges.append(edge)
probs = list()
for end_edge in end_edges:
probs.append(get_edge_prob(end_edge))
# Eliminate duplicate
symbols_no_duplicate = list()
probs_no_duplicate = list()
for s, p in zip(symbols, probs):
if s not in symbols_no_duplicate:
symbols_no_duplicate.append(s)
probs_no_duplicate.append(p)
else:
probs_no_duplicate[symbols_no_duplicate.index(s)] += p
return zip(symbols_no_duplicate, probs_no_duplicate)
def main():
grammar = read_induced_grammar('../grammars/parser_input.txt')
    # Predict the next symbols after each successively longer prefix of the action sequence
    actions = ['approach', 'pinch', 'twist', 'unpinch', 'move', 'grasp_right', 'twist']
    tokens = []
    for action in actions:
        tokens.append(action)
        prediction = predict_next_symbols(grammar, tokens)
        print prediction
if __name__ == '__main__':
main()
| 2.28125 | 2 |
kerosene/datasets/cifar100.py | dribnet/kerosene | 35 | 12792532 | <reponame>dribnet/kerosene
# -*- coding: utf-8 -*-
import fuel.datasets
from .dataset import Dataset
class CIFAR100(Dataset):
basename = "cifar100"
default_sources=['features', 'coarse_labels']
class_for_filename_patch = fuel.datasets.CIFAR100
def build_data(self, sets, sources):
return map(lambda s: fuel.datasets.CIFAR100(which_sets=[s], sources=sources), sets)
def load_data(sets=None, sources=None, fuel_dir=False):
return CIFAR100().load_data(sets, sources, fuel_dir);
| 2.3125 | 2 |
gorden_crawler/spiders/shopbop_eastdane_common.py | Enmming/gorden_cralwer | 2 | 12792533 | # -*- coding: utf-8 -*-
from gorden_crawler.spiders.shiji_base import BaseSpider
from scrapy.selector import Selector
from gorden_crawler.items import BaseItem, ImageItem, Color, SkuItem
from scrapy import Request
from gorden_crawler.utils.item_field_handler import handle_price
import re
import execjs
class ShopbopEastdaneCommon(BaseSpider):
def parse_pages(self, response):
sel = Selector(response)
category = response.meta['category']
product_type = response.meta['product_type']
gender = response.meta['gender']
category_url = response.meta['category_url']
item_link_lis = sel.xpath('//li[contains(@class, "hproduct product")]')
if len(item_link_lis.extract())>0 :
for item_link_li in item_link_lis:
item_link_uri = item_link_li.xpath('./div/a/@href').extract()[0]
url = self.shopbop_base_url + item_link_uri
baseItem = BaseItem()
baseItem['type'] = 'base'
baseItem['category'] = category
baseItem['product_type'] = product_type
baseItem['url'] = url
baseItem['gender'] = gender
baseItem['brand'] = item_link_li.xpath('.//div[@class="brand"]/text()').extract()[0]
baseItem['title'] = item_link_li.xpath('.//div[@class="title"]/text()').extract()[0]
baseItem['cover'] = item_link_li.xpath('.//img/@src').extract()[0]
baseItem['list_price'] = handle_price(item_link_li.xpath('.//span[@class="retail-price"]/text()').extract()[0])
baseItem['current_price'] = handle_price(item_link_li.xpath('.//span[@class="sale-price-low"]/text()').extract()[0])
yield Request(url, callback=self.parse_item, meta={'baseItem' : baseItem})
next_page_link = sel.xpath('//span[@data-at="nextPage"]/@data-next-link').extract()
if len(next_page_link)>0 and (category_url[category] != next_page_link[0]):
url = self.shopbop_base_url + next_page_link[0]
yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url})
def parse_item(self, response):
baseItem = response.meta['baseItem']
return self.handle_parse_item(response, baseItem)
def handle_parse_item(self, response, baseItem):
product_detail_str="".join(re.findall(r"var\s+productDetail[^;]+", response.body))
if len(product_detail_str)>0:
context = execjs.compile('''
%s
function get_product_detail(){
return productDetail;
}
''' % (product_detail_str))
product_detail = context.call('get_product_detail')
sel = Selector(response)
product_id = sel.xpath('//div[@id="productId"]/text()').extract()[0]
skus = []
baseItem['from_site'] = self.name
baseItem['show_product_id'] = product_id
size_js_infos = product_detail['sizes']
size_infos = {}
size_values = []
for size_id in size_js_infos:
size_infos[size_js_infos[size_id]['sizeCode']] = size_id
size_values.append(size_id)
list_price = sel.xpath('//div[@id="productPrices"]//meta[@itemprop="price"]/@content').extract()[0]
color_price_blocks = sel.xpath('//div[@id="productPrices"]//div[@class="priceBlock"]')
color_price_mapping = {}
for color_price_block in color_price_blocks:
color_name = color_price_block.xpath('./span[@class="priceColors"]/text()').extract()
if len(color_name) > 0:
regular_price_span = color_price_block.xpath('./span[@class="regularPrice"]/text()').extract()
if len(regular_price_span) > 0:
color_price_mapping[color_name[0]] = regular_price_span[0]
else:
color_price_mapping[color_name[0]] = color_price_block.xpath('./span[@class="salePrice"]/text()').extract()[0]
image_items = product_detail['colors']
color_names = []
for key in image_items:
imageItems = image_items[key]['images']
color_name = image_items[key]['colorName'].strip()
color_names.append(color_name)
images=[]
tmp_images = []
for image_key in imageItems:
imageItem = ImageItem()
image = imageItems[image_key]
imageItem['thumbnail'] = image['thumbnail']
imageItem['image'] = image['zoom']
tmp_images.append((image['index'], imageItem))
tmp_images = sorted(tmp_images, key=lambda x:x[0])
for tmp_tuple in tmp_images:
images.append(tmp_tuple[1])
colorItem = Color()
colorItem['type'] = 'color'
colorItem['show_product_id'] = product_id
colorItem['from_site'] = self.name
colorItem['cover'] = image_items[key]['swatch']
colorItem['name'] = color_name
colorItem['images'] = images
yield colorItem
sizes = image_items[key]['sizes']
for size in sizes:
size_name = size_infos[size]
skuItem = SkuItem()
skuItem['type'] = 'sku'
skuItem['from_site'] = self.name
skuItem['color'] = color_name
skuItem['show_product_id'] = product_id
skuItem['id'] = key+"-"+size
skuItem['size'] = size_name
skuItem['list_price'] = list_price
if len(color_price_mapping)>0 and color_name in color_price_mapping.keys():
# skuItem['current_price'] = sale_price_span.re(r'\d+.?\d*')[0]
skuItem['current_price'] = color_price_mapping[colorItem['name']]
else:
skuItem['current_price'] = skuItem['list_price']
skuItem['is_outof_stock'] = False
skus.append(skuItem)
baseItem['sizes'] = size_values
baseItem['colors']= color_names
baseItem['skus'] = skus
size_fit_container = sel.xpath('//div[@id="sizeFitContainer"]')
if len(size_fit_container)>0:
size_fit = size_fit_container.extract()[0]
baseItem['desc'] = '<div>'+sel.xpath('//div[@itemprop="description"]').extract()[0]+size_fit+"</div>"
else:
baseItem['desc'] = sel.xpath('//div[@itemprop="description"]').extract()[0]
baseItem['dimensions'] = ['size', 'color']
yield baseItem | 2.28125 | 2 |
Program's_Contributed_By_Contributors/Python_Programs/regex_date_validator.py | a-ayush19/Hacktoberfest2k21 | 0 | 12792534 | import re # import regex module
# check if date is valid (yyyy-mm-dd)
def date_validation(date):
    # re.fullmatch anchors the pattern itself, so the JS-style /^...$/ delimiters are not needed
    if re.fullmatch(r"\d{4}-\d{2}-\d{2}", date):
        return True
    else:
        return False
print(date_validation("2022-02-29"))  # True: the pattern matches, although Feb 29, 2022 is not a real date
| 3.375 | 3 |
cfripper/main.py | ocrawford555/cfripper | 0 | 12792535 | <gh_stars>0
"""
Copyright 2018 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from cfripper.config.config import Config
from cfripper.s3_adapter import S3Adapter
from cfripper.model.rule_processor import RuleProcessor
from cfripper.rules import ALL_RULES
from cfripper.model.result import Result
from cfripper.config.logger import get_logger
logger = get_logger()
def log_results(project_name, service_name, stack_name, rules, _type, warnings, template_url):
logger.info("{}: project - {}, service- {}, stack - {}. {} {} URL: {}".format(
_type,
project_name,
service_name,
stack_name,
json.dumps(rules),
str(warnings),
template_url,
))
def handler(event, context):
"""
Main entry point of the Lambda function.
:param event: {
"stack_template_url": String
}
:param context:
:return:
"""
if not event.get("stack_template_url"):
raise ValueError("Invalid event type: no parameter 'stack_template_url' in request.")
result = Result()
s3 = S3Adapter()
template = s3.download_template_to_dictionary(event["stack_template_url"])
if not template:
        # In case of an invalid script, log a warning and return early
        result.add_exception(TypeError("Malformed CF script: {}".format(event["stack_template_url"])))
return {
"valid": "true",
"reason": '',
"failed_rules": [],
"exceptions": [x.args[0] for x in result.exceptions],
}
# Process Rules
config = Config(
project_name=event.get("project"),
service_name=event.get("serviceName"),
stack_name=event.get("stack", {}).get("name"),
rules=ALL_RULES.keys(),
event=event.get("event"),
template_url=event.get("stack_template_url"),
)
logger.info("Scan started for: {}; {}; {};".format(
config.project_name,
config.service_name,
config.stack_name,
))
rules = [ALL_RULES.get(rule)(config, result) for rule in config.RULES]
processor = RuleProcessor(*rules)
processor.process_cf_template(template, config, result)
if not result.valid:
log_results(
"Failed rules",
config.project_name,
config.service_name,
config.stack_name,
result.failed_rules,
result.warnings,
event["stack_template_url"],
)
logger.info("FAIL: {}; {}; {}".format(
config.project_name,
config.service_name,
config.stack_name,
))
else:
logger.info("PASS: {}; {}; {}".format(
config.project_name,
config.service_name,
config.stack_name,
))
if len(result.failed_monitored_rules) > 0 or len(result.warnings) > 0:
log_results(
"Failed monitored rules",
config.project_name,
config.service_name,
config.stack_name,
result.failed_monitored_rules,
result.warnings,
event["stack_template_url"],
)
return {
"valid": str(result.valid).lower(),
"reason": ",".join(["{}-{}".format(r["rule"], r["reason"]) for r in result.failed_rules]),
"failed_rules": result.failed_rules,
"exceptions": [x.args[0] for x in result.exceptions],
"warnings": result.failed_monitored_rules,
}
| 1.882813 | 2 |
examples/quicksort.py | RaphaelArkadyMeyer/LiveCoding | 0 | 12792536 | <reponame>RaphaelArkadyMeyer/LiveCoding<gh_stars>0
from sys import stdin
@@ begin hide
def main():
print("start qs", stdin.readline()) # Ignore first line with number of inputs on it
array_in = stdin.readline()
print(array_in)
presort = list(map(int, array_in.split(' ')))
sort = quickSort(presort)
print(sort)
@@ end hide
def quickSort(arr):
less = []
pivotList = []
more = []
if len(arr) <= 1:
@@ begin question base case
@@ description: Find the base case
@@ points: 100
return arr
@@ end question
else:
pivot = arr[0]
@@ begin question recursion
@@ points: 500
for i in arr:
if i < pivot:
less.append(i)
elif i > pivot:
more.append(i)
else:
pivotList.append(i)
@@ end question
less = quickSort(less)
more = quickSort(more)
return less + pivotList + more
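# Worked example (illustrative): quickSort([3, 1, 2]) picks pivot 3, splits into
# less=[1, 2], pivotList=[3], more=[], recurses on the sublists and returns [1, 2, 3].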
@@ begin hide
if __name__ == '__main__':
main()
@@ end hide
| 3.34375 | 3 |
otherdave/util/madlib.py | BooGluten/OtherDave | 0 | 12792537 | import inflect
import json
import random
import re
infl = inflect.engine()
class MadLibber():
def make(self):
template = self.actions["template"]()
tokens = template.split(" ")
result = ""
for token in tokens:
            action = re.match(r"\{\{(.+?)\}\}", token)
if(action):
if(action[1] in self.actions):
result += self.actions[action[1]]()
else:
result += action[0]
else:
result += token
result += " "
return result.strip()
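# Illustrative example (the template string below is an assumption, not taken from the JSON
# data files): given a template such as "you have {{an_adjective}} {{thing}}", make() replaces
# each {{...}} token with the output of the matching entry in self.actions and leaves unknown
# tokens untouched.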
class Complimenter(MadLibber):
def __init__(self):
with open("./data/respect/adjectives.json") as adf:
self.adjectives = json.load(adf)
with open("./data/respect/amounts.json") as amf:
self.amounts = json.load(amf)
with open("./data/respect/parts.json") as parf:
self.parts = json.load(parf)
with open("./data/respect/persons.json") as perf:
self.persons = json.load(perf)
with open("./data/respect/templates.json") as temf:
self.templates = json.load(temf)
with open("./data/respect/things.json") as thinf:
self.things = json.load(thinf)
self.actions = {
"adjective" : lambda : random.choice(self.adjectives),
"an_adjective" : lambda : infl.an(self.actions["adjective"]()),
"amount" : lambda : random.choice(self.amounts),
"an_amount" : lambda : infl.an(self.actions["amount"]()),
"parts" : lambda : random.choice(self.parts),
"person" : lambda : random.choice(self.persons),
"thing" : lambda : random.choice(self.things),
"template" : lambda : random.choice(self.templates)
}
class Prompter(MadLibber):
def __init__(self):
with open("./data/prompt/adjectives.json") as adf:
self.adjectives = json.load(adf)
with open("./data/prompt/nouns.json") as nf:
self.nouns = json.load(nf)
self.actions = {
"adjective" : lambda : random.choice(self.adjectives),
"noun" : lambda : random.choice(self.nouns),
"template" : lambda : r"{{adjective}} {{noun}}"
}
def addNoun(self, noun):
self.nouns.append(noun)
with open("./data/prompt/nouns.json", "w") as nf:
            json.dump(self.nouns, nf)
def remNoun(self, noun):
if(noun in self.nouns):
self.nouns.remove(noun)
with open("./data/prompt/nouns.json", "w") as nf:
                json.dump(self.nouns, nf)
def addAdjective(self, adjective):
self.adjectives.append(adjective)
with open("./data/prompt/adjectives.json", "w") as adf:
            json.dump(self.adjectives, adf)
def remAdjective(self, adjective):
if(adjective in self.adjectives):
self.adjectives.remove(adjective)
with open("./data/prompt/adjectives.json", "w") as adf:
                json.dump(self.adjectives, adf) | 2.734375 | 3 |
gradient.py | sebdisdv/HdrProject | 0 | 12792538 | from math import exp
import cv2 as cv
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from numba import jit
from numpy import float32
from tqdm import tqdm
from utils import (
get_region_indexes,
get_region_centers,
associate_index_to_centers,
get_window,
)
@jit
def P(v):
return v / 255
@jit
def deltaIx(img, channel, x, y):
res = 0
if x + 1 < img.shape[0] and y < img.shape[1]:
res = abs(img[x + 1][y][channel] - img[x][y][channel])
else:
res = 0
return res
@jit
def deltaIy(img, channel, x, y):
res = 0
if y - 1 > 0 and x < img.shape[0]:
res = abs(img[x][y - 1][channel] - img[x][y][channel])
else:
res = 0
return res
def getDetailsRegions(imgs):
region_indexes = get_region_indexes(imgs[0].shape[0], imgs[0].shape[1], 10)
M = []
for i in range(len(imgs)):
M.append([])
for j in tqdm(range(region_indexes.shape[0])):
M_B = 0
M_G = 0
M_R = 0
for x in range(region_indexes[j][0][0], region_indexes[j][0][1]):
for y in range(region_indexes[j][1][0], region_indexes[j][1][1]):
M_B += P(max(deltaIx(imgs[i], 0, x, y), deltaIy(imgs[i], 0, x, y)))
M_G += P(max(deltaIx(imgs[i], 1, x, y), deltaIy(imgs[i], 1, x, y)))
M_R += P(max(deltaIx(imgs[i], 2, x, y), deltaIy(imgs[i], 2, x, y)))
M[i].append([M_B, M_G, M_R])
return np.array(M), region_indexes
def joinBestRegions(imgs, M, region_indexes):
res = np.zeros(imgs[0].shape)
for channel_indx in range(3):
for r_indx in tqdm(range(M.shape[1])): # iterate over each region
max_r = {}
for i in range(len(imgs)):
max_r[np.sum(M[i][r_indx])] = i
index_image = max_r[max(max_r)]
for i in range(region_indexes[r_indx][0][0], region_indexes[r_indx][0][1]):
for j in range(
region_indexes[r_indx][1][0], region_indexes[r_indx][1][1]
):
res[i][j][channel_indx] = imgs[index_image][i][j][channel_indx]
return res
@jit
def U(x_c_reg, y_c_reg, x_c, y_c):
epsilon = 2
return abs(x_c_reg - x_c) <= epsilon and abs(y_c_reg - y_c) <= epsilon
@jit
def exp_g(x, y, x_c, y_c) -> float:
sigma_x = 100
sigma_y = 100
return exp(
-((((x - x_c) ** 2) / (2 * sigma_x)) + (((y - y_c) ** 2) / (2 * sigma_y)))
)
@jit
def gaussianBlendingFunction(x, y, x_c, y_c, region_indexes, center_indexes):
num = exp_g(x, y, x_c, y_c)
den = 0.0
for i in range(center_indexes.shape[0]):
den += exp_g(x, y, center_indexes[i][0], center_indexes[i][1])
den *= center_indexes.shape[0]
return num / den
def compute_channel(channel, region_indexes, center_indexes, map_px_center):
center_indexes = np.float32(center_indexes)
res = np.zeros(shape=channel.shape, dtype=float32)
for x in tqdm(range(res.shape[0])):
for y in range(res.shape[1]):
window = get_window(x, y, channel, 5) # WINDOW VERSION
for i in range(window[0][0], window[0][1]):
for j in range(window[1][0], window[1][1]):
# for i in range(res.shape[0]):
# for j in range(res.shape[1]):
add = 0
if U(
map_px_center[(i, j)][0],
map_px_center[(i, j)][1],
map_px_center[(x, y)][0],
map_px_center[(x, y)][1],
):
add = 1
add *= gaussianBlendingFunction(
map_px_center[(x, y)][0],
map_px_center[(x, y)][1],
map_px_center[(i, j)][0],
map_px_center[(i, j)][1],
region_indexes,
center_indexes,
)
add *= channel[x][y]
res[x][y] += add
return res
def blend(img, regions_indexes):
centers_indexes = get_region_centers(regions_indexes)
pixel_region_center = associate_index_to_centers(regions_indexes, centers_indexes)
b, g, r = cv.split(img)
with ProcessPoolExecutor() as excecutor:
proc1 = excecutor.submit(
compute_channel, b, regions_indexes, centers_indexes, pixel_region_center
)
proc2 = excecutor.submit(
compute_channel, g, regions_indexes, centers_indexes, pixel_region_center
)
proc3 = excecutor.submit(
compute_channel, r, regions_indexes, centers_indexes, pixel_region_center
)
b = proc1.result()
g = proc2.result()
r = proc3.result()
return cv.merge((b, g, r))
def compute(imgs):
for i in range(len(imgs)):
imgs[i] = np.float32(imgs[i])
M, regions_indexes = getDetailsRegions(imgs)
res = blend(joinBestRegions(imgs, M, regions_indexes), regions_indexes)
res = res / np.amax(res)
res = 255 * res
return res
| 2.046875 | 2 |
model.py | imayank/project4 | 0 | 12792539 | <gh_stars>0
import pandas as pd
import numpy as np
import csv
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense, Flatten, Lambda, Cropping2D, Conv2D, MaxPooling2D, Activation, BatchNormalization,Dropout
""" Location of the driving_log.csv file and images generated using
Udacity Car Simulator in training mode."""
base_path = "../data/recording/"
base_path_img = "../data/recording/IMG/"
"""Reading in the drving_log file"""
data = pd.read_csv(base_path + "driving_log.csv")
""" This function takes the data frame and merge the three columns
for the ceter, left , right images into a single column with respective
target steering angle value in the second column."""
def expanding_data(data):
X_center = data.loc[:,'center'] ## The central camera image
y_center = data.loc[:,'target'] ## Respective value for steering
X_left = data.loc[:,'left'] ## The image from left camera
y_left = y_center + 0.3 ## To steer a bit right add a positive value
X_right = data.loc[:,'right'] ## The image from the right camera
y_right = y_center - 0.3 ## To steer a bit left add a negative value
""" Three data frames for central, left, right camera data, each with
two columns - image location and target value for steering"""
center_data = pd.concat([X_center,y_center],axis=1,ignore_index=True)
left_data = pd.concat([X_left,y_left],axis=1,ignore_index=True)
right_data = pd.concat([X_right,y_right],axis=1,ignore_index=True)
"""Merging the data frames"""
merged_data = pd.concat([center_data,left_data,right_data],axis=0,ignore_index=True)
merged_data.columns=['path','target']
return merged_data
""" The function takes as input a data frame and returns a data frame with undersampled
data for some target steering values. The track in the simulator has long almost straight sections,
therefore the data has a large number of observations having low steering angles. Due to this the model may
be biased towards driving straight. The data for such low steering-angle values are undersampled."""
def undersampling(merged_data):
    out = pd.cut(list(merged_data['target']),30,labels=False) ## divide the steering values into 30 equally sized bins
bins, counts = np.unique(out, return_counts=True) ## count the unique bins and number of values in each bin
avg_counts = np.mean(counts) ## average number of values in bins
target_counts = int(np.percentile(counts,75)) ## the count to which the value will be undersampled -- 75th percentile
indices = np.where(counts>avg_counts) ## indices where the counts in the bin is greater than average counts
target_bins = bins[indices] ## bins corresponding to the above indices
target_indices = [] ## list holding the undersampled data points
total_indices = list(range(len(out))) ## Complete list of indices of the data
    remaining_indices = total_indices ## list of indices remaining after the values in undersampled bins are removed, initialized to total_indices
### iterating through bins having value counts greater than avg_counts and undersampling from the those bins
for value in target_bins:
bin_ind = list(np.where(out == value)[0]) ## selecting data points in the bin being iterated
remaining_indices = list(set(remaining_indices) - set(bin_ind)) ## remove the corresponding indices
random_indices = list(np.random.choice(bin_ind,target_counts, replace=False)) ## randomly selecting 'target_counts' data points from the selected data points
target_indices.extend(random_indices) ## adding undersampled indices to the list
undersampled_indices = np.concatenate([target_indices,remaining_indices]) ## concatenating the remaining indices with undersampled indices
undersampled_data = merged_data.loc[undersampled_indices] ## selecting the data points from the data frame
return undersampled_data ## returning the undersampled data
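# Illustrative effect (numbers are hypothetical): if the bin around 0.0 steering holds 5000 rows
# while the 75th percentile of bin counts is 800, only 800 randomly chosen rows of that bin are
# kept, flattening the peak caused by straight-line driving.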
"""Function that reset the index and adds an "ID" columns to the data frame input """
def reset_and_add(undersampled_data):
undersampled_data = undersampled_data.reset_index()
undersampled_data["ID"] = list(range(len(undersampled_data)))
return undersampled_data
""" The function is a python data generator for producing batches
of data of size = batch_size to be used in keras fit_generator function"""
def dataGenerator(data, batch_size,base_path_img):
ids = data['ID'].values ## selecting all the IDs
#print(ids)
num = len(ids) ## length of the data frame
#indices = np.arange(len(ids))
np.random.seed(42)
while True:
#indices = shuffle(indices)
np.random.shuffle(ids) ## shuffling the data
for offset in range(0,num,batch_size):
batch = ids[offset:offset+batch_size] ## selectiing a batch
images = [] ## list holding the batch images
            target = [] ## list holding the steering values corresponding to the above list
## creating a batch of data
for batch_id in batch:
img_path = data.loc[batch_id,'path']
img_name = img_path.split('\\')[-1]
new_path = base_path_img + img_name
images.append(((mpimg.imread(new_path))/255)-0.5)
target.append(data.loc[batch_id,'target'])
images = np.array(images)
target = np.array(target)
yield images, target ## returning a batch
""" Function that creates a model as given in the NVIDIA research paper"""
def model_nvidia_updated():
model = Sequential()
model.add(Cropping2D(((20,20),(0,0)),input_shape=(160,320,3)))
model.add(Conv2D(24,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(36,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(48,5,strides=(2,2),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Conv2D(64,3,strides=(1,1),padding='valid',kernel_regularizer=regularizers.l2(0.0001)))
#model.add(BatchNormalization())
model.add(Activation('elu'))
model.add(Flatten())
model.add(Dense(100,kernel_regularizer=regularizers.l2(0.0001)))
model.add(Activation('elu'))
model.add(Dense(50,kernel_regularizer=regularizers.l2(0.0001)))
model.add(Activation('elu'))
model.add(Dense(10,kernel_regularizer=regularizers.l2(0.0001)))
model.add(Activation('elu'))
model.add(Dense(1))
return model
### Tried undersampling the data, but results were not satisfactory, so end up using complete data for training
"""undersampled_data = undersampling(data)
undersampled_data = expanding_data(undersampled_data)
undersampled_data = reset_and_add(undersampled_data)"""
### using complete data
undersampled_data = expanding_data(data)
undersampled_data = reset_and_add(undersampled_data)
### dividing the data into training and validation sets
train_data, validation_data = train_test_split(undersampled_data,test_size=0.2,random_state=42)
#create data generators for training and validation with batch size of 128
train_generator = dataGenerator(train_data, 128,base_path_img)
valid_generator = dataGenerator(validation_data,128, base_path_img)
""" creating a model"""
model = model_nvidia_updated()
## Compiling the model using Adam optimizer and mean squared error as loss function
model.compile(loss='mse',optimizer='adam')
## training the model using fit_generator, batch size = 128
model.fit_generator(generator=train_generator,
steps_per_epoch = (len(train_data)//128)+1,
validation_data=valid_generator,
validation_steps = (len(validation_data)//128)+1,
epochs = 3)
## saving the model
model.save('model_new.h5')
| 2.921875 | 3 |
badger_utils/sacred/natural_sort.py | GoodAI/distributed_es | 6 | 12792540 | import re
from typing import List, Union, Iterable
class NaturalSort:
@staticmethod
def atoi(text: str) -> int:
return int(text) if text.isdigit() else text
@staticmethod
def natural_keys(text: str) -> List[Union[str, int]]:
"""
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
return [NaturalSort.atoi(c) for c in re.split(r'(\d+)', text)]
@staticmethod
def sorted(data: Iterable):
return sorted(data, key=NaturalSort.natural_keys)
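# Quick illustration: NaturalSort.sorted(["file10", "file2", "file1"]) returns
# ["file1", "file2", "file10"] instead of the lexicographic ["file1", "file10", "file2"].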
| 3.5625 | 4 |
python_pb2/go/chromium/org/luci/buildbucket/proto/token_pb2.py | xswz8015/infra | 0 | 12792541 | <reponame>xswz8015/infra
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: go.chromium.org/luci/buildbucket/proto/token.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='go.chromium.org/luci/buildbucket/proto/token.proto',
package='buildbucket.v2',
syntax='proto3',
serialized_options=b'Z4go.chromium.org/luci/buildbucket/proto;buildbucketpb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n2go.chromium.org/luci/buildbucket/proto/token.proto\x12\x0e\x62uildbucket.v2\"\x8f\x01\n\tTokenBody\x12\x10\n\x08\x62uild_id\x18\x01 \x01(\x03\x12\x32\n\x07purpose\x18\x02 \x01(\x0e\x32!.buildbucket.v2.TokenBody.Purpose\x12\r\n\x05state\x18\x03 \x01(\x0c\"-\n\x07Purpose\x12\x17\n\x13PURPOSE_UNSPECIFIED\x10\x00\x12\t\n\x05\x42UILD\x10\x01\"\x9b\x01\n\rTokenEnvelope\x12\x36\n\x07version\x18\x01 \x01(\x0e\x32%.buildbucket.v2.TokenEnvelope.Version\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"A\n\x07Version\x12\x17\n\x13VERSION_UNSPECIFIED\x10\x00\x12\x1d\n\x19UNENCRYPTED_PASSWORD_LIKE\x10\x01\x42\x36Z4go.chromium.org/luci/buildbucket/proto;buildbucketpbb\x06proto3'
)
_TOKENBODY_PURPOSE = _descriptor.EnumDescriptor(
name='Purpose',
full_name='buildbucket.v2.TokenBody.Purpose',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='PURPOSE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BUILD', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=169,
serialized_end=214,
)
_sym_db.RegisterEnumDescriptor(_TOKENBODY_PURPOSE)
_TOKENENVELOPE_VERSION = _descriptor.EnumDescriptor(
name='Version',
full_name='buildbucket.v2.TokenEnvelope.Version',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='VERSION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNENCRYPTED_PASSWORD_LIKE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=307,
serialized_end=372,
)
_sym_db.RegisterEnumDescriptor(_TOKENENVELOPE_VERSION)
_TOKENBODY = _descriptor.Descriptor(
name='TokenBody',
full_name='buildbucket.v2.TokenBody',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='build_id', full_name='buildbucket.v2.TokenBody.build_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='purpose', full_name='buildbucket.v2.TokenBody.purpose', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='buildbucket.v2.TokenBody.state', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_TOKENBODY_PURPOSE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=71,
serialized_end=214,
)
_TOKENENVELOPE = _descriptor.Descriptor(
name='TokenEnvelope',
full_name='buildbucket.v2.TokenEnvelope',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='buildbucket.v2.TokenEnvelope.version', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='payload', full_name='buildbucket.v2.TokenEnvelope.payload', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_TOKENENVELOPE_VERSION,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=372,
)
_TOKENBODY.fields_by_name['purpose'].enum_type = _TOKENBODY_PURPOSE
_TOKENBODY_PURPOSE.containing_type = _TOKENBODY
_TOKENENVELOPE.fields_by_name['version'].enum_type = _TOKENENVELOPE_VERSION
_TOKENENVELOPE_VERSION.containing_type = _TOKENENVELOPE
DESCRIPTOR.message_types_by_name['TokenBody'] = _TOKENBODY
DESCRIPTOR.message_types_by_name['TokenEnvelope'] = _TOKENENVELOPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TokenBody = _reflection.GeneratedProtocolMessageType('TokenBody', (_message.Message,), {
'DESCRIPTOR' : _TOKENBODY,
'__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2'
# @@protoc_insertion_point(class_scope:buildbucket.v2.TokenBody)
})
_sym_db.RegisterMessage(TokenBody)
TokenEnvelope = _reflection.GeneratedProtocolMessageType('TokenEnvelope', (_message.Message,), {
'DESCRIPTOR' : _TOKENENVELOPE,
'__module__' : 'go.chromium.org.luci.buildbucket.proto.token_pb2'
# @@protoc_insertion_point(class_scope:buildbucket.v2.TokenEnvelope)
})
_sym_db.RegisterMessage(TokenEnvelope)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 1.015625 | 1 |
concept_neuron/concept_neuron_accuracy.py | jacarvalho/concept_neurons | 0 | 12792542 | """
2018, University of Freiburg.
<NAME> <<EMAIL>>
"""
import os
import argparse
import pickle
import numpy as np
import re
from sklearn.metrics import accuracy_score, precision_score, recall_score
from concept_neuron import split_train_valid_test, process_sentence_pos_tags
from concept_neuron import print_pos_tag_statistics, compute_LSTM_states
# hidden_states or cell_states of LSTMs
state_type = 'cell_states'
# List of concepts to analyse - Upenn POS tags
# http://www.nltk.org/api/nltk.tag.html
# To find the available POS tags:
# import nltk.help; nltk.help.upenn_tagset()
concepts = ['(', ')', ',', '.', 'CC', 'CD', 'DT', 'IN', 'JJ', 'MD',
'NN', 'NNP', 'PRP', 'RB', 'TO', 'VB']
concepts.extend(['SPACE', 'OTHER'])
def concept_neurons_accuracy(args):
"""
Computes the accuracy for various logistic regression classifiers
for different POS tags, as a multiclass classifier.
Args:
args (argparse): arguments.
Returns:
None.
"""
# Directory with LSTM model.
save_dir = args.save_dir
# Folder to save results.
if not os.path.isdir(args.results_dir):
os.makedirs(args.results_dir)
results_dir = args.results_dir
# Data to analyse.
input_file = args.data_file
# Get training data, tokenize and POS tag sentences.
# X holds the sentences (word1, word2, ...)
# Y holds the corresponding ((word1, tag1), (word2, tags), ...)
X, Y = process_sentence_pos_tags(input_file, args.group_tags)
# Set the concepts to the whole set if no grouping is required.
unique_tags, counts = np.unique([y[1] for sublist in Y for y in sublist],
return_counts=True)
if not args.group_tags:
global concepts
concepts = unique_tags
# Print some statistics about the initial distribution of POS tags.
print_pos_tag_statistics(unique_tags, counts)
# Computes the LSTM state for each byte in X.
X_t, X_t_pos_tags = compute_LSTM_states(save_dir, X, Y)
# Compute the overall metrics for the logistic regression classifiers.
print('\n-----> Test results')
classifiers_id = ['all', 'top1', 'top2', 'top3']
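    # 'all' uses every LSTM unit as a feature; the 'topN' classifiers keep only the N units
    # with the largest absolute logistic-regression weights taken from the 'all' model.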
for classifier_id in classifiers_id:
print('\n- {}'.format(classifier_id))
concept_classifiers = []
predicted_probs = []
classes = []
for concept in concepts:
lr_file = os.path.join(
results_dir, 'log_reg_model_' + concept +
'_' + classifier_id + '.sav')
if not os.path.exists(lr_file):
continue
concept_classifiers.append(concept)
lr_model = pickle.load(open(lr_file, 'rb'))
classes.append(lr_model.classes_[0])
# Largest coefficients
lr_file_all = os.path.join(
results_dir, 'log_reg_model_' + concept + '_all.sav')
coef_sorted = np.argsort(-np.abs(np.squeeze(
pickle.load(open(lr_file_all, 'rb')).coef_)))
x = re.search(r'^top(?P<k>\d)$', classifier_id)
if x is None: # all weights
X_t_ = X_t
else: # top k weights
k = int(x.group('k'))
X_t_ = [x[coef_sorted[0:k]] for x in X_t]
trX, vaX, teX, trY, vaY, teY = split_train_valid_test(X_t_,
X_t_pos_tags)
predicted_probs.append(lr_model.predict_proba(teX)[:, 0])
# Find the class with largest predicted probability.
concept_classifiers = np.array(concept_classifiers)
predicted_probs = np.array(predicted_probs)
max_prob_ind = np.argmax(predicted_probs, axis=0)
pred_classes = concept_classifiers[max_prob_ind].tolist()
y_true, y_pred = teY, pred_classes
print('Test accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred)))
print('Test precision: {:.3f}'.format(
precision_score(y_true, y_pred, average='weighted')))
print('Test recall: {:.3f}'.format(
recall_score(y_true, y_pred, average='weighted')))
if __name__ == '__main__':
"""
Parse CLI arguments.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--save_dir', type=str,
default='../byte_LSTM_trained_models/wikitext/save/95/',
help='directory containing LSTM-model')
parser.add_argument('--data_file', type=str, default=None,
help="""file to use as input to the classifier.
If no file is provided, the
nltk.corpus.treebank is used
""")
parser.add_argument('--results_dir', type=str, default='results',
help='directory with saved classifiers')
parser.add_argument('--group_tags', action='store_true',
help="""group all VB* tags into VB;
JJ* into JJ;
NN* into NN;
NNP* into NNP;
RB* into RB.
""")
args = parser.parse_args()
concept_neurons_accuracy(args)
| 2.671875 | 3 |
weblogic/server/set_server_log.py | codejsha/infrastructure | 4 | 12792543 | <filename>weblogic/server/set_server_log.py
#!/usr/bin/env python
log_dir = os.environ['LOG_DIR']
admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS']
admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT']
admin_username = os.environ['ADMIN_USERNAME']
admin_password = os.environ['<PASSWORD>']
managed_server_name = os.environ['MANAGED_SERVER_NAME']
######################################################################
def set_server_log_config(_domain_version, _log_dir, _server_name):
cd('/Servers/' + _server_name + '/Log/' + _server_name)
cmo.setFileName(_log_dir + '/' + _server_name + '/' +
'general.' + _server_name + '.%%yyyy%%%%MM%%%%dd%%_%%HH%%%%mm%%%%ss%%.log')
# cmo.setFileName('/dev/null')
cmo.setRotationType('byTime')
cmo.setRotationTime('00:00')
cmo.setFileTimeSpan(24)
cmo.setNumberOfFilesLimited(True)
cmo.setFileCount(30)
cmo.setRotateLogOnStartup(False)
cmo.setDateFormatPattern('MMM d, yyyy h:mm:ss,SSS a z')
cmo.setLoggerSeverity('Info')
# cmo.setLoggerSeverity('Trace')
cmo.setRedirectStdoutToServerLogEnabled(True)
cmo.setRedirectStderrToServerLogEnabled(True)
if ('14.' in _domain_version) or ('12.2' in _domain_version):
cmo.setLogMonitoringEnabled(True)
cmo.setLogMonitoringIntervalSecs(30)
cmo.setLogMonitoringThrottleThreshold(1500)
cmo.setLogMonitoringThrottleMessageLength(50)
cmo.setLogMonitoringMaxThrottleMessageSignatureCount(1000)
cmo.setLogFileSeverity('Info')
cmo.setBufferSizeKB(0)
cmo.setStdoutSeverity('Info')
cmo.setDomainLogBroadcastSeverity('Info')
cmo.setDomainLogBroadcasterBufferSize(10)
cmo.setStdoutLogStack(True)
if '10.3' in _domain_version:
cmo.setMemoryBufferSeverity('Info')
cmo.setStacktraceDepth(5)
cmo.setStdoutFormat('standard')
######################################################################
admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port
connect(admin_username, admin_password, admin_server_url)
edit()
startEdit()
domain_version = cmo.getDomainVersion()
set_server_log_config(domain_version, log_dir, managed_server_name)
save()
activate()
exit()
| 1.859375 | 2 |
hyperstream/tools/clock/2016-11-14_v0.1.0.py | vishalbelsare/HyperStream | 12 | 12792544 | # The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from hyperstream.stream import StreamInstance
from hyperstream.tool import Tool, check_input_stream_count
from hyperstream.utils import MIN_DATE, get_timedelta
from datetime import datetime
class Clock(Tool):
def __init__(self, first=MIN_DATE, stride=1.0):
"""
Simple clock ticker tool
:param first: Start of the clock
:param stride: Tick stride as timedelta
"""
super(Clock, self).__init__(first=first, stride=stride)
if not isinstance(first, datetime):
            raise ValueError("Expected datetime.datetime, got {}".format(type(first).__name__))
self._stride = get_timedelta(stride)
def message(self, interval):
return '{} running from {} to {} with stride {}s'.format(
self.__class__.__name__, str(interval.start), str(interval.end), str(self.stride))
@check_input_stream_count(0)
def _execute(self, sources, alignment_stream, interval):
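        # Clamp the interval to the clock's origin, align the first tick to the stride grid,
        # then emit one tick per stride until the end of the interval.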
if interval.start < self.first:
interval.start = self.first
n_strides = int((interval.start - self.first).total_seconds() // self._stride.total_seconds())
t = self.first + n_strides * self._stride
while t <= interval.end:
if t > interval.start:
yield StreamInstance(t, t)
t += self._stride
| 2.296875 | 2 |
settings.py | Asadbek07/aiogram-django-template | 0 | 12792545 | <filename>settings.py
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# SECURITY WARNING: Modify this secret key if using in production!
SECRET_KEY = ""
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": "",
        "USER": "",
        "PASSWORD": "",
        "HOST": "localhost",
        "PORT": "5432",
    }
}
"""
To connect to an existing postgres database, first:
pip install psycopg2
then overwrite the settings above with:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'YOURDB',
'USER': 'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '',
}
}
"""
INSTALLED_APPS = ("db",)
| 2.125 | 2 |
post_office/__init__.py | LeGast00n/django-post_office | 0 | 12792546 | VERSION = (1, 1, 1)
from .backends import EmailBackend
from .models import PRIORITY
from .utils import send_mail
| 1.09375 | 1 |
tool.py | LasTAD/VAST-2017-MC-1 | 1 | 12792547 | <gh_stars>1-10
import PySimpleGUI as sg
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from SOM import SOM
layout = [[sg.Text('SOM для VAST 2017 MC1', font='Any 18')],
# [sg.Text('Path to data'), sg.FileBrowse('output.txt', key='-Path-data-')],
# [sg.Text('Path to target'), sg.FileBrowse('targets.txt', key='-Path-target-')],
[sg.Text('Размер сетки SOM:'), sg.InputText('20', key='-width-', size=(2, 1)), sg.Text('на'),
sg.InputText('20', key='-height-', size=(2, 1))],
[sg.Text('Количество эпох'), sg.InputText(10000, key='-epochs-', size=(6, 1))],
[sg.Text('Тип "Decay"'), sg.Radio('hill', 'DECAY', True, key='hill'),
sg.Radio('linear', 'DECAY', key='linear')],
[sg.Text('Тип инициализации'), sg.Radio('PCA', 'init', True, key='pca'),
sg.Radio('Случайно', 'init', key='random')],
[sg.Button('Начать расчет', key='-start-'), sg.Button('Выход', key="Exit")]
]
# create the form and show it without the plot
window = sg.Window('Аналитический инструмент для SOM',
layout, finalize=True, size=(320, 170))
def draw_figure(canvas, figure, loc=(0, 0)):
figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)
figure_canvas_agg.draw()
figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)
return figure_canvas_agg
while True:
event, values = window.read()
if event == '-start-':
data = np.loadtxt('output.txt', delimiter=';', usecols=range(40))
som = SOM(int(values['-width-']), int(values['-height-'])) # initialize the SOM
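        # Train the SOM using the decay schedule and weight initialisation selected in the GUI.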
if values['hill']:
if values['random']:
som.fit(data, int(values['-epochs-']), decay='hill', init_type='random')
else:
som.fit(data, int(values['-epochs-']), decay='hill')
if values['linear']:
if values['random']:
som.fit(data, int(values['-epochs-']), decay='linear', init_type='random')
else:
som.fit(data, int(values['-epochs-']), decay='linear')
targets = np.loadtxt('target.txt', dtype='int')
targets = targets - 1
names = ['Автомобиль',
'Грузовик 2',
'Грузовик 3',
'Грузовик 4+',
'Автобус 2',
'Автобус 3',
'Грузовик рейнджеров'
]
codes = ['1', '2', '3', '4', '5', '6', '2P']
fig1 = som.plot_point_map_gui(data, targets, codes)
# fig2 = som.plot_class_density_gui(data, targets, t=0, name=names[0])
fig1.set_size_inches(7, 7)
# fig2.set_size_inches(6, 6)
figure1_x, figure1_y, figure1_w, figure1_h = fig1.bbox.bounds
# figure_x, figure2_y, figure2_w, figure2_h = fig2.bbox.bounds
layout2 = [[sg.Canvas(size=(figure1_w, figure1_h), key='canvas_som')],
[sg.OK('OK'), sg.Button('Print result', key='-print-')]
]
window2 = sg.Window('SOM Result',
layout2, finalize=True)
# fig_canvas_agg = draw_figure(window2['canvas_density'].TKCanvas, fig2)
fig_canvas_agg = draw_figure(window2['canvas_som'].TKCanvas, fig1)
while True:
event2, values2 = window2.read()
            if event2 == 'OK' or event2 is None:
window2.close()
break
if event2 == '-print-':
som.plot_point_map(data, targets, names, filename='images/SOM/som.png')
# som.plot_class_density(data, targets, t=0, name='Vehicles', filename='images/density.png')
    if event == 'Exit' or event is None:
window.close()
break
pass
| 2.359375 | 2 |
Semester 1/Python/Q4.py | sufiyaanusmani/FAST-NUCES | 0 | 12792548 | from decimal import *
decimal = int(input("Enter a number: "))
binary = 0
i = 1
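# Repeatedly divide by 2; each remainder becomes the next binary digit,
# accumulated as a base-10 Decimal whose digits spell out the binary number.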
while decimal != 0:
digit = decimal % 2
binary = Decimal(binary + (i * digit))
i *= 10
decimal = decimal // 2
print(binary) | 4.09375 | 4 |
stochpy/pscmodels/GeneDuplication.py | bgoli/stochpy | 35 | 12792549 | model = """
# Reactions
R1:
G1 > G1 + mRNA1
Ksyn*G1
R2:
mRNA1 > $pool
Kdeg*mRNA1
R3:
G2 > G2 + mRNA2
Ksyn*G2
R4:
mRNA2 > $pool
Kdeg*mRNA2
# Fixed species
# Variable species
mRNA1 = 50.0
G1 = 1
mRNA2 = 50.0
G2 = 1
# Parameters
Ksyn = 10
Kdeg = 0.2
"""
| 1.242188 | 1 |
coco_scripts/eval_coco.py | yourfatherI/VSR-guided-CIC | 32 | 12792550 | <filename>coco_scripts/eval_coco.py
from speaksee.data import TextField
import os, sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from data import COCOControlSetField_Verb, COCODetSetField_Verb, ImageDetectionsField
from data.dataset import COCOEntities
from models import ControllableCaptioningModel
from speaksee.data import DataLoader, DictionaryDataset, RawField
from speaksee.evaluation import Bleu, Meteor, Rouge, Cider, Spice
from speaksee.evaluation import PTBTokenizer
from models import SinkhornNet, S_SSP
from config import *
import torch
import random
import numpy as np
import itertools
import argparse
import munkres
from tqdm import tqdm
from utils import verb_rank_merge
random.seed(1234)
torch.manual_seed(1234)
device = torch.device('cuda')
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--nb_workers', default=0, type=int, help='number of workers')
parser.add_argument('--checkpoint_path', type=str, default="res")
parser.add_argument('--start_from', type=str, default=None)
parser.add_argument('--sinkhorn_len', type=int, default=10)
parser.add_argument('--fixed_len', type=int, default=10)
parser.add_argument('--det', action='store_true', help='whether use detected region')
parser.add_argument('--gt', action='store_true', help="whether use gt verb")
opt = parser.parse_args()
print(opt)
print('Loading caption model trained with CIDEr optimization.')
saved_data = torch.load('saved_model/coco_cap/ours_coco_rl.pth')
opt_cap = saved_data['opt']
# define the field
image_field = ImageDetectionsField(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'), load_in_tmp=False)
if not opt.det:
det_field = COCOControlSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'),
classes_path=os.path.join(coco_root, 'object_class_list.txt'),
img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'),
precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'),
verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'),
idx_vs_path=os.path.join(coco_root, 'idx_2_vs_v.json'),
cap_classes_path=os.path.join(coco_root, 'cap_2_classes_v.json'),
cap_verb_path=os.path.join(coco_root, 'cap_2_verb_v.json'),
vocab_path=os.path.join(coco_root, 'vocab_tv.json'),
idx_2_verb_og_path=os.path.join(coco_root, 'idx_2_v_og.json'),
verb_vob_path=os.path.join(coco_root, 'verb_2_vob.json'),
fix_length=10, max_detections=20, gt_verb=opt.gt)
else:
det_field = COCODetSetField_Verb(detections_path=os.path.join(coco_root, 'coco_detections.hdf5'),
verb_idx_path=os.path.join(coco_root, 'verb_2_idx.json'),
classes_path=os.path.join(coco_root, 'object_class_list.txt'),
img_shapes_path=os.path.join(coco_root, 'coco_img_shapes.json'),
precomp_glove_path=os.path.join(coco_root, 'object_class_glove.pkl'),
vocab_path=os.path.join(coco_root, 'vocab_tv.json'),
vlem_2_v_og_path=os.path.join(coco_root, 'vlem_2_vog_coco.json'),
cls_seq_path=os.path.join('saved_data/coco', 'img_cap_v_2_class_self.json'),
fix_length=10, max_detections=20, gt_verb=opt.gt)
text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, remove_punctuation=True, fix_length=20)
# define the datasets
dataset = COCOEntities(image_field, det_field, text_field,
img_root='',
ann_root=os.path.join(coco_root, 'annotations'),
entities_file=os.path.join(coco_root, 'coco_entities.json'),
id_root=os.path.join(coco_root, 'annotations'))
test_dataset = COCOEntities(image_field, det_field, RawField(),
img_root='',
ann_root=os.path.join(coco_root, 'annotations'),
entities_file=os.path.join(coco_root, 'coco_entities.json'),
id_root=os.path.join(coco_root, 'annotations'),
filtering=True,
det_filtering=opt.det)
train_dataset, val_dataset, _ = dataset.splits
text_field.build_vocab(train_dataset, val_dataset, min_freq=5)
# define the dataloader
_, _, test_dataset = test_dataset.splits
test_dataset = DictionaryDataset(test_dataset.examples, test_dataset.fields, 'image')
dataloader_test = DataLoader(test_dataset, batch_size=opt.batch_size, num_workers=opt.nb_workers)
# S-level SSP
re_sort_net = S_SSP().cuda()
re_sort_net.load_state_dict(torch.load(os.path.join('saved_model/coco_s_ssp', 'model-tr.pth')))
re_sort_net.eval()
# R-level SSP
sinkhorn_len = opt.sinkhorn_len
sinkhorn_net = SinkhornNet(sinkhorn_len, 20, 0.1).cuda()
sinkhorn_net.load_state_dict(torch.load(os.path.join('saved_model/coco_sinkhorn', 'model-sh.pth')))
sinkhorn_net.eval()
# Role-shifting Captioning Model
model = ControllableCaptioningModel(20, len(text_field.vocab), text_field.vocab.stoi['<bos>'], \
h2_first_lstm=opt_cap.h2_first_lstm, img_second_lstm=opt_cap.img_second_lstm).to(device)
model.load_state_dict(saved_data['state_dict'])
model.eval()
fixed_len = opt.fixed_len
predictions = []
gt_captions = []
# Evaluate
with tqdm(desc='Test', unit='it', ncols=110, total=len(iter(dataloader_test))) as pbar:
with torch.no_grad():
for it, (keys, values) in enumerate(iter(dataloader_test)):
detections, imgids = keys # b_s, 100, feat
if not opt.det:
det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \
det_seqs_sr, control_verb, _, _, _, verb_list, captions = values
else:
det_seqs_txt, det_seqs_vis, det_seqs_pos, det_seqs_all, det_seqs_v, \
det_seqs_sr, control_verb, _, verb_list, captions = values
for i in range(detections.size(0)): # batch
# add a region sort model
det_seqs_recons = np.zeros(det_seqs_all[i].shape)
img_verb_list = np.zeros(verb_list[i].shape)
                for idx in range(len(control_verb[i])):  # loop over the captions of this image
# visual feature
this_seqs_vis = det_seqs_vis[i][idx]
this_seqs_txt = det_seqs_txt[i][idx]
                    this_seqs_pos = det_seqs_pos[i][idx]  # pos holds the region position information
this_seqs_all = det_seqs_all[i][idx]
# semantic role and verb
this_control_verb = control_verb[i][idx] # (max_verb)
this_det_seqs_v = det_seqs_v[i][idx] # (fixed_len, max_verb)
this_det_seqs_sr = det_seqs_sr[i][idx] # (fixed_len, max_sr)
this_verb_list = verb_list[i][idx]
# visual feature concat
this_seqs_perm = torch.cat((this_seqs_vis, this_seqs_txt, this_seqs_pos), -1)
verb_ranks = []
for verb in this_control_verb:
                        # Find the semantic-role sequence associated with this verb.
if verb == 0:
break
verb_det_seqs_sr = this_det_seqs_sr.new_zeros(this_det_seqs_sr.shape[0])
find_sr = 0
sr_find = {}
need_re_rank = set()
for j, vs in enumerate(this_det_seqs_v): # fixed_len
for k, v in enumerate(vs): # max_verb
if verb == v and find_sr < 10:
if int(this_det_seqs_sr[j][k].item()) not in sr_find:
sr_find[int(this_det_seqs_sr[j][k].item())] = []
sr_find[int(this_det_seqs_sr[j][k].item())].append(j)
verb_det_seqs_sr[find_sr] = this_det_seqs_sr[j][k].item()
find_sr += 1
else:
sr_find[int(this_det_seqs_sr[j][k].item())].append(j)
need_re_rank.add(int(this_det_seqs_sr[j][k].item()))
if find_sr == 0:
continue
this_verb = verb.unsqueeze(0).to(device)
verb_det_seqs_sr = verb_det_seqs_sr.unsqueeze(0).to(device)
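                        # The sentence-level SSP predicts the order of the semantic roles for this verb.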
output = re_sort_net.generate(this_verb, verb_det_seqs_sr, mode='not-normal')
sr_rank = {}
if len(need_re_rank) != 0:
for sr in need_re_rank:
this_sr_perm = torch.zeros(sinkhorn_len, this_seqs_perm.shape[1])
tr_locs = torch.ones(sinkhorn_len) * 10
for j, loc in enumerate(sr_find[sr]):
tr_locs[j] = loc
this_sr_perm[j, :] = this_seqs_perm[loc]
tr_matrix = sinkhorn_net(this_sr_perm.unsqueeze(0).to(device))
mx = torch.transpose(tr_matrix, 1, 2).squeeze()
if isinstance(mx, torch.Tensor):
mx = mx.detach().cpu().numpy()
m = munkres.Munkres()
ass = m.compute(munkres.make_cost_matrix(mx))
sr_re = []
for idx_ in range(len(sr_find[sr])):
for a in ass:
if a[0] == idx_:
sr_re.append(a[1])
sr_re = np.array(sr_re)
                                sr_idx = np.argsort(sr_re)  # sr_idx orders the regions within this semantic role
output_idx = np.zeros(len(sr_find[sr]))
for j, idx_ in enumerate(sr_idx):
output_idx[j] = sr_find[sr][idx_]
sr_rank[sr] = output_idx
verb_rank = []
for sr_ in output[0].squeeze().cpu().numpy():
if sr_ == 0:
break
if len(sr_find[sr_]) != 1:
verb_rank += list(sr_rank[sr_])
else:
verb_rank += sr_find[sr_]
verb_ranks.append(verb_rank)
final_rank = []
if len(verb_ranks) == 1:
final_rank = verb_ranks[0]
else:
final_rank = verb_ranks[0]
for j in range(len(verb_ranks) - 1):
final_rank = verb_rank_merge(final_rank, verb_ranks[j+1])
                    # final_rank lists the original region indices in their new order.
perm_matrix = np.zeros((fixed_len, fixed_len))
for j, rk in enumerate(final_rank):
if j < fixed_len:
perm_matrix[j, int(rk)] = 1
perm = np.reshape(this_seqs_all, (this_seqs_all.shape[0], -1)) # fixed_len, -1
recons = np.dot(perm_matrix, perm)
recons = np.reshape(recons, this_seqs_all.shape[0:])
recons = recons[np.sum(recons, (1, 2)) != 0]
last = recons.shape[0] - 1
det_seqs_recons[idx, :recons.shape[0]] = recons
det_seqs_recons[idx, last + 1:] = recons[last:last+1]
# permute the verb_list
perm_mask = (np.sum(perm_matrix, -1) == 0).astype(int)
img_verb_list[idx] = -1 * perm_mask[:, np.newaxis] + np.dot(perm_matrix, this_verb_list)
# detections_i: (1, det_len, feat_dim), det_seqs_recons: (1, fixed_len, max_det, feat_dim)
img_verb_list = torch.tensor(img_verb_list).to(device).squeeze(-1)
detections_i, det_seqs_recons = detections[i].to(device), torch.tensor(det_seqs_recons).float().to(device)
detections_i = detections_i.unsqueeze(0).expand(det_seqs_recons.size(0), detections_i.size(0), detections_i.size(1))
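                # Decode captions with beam search, conditioned on the reordered region sequences and their verbs.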
out, _ = model.beam_search_v((detections_i, det_seqs_recons, img_verb_list),
eos_idxs=[text_field.vocab.stoi['<eos>'], -1], beam_size=5, \
out_size=1, gt=opt.gt)
out = out[0].data.cpu().numpy()
for o, caps in zip(out, captions[i]):
predictions.append(np.expand_dims(o, axis=0))
gt_captions.append(caps)
pbar.update()
# Compute the metric scores
predictions = np.concatenate(predictions, axis=0)
gen = {}
gts = {}
print("Computing accuracy performance.")
for i, cap in enumerate(predictions):
pred_cap = text_field.decode(cap, join_words=False)
pred_cap = ' '.join([k for k, g in itertools.groupby(pred_cap)])
gts[i] = [gt_captions[i]]
gen[i] = [pred_cap]
gts_t = PTBTokenizer.tokenize(gts)
gen_t = PTBTokenizer.tokenize(gen)
val_bleu, _ = Bleu(n=4).compute_score(gts_t, gen_t)
method = ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']
for metric, score in zip(method, val_bleu):
print(metric, score)
val_meteor, _ = Meteor().compute_score(gts_t, gen_t)
print('METEOR', val_meteor)
val_rouge, _ = Rouge().compute_score(gts_t, gen_t)
print('ROUGE_L', val_rouge)
val_cider, _ = Cider().compute_score(gts_t, gen_t)
print('CIDEr', val_cider)
val_spice, _ = Spice().compute_score(gts_t, gen_t)
print('SPICE', val_spice)
| 1.960938 | 2 |