Dataset schema (each row below is pipe-separated in this column order; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
47bbb88fe7ad9a14195f7bde44006fac967ad0e2 | 2,044 | py | Python | python/datadb2/core/svc/build_db2_url.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
]
| null | null | null | python/datadb2/core/svc/build_db2_url.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
]
| null | null | null | python/datadb2/core/svc/build_db2_url.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from base import BaseObject
from base import CryptoBase
from base import FileIO
from datadb2.core.dmo import BaseDB2Client
class BuildDb2Url(BaseObject):
""" Create a DB2 connection """
__config_path = 'resources/config/db2/schemas.yml'
def __init__(self,
is_debug: bool = False):
"""
Created:
9-Oct-2019
[email protected]
* https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1080
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
self._config = FileIO.file_to_yaml_by_relative_path(self.__config_path)
@staticmethod
def _values(d_config: dict):
username = CryptoBase.decrypt_str(os.environ[d_config['username'][1:]])
password = CryptoBase.decrypt_str(os.environ[d_config['password'][1:]])
return {
'host': d_config['host'].strip(),
'database': d_config['database'].strip(),
'port': d_config['port'],
'username': username.strip(),
'password': password.strip()}
@staticmethod
def _connect(d_config: dict) -> BaseDB2Client:
return BaseDB2Client(some_database_name=d_config['database'],
some_hostname=d_config['host'],
some_port=d_config['port'],
some_username=d_config['username'],
some_password=d_config['password'])
def wft_dev(self) -> BaseDB2Client:
"""
Purpose:
Connect to DB2 WFT DEV
:return:
"""
return self._connect(self._values(self._config['wft_dev']))
def cendant(self) -> BaseDB2Client:
"""
:return:
"""
return self._connect(self._values(self._config['cendant']))
if __name__ == "__main__":
# BuildDb2Url().wft_dev()
# BuildDb2Url().wft_prod()
BuildDb2Url().cendant()
| 28.788732 | 81 | 0.581703 | 1,741 | 0.851761 | 0 | 0 | 828 | 0.405088 | 0 | 0 | 624 | 0.305284 |
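A note on the credential pattern in `_values` above: the YAML config stores, for `username` and `password`, a marker character followed by the name of an environment variable whose value holds an encrypted secret. A minimal self-contained sketch of that lookup, with a stand-in for `CryptoBase.decrypt_str` (the real implementation is not shown in this file):

import os

def decrypt_str(ciphertext):
    # stand-in for CryptoBase.decrypt_str; here it just reverses the string
    return ciphertext[::-1]

def resolve_credential(config_value):
    # config stores e.g. "$DB2_USER"; drop the leading marker and use the
    # remainder as the env-var name holding the encrypted value
    env_name = config_value[1:]
    return decrypt_str(os.environ[env_name]).strip()

os.environ["DB2_USER"] = "nimda"        # "encrypted" form, for the demo only
print(resolve_credential("$DB2_USER"))  # -> admin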
47bbc3f593c6dfe99cc6291d9534d485f7b0f42d | 3,462 | py | Python | nussl/transformers/transformer_deep_clustering.py | KingStorm/nussl | 78edfdaad16845fc705cefb336a7e6e5923fbcd4 | [
"MIT"
]
| 1 | 2018-10-22T19:30:45.000Z | 2018-10-22T19:30:45.000Z | dataHelper/nussl/transformers/transformer_deep_clustering.py | AleXander-Tsui/Audio-Localization-and-Seperation | 17d40e72b406d62ca5cb695938b50c6412f9524a | [
"MIT"
]
| null | null | null | dataHelper/nussl/transformers/transformer_deep_clustering.py | AleXander-Tsui/Audio-Localization-and-Seperation | 17d40e72b406d62ca5cb695938b50c6412f9524a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Deep Clustering modeller class
"""
from .. import torch_imported
if torch_imported:
import torch
import torch.nn as nn
import numpy as np
class TransformerDeepClustering(nn.Module):
"""
Transformer Class for deep clustering
"""
def __init__(self, hidden_size=300, input_size=150, num_layers=2, embedding_size=20):
if not torch_imported:
raise ImportError('Cannot import pytorch! Install pytorch to continue.')
super(TransformerDeepClustering, self).__init__()
self.hidden_size = hidden_size
self.input_size = input_size
self.embedding_size = embedding_size
self.num_layers = num_layers
rnn = nn.LSTM(self.input_size, self.hidden_size, self.num_layers, bidirectional=True,
batch_first=True, dropout=0.5)
linear = nn.Linear(self.hidden_size*2, self.input_size*self.embedding_size)
self.add_module('rnn', rnn)
self.add_module('linear', linear)
def forward(self, input_data):
"""
Forward training
Args:
input_data:
Returns:
"""
sequence_length = input_data.size(1)
num_frequencies = input_data.size(2)
output, hidden = self.rnn(input_data)
output = output.contiguous()
output = output.view(-1, sequence_length, 2*self.hidden_size)
embedding = self.linear(output)
embedding = embedding.view(-1, sequence_length*num_frequencies, self.embedding_size)
embedding = nn.functional.normalize(embedding, p=2, dim=-1)
return embedding
@staticmethod
def affinity_cost(embedding, assignments):
"""
Function defining the affinity cost for deep clustering
Args:
embedding:
assignments:
Returns:
"""
embedding = embedding.view(-1, embedding.size()[-1])
assignments = assignments.view(-1, assignments.size()[-1])
silence_mask = torch.sum(assignments, dim=-1, keepdim=True)
embedding = silence_mask * embedding
embedding_transpose = embedding.transpose(1, 0)
assignments_transpose = assignments.transpose(1, 0)
class_weights = nn.functional.normalize(torch.sum(assignments, dim=-2),
p=1, dim=-1).unsqueeze(0)
class_weights = 1.0 / (torch.sqrt(class_weights) + 1e-7)
weights = torch.mm(assignments, class_weights.transpose(1, 0))
assignments = assignments * weights.repeat(1, assignments.size()[-1])
embedding = embedding * weights.repeat(1, embedding.size()[-1])
loss_est = torch.norm(torch.mm(embedding_transpose, embedding), p=2)
loss_est_true = 2*torch.norm(torch.mm(embedding_transpose, assignments), p=2)
loss_true = torch.norm(torch.mm(assignments_transpose, assignments), p=2)
loss = loss_est - loss_est_true + loss_true
loss = loss / (loss_est + loss_true)
return loss
@staticmethod
def show_model(model):
"""
Prints a message to the console with model info
Args:
model:
Returns:
"""
print(model)
num_parameters = 0
for p in model.parameters():
if p.requires_grad:
num_parameters += np.cumprod(p.size())[-1]
print('Number of parameters: {}'.format(num_parameters))
| 33.941176 | 94 | 0.625361 | 3,261 | 0.941941 | 0 | 0 | 1,803 | 0.520797 | 0 | 0 | 607 | 0.175332 |
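The `affinity_cost` above compares the embedding self-affinity against the ground-truth assignment affinity. A toy NumPy rendering of the core computation (omitting the silence mask and class weighting, which the real method applies first):

import numpy as np

rng = np.random.default_rng(0)
V = rng.standard_normal((6, 4))                  # 6 TF bins, embedding size 4
V /= np.linalg.norm(V, axis=-1, keepdims=True)   # unit-normalised, as in forward()
Y = np.eye(2)[rng.integers(0, 2, size=6)]        # one-hot assignments, 2 sources

fro = lambda M: np.linalg.norm(M, ord='fro')
loss_est = fro(V.T @ V)
loss_est_true = 2 * fro(V.T @ Y)
loss_true = fro(Y.T @ Y)
print((loss_est - loss_est_true + loss_true) / (loss_est + loss_true))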
47bcdf89bfb403747fce6b37d8765b1f6f980172 | 431 | py | Python | ex067 - Tabuada v3.0.py | marvincosmo/Python-Curso-em-Video | 47ee3dd6423835e7bca159ffd7ee796423569176 | [
"MIT"
]
| null | null | null | ex067 - Tabuada v3.0.py | marvincosmo/Python-Curso-em-Video | 47ee3dd6423835e7bca159ffd7ee796423569176 | [
"MIT"
]
| null | null | null | ex067 - Tabuada v3.0.py | marvincosmo/Python-Curso-em-Video | 47ee3dd6423835e7bca159ffd7ee796423569176 | [
"MIT"
]
| null | null | null | """ 67 - Faça um programa que mostre a tabuada de vários números, um de cada vez, para cada valor digitado pelo
usuário. O programa será interrompido quando o número solicitado for negativo. """
while True:
    n = int(input('Enter a number to see its multiplication table: '))
if n < 0:
break
print('-' * 13)
for m in range(1, 11):
print(f'{n} x {m} = {n*m}')
print('-' * 13)
print('Program finished.') | 33.153846 | 111 | 0.62645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.6621 |
| 33.153846 | 111 | 0.62645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.6621 |
47be1f989acf928be71983840ea1023cdafbcb67 | 1,569 | py | Python | Gallery/views.py | munganyendesandrine/GalleryApp | cb17eca8b814f212c1b78925d957b40380830f9b | [
"Unlicense",
"MIT"
]
| null | null | null | Gallery/views.py | munganyendesandrine/GalleryApp | cb17eca8b814f212c1b78925d957b40380830f9b | [
"Unlicense",
"MIT"
]
| null | null | null | Gallery/views.py | munganyendesandrine/GalleryApp | cb17eca8b814f212c1b78925d957b40380830f9b | [
"Unlicense",
"MIT"
]
| null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from .models import Image,Category,Location
def gallery_today(request):
gallery = Image.objects.all()
return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery})
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
searched_images = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'all-galleries/search.html',{"message":message,"images": searched_images})
else:
message = "You haven't searched for any term"
return render(request, 'all-galleries/search.html',{"message":message})
def filter_results(request):
if 'location' in request.GET and request.GET["location"]:
filter_term = request.GET.get("location")
filtered_images = Image.filter_by_location(filter_term)
message = f"{filter_term}"
return render(request, 'all-galleries/filter.html',{"message":message,"images": filtered_images})
else:
message = "You haven't filtered for any term"
return render(request, 'all-galleries/filter.html',{"message":message})
# def delete_image(request, pk):
# gallery = get_object_or_404(Image, pk=pk)  # note: get_object_or_404 and redirect would need importing
# if request.method == 'POST':
# gallery.delete()
# return redirect('/')
# return render(request, 'all-galleries/today-gallery.html', {"gallery": gallery}) | 34.108696 | 105 | 0.662843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 663 | 0.422562 |
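The views above call `Image.search_by_category` and `Image.filter_by_location`, which live in `Gallery/models.py` and are not shown in this row. A plausible sketch of such classmethods, assuming hypothetical `category`/`location` foreign keys that each expose a `name` field:

from django.db import models

class Image(models.Model):
    # ... image fields elided ...
    category = models.ForeignKey('Category', on_delete=models.CASCADE)
    location = models.ForeignKey('Location', on_delete=models.CASCADE)

    @classmethod
    def search_by_category(cls, search_term):
        return cls.objects.filter(category__name__icontains=search_term)

    @classmethod
    def filter_by_location(cls, filter_term):
        return cls.objects.filter(location__name__icontains=filter_term)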
47bf6e3c9c36dabf9fe1d3cb252c2d9d2f56f9af | 843 | py | Python | tests/tests_query_operations/table_models.py | Robinson04/StructNoSQL | 335c63593025582336bb67ad0b0ed39d30800b74 | [
"MIT"
]
| 3 | 2020-10-30T23:31:26.000Z | 2022-03-30T21:48:40.000Z | tests/tests_query_operations/table_models.py | Robinson04/StructNoSQL | 335c63593025582336bb67ad0b0ed39d30800b74 | [
"MIT"
]
| 42 | 2020-09-16T15:23:11.000Z | 2021-09-20T13:00:50.000Z | tests/tests_query_operations/table_models.py | Robinson04/StructNoSQL | 335c63593025582336bb67ad0b0ed39d30800b74 | [
"MIT"
]
| 2 | 2021-01-03T21:37:22.000Z | 2021-08-12T20:28:52.000Z | from typing import Dict
from StructNoSQL import TableDataModel, BaseField, MapModel
class BaseTableModel(TableDataModel):
type = BaseField(field_type=str, required=False)
fieldOne = BaseField(field_type=str, required=False)
fieldTwo = BaseField(field_type=str, required=False)
class ContainerModel(MapModel):
fieldOne = BaseField(field_type=str, required=False)
fieldTwo = BaseField(field_type=str, required=False)
fieldThree = BaseField(field_type=str, required=False)
container = BaseField(field_type=Dict[str, ContainerModel], key_name='containerKey', required=False)
class DynamoDBTableModel(BaseTableModel):
accountId = BaseField(field_type=str, required=True)
class ExternalDynamoDBApiTableModel(BaseTableModel):
accountProjectTableKeyId = BaseField(field_type=str, required=True)
| 42.15 | 104 | 0.778173 | 752 | 0.892052 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.016607 |
47c00f796dfbf64fa498d1661e0430227f50240a | 3,301 | py | Python | __init__.py | mchorse/io_export_bobj | 2de7a55c59a5e4ece5ae047cceaa16da94272685 | [
"CNRI-Python"
]
| 2 | 2021-10-04T17:03:20.000Z | 2021-12-07T20:20:49.000Z | __init__.py | mchorse/io_export_bobj | 2de7a55c59a5e4ece5ae047cceaa16da94272685 | [
"CNRI-Python"
]
| null | null | null | __init__.py | mchorse/io_export_bobj | 2de7a55c59a5e4ece5ae047cceaa16da94272685 | [
"CNRI-Python"
]
| null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "Blockbuster extended OBJ format",
"author": "Campbell Barton, Bastien Montagne, McHorse",
"version": (0, 1, 0),
"blender": (2, 77, 0),
"location": "File > Export",
"description": "Export Blockbuster OBJ models (meshes, armatures and keyframes)",
"warning": "",
"category": "Export"
}
import bpy
from bpy.props import (BoolProperty, FloatProperty, StringProperty, EnumProperty)
from bpy_extras.io_utils import (ExportHelper, orientation_helper_factory, path_reference_mode, axis_conversion)
IOOBJOrientationHelper = orientation_helper_factory("IOOBJOrientationHelper", axis_forward='Z', axis_up='Y')
# Export panel
class ExportOBJ(bpy.types.Operator, ExportHelper, IOOBJOrientationHelper):
# Panel's information
bl_idname = "export_scene.bobj"
bl_label = 'Export Blockbuster OBJ'
bl_options = {'PRESET'}
# Panel's properties
filename_ext = ".bobj"
filter_glob = StringProperty(default="*.bobj", options={'HIDDEN'})
use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)
include_geometry = BoolProperty(name="Export geometry", description="Include meshes in the model's file", default=True)
include_keyframes = BoolProperty(name="Export keyframes", description="Include actions in the model's file", default=True)
keep_vertex_order = BoolProperty(name="Keep Vertex Order", description="", default=False)
path_mode = path_reference_mode
check_extension = True
def execute(self, context):
from . import export_bobj
from mathutils import Matrix
keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "check_existing", "filter_glob", "path_mode"))
keywords["global_matrix"] = Matrix.Scale(1, 4) * axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
return export_bobj.save(context, **keywords)
# Register and stuff
def menu_func_export(self, context):
self.layout.operator(ExportOBJ.bl_idname, text="Blockbuster OBJ (.bobj)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if "bpy" in locals():
import importlib
if "export_bobj" in locals():
importlib.reload(export_bobj)
if __name__ == "__main__":
register() | 39.771084 | 131 | 0.723417 | 1,276 | 0.38655 | 0 | 0 | 0 | 0 | 0 | 0 | 1,524 | 0.461678 |
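Once the add-on is enabled, the operator registered above is reachable through its `bl_idname` from Blender's Python console (this assumes a 2.77-era Blender session with the add-on installed):

import bpy
# invoke the exporter directly; properties mirror those declared on ExportOBJ
bpy.ops.export_scene.bobj(filepath="/tmp/model.bobj", use_selection=True)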
47c2fc4cc67997a7602b32b94d673235ee2e4478 | 1,303 | py | Python | dashboard/migrations/0010_auto_20191214_1611.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
]
| null | null | null | dashboard/migrations/0010_auto_20191214_1611.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
]
| null | null | null | dashboard/migrations/0010_auto_20191214_1611.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
]
| null | null | null | # Generated by Django 2.2.5 on 2019-12-14 15:11
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0009_auto_20191211_2107'),
]
operations = [
migrations.RemoveField(
model_name='subject',
name='body_location',
),
migrations.AddField(
model_name='csvdata',
name='body_location',
field=models.CharField(choices=[('L', 'Left wrist'), ('R', 'Right wrist'), ('O', 'Other')], default='L', max_length=1, verbose_name='body location'),
),
migrations.AddField(
model_name='subject',
name='creation_date',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='creation date'),
preserve_default=False,
),
migrations.AlterField(
model_name='csvdata',
name='description',
field=models.CharField(blank=True, max_length=255, verbose_name='description'),
),
migrations.AlterField(
model_name='subject',
name='code',
field=models.CharField(max_length=50, unique=True, verbose_name='subject code'),
),
]
| 32.575 | 161 | 0.593246 | 1,181 | 0.90637 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.224866 |
47c5b5de088c43f83c5e3e066561ed05afd513fb | 1,666 | py | Python | select_language.py | zhangenter/tetris | 300c668d9732cd037bfc6f47c289bd5ee4a009b2 | [
"Apache-2.0"
]
| 3 | 2019-05-08T14:49:10.000Z | 2021-01-20T13:22:45.000Z | select_language.py | zhangenter/tetris | 300c668d9732cd037bfc6f47c289bd5ee4a009b2 | [
"Apache-2.0"
]
| null | null | null | select_language.py | zhangenter/tetris | 300c668d9732cd037bfc6f47c289bd5ee4a009b2 | [
"Apache-2.0"
]
| 2 | 2020-01-28T14:37:06.000Z | 2020-04-03T13:37:14.000Z | # -*- coding=utf-8 -*-
import pygame
from bf_form import BFForm
from bf_button import BFButton
from globals import LanguageConfigParser, LanguageLib
class SelectLanguageForm(BFForm):
def __init__(self, screen, after_close):
super(SelectLanguageForm, self).__init__(screen, after_close)
def select_language(self, btn):
lang_conf_parser = LanguageConfigParser()
lang_conf_parser.set_cut_language(btn.tag)
lang_conf_parser.save()
LanguageLib.instance().reload_language()
self.result = 1
if self.after_close: self.after_close(self)
def prepare(self):
lang_conf_parser = LanguageConfigParser()
supports = lang_conf_parser.get_support_names()
num = len(supports)
parent_width, parent_height = self.screen.get_size()
self.desc = LanguageLib.instance().get_text('please select language')
self.width = 400
btn_width = self.width * 0.6
btn_height = 40
btn_top = 20
btn_space = 20
self.height = btn_top + 30 + num * btn_height + (num - 1) * btn_space + 30 + self.footer_height
btn_left = (self.width - btn_width) / 2 + (parent_width-self.width) / 2
btn_y = btn_top + 30 + (parent_height - self.height)/2
for k in supports:
label = lang_conf_parser.get_support_label(k)
btn = BFButton(self.screen, (btn_left, btn_y, btn_width, btn_height), text=label.decode('utf-8'), click=self.select_language)
btn.tag = k
self.btn_group.add_button(btn)
btn_y += btn_height + btn_space
self.add_cancel_btn(parent_width, parent_height)
| 35.446809 | 137 | 0.660264 | 1,512 | 0.907563 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.031813 |
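`SelectLanguageForm` relies on a `LanguageConfigParser` (from `globals`) exposing `get_support_names`, `get_support_label`, `set_cut_language` and `save`. A rough Python 3 sketch of what such a wrapper could look like, under a hypothetical INI layout (the original project is Python 2, note the `.decode('utf-8')` above):

import configparser

class LanguageConfigParser(object):
    # hypothetical layout: [support] maps code -> label, [current] holds the choice
    def __init__(self, path='language.ini'):
        self.path = path
        self.cp = configparser.ConfigParser()
        self.cp.read(path)

    def get_support_names(self):
        return list(self.cp['support'].keys())

    def get_support_label(self, name):
        return self.cp['support'][name]

    def set_cut_language(self, name):
        self.cp['current']['language'] = name

    def save(self):
        with open(self.path, 'w') as f:
            self.cp.write(f)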
47c64ebe5bf8c8e7b695f55fd8ecece7fcce4585 | 3,084 | py | Python | SpellingCorrection/SpellingCorrection.py | kxu776/Natural-Langauge-Processing | 61c863e6cccf6d745b7bfc630a803dcec89214a1 | [
"MIT"
]
| null | null | null | SpellingCorrection/SpellingCorrection.py | kxu776/Natural-Langauge-Processing | 61c863e6cccf6d745b7bfc630a803dcec89214a1 | [
"MIT"
]
| null | null | null | SpellingCorrection/SpellingCorrection.py | kxu776/Natural-Langauge-Processing | 61c863e6cccf6d745b7bfc630a803dcec89214a1 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 14:09:44 2018
@author: VeNoMzZxHD
"""
import tkinter
from tkinter.filedialog import askopenfilename
from collections import Counter
import re
import string
#Returns string of text file
def readFile():
'''
tkinter.Tk().withdraw()
inputfilename = askopenfilename()
'''
inputfilename = 'big.txt'
with open(inputfilename) as inputfile:
return inputfile.read()
#Returns Counter dictionary containing words and their number of occurences within the input file
def countWords(text):
words = re.findall(r'\w+', text.lower())
return Counter(words)
def P(word):
total = sum(countDict.values())
probability = (countDict[word]/total)
#print("The probability of '" + word + "' occuring: " + str(probability))
return probability
#Returns list of possible permutations of removing a single char from input word.
def removeLetter(word):
permList = []
for i in range(len(word)):
permList.append(word[:i]+word[i+1:])
return permList
def insertLetter(word):
permList = []
for i in range(len(word)+1):
for letter in string.ascii_lowercase:
permList.append(word[:i] + letter + word[i:])
return permList
def replaceLetter(word):
permList = []
for i in range(len(word)):
for letter in string.ascii_lowercase:
permList.append(word[:i] + letter + word[i+1:])
return permList
def swapLetters(word):
permList = []
for i in range(len(word)):
for x in range(i,len(word)):
modWord = bytearray(word, 'utf8')
tempChar = modWord[i]
modWord[i] = modWord[x]
modWord[x] = tempChar
permList.append(modWord.decode('utf8'))
return list(set(permList))
def oneCharEdits(word):
permutations = []
permutations.extend(removeLetter(word))
permutations.extend(insertLetter(word))
permutations.extend(replaceLetter(word))
permutations.extend(swapLetters(word))
return list(set(permutations))
def twoCharEdits(word):
validWords = []
oneCharPerms = oneCharEdits(word)
twoCharPerms = oneCharPerms.copy()
for permWord in oneCharPerms:
twoCharPerms.extend(oneCharEdits(permWord))
twoCharPerms = list(set(twoCharPerms))
for permWord in twoCharPerms:
if isAWord(permWord):
validWords.append(permWord)
return validWords
def isAWord(word):
return True if word in countDict else False
def findCorrection(word):
if isAWord(word):
return word
candidates = twoCharEdits(word)
candidates = {key: P(key) for key in candidates}
try:
return max(candidates, key=candidates.get)
except ValueError:
return ""
countDict = countWords(readFile())
while True:
inword = input("Type quit to exit, or input word: \n")
if inword.lower() == 'quit':
break
correction = findCorrection(inword)
if correction=="":
print("No correction found")
else:
print(correction) | 28.293578 | 97 | 0.648508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 539 | 0.174773 |
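The edit generators above are easy to sanity-check by appending a few lines to the script (counts include duplicates such as replacing a letter with itself):

print(removeLetter('cat'))        # ['at', 'ct', 'ca']
print(len(insertLetter('cat')))   # 4 positions * 26 letters = 104
print(len(replaceLetter('cat')))  # 3 positions * 26 letters = 78
print(sorted(swapLetters('ab')))  # ['ab', 'ba']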
47c7ce2b3e6297aeb01c2a6dd339609f2dbc4c40 | 9,826 | py | Python | src/derivation/FactNode.py | KDahlgren/orik | 4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f | [
"MIT"
]
| 2 | 2018-01-23T22:08:32.000Z | 2018-03-11T18:32:53.000Z | src/derivation/FactNode.py | KDahlgren/orik | 4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f | [
"MIT"
]
| 4 | 2017-10-24T19:13:40.000Z | 2018-06-05T22:16:45.000Z | src/derivation/FactNode.py | KDahlgren/orik | 4e66107cf2dc2cd1a30ba4bfbe15c1ad1c176c0f | [
"MIT"
]
| 2 | 2017-10-24T18:55:45.000Z | 2018-01-26T05:11:38.000Z | #!/usr/bin/env python
# **************************************** #
#############
# IMPORTS #
#############
# standard python packages
import ConfigParser, copy, inspect, logging, os, sys
from Node import Node
if not os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../../../lib/iapyx/src" ) )
from utils import tools
# **************************************** #
class FactNode( Node ) :
#####################
# SPECIAL ATTRIBS #
#####################
treeType = "fact"
#################
# CONSTRUCTOR #
#################
def __init__( self, name="DEFAULT", isNeg=None, record=[], parsedResults={}, cursor=None, argDict = {} ) :
logging.debug( ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" )
logging.debug( "in FactNode.FactNode : " + name )
logging.debug( " name = " + name )
logging.debug( " isNeg = " + str( isNeg ) )
logging.debug( " record = " + str( record ) )
logging.debug( ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" )
self.argDict = {}
self.argDict = argDict
self.name = name
self.isNeg = isNeg
self.record = record
self.parsedResults = parsedResults
self.cursor = cursor
self.interesting = False
# -------------------------------- #
# grab settings configs
self.num_filtering_configs = 0
# +++++++++++++++ #
# TREE SIMPLIFY #
# +++++++++++++++ #
try :
self.TREE_SIMPLIFY = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"TREE_SIMPLIFY", \
bool )
except ConfigParser.NoOptionError :
self.TREE_SIMPLIFY = False
logging.warning( "WARNING : no 'TREE_SIMPLIFY' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with TREE_SIMPLIFY==False." )
logging.debug( " FACT NODE : using TREE_SIMPLIFY = " + str( self.TREE_SIMPLIFY ) )
# +++++++++++++ #
# CLOCKS_ONLY #
# +++++++++++++ #
try :
self.CLOCKS_ONLY = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"CLOCKS_ONLY", \
bool )
if self.CLOCKS_ONLY :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.CLOCKS_ONLY = False
logging.warning( "WARNING : no 'CLOCKS_ONLY' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with CLOCKS_ONLY==False." )
# ++++++++++++++++ #
# POS_FACTS_ONLY #
# ++++++++++++++++ #
try :
self.POS_FACTS_ONLY = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"POS_FACTS_ONLY", \
bool )
if self.POS_FACTS_ONLY :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.POS_FACTS_ONLY = False
logging.warning( "WARNING : no 'POS_FACTS_ONLY' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with POS_FACTS_ONLY==False." )
# ++++++++++++++++++++ #
# EXCLUDE_SELF_COMMS #
# ++++++++++++++++++++ #
try :
self.EXCLUDE_SELF_COMMS = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"EXCLUDE_SELF_COMMS", \
bool )
if self.EXCLUDE_SELF_COMMS :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.EXCLUDE_SELF_COMMS = False
logging.warning( "WARNING : no 'EXCLUDE_SELF_COMMS' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with EXCLUDE_SELF_COMMS==False." )
# ++++++++++++++++++++++ #
# EXCLUDE_NODE_CRASHES #
# ++++++++++++++++++++++ #
try :
self.EXCLUDE_NODE_CRASHES = tools.getConfig( self.argDict[ "settings" ], \
"DEFAULT", \
"EXCLUDE_NODE_CRASHES", \
bool )
if self.EXCLUDE_NODE_CRASHES :
self.num_filtering_configs += 1
except ConfigParser.NoOptionError :
self.EXCLUDE_NODE_CRASHES = False
logging.warning( "WARNING : no 'EXCLUDE_NODE_CRASHES' defined in 'DEFAULT' section of " + \
self.argDict[ "settings" ] + "...running with EXCLUDE_NODE_CRASHES==False." )
# -------------------------------- #
# make sure this is actually a
# fact.
if not self.is_fact() :
tools.bp( __name__, inspect.stack()[0][3], " FATAL ERROR : relation '" + self.name + "' does not reference a fact. aborting." )
# -------------------------------- #
# determine whether this fact is interesting
self.am_i_interesting()
# -------------------------------- #
# initialize node object
Node.__init__( self, self.treeType, \
self.name, \
self.isNeg, \
self.record, \
self.parsedResults, \
self.cursor )
#############
# __STR__ #
#############
# the string representation of a FactNode
def __str__( self ) :
if self.isNeg :
negStr = "_NOT_"
return "fact->" + negStr + self.name + "(" + str(self.record) + ")"
else :
return "fact->" + self.name + "(" + str(self.record) + ")"
######################
# AM I INTERESTING #
######################
# check if this fact is interesting
# using heuristics
def am_i_interesting( self ) :
flag = 0
if self.CLOCKS_ONLY and self.name.startswith( "clock" ) :
flag += 1
if self.POS_FACTS_ONLY and not self.isNeg :
flag += 1
if self.EXCLUDE_SELF_COMMS and not self.is_self_comm() :
flag += 1
if self.EXCLUDE_NODE_CRASHES and not self.is_node_crash() :
flag += 1
logging.debug( " AM I INTERESTING : flag = " + str( flag ) )
logging.debug( " AM I INTERESTING : self.num_filtering_configs = " + str( self.num_filtering_configs ) )
logging.debug( " AM I INTERESTING : flag == self.num_filtering_configs = " + str( flag == self.num_filtering_configs ) )
if flag >= self.num_filtering_configs :
self.interesting = True
logging.debug( " AM I INTERESTING : self.name = " + self.name )
logging.debug( " AM I INTERESTING : conclusion : " + str( self.interesting ) )
##################
# IS SELF COMM #
##################
def is_self_comm( self ) :
if not self.name == "clock" :
return False
else :
if self.record[ 0 ] == self.record[ 1 ] :
return True
else :
return False
###################
# IS NODE CRASH #
###################
def is_node_crash( self ) :
if not self.name == "clock" :
return False
else :
if self.record[ 1 ] == "_" :
return True
else :
return False
#############
# IS FACT #
#############
# make sure this is actually a fact in the database.
def is_fact( self ) :
if self.name == "clock" or self.name == "next_clock" or self.name == "crash" :
return True
self.cursor.execute( "SELECT fid \
FROM Fact \
WHERE name=='" + self.name + "'" )
fid_list = self.cursor.fetchall()
fid_list = tools.toAscii_list( fid_list )
logging.debug( " IS FACT : fid_list = " + str( fid_list ) )
# if this is a negative fact, just make sure the relation exists
if self.isNeg :
if len( fid_list ) > 0 :
return True
else :
return False
else :
for fid in fid_list :
self.cursor.execute( "SELECT dataID,data,dataType \
FROM FactData \
WHERE fid=='" + fid + "'" )
data_list = self.cursor.fetchall()
data_list = tools.toAscii_multiList( data_list )
fact = []
for d in data_list :
data = d[1]
dataType = d[2]
if dataType == "int" :
fact.append( data )
else :
data = data.replace( "'", "" )
data = data.replace( '"', '' )
fact.append( data )
logging.debug( "fact = " + str( fact ) )
logging.debug( "self.record = " + str( self.record ) )
logging.debug( "fact == self.record is " + str( fact == self.record ) )
#if fact == self.record : # does not handle wildcards
if self.is_match( fact ) :
return True
return False # otherwise, return false
##############
# IS MATCH #
##############
# check if the input fact 'matches' the record for this fact node.
def is_match( self, fact ) :
for i in range( 0, len( self.record ) ) :
fact_datum = fact[ i ]
record_datum = self.record[ i ]
# remove any quotes
if record_datum.startswith( "'" ) and record_datum.endswith( "'" ) :
record_datum = record_datum.replace( "'", "" )
elif record_datum.startswith( '"' ) and record_datum.endswith( '"' ) :
record_datum = record_datum.replace( '"', "" )
if record_datum == "_" :
pass
else :
if not fact_datum == record_datum :
return False
return True
#########
# EOF #
#########
| 32.006515 | 134 | 0.479849 | 9,353 | 0.951862 | 0 | 0 | 0 | 0 | 0 | 0 | 3,309 | 0.33676 |
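The wildcard matching in `FactNode.is_match` can be exercised in isolation; a stand-alone sketch of the same rule ('_' in the record matches anything, and quoted record datums are unquoted first):

def record_matches(record, fact):
    for rec, datum in zip(record, fact):
        if len(rec) > 1 and rec[0] == rec[-1] and rec[0] in ("'", '"'):
            rec = rec[1:-1]              # unquote the record datum
        if rec != '_' and rec != datum:  # '_' is a wildcard
            return False
    return True

print(record_matches(['a', '_', "'3'"], ['a', 'b', '3']))  # True
print(record_matches(['a', '_', '3'], ['a', 'b', '4']))    # False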
47c7ee324c762d85e146cce680e1d27dab07ca7e | 219 | py | Python | freezegame/ladder.py | mattfister/pybacon | c864e5f5c872f92b3c694f0ef83feb0f20f93193 | [
"MIT"
]
| 2 | 2017-02-06T14:49:48.000Z | 2021-03-20T08:19:01.000Z | freezegame/ladder.py | mattfister/pybacon | c864e5f5c872f92b3c694f0ef83feb0f20f93193 | [
"MIT"
]
| null | null | null | freezegame/ladder.py | mattfister/pybacon | c864e5f5c872f92b3c694f0ef83feb0f20f93193 | [
"MIT"
]
| 2 | 2017-11-04T10:13:59.000Z | 2020-04-24T05:15:33.000Z | from freezegame.sprite import Sprite
class Ladder(Sprite):
def __init__(self, x, y, state):
Sprite.__init__(self, x, y, [0, 0, 32, 32], state, 'tileSet', [0, 160, 32, 32], state.batch, state.ladder_group)
| 31.285714 | 120 | 0.657534 | 179 | 0.817352 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.041096 |
47ca2154dad4d9f3a8ceb261cf0f46981b5b61af | 2,656 | py | Python | sector/models.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
]
| 1 | 2019-01-18T03:50:46.000Z | 2019-01-18T03:50:46.000Z | sector/models.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
]
| 50 | 2018-01-24T18:04:08.000Z | 2019-01-03T03:30:30.000Z | sector/models.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
]
| 2 | 2018-02-12T15:20:52.000Z | 2019-01-18T03:51:52.000Z | from django.db import models
from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.core.blocks import StructBlock, CharBlock
from wagtail.core.fields import StreamField
from wagtail.core.models import Page
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtailmarkdown.blocks import MarkdownBlock
from invest.blocks.location import LocationAccordionItemBlock
from invest.blocks.markdown import MarkdownAccordionItemBlock
class SectorLandingPage(Page):
subpage_types = ['sector.sectorPage']
# page fields
heading = models.CharField(max_length=255)
hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
content_panels = Page.content_panels + [
FieldPanel('heading'),
ImageChooserPanel('hero_image'),
]
def get_context(self, request):
context = super().get_context(request)
sector_cards = self.get_descendants().type(SectorPage) \
.live() \
.order_by('sectorpage__heading')
context['sector_cards'] = sector_cards
return context
class SectorPage(Page):
# Related sector are implemented as subpages
subpage_types = ['sector.sectorPage']
featured = models.BooleanField(default=False)
description = models.TextField() # appears in card on external pages
# page fields
heading = models.CharField(max_length=255)
hero_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
pullout = StreamField([
('content', StructBlock([
('text', MarkdownBlock()),
('stat', CharBlock()),
        ('stat_text', CharBlock())
    ], max_num=1, min_num=0))
], blank=True)
# accordion
subsections = StreamField([
('markdown', MarkdownAccordionItemBlock()),
('location', LocationAccordionItemBlock()),
])
content_panels = Page.content_panels + [
FieldPanel('description'),
FieldPanel('featured'),
ImageChooserPanel('hero_image'),
FieldPanel('heading'),
StreamFieldPanel('pullout'),
StreamFieldPanel('subsections')
]
def get_context(self, request):
context = super().get_context(request)
context['sector_cards'] = self.get_children().type(SectorPage) \
.live() \
.order_by('sectorpage__heading')
# pages will return as Page type, use .specific to get sectorPage
return context
| 29.511111 | 73 | 0.657756 | 2,182 | 0.821536 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.179217 |
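Given the `get_context` methods above, the same page-tree queries work from a shell session; a hypothetical example using Wagtail's queryset API:

# python manage.py shell, against these models
landing = SectorLandingPage.objects.live().first()
cards = landing.get_descendants().type(SectorPage).live().specific()
for page in cards:
    print(page.heading, page.featured)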
47ca616f814b4735648b4fa4271fc547d28d5fca | 44 | py | Python | serve.py | xsblanket/sweetie | cce71db39961fa017f888afef756f3522f549716 | [
"MIT"
]
| null | null | null | serve.py | xsblanket/sweetie | cce71db39961fa017f888afef756f3522f549716 | [
"MIT"
]
| 2 | 2021-03-16T10:28:33.000Z | 2021-03-17T09:11:37.000Z | serve.py | xsblanket/sweetie | cce71db39961fa017f888afef756f3522f549716 | [
"MIT"
]
| 1 | 2021-03-16T10:03:19.000Z | 2021-03-16T10:03:19.000Z | from utility.sweetie import serve
serve() | 14.666667 | 34 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
47ca87bbbe5378196163b9f006e09077555d7b34 | 985 | py | Python | output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/id/schema_instance/nistschema_sv_iv_atomic_id_enumeration_5_xsd/nistschema_sv_iv_atomic_id_enumeration_5.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
]
| null | null | null | from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
class NistschemaSvIvAtomicIdEnumeration5Type(Enum):
BA = "ba"
CA = "ca"
EFOR = "efor"
HREGISTRY_AS_ON_WORK_U = "hregistry.as.on-work.u"
ITS_INCLUD = "_its-includ"
@dataclass
class Out:
class Meta:
name = "out"
namespace = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
any_element: Optional[object] = field(
default=None,
metadata={
"type": "Wildcard",
"namespace": "##any",
}
)
@dataclass
class NistschemaSvIvAtomicIdEnumeration5:
class Meta:
name = "NISTSchema-SV-IV-atomic-ID-enumeration-5"
namespace = "NISTSchema-SV-IV-atomic-ID-enumeration-5-NS"
value: Optional[NistschemaSvIvAtomicIdEnumeration5Type] = field(
default=None,
metadata={
"required": True,
}
)
| 22.906977 | 68 | 0.636548 | 800 | 0.812183 | 0 | 0 | 640 | 0.649746 | 0 | 0 | 277 | 0.281218 |
47cc63cf4b5393de155d0003d5754fcb3e06068b | 889 | py | Python | tests/test_http_requests.py | andreygrechin/umbr_api | e9efd734a7395d25a1bab87c861b2cfee61e6a05 | [
"MIT"
]
| 4 | 2021-01-11T02:14:59.000Z | 2022-02-15T09:20:25.000Z | tests/test_http_requests.py | andreygrechin/umbr_api | e9efd734a7395d25a1bab87c861b2cfee61e6a05 | [
"MIT"
]
| null | null | null | tests/test_http_requests.py | andreygrechin/umbr_api | e9efd734a7395d25a1bab87c861b2cfee61e6a05 | [
"MIT"
]
| 2 | 2021-12-14T10:20:00.000Z | 2022-02-20T01:05:18.000Z | #!/usr/bin/env python3
# pylint: disable=no-self-use
"""Test unit."""
import unittest
class TestCase(unittest.TestCase):
"""Main class."""
def test_send_post(self):
"""Call incorrect send_post, get None.""" # import requests
from umbr_api._http_requests import send_post
response = send_post(" ")
self.assertEqual(response, None)
def test_send_get(self):
"""Call incorrect send_get, get None.""" # import requests
from umbr_api._http_requests import send_get
response = send_get(" ")
self.assertEqual(response, None)
def test_send_delete(self):
"""Call incorrect send_delete, get None.""" # import requests
from umbr_api._http_requests import send_delete
response = send_delete(" ")
self.assertEqual(response, None)
if __name__ == "__main__":
unittest.main()
| 25.4 | 70 | 0.651294 | 750 | 0.843645 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.312711 |
47cd00f1c6e6fe88e15b29bda7971944f1ec4024 | 2,127 | py | Python | mylast.py | JohnTocher/descrobbler | 0bca4d05e0029b63d11fe615e933362cadb30c11 | [
"Apache-2.0"
]
| null | null | null | mylast.py | JohnTocher/descrobbler | 0bca4d05e0029b63d11fe615e933362cadb30c11 | [
"Apache-2.0"
]
| null | null | null | mylast.py | JohnTocher/descrobbler | 0bca4d05e0029b63d11fe615e933362cadb30c11 | [
"Apache-2.0"
]
| null | null | null | ''' this file creates the objects used to access the scrobbling service api
No actual creds should be stored here!
This module will be imported and used by the main code
'''
import os
import sys
import pylast
try:
API_KEY = os.environ["LASTFM_API_KEY"]
API_SECRET = os.environ["LASTFM_API_SECRET"]
except KeyError:
API_KEY = "my_api_key"
API_SECRET = "my_apy_secret"
try:
lastfm_username = os.environ["LASTFM_USERNAME"]
lastfm_password_hash = os.environ["LASTFM_PASSWORD_HASH"]
print("Environment variables for user OK")
except KeyError:
# In order to perform a write operation you need to authenticate yourself
lastfm_username = "my_username"
# You can use either use the password, or find the hash once and use that
lastfm_password_hash = pylast.md5("my_password")
print(lastfm_password_hash)
# lastfm_password_hash = "my_password_hash"
print("Environment variables for user missing! So far:")
print(f"API_KEY: {API_KEY}")
print(f"API_SECRET: {API_SECRET}")
print(f"LFM USER: {lastfm_username}")
print(f"LPW HASH: {lastfm_password_hash}")
lastfm_network = pylast.LastFMNetwork(
api_key=API_KEY,
api_secret=API_SECRET,
username=lastfm_username,
password_hash=lastfm_password_hash,
)
def track_and_timestamp(track):
return f"{track.playback_date}\t{track.track}"
def print_track(track):
print(track_and_timestamp(track))
TRACK_SEPARATOR = " - "
def split_artist_track(artist_track):
artist_track = artist_track.replace(" – ", " - ")
artist_track = artist_track.replace("“", '"')
artist_track = artist_track.replace("”", '"')
(artist, track) = artist_track.split(TRACK_SEPARATOR)
artist = artist.strip()
track = track.strip()
print("Artist:\t\t'" + artist + "'")
print("Track:\t\t'" + track + "'")
# Validate
if len(artist) == 0 and len(track) == 0:
sys.exit("Error: Artist and track are blank")
if len(artist) == 0:
sys.exit("Error: Artist is blank")
if len(track) == 0:
sys.exit("Error: Track is blank")
return (artist, track)
| 29.136986 | 77 | 0.686883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 896 | 0.420066 |
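For instance, importing the helper above:

artist, track = split_artist_track('Blur - Song 2')
# prints:
# Artist:     'Blur'
# Track:      'Song 2'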
47cd0ed87b30c0eeeb6aca7161bf214f8970893c | 4,802 | py | Python | sharpenCommander/dlgFind.py | cjng96/sharpenCommander | 0d3a95dccc617481d9976789feffc115520243e6 | [
"Apache-2.0"
]
| null | null | null | sharpenCommander/dlgFind.py | cjng96/sharpenCommander | 0d3a95dccc617481d9976789feffc115520243e6 | [
"Apache-2.0"
]
| null | null | null | sharpenCommander/dlgFind.py | cjng96/sharpenCommander | 0d3a95dccc617481d9976789feffc115520243e6 | [
"Apache-2.0"
]
| null | null | null |
import os
import urwid
from .globalBase import *
from .urwidHelper import *
from .tool import *
#import dc
from .myutil import *
class DlgFind(cDialog):
def __init__(self, onExit=None):
super().__init__()
self.onExit = onExit
self.widgetFileList = mListBox(urwid.SimpleFocusListWalker(btnListMakeTerminal([], None)))
self.widgetFileList.setFocusCb(lambda newFocus: self.onFileFocusChanged(newFocus))
self.widgetContent = mListBox(urwid.SimpleListWalker(textListMakeTerminal(["< Nothing to display >"])))
self.widgetContent.isViewContent = True
self.header = ">> dc find - q/F4(Quit) </>,h/l(Prev/Next file) Enter(goto) E(edit)..."
self.headerText = urwid.Text(self.header)
self.widgetFrame = urwid.Pile(
[(15, urwid.AttrMap(self.widgetFileList, 'std')), ('pack', urwid.Divider('-')), self.widgetContent])
self.mainWidget = urwid.Frame(self.widgetFrame, header=self.headerText)
self.cbFileSelect = lambda btn: self.onFileSelected(btn)
self.content = ""
self.selectFileName = ""
self.lstFile = []
def onFileFocusChanged(self, newFocus):
# old widget
# widget = self.widgetFileList.focus
# markup = ("std", widget.base_widget.origTxt)
# widget.base_widget.set_label(markup)
# widget = self.widgetFileList.body[newFocus]
# markup = ("std_f", widget.base_widget.origTxt)
# widget.base_widget.set_label(markup)
widget = self.widgetFileList.focus
widget.original_widget.set_label(widget.base_widget.markup[0])
widget = self.widgetFileList.body[newFocus]
widget.base_widget.set_label(widget.base_widget.markup[1])
self.widgetFileList.set_focus_valign("middle")
self.selectFileName = fileBtnName(widget)
try:
with open(self.selectFileName, "r", encoding="UTF-8") as fp:
ss = fp.read()
except UnicodeDecodeError:
ss = "No utf8 file[size:%d]" % os.path.getsize(self.selectFileName)
ss = ss.replace("\t", " ")
del self.widgetContent.body[:]
self.widgetContent.body += textListMakeTerminal(ss.splitlines())
self.widgetFrame.set_focus(self.widgetContent)
return True
def onFileSelected(self, btn):
if btn.original_widget.attr is None:
self.close()
return
self.selectFileName = gitFileBtnName(btn)
itemPath = os.path.join(os.getcwd(), self.selectFileName)
pp = os.path.dirname(itemPath)
os.chdir(pp)
g.savePath(pp)
g.targetFile = os.path.basename(itemPath)
#raise urwid.ExitMainLoop()
self.close()
def inputFilter(self, keys, raw):
if filterKey(keys, "down"):
self.widgetContent.scrollDown()
if filterKey(keys, "up"):
self.widgetContent.scrollUp()
if filterKey(keys, "enter"):
self.onFileSelected(self.widgetFileList.focus)
return keys
def recvData(self, data):
if data is None:
self.headerText.set_text(self.header + "!!!")
if len(self.widgetFileList.body) == 0:
self.widgetFileList.body += btnListMakeTerminal(["< No result >"], None)
return
ss = data.decode("UTF-8")
self.content += ss
pt = self.content.rfind("\n")
if pt == -1:
return True
ss = self.content[:pt]
self.content = self.content[pt:]
for line in ss.splitlines():
line = line.strip()
if line == "":
continue
self.lstFile.append(line)
self.fileShow()
return True
def fileShow(self):
del self.widgetFileList.body[:]
for line in self.lstFile:
# TODO: filter
# markup = erminal2markup(line, 0)
# markupF = terminal2markup(line, 1)
markup = ("std", line)
markupF = ('std_f', line)
btn = btnGen(markup, markupF, self.cbFileSelect, len(self.widgetFileList.body) == 0)
self.widgetFileList.body.append(btn)
if len(self.widgetFileList.body) == 1:
self.onFileFocusChanged(0)
def unhandled(self, key):
if key == 'f4' or key == "q":
#raise urwid.ExitMainLoop()
self.close()
elif key == 'left' or key == "[" or key == "h":
self.widgetFileList.focusPrevious()
elif key == 'right' or key == "]" or key == "l":
self.widgetFileList.focusNext()
elif key == "H":
for i in range(10):
self.widgetFileList.focusPrevious()
elif key == "L":
for i in range(10):
self.widgetFileList.focusNext()
elif key == "k":
self.widgetContent.scrollUp()
elif key == "j":
self.widgetContent.scrollDown()
elif key == "K":
for i in range(15):
self.widgetContent.scrollUp()
elif key == "J":
for i in range(15):
self.widgetContent.scrollDown()
elif key == "e" or key == "E":
btn = self.widgetFileList.focus
fname = gitFileBtnName(btn)
g.loop.stop()
systemRet("%s %s" % (g.editApp, fname))
g.loop.start()
elif key == "H":
popupMsg("Dc help", "Felix Felix Felix Felix\nFelix Felix")
| 27.44 | 106 | 0.659933 | 4,652 | 0.968763 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.1591 |
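The focus handling above assumes each file button carries a `markup` pair, index 0 for the unfocused `(attr, text)` rendering and index 1 for the focused one (presumably attached by `btnGen` in `urwidHelper`). A minimal urwid illustration of that convention:

import urwid

text = 'some/file.py'
btn = urwid.Button(text)
btn.markup = (('std', text), ('std_f', text))  # (unfocused, focused)
btn.set_label(btn.markup[1])                   # on focus: switch rendering
btn.set_label(btn.markup[0])                   # on blur: switch back
print(btn.label)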
47cf4c7848eb2692961ae1f8fb2074a86bde0da7 | 8,595 | py | Python | Code to apply on BS output/Python/makeTtests.py | albertocottica/community-management-simulator | e942f854f41705fcb114a79308536a2765896e60 | [
"MIT"
]
| null | null | null | Code to apply on BS output/Python/makeTtests.py | albertocottica/community-management-simulator | e942f854f41705fcb114a79308536a2765896e60 | [
"MIT"
]
| null | null | null | Code to apply on BS output/Python/makeTtests.py | albertocottica/community-management-simulator | e942f854f41705fcb114a79308536a2765896e60 | [
"MIT"
]
| null | null | null | # runs t-tests over the null hypothesis
# avg_gini if (priority == "newer") == avg_gini if (priority == "more active")
import csv
import numpy as np
from scipy.stats import ttest_ind
from scipy.special import stdtr
def readCsvFile(fileName):
'''
(string) => list of dicts
Read the file called fileName and put its content in computer memory
'''
allData = [] # data go here as list of dicts
with open(fileName) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
allData.append(row)
return allData
def computeTtests (data):
'''
(list of dicts) => list of dicts
    Execute t-tests on data. Return one dict per parameter combination, i.e.:
    [
        {
         'globalchattiness': value, 'intimacystrength': value, 'randomisedchattiness': value, 'policy': value,
         and, for each of 'dropouts', 'totalmembershipstrength', 'totalcomments', 'mgmteffort',
         'ms_gini' and 'nc_gini': the corresponding '_n_mean', '_ma_mean', '_t' and '_pVal' entries
},
...
]
'''
results = []
# assign the parameter space in which we operate
for globChat in [.1, .2, .4]:
for intStren in [1, 5, 11]:
for randChat in ["true", "false"]:
for pol in ["engage", "both"]:
# keep track of parameters' values
result ={}
result['globalchattiness'] = globChat
result['intimacystrength'] = intStren
result['randomisedchattiness'] = randChat
result['policy'] = pol
# take care of non-Gini variables first
for nonGiniVar in ['dropouts', 'totalmembershipstrength', 'totalcomments', 'mgmteffort']:
# accumulate in two lists the values, separated by priority
moreActiveArray = []
newerArray = []
# read the data.
for row in data:
if ( float(row['globalchattiness']) == globChat and
int(row['intimacystrength']) == intStren and
row['randomisedchattiness'] == randChat and
row['policy'] == pol):
if row['priority'] == 'newer':
newerArray.append(float(row[nonGiniVar]))
elif row['priority'] == 'more active':
moreActiveArray.append(float(row[nonGiniVar]))
# save the means relative to the moreActive and newer cases
result[nonGiniVar + '_n_mean'] = float(sum(newerArray))/len(newerArray)
result[nonGiniVar + '_ma_mean'] = float(sum(moreActiveArray))/len(moreActiveArray)
# compute the t-tests. When T is positive, moreActive > newer
thisTest = ttest_ind(moreActiveArray, newerArray, equal_var = 'False')
result[nonGiniVar + '_t'] = float(thisTest[0])
result[nonGiniVar + '_pVal'] = float(thisTest[1])
# now the two Ginis
for giniVar in ['ms', 'nc']:
# no need for lists, I have already calculated means and SEs
# read the data.
for row in data:
if ( float(row['globalchattiness']) == globChat and
int(row['intimacystrength']) == intStren and
row['randomisedchattiness'] == randChat and
row['policy'] == pol):
if row['priority'] == 'newer':
newerMean = float(row[giniVar + '_avg_gini'])
newerSE = float(row[giniVar + '_inblockse'])
elif row['priority'] == 'more active':
moreActiveMean = float(row[giniVar + '_avg_gini'])
moreActiveSE = float(row[giniVar + '_inblockse'])
# save mean values
result[giniVar + '_gini_n_mean'] = newerMean
result[giniVar + '_gini_ma_mean'] = moreActiveMean
# compute the t-tests. When T is positive, moreActive > newer
tStat = (moreActiveMean - newerMean) / np.sqrt((moreActiveSE**2 + newerSE**2)/24)
result[giniVar + '_gini_t'] = tStat
dof = (moreActiveSE/24 + newerSE/24)**2 / (moreActiveSE**2/(24**2*23) + newerSE**2/(24**2*23))
result[giniVar + '_gini_pVal'] = 2*stdtr(dof, -np.abs(tStat))
results.append(result)
return results
def saveCsvFile(data, filename):
'''
    (list of dicts, str) => NoneType
saves list of dicts into a CSV file called filename
'''
# get the fieldnames from the data:
with open (filename, 'w') as csvfile:
fieldnames = sorted(data[0].keys())
writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def findTrends(data):
'''
(list of dicts) => NoneType
prints some info to screen
'''
moreActiveMoreInclusivity1 = 0
moreActiveMoreInclusivity2 = 0
moreActiveMoreActivity = 0
moreActiveMoreDiversity = 0
moreActiveMoreLoyalty = 0
for row in data:
if row['dropouts_t'] < 0 and row['dropouts_pVal'] < .01:
moreActiveMoreInclusivity1 += 1
if row['ms_gini_t'] < 0 and row['ms_gini_pVal'] < .01:
moreActiveMoreInclusivity2 += 1
if row['totalcomments_t'] > 0 and row['totalcomments_pVal'] < .01:
moreActiveMoreActivity += 1
if row['nc_gini_t'] < 0 and row['nc_gini_pVal'] < 0.1:
moreActiveMoreDiversity += 1
if row['totalmembershipstrength_t'] > 0 and row['totalmembershipstrength_pVal'] < .01:
moreActiveMoreLoyalty += 1
print 'Priority "more active" has FEWER dropouts: ' + str(moreActiveMoreInclusivity1)
print 'Priority "more active" has MORE inclusivity (lower Gini on ms): ' + str(moreActiveMoreInclusivity2)
print 'Priority "more active" has MORE comments: ' + str(moreActiveMoreActivity)
print 'Priority "more active" has MORE diversity (lower gini on nc): ' + str(moreActiveMoreDiversity)
print 'Priority "more active" has MORE loyalty (higher total membership strength): ' + str(moreActiveMoreLoyalty)
newerMoreInclusivity1 = 0
newerMoreInclusivity2 = 0
newerMoreActivity = 0
newerMoreDiversity = 0
newerMoreLoyalty = 0
for row in data:
if row['dropouts_t'] > 0 and row['dropouts_pVal'] < .01:
newerMoreInclusivity1 += 1
if row['ms_gini_t'] > 0 and row['ms_gini_pVal'] < .01:
newerMoreInclusivity2 += 1
if row['totalcomments_t'] < 0 and row['totalcomments_pVal'] < .01:
newerMoreActivity += 1
if row['nc_gini_t'] > 0 and row['nc_gini_pVal'] < 0.1:
newerMoreDiversity += 1
if row['totalmembershipstrength_t'] < 0 and row['totalmembershipstrength_pVal'] < .01:
newerMoreLoyalty += 1
print 'Priority "newer" has FEWER dropouts: ' + str(newerMoreInclusivity1)
print 'Priority "newer" has MORE inclusivity (lower Gini on ms): ' + str(newerMoreInclusivity2)
print 'Priority "newer" has MORE comments: ' + str(newerMoreActivity)
print 'Priority "newer" has MORE diversity (lower gini on nc): ' + str(newerMoreDiversity)
print 'Priority "newer" has MORE loyalty (higher total membership strength): ' + str(newerMoreLoyalty)
if __name__ == '__main__':
dirPath = '/Users/albertocottica/github/local/community-management-simulator/Data/'
allData = readCsvFile(dirPath + 'ready-4-tTest.csv')
results = computeTtests(allData)
saveCsvFile(results, dirPath + 'tTestsResults.csv')
findTrends(results)
| 47.225275 | 118 | 0.532984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,993 | 0.348226 |
47d18703506147df7e77ebf700589e58f57e4508 | 350 | py | Python | running_sum.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
]
| null | null | null | running_sum.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
]
| null | null | null | running_sum.py | erjan/coding_exercises | 53ba035be85f1e7a12b4d4dbf546863324740467 | [
"Apache-2.0"
]
| null | null | null | '''
Given an array nums. We define a running sum of an array as runningSum[i] = sum(nums[0]…nums[i]).
Return the running sum of nums.
'''
class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
res = list()
for i in range(len(nums)):
res.append(sum(nums[:(i+1)]))
print(res)
return res
| 21.875 | 97 | 0.58 | 208 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.397727 |
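A linear-time alternative to the quadratic nested `sum` above: `itertools.accumulate` yields exactly the running sums.

from itertools import accumulate
from typing import List

class Solution:
    def runningSum(self, nums: List[int]) -> List[int]:
        # one pass: nums[0], nums[0]+nums[1], ...
        return list(accumulate(nums))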
47d20ca2d18b88c21b0a0f588589e8dcfe03114e | 81 | py | Python | examples/import.py | vic/typhon | 72b8ceb34f431d93321fee6046b08094afbc213c | [
"BSD-3-Clause"
]
| 14 | 2015-01-06T10:59:09.000Z | 2021-01-09T17:57:52.000Z | examples/import.py | vic/typhon | 72b8ceb34f431d93321fee6046b08094afbc213c | [
"BSD-3-Clause"
]
| 1 | 2017-04-08T17:35:03.000Z | 2017-04-08T17:35:03.000Z | examples/import.py | vic/typhon | 72b8ceb34f431d93321fee6046b08094afbc213c | [
"BSD-3-Clause"
]
| 3 | 2015-05-09T15:16:37.000Z | 2016-01-26T07:57:59.000Z | # -*- coding: utf-8 -*-
import imported
import foo.bar
print(imported.__doc__)
| 11.571429 | 23 | 0.691358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.283951 |
47d397381f542a9f03743386be3039bce2cd0248 | 9,576 | py | Python | participants_codes/konnectomics/utils/submission.py | orlandi/connectomicsPerspectivesPaper | 98060e613d58c8e1ef9d14eb213439ae4cb8272b | [
"MIT"
]
| null | null | null | participants_codes/konnectomics/utils/submission.py | orlandi/connectomicsPerspectivesPaper | 98060e613d58c8e1ef9d14eb213439ae4cb8272b | [
"MIT"
]
| null | null | null | participants_codes/konnectomics/utils/submission.py | orlandi/connectomicsPerspectivesPaper | 98060e613d58c8e1ef9d14eb213439ae4cb8272b | [
"MIT"
]
| null | null | null | # Copyright 2014 Alistair Muldal <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import warnings
import datetime
import subprocess
from matplotlib import pyplot as plt
from itertools import izip
from sklearn import metrics
def make_submission(valid_weights, test_weights, fname=None, norm=True,
compress=True):
if fname is None:
fname = 'data/submission_%s.csv' % datetime.date.today()
if norm:
valid_weights = valid_weights.astype(np.float64)
valid_weights = (valid_weights - valid_weights.min()
) / valid_weights.ptp()
test_weights = test_weights.astype(np.float64)
test_weights = (test_weights - test_weights.min()) / test_weights.ptp()
with open(fname, 'w') as f:
f.write('NET_neuronI_neuronJ,Strength\n')
for ii in xrange(valid_weights.shape[0]):
for jj in xrange(valid_weights.shape[1]):
f.write('valid_%i_%i,%.8f\n' %(
ii + 1, jj + 1, valid_weights[ii, jj]))
for ii in xrange(test_weights.shape[0]):
for jj in xrange(test_weights.shape[1]):
f.write('test_%i_%i,%.8f\n' %(
ii + 1, jj + 1, test_weights[ii, jj]))
print 'Connection weights written to "%s"' % fname
if compress:
state = subprocess.call(['gzip', '-9', fname])
if state == 0:
print 'Compressed to "%s"' % (fname + '.gz')
else:
            print 'Compression failed, code=%i' % state
def run_auc(M, real_network, nsteps=1000, do_plot=False):
n = M.shape[0]
# convert the real network to dense vector format
ij, ground_truth = real2dense(real_network, n)
# convert the adjacency matrix to a vector of weights for each possible
# connection
ij, weights = adjacency2vec(M)
# compute the ROC curve, and the AUC
thresh, fpr, tpr, pl10, auc = roc(weights, ground_truth, do_plot=do_plot,
nsteps=nsteps)
return fpr, tpr, auc
def adjacency2vec(M):
"""
Unpack an n-by-n directed adjacency matrix to a 1D vector of connection
weights
Arguments
----------
M: 2D float array
adjacency matrix, where: M[i, j] corresponds to w(i->j)
Returns
----------
ij: 2D int array
2-by-npairs array of row/column indices
w_ij: 1D int array
corresponding weights, i.e. w(i->j)
"""
ncells = M.shape[0]
ij = all_directed_connections(ncells)
# sanity check
# npairs = ncells * (ncells - 1)
# assert ij.shape[1] == npairs
i, j = ij
w_ij = M[i, j]
return ij, w_ij
def vec2adjacency(ij, connected):
"""
Pack a 1D vector of connection weights into an n-by-n directed adjacency
matrix
Arguments
----------
ij: 2D int array
2-by-npairs array of row/column indices
    connected: 1D array
        corresponding connection weights/flags, i.e. w(i->j)
Returns
----------
M: 2D float array
adjacency matrix, where: M[i, j] corresponds to w(i->j)
M[j, i] corresponds to w(j->i)
"""
npairs = connected.size
    # solve npairs = ncells**2 - ncells for ncells
roots = np.roots((1, -1, -npairs))
ncells = int(roots[roots > 0])
M = np.zeros((ncells, ncells), dtype=connected.dtype)
for (ii, jj), cc in izip(ij.T, connected):
M[ii, jj] = cc
return M
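def _demo_vec_roundtrip():
    # Minimal illustrative sketch (function name hypothetical, not used by the
    # pipeline): adjacency2vec and vec2adjacency are inverses on the
    # off-diagonal entries of an adjacency matrix.
    M = np.arange(9, dtype=np.float64).reshape(3, 3)
    np.fill_diagonal(M, 0)       # self-connections carry no weight
    ij, w_ij = adjacency2vec(M)  # 2-by-6 index array and the 6 weights
    M2 = vec2adjacency(ij, w_ij)
    assert np.array_equal(M, M2)
    return M2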
def real2dense(real_connections, n=None, adj=False):
"""
The network data provided for the challenge lists connections weighted '-1'
(which aren't actually present in the simulation), and does not list any
weights for pairs of nodes that are not connected.
This function converts the provided data into a more convenient dense vector
format compatible with adjacency2vec and roc, where every possible directed
pair of nodes has a True/False weight.
Arguments:
-----------
real_connections: 2D np.ndarray or tables.(C)Array
npairs-by-3 array, whose columns represent (i, j, connected(i->j)).
        i, j are assumed to follow MATLAB indexing conventions (i.e. they
start at 1).
n: positive int, optional
        the total number of nodes (cells). if unspecified, this is taken to
        be the maximum (1-based) index in the first two columns of
        real_connections.
Returns:
----------
ij: 2D int array
2-by-npairs array of row/column indices
connected:
boolean vector, True where i->j is connected
"""
if n is None:
n = int(real_connections[:, :2].max())
if np.any(real_connections[:, :2] > n):
raise ValueError('real_connections contains indices > n')
# handle CArrays
real_connections = real_connections[:]
# cast to integers
real_connections = real_connections.astype(np.int)
# find the indices of the cells that are genuinely connected ('1' means
# connection, either '-1', '0' or omission means no connection).
ij_con = real_connections[(real_connections[:, 2] == 1), :2].T
# we subtract 1 from the indices because MATLAB-style indexing starts at 1,
# whereas Python indexing starts at 0
ij_con -= 1
# we'll do this the lazy way - construct an adjacency matrix from the
# connected indices ...
M = np.zeros((n, n), dtype=np.bool)
M[ij_con[0, :], ij_con[1, :]] = True
if adj:
return M
else:
# ... then convert this directly to the desired format
ij, connected = adjacency2vec(M)
return ij, connected
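def _demo_real2dense():
    # Minimal illustrative sketch (function name hypothetical): in the
    # challenge format below, 1->2 is a real connection (flag 1) while the
    # '-1' entry for 2->1 is ignored by real2dense.
    real_connections = np.array([[1, 2, 1],
                                 [2, 1, -1]], dtype=np.float64)
    ij, connected = real2dense(real_connections, n=3)
    assert connected.sum() == 1  # only the 1->2 edge survives
    return ij, connected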
def all_directed_connections(n):
"""
For an n-by-n adjacency matrix, return the indices of the nodes for every
possible directed connection, i.e. (i->j) and (j->i), but not (i->i) or
(j->j)
Arguments:
n: int
number of nodes
Returns:
idx: 2D int array
[2, n * (n - 1)] array of i, j indices
"""
# all possible pairs of indices (including repeated indices)
all_idx = np.indices((n, n)).T.reshape(-1, 2).T
# remove repeated indices
repeats = (all_idx[0, :] == all_idx[1, :])
idx = all_idx[:, ~repeats]
return idx
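# Note: for n nodes this yields exactly n * (n - 1) ordered pairs, e.g.
# all_directed_connections(3).shape == (2, 6).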
def roc(weights, ground_truth, nsteps=None, do_plot=False, show_progress=True):
"""
Compute ROC curve and performance metrics for a given set of posterior
connection probabilities and the set of ground-truth connections
Arguments:
----------
weights: 1D float array
vector of posterior probabilities for each possible pairwise
connection
ground_truth: 1D bool array
vector of ground-truth connections
nsteps: int, optional
number of linear steps between the minimum and maximum values of
weights at which to compute the FPR and TPR. if unspecified, every
unique value of weights is used, so that the ROC curve is computed
exactly
do_plot: bool, optional
make a pretty plot
show_progress: bool, optional
show a pretty progress bar
Returns:
---------
thresh: 1D float array
vector of threshold values used for computing the ROC curve
fpr: 1D float array
false-positive rate at each threshold value
tpr: 1D float array
true-positive rate at each threshold value
pl10: float
10% performance level (tpr at the threshold value that gives 10%
false-positives)
auc: float
area under the ROC curve
"""
# make sure we're dealing with 1D arrays
weights = weights.ravel()
ground_truth = ground_truth.ravel()
if weights.size != ground_truth.size:
raise ValueError('Input vectors must have the same number of elements')
fpr, tpr, thresh = metrics.roc_curve(ground_truth, weights, pos_label=True)
auc = metrics.roc_auc_score(ground_truth, weights)
# 'performance level' is defined as the fraction of true positives at 10%
# false-positives
pl10 = tpr[fpr.searchsorted(0.1, side='left')]
if do_plot:
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.hold(True)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_aspect('equal')
ax.plot(fpr, tpr, '-b', lw=2)
ax.set_xlabel('False-positive rate')
ax.set_ylabel('True-positive rate')
ax.set_title('ROC')
bbox_props = dict(boxstyle='round', fc='w', ec='0.5')
arrow_props = dict(arrowstyle='->', color='k', linewidth=2)
ax.annotate('AUC = %.4g' % auc, xy=(0.9, 0.1),
xycoords='axes fraction', ha='right', va='bottom',
bbox=bbox_props, fontsize=16)
plt.show()
return thresh, fpr, tpr, pl10, auc
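def _demo_roc():
    # Minimal synthetic sanity check (function name hypothetical): a perfectly
    # separating score should give an AUC of 1.
    ground_truth = np.array([False, False, True, True])
    weights = np.array([0.1, 0.2, 0.8, 0.9])
    thresh, fpr, tpr, pl10, auc = roc(weights, ground_truth, do_plot=False)
    assert auc > 0.99
    return auc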
| 29.018182 | 80 | 0.611529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,650 | 0.590017 |
47d3c6d2f3f9ad6b0e3ffc64b6de5590845ebff4 | 30,949 | py | Python | MagiskPatcher.py | affggh/Magisk_patcher | 77b7a90c821d45e0b090ee1905dfbca7028e9ac2 | [
"Apache-2.0"
]
| 19 | 2022-01-27T11:12:43.000Z | 2022-03-06T00:09:47.000Z | MagiskPatcher.py | affggh/Magisk_patcher | 77b7a90c821d45e0b090ee1905dfbca7028e9ac2 | [
"Apache-2.0"
]
| null | null | null | MagiskPatcher.py | affggh/Magisk_patcher | 77b7a90c821d45e0b090ee1905dfbca7028e9ac2 | [
"Apache-2.0"
]
| 6 | 2022-01-28T15:51:19.000Z | 2022-02-20T17:39:46.000Z | #!/usr/bin/env python3
# Script by affggh
# Apache 2.0
import os
import sys
import shutil
import zipfile
import subprocess
import platform
import requests
if os.name == 'nt':
import tkinter as tk
if os.name == 'posix':
from mttkinter import mtTkinter as tk
# While Load some need thread funcion on Linux it will failed
# Just use mttkinter replace regular tkinter
from tkinter.filedialog import *
from tkinter import ttk
from tkinter import *
#import ttkbootstrap as ttk
import time
import webbrowser
import threading
# Hide console , need ```pip install pywin32```
# import win32gui, win32con
# the_program_to_hide = win32gui.GetForegroundWindow()
# win32gui.ShowWindow(the_program_to_hide, win32con.SW_HIDE)
def main():
VERSION = "20220611"
LOCALDIR = os.path.abspath(os.path.dirname(sys.argv[0]))
# Read config from GUIcfg.txt
configPath = LOCALDIR + os.sep + "bin" + os.sep + "GUIcfg.txt"
with open(configPath, "r") as file:
for line in file.readlines():
if((line.split('=', 1)[0]) == "THEME"):
THEME = line.split('=', 1)[1]
THEME = THEME.replace('\n', '')
if(THEME!="dark"): # 防止手贱改成别的导致主题爆炸
THEME="light"
elif((line.split('=', 1)[0]) == "DONATE_BUTTON"):
SHOW_DONATE_BUTTON = line.split('=', 1)[1]
                SHOW_DONATE_BUTTON = SHOW_DONATE_BUTTON.replace('\n', '') # whether to show the donate button
elif((line.split('=', 1)[0]) == "GIT_USE_MIRROR"):
if (line.split('=', 1)[1].strip("\n").lower()) == "true":
GIT_USE_MIRROR = True
else:
GIT_USE_MIRROR = False
elif((line.split('=', 1)[0]) == "GIT_MIRROR"):
GIT_MIRROR = line.split('=', 1)[1]
# Detect machine and ostype
ostype = platform.system().lower()
machine = platform.machine().lower()
if machine == 'aarch64_be' \
or machine == 'armv8b' \
or machine == 'armv8l':
machine = 'aarch64'
if machine == 'i386' or machine == 'i686':
machine = 'x86'
if machine == "amd64":
machine = 'x86_64'
if ostype == 'windows':
if not machine == 'x86_64':
print("Error : Program on windows only support 64bit machine")
sys.exit(1)
if ostype == 'linux':
if not (machine == 'aarch64' or \
machine == 'arm' or \
machine == 'x86_64'):
print("Error : Machine not support your device [%s]" %machine)
sys.exit(1)
root = tk.Tk()
root.geometry("820x480")
# Set the initial theme
root.tk.call("source", LOCALDIR+os.sep+"sun-valley.tcl")
root.tk.call("set_theme", THEME)
def change_theme():
# NOTE: The theme's real name is sun-valley-<mode>
if root.tk.call("ttk::style", "theme", "use") == "sun-valley-dark":
# Set light theme
root.tk.call("set_theme", "light")
else:
# Set dark theme
root.tk.call("set_theme", "dark")
    root.resizable(0,0) # disable resizing/maximising the window
root.title("Magisk Patcher by 酷安 affggh " + "版本号 : %s" %(VERSION))
def logo():
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
root.iconbitmap(os.path.abspath(LOCALDIR+os.sep+'bin' + os.sep+ 'logo.ico'))
if os.name == 'nt':
logo()
    # Frames: these all shell out to external commands, which can stall the UI; run them on worker threads to mitigate
frame2_3 = Frame(root, relief=FLAT)
frame2 = ttk.LabelFrame(frame2_3, text="功能页面", labelanchor="n", relief=SUNKEN, borderwidth=1)
frame3 = ttk.LabelFrame(frame2_3, text="信息反馈", labelanchor="nw", relief=SUNKEN, borderwidth=1)
textfont = "Consolas"
    text = Text(frame3,width=70,height=15,font=textfont) # message display
filename = tk.StringVar()
arch = tk.StringVar()
keepverity = tk.StringVar()
keepforceencrypt = tk.StringVar()
patchvbmetaflag = tk.StringVar()
mutiseletion = tk.StringVar()
recoverymodeflag = tk.BooleanVar()
recoverymode = tk.StringVar()
recoverymode.set('false')
# For logo
    photo = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"logo.png") # image file path
    # For aboutme
    photo2 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"logo.png") # image file path
    # For donate QR code
    photo3 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"alipay.png") # image file path
    photo4 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"wechat.png") # image file path
    photo5 = tk.PhotoImage(file=LOCALDIR+os.sep+"bin"+os.sep+"zfbhb.png") # image file path
global Thanks
    Thanks = 0 # the sticker in the bottom-left corner that says thanks
os.chdir(LOCALDIR)
def get_time():
        '''Show the current time.'''
        global time1
        time1 = ''
        time2 = time.strftime('%H:%M:%S')
        # dynamically shows the system time
if time2 != time1:
time1 = time2
text.insert(END, "[%s] : " %(time1))
def selectFile():
global filepath
        filepath = askopenfilename() # pick a file to open; returns the file name
filename.set(os.path.abspath(filepath))
showinfo("选择文件为:\n%s" %(filename.get()))
def showinfo(textmsg):
textstr = textmsg
        get_time() # prepend the timestamp
text.insert(END,"%s" %(textstr) + "\n")
        text.update() # refresh so the message shows immediately
text.yview('end')
def affgghsay(word):
line = ''
for i in range(len(word.encode("gb2312"))):
            line += '─' # Chinese in gb2312 is two bytes; exploit that to pad full-width vs half-width
text.insert(END,
'''
(\︵/) ┌%s┐
>(—﹏—)< < %s│
/ ﹌ \╯ └%s┘
affggh 提醒您
'''%(line, word, line))
text.yview('end')
def runcmd(cmd):
if os.name == 'nt':
sFlag = False
else:
sFlag = True # fix file not found on linux
try:
ret = subprocess.Popen(cmd, shell=sFlag, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for i in iter(ret.stdout.readline, b''):
text.insert(END, i.strip().decode("UTF-8") + "\n")
text.update()
text.yview(END)
except subprocess.CalledProcessError as e:
for i in iter(e.stdout.readline,b''):
text.insert(END, i.strip().decode("UTF-8") + "\n")
text.update()
text.yview(END)
def get_releases(url):
data = requests.get(url).json()
return data
def ret_dlink(url):
data = get_releases(url)
dlink = {}
for i in data:
for j in i['assets']:
if j['name'].startswith("Magisk-v") and j['name'].endswith(".apk"):
if GIT_USE_MIRROR:
dlink.update({j['name'] : j['browser_download_url'].replace("https://github.com/", GIT_MIRROR)})
else:
dlink.update({j['name'] : j['browser_download_url']})
return dlink
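    # ret_dlink yields a mapping like {"Magisk-v24.1.apk": "<browser_download_url>"}
    # (filename illustrative; URLs are rewritten through GIT_MIRROR when
    # GIT_USE_MIRROR is enabled).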
def download(url, fileToSave):
def p(now, total):
return int((now/total)*100)
file = fileToSave
chunk_size = 1024
affgghsay("Starting download file...")
r = requests.get(url, stream=True)
total_size = int(r.headers['content-length'])
now = 0
progressbar['maximum'] = 100
with open(file, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
before = now
f.write(chunk)
now += chunk_size
if now > before:
# print("下载进度 [%s/100]" %progress(now, total_size), end='\r')
progress.set(p(now, total_size))
progress.set(0)
affgghsay("文件下载完成"+file)
    def thrun(fun): # run the job on a worker thread so the UI does not freeze
# showinfo("Test threading...")
th=threading.Thread(target=fun)
th.daemon = True
th.start()
def cleaninfo():
        text.delete(1.0, END) # clear the text widget
text.image_create(END,image=photo)
text.insert(END," Copyright(R) affggh Apache2.0\n" \
"\n 此脚本为免费工具,如果你花钱买了你就是大傻逼\n")
def test():
affgghsay("Testing...")
def showConfig():
affgghsay("确认配置信息")
text.insert(END , "\n" + \
" 镜像架构 = " + "%s\n" %(arch.get()) + \
" 保持验证 = " + "%s\n" %(keepverity.get()) + \
" 保持强制加密 = " + "%s\n" %(keepforceencrypt.get()) + \
" 修补vbmeta标志 = "+ "%s\n" %(patchvbmetaflag.get()) +\
" Recovery Mode = " + "%s\n" %(recoverymode.get()))
tabControl.select(tab2)
def selectConfig():
configpath = askopenfilename() # 选择打开什么文件,返回文件名
showinfo("从配置文件中读取:\n%s" %(configpath))
if os.path.isfile(configpath):
with open(configpath, 'r') as f:
PatchConfig = {}
for i in f.readlines():
if not i[0:1] == '#':
l = i.strip('\n').split('=')
if not i.find('=') == -1:
PatchConfig[l[0]] = l[1]
arch.set(PatchConfig['arch'])
keepverity.set(PatchConfig['keepverity'])
keepforceencrypt.set(PatchConfig['keepforceencrypt'])
patchvbmetaflag.set(PatchConfig['patchvbmetaflag'])
recoverymode.set(PatchConfig['recoverymode'])
if recoverymode.get() == 'true':
recoverymodeflag.set(True)
else:
recoverymodeflag.set(False)
# showConfig()
else:
affgghsay("取消选择config文件")
def confirmConfig():
showConfig()
def __select(*args):
affgghsay("选择Magisk版本为 : %s" %(mutiseletion.get()))
if not os.access("." + os.sep + "prebuilt" + os.sep + mutiseletion.get() + ".apk", os.F_OK):
affgghsay("你选择的版本文件不存在,正在下载...")
try:
download(dlink[mutiseletion.get()+".apk"], "."+os.sep+"prebuilt"+os.sep+mutiseletion.get()+".apk")
except:
affgghsay("出现错误,请关掉代理重试")
def select(*args):
th = threading.Thread(target=__select, args=args)
th.daemon = True
th.start()
def recModeStatus():
if recoverymodeflag.get()== True:
affgghsay("开启recovery模式修补")
recoverymode.set("true")
else:
affgghsay("关闭recovery模式修补")
recoverymode.set("false")
def parseZip(filename):
def returnMagiskVersion(buf):
v = "Unknow"
l = buf.decode('utf_8').split("\n")
for i in l:
if not i.find("MAGISK_VER=") == -1:
v = i.split("=")[1].strip("'")
break
return v
def rename(n):
if n.startswith("lib") and n.endswith(".so"):
n = n.replace("lib", "").replace(".so", "")
return n
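        # e.g. rename("libmagisk64.so") -> "magisk64"; names without the
        # lib/.so wrapping are returned unchanged.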
if not os.access(filename, os.F_OK):
return False
else:
f = zipfile.ZipFile(filename, 'r')
l = f.namelist() # l equals list
tl = [] # tl equals total get list
for i in l:
if not i.find("assets/") == -1 or \
not i.find("lib/") == -1:
tl.append(i)
buf = f.read("assets/util_functions.sh")
mVersion = returnMagiskVersion(buf)
showinfo("Parse Magisk Version : " + mVersion)
for i in tl:
if arch.get() == "arm64":
if i.startswith("lib/arm64-v8a/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
elif arch.get() == "arm":
if i.startswith("lib/armeabi-v7a/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
elif arch.get() == "x86_64":
if i.startswith("lib/x86_64/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
elif arch.get() == "x86":
if i.startswith("lib/x86/") and i.endswith(".so"):
if not (i.endswith("busybox.so") or i.endswith("magiskboot.so")):
f.extract(i, "tmp")
for i in tl:
if arch.get() == "arm64" and not os.access("libmagisk32.so", os.F_OK):
if i == "lib/armeabi-v7a/libmagisk32.so":
f.extract("lib/armeabi-v7a/libmagisk32.so", "tmp")
elif arch.get() == "x86_64" and not os.access("libmagisk32.so", os.F_OK):
if i == "lib/x86/libmagisk32.so":
f.extract("lib/armeabi-v7a/libmagisk32.so", "tmp")
for root, dirs, files in os.walk("tmp"):
for file in files:
if file.endswith(".so"):
shutil.move(root+os.sep+file, rename(os.path.basename(file)))
shutil.rmtree("tmp")
return True
def PatchonWindows():
affgghsay(" ---->> 修补开始")
progressbar['maximum'] = 3
start_time = time.time()
if not os.access(filename.get(), os.F_OK):
affgghsay("待修补文件不存在")
affgghsay(" <<---- 修补失败")
return False
# cmd = [LOCALDIR+os.sep+'magisk_patcher.bat','patch','-i','%s' %(filename.get()),'-a','%s' %(arch.get()),'-kv','%s' %(keepverity.get()),'-ke','%s' %(keepforceencrypt.get()),'-pv','%s' %(patchvbmetaflag.get()),'-m','.\\prebuilt\\%s.apk' %(mutiseletion.get())]
f = "." + os.sep + "prebuilt" + os.sep + mutiseletion.get() + ".apk"
if not parseZip(f):
affgghsay("apk文件解析失败")
affgghsay(" <<---- 修补失败")
return False
progress.set(1)
if os.name == 'nt':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "busybox ash "
elif os.name == 'posix':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "busybox ash "
else:
showinfo("not support")
progress.set(0)
return False
if not os.access("./bin/boot_patch.sh", os.F_OK):
affgghsay("Error : 关键脚本丢失")
progress.set(0)
return False
cmd += "." + os.sep + "bin" + os.sep + "boot_patch.sh \"%s\"" %(filename.get())
cmd += " %s" %keepverity.get()
cmd += " %s" %keepforceencrypt.get()
cmd += " %s" %patchvbmetaflag.get()
cmd += " %s" %recoverymode.get()
try:
progress.set(2)
            runcmd(cmd) # thrun(runcmd(cmd)) evaluated runcmd synchronously anyway (thrun got None); call it directly so cleanUp() below cannot race the patch script
except:
progress.set(0)
affgghsay("Error : 出现问题,修补失败")
progress.set(3)
cleanUp()
end_time = time.time()
use_time = end_time - start_time
affgghsay(" 总共用时 [%.2f] s" %use_time)
affgghsay(" <<--- 修补结束")
progress.set(0)
def GenDefaultConfig():
affgghsay(" ---->> 生成选中配置")
if os.path.isfile('.' + os.sep + 'config.txt'):
os.remove('.' + os.sep + 'config.txt')
with open("." + os.sep + "config.txt", 'w') as f:
f.write("# VAR TYPE\n")
f.write("arch=%s\n" %(arch.get()) + \
"keepverity=%s\n" %(keepverity.get()) + \
"keepforceencrypt=%s\n" %(keepforceencrypt.get()) + \
"patchvbmetaflag=%s\n" %(patchvbmetaflag.get()) + \
"recoverymode=%s\n" %(recoverymode.get()) + \
"magisk=%s\n" %("." + os.sep + "prebuilt" + os.sep + mutiseletion.get() + ".apk") )
        # magisk=%s is not used by the python program; it only matters for the batch version
if os.path.isfile('.' + os.sep + 'config.txt'):
affgghsay("确认配置信息:")
text.insert(END, "\n" + \
" 镜像架构 = " + "%s\n" %(arch.get()) + \
" 保持验证 = " + "%s\n" %(keepverity.get()) + \
" 保持强制加密 = " + "%s\n" %(keepforceencrypt.get()) + \
" 修补vbmeta标志 = "+ "%s\n" %(patchvbmetaflag.get()) +\
" Recovery Mode = " + "%s\n" %(recoverymode.get()))
affgghsay("成功生成配置")
else:
affgghsay("选中配置生成失败")
affgghsay(" <<---- 生成选中配置")
def GetDeviceConfig():
affgghsay(" ---->> 读取设备配置")
affgghsay(" 根据设备不同,生成速度也不同...请稍等...")
if os.name == 'nt':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "adb get-state"
elif os.name == 'posix':
cmd = "adb get-state"
else:
affgghsay("系统不支持")
return False
deviceState = subprocess.getstatusoutput(cmd)
if deviceState[0] == 1:
affgghsay("设备未连接,或驱动未安装")
return False
elif deviceState[0] == 0:
if os.name == 'nt':
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "adb "
elif os.name == 'posix':
cmd = "adb "
if deviceState[1].strip(" ").strip("\n") == 'device':
tmppath = "/data/local/tmp"
elif deviceState[1].strip(" ").strip("\n") == 'recovery':
tmppath = "/tmp"
else:
affgghsay("不支持的设备状态")
return False
subprocess.getoutput(cmd + "push " + "." + os.sep + "bin" + os.sep + "get_config.sh %s/get_config.sh" %tmppath)
subprocess.getoutput(cmd + "shell chmod a+x %s/get_config.sh" %tmppath)
out = subprocess.getoutput(cmd + "shell sh %s/get_config.sh" %tmppath)
for i in out.splitlines():
if len(i.split("=")) > 1:
var = i.split("=")[0].strip(" ").lower()
t = i.split("=")[1].strip(" ").lower()
if var == 'arch':
arch.set(t)
elif var == 'keepverity':
keepverity.set(t)
elif var == 'keepforceencrypt':
keepforceencrypt.set(t)
elif var == 'patchvbmetaflag':
patchvbmetaflag.set(t)
affgghsay("自动修改配置%s为%s" %(var, t))
else:
affgghsay("设备未知状态")
return False
affgghsay(" <<---- 读取设备配置")
def opensource():
webbrowser.open("https://github.com/affggh/Magisk_Patcher")
def About():
root2 = tk.Toplevel()
curWidth = 300
        curHeight = 180
        # get the screen width and height
        scn_w, scn_h = root.maxsize()
        # print(scn_w, scn_h)
        # compute the centered coordinates
        cen_x = (scn_w - curWidth) / 2
        cen_y = (scn_h - curHeight) / 2
        # print(cen_x, cen_y)
        # set the initial window size and position
        size_xy = '%dx%d+%d+%d' % (curWidth, curHeight, cen_x, cen_y)
root2.geometry(size_xy)
#root2.geometry("300x180")
        root2.resizable(0,0) # disable resizing/maximising the window
root2.title("关于脚本和作者信息")
aframe1 = Frame(root2, relief=FLAT, borderwidth=1)
aframe2 = Frame(root2, relief=FLAT, borderwidth=1)
aframe1.pack(side=BOTTOM, expand=YES, pady=3)
aframe2.pack(side=BOTTOM, expand=YES, pady=3)
ttk.Button(aframe1, text='访问项目', command=opensource).pack(side=LEFT, expand=YES, padx=5)
ttk.Button(aframe1, text='获取最新', command=lambda u="https://hub.fastgit.xyz/affggh/Magisk_patcher/archive/refs/heads/main.zip":webbrowser.open(u)).pack(side=LEFT, expand=YES, padx=5)
ttk.Label(aframe2, text='脚本编写自affggh\nshell脚本提取修改自Magisk-v24.1安装包\n项目开源地址:github.com/affggh/Magisk_Patcher\n').pack(side=BOTTOM, expand=NO, pady=3)
        imgLabe2 = ttk.Label(aframe2,image=photo2) # wrap the image in a label widget
imgLabe2.pack(side=TOP, expand=YES, pady=3)
root2.mainloop()
def donateme():
cleaninfo()
text.image_create(END,image=photo3)
text.image_create(END,image=photo4)
text.image_create(END,image=photo5)
global Thanks
if Thanks==0:
Label(frame4,text=' ----------------------------\n' \
' < 谢谢老板!老板发大财!|\n' \
' ----------------------------').pack(side=LEFT, expand=NO, pady=3)
Thanks = 1
def color(value):
digit = list(map(str, range(10))) + list("ABCDEF")
if isinstance(value, tuple):
string = '#'
for i in value:
a1 = i // 16
a2 = i % 16
string += digit[a1] + digit[a2]
return string
elif isinstance(value, str):
a1 = digit.index(value[1]) * 16 + digit.index(value[2])
a2 = digit.index(value[3]) * 16 + digit.index(value[4])
a3 = digit.index(value[5]) * 16 + digit.index(value[6])
return (a1, a2, a3)
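    # color() converts both ways, e.g. color((255, 0, 0)) -> "#FF0000" and
    # color("#FF0000") -> (255, 0, 0).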
def colorfuldonate():
button = tk.Button(frame41, text='给我捐钱', width=12, height=1, command=donateme, bg="red", fg="white", font=('黑体', '14'))
button.grid(row=0, column=1, padx=3, pady=0)
while(True):
r = 255
g = 0
b = 0
for c in range(255):
r = r-1
g = g+1
button.configure(bg=color((r,g,b)))
time.sleep(0.000001)
for c in range(255):
g = g-1
b = b+1
button.configure(bg=color((r,g,b)))
time.sleep(0.000001)
for c in range(255):
b = b-1
r = r+1
button.configure(bg=color((r,g,b)))
time.sleep(0.000001)
def pointdonate():
lab = tk.Label(frame41, text='<<点我', font=('黑体', '14'))
lab.grid(row=0, column=2, padx=2, pady=0)
while(True):
lab.configure(bg='#FFFF00',fg='#000000')
time.sleep(0.1)
lab.configure(bg='#9400D3',fg='#FFFFFF')
time.sleep(0.1)
def pointdonate2():
lab = tk.Label(frame41, text='点我>>', font=('黑体', '14'))
lab.grid(row=0, column=0, padx=2, pady=0)
while(True):
lab.configure(bg='#FFFF00',fg='#000000')
time.sleep(0.1)
lab.configure(bg='#9400D3',fg='#FFFFFF')
time.sleep(0.1)
def pdp():
        th2=threading.Thread(target=pointdonate)
        th2.setDaemon(True) # daemon thread
        th2.start()
        th=threading.Thread(target=colorfuldonate)
        th.setDaemon(True) # daemon thread
        th.start()
        th3=threading.Thread(target=pointdonate2)
        th3.setDaemon(True) # daemon thread
        th3.start()
def listdir(path):
L=[]
for root, dirs, files in os.walk(path):
for file in files:
if os.path.splitext(file)[1] == '.apk':
                    tmp = os.path.basename(os.path.join(root, file)).replace(".apk", "") # strip(".apk") removed characters, not the suffix
L.append(tmp)
return L
def cleanUp():
def rm(p):
if os.access(p, os.F_OK):
if os.path.isdir(p):
shutil.rmtree(p)
elif os.path.isfile(p):
os.remove(p)
else:
os.remove(p)
l = ["busybox", "magisk32", "magisk64", "magiskinit", "magiskboot"]
d = ["tmp"]
for i in l:
rm(i)
for i in d:
rm(i)
cmd = "." + os.sep + "bin" + os.sep + ostype + os.sep + machine + os.sep + "magiskboot cleanup"
        runcmd(cmd) # the original thrun(runcmd(cmd)) executed runcmd synchronously anyway (thrun received None)
def get_comboxlist():
url = "https://api.github.com/repos/topjohnwu/Magisk/releases"
l = []
try:
global dlink
dlink = ret_dlink(url)
for i in dlink.keys():
l.append(i.replace(".apk", ""))
except:
affgghsay(" 从网络读取失败, 仅加载本地目录")
for i in os.listdir("." + os.sep + "prebuilt"):
if i.endswith(".apk"):
l.append(os.path.basename(i).replace(".apk", ""))
l2=list(set(l))
l2.sort(key=l.index)
comboxlist["values"] = l2
if len(l) > 0:
comboxlist.current(0)
select()
# button and text
    # Frame 1: file selection
frame1 = LabelFrame(root, text="文件选择", labelanchor="w", relief=FLAT, borderwidth=1)
frame1.pack(side=TOP, fill=BOTH, padx=6, pady=8, expand=NO)
# tk.Label(frame1, text='选择文件').pack(side=LEFT)
ttk.Entry(frame1, width=70,textvariable=filename).pack(side=LEFT, expand=YES, fill=X, padx=10)
ttk.Button(frame1, text='选择文件', command=selectFile).pack(side=LEFT)
#
    # Frame 2: function tabs
frame2.pack(side=LEFT, fill=BOTH, padx=2, pady=3, expand=NO)
tabControl = ttk.Notebook(frame2)
    tab1 = ttk.Frame(tabControl) # add a new tab
tab11 = ttk.Frame(tab1)
tab111 = ttk.LabelFrame(tab11, text="镜像架构", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab111.pack(side=TOP, expand=NO, fill=BOTH)
arch.set("arm64")
ttk.Radiobutton(tab111, text='arm',variable=arch, value='arm').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab111, text='arm64',variable=arch, value='arm64').grid(row=0, column=1, padx=0, pady=0)
ttk.Radiobutton(tab111, text='x86',variable=arch, value='x86').grid(row=1, column=0, padx=0, pady=0)
ttk.Radiobutton(tab111, text='x86_64',variable=arch, value='x86_64').grid(row=1, column=1, padx=0, pady=0)
tab112 = ttk.LabelFrame(tab11, text="保持验证", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab112.pack(side=TOP, expand=YES, fill=BOTH)
keepverity.set("true")
ttk.Radiobutton(tab112, text='是',variable=keepverity, value='true').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab112, text='否',variable=keepverity, value='false').grid(row=0, column=1, padx=10, pady=0)
tab113 = ttk.LabelFrame(tab11, text="保持强制加密", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab113.pack(side=TOP, expand=YES, fill=BOTH)
keepforceencrypt.set("true")
ttk.Radiobutton(tab113, text='是',variable=keepforceencrypt, value='true').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab113, text='否',variable=keepforceencrypt, value='false').grid(row=0, column=1, padx=10, pady=0)
tab113 = ttk.LabelFrame(tab11, text="修补vbmeta标志", labelanchor="n", relief=SUNKEN, borderwidth=1)
tab113.pack(side=TOP, expand=YES, fill=BOTH)
patchvbmetaflag.set("false")
ttk.Radiobutton(tab113, text='是',variable=patchvbmetaflag, value='true').grid(row=0, column=0, padx=0, pady=0)
ttk.Radiobutton(tab113, text='否',variable=patchvbmetaflag, value='false').grid(row=0, column=1, padx=10, pady=0)
tab12 = ttk.Frame(tab1)
tab11.pack(side=TOP, expand=YES, fill=BOTH)
ttk.Button(tab12, text='确认配置', command=confirmConfig).pack(side=TOP, expand=YES, pady=3)
ttk.Button(tab12, text='指定config.txt', command=selectConfig).pack(side=TOP, expand=YES, pady=2)
    tabControl.add(tab1, text='配置') # add the new tab to the Notebook
    tab2 = ttk.Frame(tabControl) # add a new tab
ttk.Button(tab2, text='使用当前配置\n修 补', command=PatchonWindows).pack(side=TOP, expand=NO, pady=3)
# ttk.Button(tab2, text='连接设备环境\n修 补', command=PatchonDevice).pack(side=TOP, expand=NO, pady=3)
ttk.Label(tab2, text='使用设备环境修补不需要\n配置各种参数\n配置来源与设备').pack(side=BOTTOM, expand=NO, pady=3)
ttk.Label(tab2, text='选择Magisk版本').pack(side=TOP, expand=NO, pady=3)
ttk.Checkbutton(tab2, variable=recoverymodeflag, text="recovery修补", command=recModeStatus).pack(side=TOP, expand=NO, pady=3)
comboxlist = ttk.Combobox(tab2, textvariable=mutiseletion, width=14)
'''
filelist = listdir("./prebuilt")
    filelist.reverse() # newer versions first
    comboxlist["values"]=(filelist)
    if len(filelist)>0:
        comboxlist.current(0) # select the first entry
else:
showinfo("Error : 没有找到Magisk安装包,请确保prebuilt目录下存在apk文件")
'''
# thrun(get_comboxlist())
comboxlist.bind("<<ComboboxSelected>>",select)
comboxlist.pack(side=TOP, expand=NO, pady=3)
    tabControl.add(tab2, text='修补') # add the new tab to the Notebook
    ttk.Button(tab2, text='获取magisk列表', command=get_comboxlist).pack(side=TOP, expand=NO, pady=3)
    tab3 = tk.Frame(tabControl) # add a new tab
ttk.Button(tab3, text='生成选中配置\nconfig.txt', command=lambda:thrun(GenDefaultConfig)).pack(side=TOP, expand=NO, pady=3)
ttk.Button(tab3, text='读取设备配置\nconfig.txt', command=lambda:thrun(GetDeviceConfig)).pack(side=TOP, expand=NO, pady=3)
# ttk.Button(tab3, text='test', command=lambda:thrun(test)).pack(side=TOP, expand=NO, pady=3)
    tabControl.add(tab3, text='读取') # add the new tab to the Notebook
tab12.pack(side=TOP, expand=NO, fill=BOTH)
tabControl.pack(side=TOP, expand=YES, fill="both")
    # Frame 3: message display panel
frame3.pack(side=RIGHT, fill=BOTH, padx=2, pady=3, expand=YES)
scroll = ttk.Scrollbar(frame3)
scroll.pack(side=RIGHT,fill=Y, padx=1, pady=5)
text.pack(side=RIGHT, expand=YES, fill=BOTH, padx=5 ,pady=1)
scroll.config(command=text.yview)
text.config(yscrollcommand=scroll.set)
frame2_3.pack(side=TOP, expand=NO, pady=2, fill=BOTH)
    # Frame 4: about and clear-message buttons
frame4 = Frame(root, relief=FLAT)
progress = tk.DoubleVar(value=0)
progressbar = ttk.Progressbar(frame4, length=200, variable=progress, mode='determinate')
ttk.Button(frame4, text='清空信息', command=cleaninfo).pack(side=RIGHT, expand=NO, pady=3)
ttk.Button(frame4, text='关于', command=About).pack(side=RIGHT, expand=NO, pady=3)
ttk.Button(frame4, text='切换主题', command=change_theme).pack(side=RIGHT, expand=NO, pady=3)
if(SHOW_DONATE_BUTTON!="False"):
        # flashy animated donate button
frame41 = Frame(frame4, relief=FLAT)
pdp()
frame41.pack(side=RIGHT, expand=NO, pady=3)
else:
ttk.Button(frame4, text='捐赠', command=donateme).pack(side=RIGHT, expand=NO, pady=3)
progressbar.pack(side=RIGHT, expand=NO, padx=(0, 10))
ttk.Label(frame4, text="进度条:").pack(side=RIGHT, expand=NO, padx=(10, 0))
frame4.pack(side=TOP, expand=NO, padx=10, ipady=5, fill=X)
    imgLabel = ttk.Label(frame4,image=photo) # wrap the image in a label widget
imgLabel.pack(side=LEFT, expand=NO, pady=3)
text.image_create(END,image=photo)
text.insert(END," Copyright(R) affggh Apache2.0\n" \
" 当前脚本运行环境:\n" \
" [%s] [%s]\n" \
"此脚本为免费工具,如果你花钱买了你就是大傻逼\n" \
"普通流程:\n" \
"修改配置-->确认配置-->修补\n" \
"简单点:\n" \
"直接选个magisk版本-->插手机-->手机修补\n (不过配置只能用手机的)\n" \
" 注:recovery模式仅支持windows修补\n" %(ostype, machine))
affgghsay("此脚本为免费工具,如果你花钱买了你就是大傻逼")
# root.update()
root.mainloop()
if __name__=='__main__':
main()
| 40.509162 | 267 | 0.534751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,756 | 0.265954 |
47d6eebb03b0ea6e42bdb7a3c49d5f0b5a409c1e | 49 | py | Python | __init__.py | faridfibrianto/pyrex | bedd088370e90bceefa45788cddf952c03bea945 | [
"MIT"
]
| null | null | null | __init__.py | faridfibrianto/pyrex | bedd088370e90bceefa45788cddf952c03bea945 | [
"MIT"
]
| null | null | null | __init__.py | faridfibrianto/pyrex | bedd088370e90bceefa45788cddf952c03bea945 | [
"MIT"
]
| 1 | 2021-07-03T04:49:53.000Z | 2021-07-03T04:49:53.000Z | from .helpers import *
from .decorators import *
| 16.333333 | 25 | 0.755102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
47d719fa6ddaa13236b1671d0f097880df05054a | 3,010 | py | Python | solvers/shortest_path.py | Psychofun/Snake-Gym | 59646ef2213e4cc2a68e238d010f5e9f25826951 | [
"MIT"
]
| null | null | null | solvers/shortest_path.py | Psychofun/Snake-Gym | 59646ef2213e4cc2a68e238d010f5e9f25826951 | [
"MIT"
]
| null | null | null | solvers/shortest_path.py | Psychofun/Snake-Gym | 59646ef2213e4cc2a68e238d010f5e9f25826951 | [
"MIT"
]
| null | null | null | import sys
sys.path.append("..")
from gym_snake.envs.node import Node
from gym_snake.envs.snake_env import action_to_vector
from gym_snake.envs.snake_env import SnakeAction
from gym_snake.envs.snake_env import SnakeCellState
from gym_snake.envs.snake_env import rotate_action_clockwise
from gym_snake.envs.snake_env import rotate_action_counter_clockwise
from gym_snake.envs.snake_env import invert_action
from gym_snake.queue import Queue
class ShortestPathBFSSolver():
def __init__(self):
pass
def move(self, environment):
self.environment = environment.copy()
self.environment.move()
shortest_path_move_from_transposition_table = self.environment._path_move_from_transposition_table(self.environment.starting_node, self.environment.fruit_node)
if shortest_path_move_from_transposition_table:
#print(" shortest_path_move_from_transposition_table: ", shortest_path_move_from_transposition_table)
return shortest_path_move_from_transposition_table
shortest_path = self.shortest_path(self.environment, self.environment.starting_node, self.environment.fruit_node)
if shortest_path:
#print("Shortest path: ", [x.action for x in shortest_path])
self.environment.transposition_table[self.environment.fruit_node] = shortest_path
first_point = shortest_path[-2]
return first_point.action
#print("prev action: ", self.environment.prev_action)
return self.environment.prev_action
def shortest_path(self, environment, start, end):
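        """Breadth-first search over the snake grid from start to end.

        Returns the path as a list of Node objects ordered end-to-start (the
        caller reads shortest_path[-2] as the first step), or [] when no path
        exists.
        """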
queue = Queue([start])
visited_nodes = set([start])
shortest_path = []
while queue.queue:
current_node = queue.dequeue()
if current_node == end:
shortest_path = current_node._recreate_path_for_node()
break
for action in environment.possible_actions_for_current_action(current_node.action):
# Convert action (int) to tuple
a_vector = action_to_vector(action)
# Apply action to point
neighbor = (current_node.point[0] + a_vector[0], current_node.point[1] + a_vector[1])
neighbor_state = environment.cell_state(neighbor)
if (neighbor_state == SnakeCellState.EMPTY or
neighbor_state == SnakeCellState.DOT
):
child_node = Node(neighbor)
child_node.action = action
child_node.previous_node = current_node
if child_node not in visited_nodes and child_node not in queue.queue:
visited_nodes.add(current_node)
queue.enqueue(child_node)
if shortest_path:
return shortest_path
else:
return []
| 38.589744 | 168 | 0.639535 | 2,545 | 0.845515 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.092691 |
47d7a401f53299346b73e5c7c5fe542392290c13 | 22,070 | py | Python | progressbot.py | tchapley/ProgressBot | 60837055999cbddcad637a514dc8af2e748374a8 | [
"MIT"
]
| null | null | null | progressbot.py | tchapley/ProgressBot | 60837055999cbddcad637a514dc8af2e748374a8 | [
"MIT"
]
| 2 | 2021-03-31T18:38:57.000Z | 2021-12-13T19:46:50.000Z | progressbot.py | tchapley/ProgressBot | 60837055999cbddcad637a514dc8af2e748374a8 | [
"MIT"
]
| null | null | null | import discord
from discord.ext import commands
import asyncio
import logging
import sys
import requests
import datetime
from bs4 import BeautifulSoup
from util import *
from wowapi import WowApi, WowApiException, WowApiConfigException
from killpoints import KillPoints
from math import ceil
base_wow_progress = "http://www.wowprogress.com"
base_wow_armory = "http://us.battle.net/wow/en/character/{0}/{1}/advanced"
base_wc_logs = "https://www.warcraftlogs.com:443/v1"
class_array = [ "Warrior", "Paladin", "Hunter", "Rogue", "Priest", "Death Knight",
"Shaman", "Mage", "Warlock", "Monk", "Druid", "Demon Hunter" ]
race_map = {
1: "Human", 2: "Orc", 3: "Dwarf", 4: "Night Elf", 5: "Undead", 6: "Tauren", 7: "Gnome",
8: "Troll", 9: "Goblin", 10: "Blood Elf", 11: "Draenei", 22: "Worgen",
24:"Pandaren", 25:"Pandaren", 26:"Pandaren"
}
artifactLevelCost = {
1: { "cost": 100, "total": 100 },
2: { "cost": 300, "total": 400 },
3: { "cost": 325, "total": 725 },
4: { "cost": 350, "total": 1075 },
5: { "cost": 375, "total": 1450 },
6: { "cost": 400, "total": 1850 },
7: { "cost": 425, "total": 2275 },
8: { "cost": 450, "total": 3250 },
9: { "cost": 525, "total": 3875 },
10: { "cost": 625, "total": 4625 },
11: { "cost": 750, "total": 4625 },
12: { "cost": 875, "total": 5500 },
13: { "cost": 1000, "total": 6500 },
14: { "cost": 6840, "total": 13340 },
15: { "cost": 8830, "total": 22170 },
16: { "cost": 11280, "total": 33450 },
17: { "cost": 14400, "total": 47850 },
18: { "cost": 18620, "total": 66470 },
19: { "cost": 24000, "total": 90470 },
20: { "cost": 30600, "total": 121070 },
21: { "cost": 39520, "total": 160590 },
22: { "cost": 50880, "total": 211470 },
23: { "cost": 64800, "total": 276270 },
24: { "cost": 82500, "total": 358770 },
25: { "cost": 105280, "total": 464050 },
26: { "cost": 138650, "total": 602700 },
27: { "cost": 182780, "total": 785480 },
28: { "cost": 240870, "total": 1026350 },
29: { "cost": 315520, "total": 1341870 },
30: { "cost": 417560, "total": 1759430 },
31: { "cost": 546000, "total": 2305430 },
32: { "cost": 718200, "total": 3023630 },
33: { "cost": 946660, "total": 3970290 },
34: { "cost": 1245840, "total": 5216130 },
35: { "cost": 1635200, "total": 6851330 },
36: { "cost": 1915000, "total": 8766330 },
37: { "cost": 2010000, "total": 10776330 },
38: { "cost": 2110000, "total": 12886330 },
39: { "cost": 2215000, "total": 15101330 },
40: { "cost": 2325000, "total": 17426330 },
41: { "cost": 2440000, "total": 19866330 },
42: { "cost": 2560000, "total": 22426330 },
43: { "cost": 2690000, "total": 25116330 },
44: { "cost": 2825000, "total": 27941330 },
45: { "cost": 2965000, "total": 30906330 },
46: { "cost": 3115000, "total": 34021330 },
47: { "cost": 3270000, "total": 37291330 },
48: { "cost": 3435000, "total": 40726330 },
49: { "cost": 3605000, "total": 44331330 },
50: { "cost": 3785000, "total": 48116330 },
51: { "cost": 3975000, "total": 52091330 },
52: { "cost": 4175000, "total": 56266330 },
53: { "cost": 4385000, "total": 60651330 },
54: { "cost": 4605000, "total": 65256330 }
}
artifactKnowledge = {
0: 1,
1: 1.25,
2: 1.5,
3: 1.9,
4: 2.4,
5: 3,
6: 3.75,
7: 4.75,
8: 6,
9: 7.5,
10: 9.5,
11: 12,
12: 15,
13: 18.75,
14: 23.5,
15: 29.5,
16: 37,
17: 46.5,
18: 58,
19: 73,
20: 91,
21: 114,
22: 143,
23: 179,
24: 224,
25: 250
}
apRewards = {
"+2-3": 500,
"+4-6": 800,
"+7-9": 1000,
"10+": 1200,
}
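# Illustrative sketch of how the tables above combine (helper name
# hypothetical; the real arithmetic lives in the !calc command below): a
# keystone's AP reward is the base apRewards value scaled by the player's
# artifactKnowledge multiplier.
def _estimate_runs_to_next_level(main_level, ap_in_level, knowledge, reward_key="10+"):
    # assumes main_level < 54, mirroring the bounds checks in calc()
    scaled_reward = apRewards[reward_key] * artifactKnowledge[knowledge]
    ap_to_level = artifactLevelCost[main_level + 1]["cost"] - ap_in_level
    return ceil(ap_to_level / scaled_reward)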
set_wow_api_key()
set_wclogs_api_key()
# Logger info
discord_logger = logging.getLogger('discord')
discord_logger.setLevel(logging.CRITICAL)
bot = commands.Bot(command_prefix='!', description ='Progress Bot')
"""
Events Region
"""
@bot.event
async def on_ready():
print("Logged in as {0} with ID {1}".format(bot.user.name, bot.user.id));
@bot.command()
async def exit():
print('Exiting')
# await bot.say('This conversation can serve no purpose anymore. Goodbye.')
await bot.logout()
"""
Commands Region
"""
@bot.command()
async def ap(classes="", realm="connected-boulderfist", region="us"):
print("\n%s***COMMAND***: artifact power command with arguments class=%s realm=%s region=%s"%(get_current_time(),classes, realm, region))
url_class = ""
if classes:
if classes == "death_knight":
            classes = classes.replace("_", "")
url_class = "class." + classes
url = base_wow_progress + "/artifact_power/{0}/{1}/{2}".format(region, realm, url_class)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
table = soup.find("table").find_all("td")
values = []
for i in table:
values.append(i.get_text().encode("UTF"))
characters = []
for rank, name, guild, ap in zip(values[0::4], values[1::4], values[2::4], values[3::4]):
characters.append(ArtifactPower(rank.decode("unicode_escape"), name.decode("unicode_escape"), guild.decode("unicode_escape"), ap.decode("unicode_escape")))
headers = ['rank', 'name', 'guild', 'ap']
item_lens = [[getattr(character, x) for x in headers] for character in characters]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nArtifact power rankings for {0}-{1}".format(region, realm)
if classes:
message += " for " + classes + "s"
message += "\n"
for i in characters:
message += '\t'.join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(ex)
await bot.say("{0}\n<{1}>".format(str(ex), url))
@bot.command()
async def character(name="bresp", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: character command with arguments name=%s realm=%s region=%s"%(get_current_time(), name, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="achievements,items,statistics")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
level = payload['level']
race = race_map[payload['race']]
playerClass = class_array[payload['class']-1]
playerRealm = payload['realm']
battlegroup = payload['battlegroup']
itemLevel = payload['items']['averageItemLevelEquipped']
achievementPoints = payload['achievementPoints']
artifactPoints = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(30103)]
mainLevel = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(29395)]
knowledge = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(31466)]
lastModified = get_time(payload['lastModified'] / 1000)
fifteen = 0
ten = 0
five = 0
two = 0
if 32028 in payload['achievements']['criteria']:
fifteen = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(32028)]
if 33098 in payload['achievements']['criteria']:
ten += payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(33098)]
if 33097 in payload['achievements']['criteria']:
five += payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(33097)]
if 33096 in payload['achievements']['criteria']:
two += payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(33096)]
mythics = "Mythics: #fifteen: {0} #ten: {1} #five: {2} #two: {3}".format(fifteen, ten, five, two)
EN = []
TOV = []
NH = []
for x in payload['statistics']['subCategories']:
if x['name'] == "Dungeons & Raids":
for y in x['subCategories']:
if y['name'] == "Legion":
populate_raids(y, EN, 7, 33)
populate_raids(y, TOV, 3, 61)
populate_raids(y, NH, 10, 73)
en = get_difficulty(EN, 7)
tov = get_difficulty(TOV, 3)
nh = get_difficulty(NH, 10)
print("Looking for {0} on {1}-{2}".format(name, region, realm))
message = "**{0}** *{1} {2} {3}*\n".format(playerName, level, race, playerClass)
message += "```css\n"
message += "Realm: {0}\n".format(playerRealm)
message += "Battlegroup: {0}\n".format(battlegroup)
message += "Item Level: {0}\n".format(itemLevel)
message += "Achievement Points: {0}\n".format(achievementPoints)
message += "Artifact Power: {0}\n".format(artifactPoints)
message += "Artifact Knowledge: {0}\n".format(knowledge)
message += "Artifact Level: {0}\n".format(mainLevel)
message += "{0}\n".format(mythics)
message += "Raids:\n\tEmerald Nightmare: {0}\n\tTrial of Valor: {1}\n\tNighthold: {2}\n".format(en, tov, nh)
await bot.say("{0}```\nLast Updated: {1}\n<{2}>".format(message, lastModified, base_wow_armory.format(realm, playerName)))
@bot.command()
async def calc(name="bresp", realm="boulderfist", apInLevel=0, region="us"):
print("\n%s***COMMAND***: calc command with arguments name=%s realm=%s apInLevel=%s region=%s"%(get_current_time(), name, realm, apInLevel, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="achievements")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
mainLevel = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(29395)]
knowledge = payload['achievements']['criteriaQuantity'][payload['achievements']['criteria'].index(31466)]
multiplier = artifactKnowledge[knowledge]
artifactPoints = (artifactLevelCost[mainLevel]['total']+apInLevel)
apToLevel = 0
apToMax = 0
if mainLevel < 54:
apToLevel = artifactLevelCost[mainLevel+1]['cost'] - apInLevel
apToMax = artifactLevelCost[54]['total'] - artifactPoints
apTo35 = 0
if mainLevel < 35:
apTo35 = artifactLevelCost[35]['total'] - artifactPoints
rows = []
for reward in apRewards:
scaledReward = apRewards[reward] * multiplier
toLevel = ceil(apToLevel / scaledReward)
to35 = ceil(apTo35 / scaledReward)
toMax = ceil(apToMax / scaledReward)
rows.append(Calc(reward, toLevel, to35, toMax))
# print("Looking for {0} on {1}-{2}".format(name, region, realm))
message = "```css\n"
message += "Total AP: {0}\n".format(artifactPoints)
message += "Artifact Level: {0}\n".format(mainLevel)
message += "Artifact Knowledge: {0}\n".format(knowledge)
message += "AP in level: {0}\n".format(apInLevel)
message += "AP to next level: {0}\n".format(apToLevel)
message += "AP to 54: {0}\n\n".format(apToMax)
headers = ['mythic', 'toNextLevel', 'to35', 'toMax']
item_lens = [[getattr(row, x) for x in headers] for row in rows]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip(headers, max_lens)) + '\n'
for i in rows:
message += '\t'.join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```".format(message))
@bot.command()
async def guild(guild="dragon+knight", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: guild command with arguments guild=%s realm=%s region=%s"%(get_current_time(), guild, realm, region))
guild = guild.replace("_", "+")
url = base_wow_progress + "/guild/{0}/{1}/{2}".format(region, realm, guild)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
progress = soup.find_all("span", class_="innerLink")
if not progress: raise ValueError("No progress found\n<{0}>".format(url))
print("Looking for %s on %s-%s"%(guild, region, realm))
message = "**{0}** *{1}*".format(guild.replace("+", " "), realm)
message += "```css\n"
for b in progress:
message += b.get_text()
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(str(ex))
await bot.say(str(ex))
@bot.command()
async def legendary(name="bresp", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: legendary command with arguments name=%s realm=%s region=%s"%(get_current_time(),name, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="achievements,progression")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
kp = KillPoints(payload)
killpoints = kp.get_total_points()
legendaries = kp.get_legendary_count(killpoints)
till_next = kp.get_points_till_next(killpoints)
percent_till_next = kp.get_percent_till_next()
message = "**{0}** has **{1}** kill points.\n".format(payload['name'], killpoints)
message += "They should have **{0} legendaries**\n".format(legendaries)
message += "They have **{0} points** until their next legendary\n".format(till_next)
message += "They have completed **{0}%** of the progress towards their next legendary".format(percent_till_next)
await bot.say(message)
@bot.command()
async def mounts(name="bresp", realm="boulderfist", mount="", region="us"):
print("\n%s***COMMAND***: mount command with arguments name=%s mount=%s realm=%s region=%s"%(get_current_time(), name, mount, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="mounts")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
if not mount:
collected = payload['mounts']['numCollected']
await bot.say("**{0}** has collected **{1} mounts**".format(playerName, collected))
else:
mount.replace("\"", "")
for m in payload['mounts']['collected']:
if m['name'].lower() == mount.lower() :
await bot.say("**{0}** has collected **{1}**".format(playerName, m['name']))
return
else:
await bot.say("**{0}** has *not* collected **{1}**".format(playerName, mount))
@bot.command()
async def mp(classes="", realm="connected-boulderfist", region="us"):
print("\n%s***COMMAND***: mythic plus command with arguments class=%s realm=%s region=%s"%(get_current_time(),classes, realm, region))
url_class = ""
if classes:
if classes == "death_knight":
classes = classes.replace("_", "")
url_class = "class." + classes
url = base_wow_progress + "/mythic_plus_score/{0}/{1}/{2}".format(region, realm, url_class)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
table = soup.find("table").find_all("td")
values = []
for i in table:
values.append(i.get_text().encode("UTF"))
characters = []
for rank, name, guild, score in zip(values[0::4], values[1::4], values[2::4], values[3::4]):
characters.append(MythicPlus(rank.decode("unicode_escape"), name.decode("unicode_escape"), guild.decode("unicode_escape"), score.decode("unicode_escape")))
headers = ['rank', 'name', 'guild', 'score']
item_lens = [[getattr(character, x) for x in headers] for character in characters]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nMythic plus rankings for {0}-{1}".format(region, realm)
if classes:
message += " for " + classes + "s"
message += "\n"
for i in characters:
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(ex)
await bot.say("{0}\n<{1}>".format(str(ex), url))
@bot.command()
async def pvp(name="", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: pvp command with arguments name=%s realm=%s region=%s"%(get_current_time(), name, realm, region))
payload = ""
try:
payload = WowApi.get_character_profile(region, realm, name, locale="en_US", fields="pvp")
except WowApiException as ex:
print(ex)
await bot.say(str(ex))
return
playerName = payload['name']
level = payload['level']
race = race_map[payload['race']]
playerClass = class_array[payload['class']-1]
lastModified = get_time(payload['lastModified'] / 1000)
playerRealm = payload['realm']
battlegroup = payload['battlegroup']
honorableKills = payload['totalHonorableKills']
rbgRating = payload['pvp']['brackets']['ARENA_BRACKET_RBG']['rating']
twosRating = payload['pvp']['brackets']['ARENA_BRACKET_2v2']['rating']
threesRating = payload['pvp']['brackets']['ARENA_BRACKET_3v3']['rating']
message = "**{0}** *{1} {2} {3}*\n".format(playerName, level, race, playerClass)
message += "```css\n"
message += "Realm: {0}\n".format(playerRealm)
message += "Battlegroup: {0}\n".format(battlegroup)
message += "Honorable Kills: {0}\n".format(honorableKills)
message += "Rated BG Rating: {0}\n".format(rbgRating)
message += "Twos Rating: {0}\n".format(twosRating)
message += "Threes Rating: {0}\n".format(threesRating)
await bot.say("{0}```\nLast Updated: {1}\n<{2}>".format(message, lastModified, base_wow_armory.format(realm, playerName)))
@bot.command()
async def rank(name="", spec="", role="dps", realm="boulderfist", region="us"):
print("\n%s***COMMAND***: rank command with arguments name=%s spec=%s role=%s realm=%s region=%s"%(get_current_time(), name, spec, role, realm, region))
if not spec:
await bot.say("Please provide a spec to check ranks for")
return
if role not in [ 'dps', 'hps', 'krsi' ]:
await bot.say("Please provide a valid role. Your options are hps, dps, or krsi")
return
stats = {
5: { 'kills': 0, 'best': 0, 'average': 0, 'allstar_points': 0, 'size': 0},
4: { 'kills': 0, 'best': 0, 'average': 0, 'allstar_points': 0, 'size': 0},
3: { 'kills': 0, 'best': 0, 'average': 0, 'allstar_points': 0, 'size': 0}
}
character_id = ""
url = base_wc_logs + "/parses/character/{0}/{1}/{2}".format(name, realm, region)
page = requests.get(url, { 'metric': role, 'api_key': os.environ['WCLOG_APIKEY'] })
print("URL: {0} Status: {1}".format(url, page.status_code))
if page.status_code != 200:
await bot.say("No rankings found\n<{0}>".format(url))
return
else:
payload = page.json()
for i in payload:
difficulty = i['difficulty']
stats[difficulty]['size'] += 1
for j in range(0, len(i['specs'])):
character_id = i['specs'][j]['data'][0]['character_id']
if i['specs'][j]['spec'].lower() == spec.lower():
stats[difficulty]['kills'] += len(i['specs'][j]['data'])
historical_percent = i['specs'][j]['best_historical_percent']
if historical_percent > stats[difficulty]['best']:
stats[difficulty]['best'] = historical_percent
stats[difficulty]['average'] += historical_percent
stats[difficulty]['allstar_points'] += i['specs'][j]['best_allstar_points']
items = []
for key in stats:
difficulty = ""
if key == 5: difficulty = "Mythic"
elif key == 4: difficulty = "Heroic"
elif key == 3: difficulty = "Normal"
kills = stats[key]['kills']
best = stats[key]['best']
average = stats[key]['average']
size = stats[key]['size']
if size != 0:
average = round(average / size)
allstar_points = round(stats[key]['allstar_points'])
items.append(Rankings(difficulty, kills, best, average, allstar_points))
headers = ['difficulty', 'kills', 'best', 'average', 'allstar_points']
item_lens = [[getattr(item, x) for x in headers] for item in items]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nLatest rankings for {0} (spec={1} role={2}) on {3}-{4}\n".format(name, spec, role, region, realm)
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip(headers, max_lens)) + '\n'
for i in items:
message += "\t".join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
url = "https://www.warcraftlogs.com/rankings/character/{0}/latest".format(character_id)
await bot.say("{0}\n```<{1}>".format(message, url))
@bot.command()
async def realm(realm="connected-boulderfist", region="us"):
print("\n%s***COMMAND***: realm command with arguments realm=%s region=%s"%(get_current_time(), realm, region))
url = base_wow_progress + "/pve/{0}/{1}".format(region, realm)
page = requests.get(url)
print("URL: {0} Status: {1}".format(url, page.status_code))
try:
soup = BeautifulSoup(page.content, "lxml", from_encoding="UTF")
guilds = soup.find_all("a", class_="guild")
ranks = soup.find_all("span", class_="rank")
progress = soup.find_all("span", class_="ratingProgress")
items = []
for i in range(0, len(guilds)):
items.append(GuildProgress(i+1, guilds[i].get_text(), ranks[i].get_text(), progress[i].get_text()))
headers = ['rank', 'name', 'world', 'progress']
item_lens = [[getattr(guild, x) for x in headers] for guild in items]
max_lens = [len(str(max(i, key=lambda x: len(str(x))))) for i in zip(*[headers] + item_lens)]
message = "```css\nGuild progress rankings for {0}-{1}\n".format(region, realm)
for i in items:
message += '\t'.join('{0:{width}}'.format(x, width=y) for x, y in zip([getattr(i, x) for x in headers], max_lens)) + "\n"
await bot.say("{0}\n```<{1}>".format(message, url))
except Exception as ex:
print(ex)
await bot.say("{0}\n<{1}>".format(str(ex), url))
@bot.command()
async def whoisyourmaster():
await bot.reply("you are")
bot.run('MjczNTgyNTAwNTk1MzY3OTM2.C2lq3A.imEczu1BMAqrOYJfZEBTPJavOvc')
| 37.343486 | 161 | 0.631899 | 0 | 0 | 0 | 0 | 18,141 | 0.821976 | 17,950 | 0.813321 | 6,683 | 0.302809 |
47d9292775bb73955a326acc7b317a3683aeeec2 | 10,974 | py | Python | python_poc/adapters/fingrid_api_adapter.py | pervcomp/Procem | 6cefbf6c81b51af948feb9510d39820f8e6f113e | [
"MIT"
]
| 1 | 2019-01-09T14:38:44.000Z | 2019-01-09T14:38:44.000Z | python_poc/adapters/fingrid_api_adapter.py | pervcomp/Procem | 6cefbf6c81b51af948feb9510d39820f8e6f113e | [
"MIT"
]
| 4 | 2021-03-09T00:03:21.000Z | 2022-02-12T05:33:21.000Z | python_poc/adapters/fingrid_api_adapter.py | pervcomp/Procem | 6cefbf6c81b51af948feb9510d39820f8e6f113e | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Module for reading and parsing values from Fingrid APIs."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): Ville Heikkila, Otto Hylli, Pekka Itavuo,
# Teemu Laukkarinen ja Ulla-Talvikki Virta
import copy
import csv
import datetime
import json
import requests
import time
try:
import adapters.common_utils as common_utils
import adapters.rest_utils as rest_utils
except:
# used when running the module directly
import common_utils
import rest_utils
class FingridCollection:
"""Class for holding a collection of Fingrid reader/handler objects."""
def __init__(self, params, data_queue):
# always wait at least this long before making a new query
self.__min_waiting_time = params.get("min_waiting_time_s", 10)
self.__fingrids = [] # the Fingrid objects
        self.__times = [] # the calculated waiting time for each Fingrid object until the next read should be done
self.__last_check = time.time() # the time in which the last API check was done
self.__data_queue = data_queue # the queue which is used to send the received data to Procem RTL handler
self.createFingrids(params)
def createFingrids(self, params):
"""Create the Fingrid objects for the collection according to the given parameters."""
csv_filename = params.get("csv_filename", "")
config = params.get("config", {})
csv_header = config.get("csv_header", {})
rtl_id_field = csv_header.get("rtl_id", "rtl_id")
variable_id_field = csv_header.get("variable_id", "variable_id")
datatype_field = csv_header.get("datatype", "datatype")
unit_field = csv_header.get("unit", "unit")
query_interval_field = csv_header.get("query_interval", "query_interval")
query_interval_min_field = csv_header.get("query_interval_min", "query_interval_min")
store_interval_field = csv_header.get("store_interval", "store_interval")
is_prediction_field = csv_header.get("is_prediction", "is_prediction")
prediction_length_field = csv_header.get("prediction_length", "prediction_length")
name_field = csv_header.get("name", "name")
path_field = csv_header.get("path", "path")
confidential_field = csv_header.get("confidential", "confidential")
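        # the csv file is semicolon separated; the expected columns (renamable via the
        # "csv_header" section of the configuration) are the fields resolved above:
        # rtl_id, variable_id, datatype, unit, query_interval, store_interval, etc.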
try:
with open(csv_filename, mode="r") as csv_file:
reader = csv.DictReader(csv_file, delimiter=";")
for row in reader:
new_params = copy.deepcopy(params)
new_params["rtl_id"] = int(row.get(rtl_id_field, 0))
new_params["id"] = int(row.get(variable_id_field, 0))
new_params["datatype"] = row.get(datatype_field, "float")
new_params["unit"] = row.get(unit_field, "")
new_params["time_interval_s"] = int(row.get(query_interval_field, 3600))
new_params["time_interval_min_s"] = int(row.get(query_interval_min_field, 60))
new_params["iot_ticket_name"] = row.get(name_field, "")
new_params["iot_ticket_path"] = row.get(path_field, "/Fingrid")
new_params["confidential"] = row.get(confidential_field, "") != ""
store_interval = row.get(store_interval_field, "")
if store_interval != "":
new_params["store_interval"] = int(store_interval)
is_prediction = row.get(is_prediction_field, "") != ""
if is_prediction:
new_params["is_prediction"] = is_prediction
new_params["prediction_length_s"] = int(row.get(prediction_length_field, 0))
self.__fingrids.append(Fingrid(new_params, self.__data_queue))
self.__times.append(None)
except:
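            # any failure while reading the csv is silently ignored;
            # only the rows parsed so far are kept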
pass
def getData(self):
"""Tries to get new data from the Fingrid APIs. If new data is found, it is send to the Procem RTL handler and
the function returns True. Otherwise, the function returns False."""
time_diff = time.time() - self.__last_check
success = []
for index, (fingrid, waiting_time) in enumerate(zip(self.__fingrids, self.__times)):
if waiting_time is None:
self.__times[index] = fingrid.getWaitingTime() + time_diff
continue
elif waiting_time <= time_diff:
success.append(fingrid.getData())
self.__times[index] = None
if success.count(True) > 0:
# put empty item to the queue as a mark that the buffer should be emptied
self.__data_queue.put(bytes())
return True
else:
return False
def getWaitingTime(self):
"""Returns the time in seconds that should be waited before making the next data query."""
current_time = time.time()
time_diff = current_time - self.__last_check
for index, (fingrid, waiting_time) in enumerate(zip(self.__fingrids, self.__times)):
if waiting_time is None:
self.__times[index] = fingrid.getWaitingTime()
else:
self.__times[index] = max(waiting_time - time_diff, 0.0)
min_waiting_time = min(self.__times)
self.__last_check = current_time
return max(min_waiting_time, self.__min_waiting_time)
class Fingrid:
"""Class for holding a single Fingrid API reader/handler."""
def __init__(self, params, data_queue):
self.__config = params.get("config", {})
self.__variable_id = int(params.get("id", 0))
self.__rtl_id = int(params.get("rtl_id", 0))
self.__unit = params.get("unit", "")
self.__datatype = params.get("datatype", "float")
self.__path = params.get("iot_ticket_path", "/Fingrid")
self.__name = params.get("iot_ticket_name", "")
self.__confidential = params.get("confidential", False)
self.__last_update = None # the timestamp for the latest query time
self.__last_value_dt = None # the datetime for the latest received value
self.__time_interval = params.get("time_interval_s", 3600)
self.__time_interval_min = params.get("time_interval_min_s", 60)
self.__store_interval = params.get("store_interval", 0)
self.__is_prediction = params.get("is_prediction", False)
self.__prediction_length = params.get("prediction_length_s", 0)
self.__data_queue = data_queue
def getStartTime(self):
"""Calculates and returns the start time as a timestamp for the next API query."""
if self.__store_interval > 0:
if self.__last_value_dt is not None and not self.__is_prediction:
return (self.__last_value_dt + datetime.timedelta(seconds=self.__store_interval)).timestamp()
dt_now = datetime.datetime.now().replace(microsecond=0)
if self.__last_update is None:
dt_now -= datetime.timedelta(seconds=self.__time_interval)
day_start = dt_now.replace(hour=0, minute=0, second=0)
seconds_back = int((dt_now - day_start).total_seconds()) % self.__store_interval
dt_start = dt_now - datetime.timedelta(seconds=seconds_back)
return dt_start.timestamp()
else:
if self.__last_update is None:
return time.time() - self.__time_interval
elif self.__is_prediction:
return time.time()
else:
return self.__last_update + 1
def getData(self):
"""Tries to get new data from the Fingrid API. If new data is found, it is send to the Procem RTL handler and
the function returns True. Otherwise, the function returns False."""
try:
starttime = self.getStartTime()
if self.__is_prediction:
endtime = time.time() + self.__prediction_length
else:
endtime = time.time()
# get the response from the API
kwargs = {
"config": self.__config,
"variable_id": self.__variable_id,
"start_time": starttime,
"end_time": endtime
}
req = rest_utils.runAPIQuery(**kwargs)
if req.status_code != rest_utils.STATUS_OK:
print(common_utils.getTimeString(), "Fingrid, received status code:", req.status_code,
"for variable", self.__variable_id)
return False
result_datetime_format = self.__config["result_datetime_format"]
data = json.loads(req.text)
values = []
first_dt = None
if self.__is_prediction:
self.__last_value_dt = None
for item in data:
v = item["value"]
time_str = item["start_time"]
dt = datetime.datetime.strptime(time_str, result_datetime_format)
if (self.__last_value_dt is not None and
(dt - self.__last_value_dt).total_seconds() < self.__store_interval):
continue
else:
self.__last_value_dt = dt
if first_dt is None:
first_dt = dt
ts = int(dt.timestamp() * 1000)
values.append({"v": v, "ts": ts})
if len(values) == 0:
return False
self.sendDataToProcem(values)
self.__last_update = time.time()
return True
except Exception as error:
print(common_utils.getTimeString(), "Fingrid,", error)
return False
def getWaitingTime(self):
"""Returns the time in seconds that should be waited before making the next data query."""
if self.__last_update is None:
return self.__time_interval_min / 2
else:
return max(
self.__time_interval_min,
self.__time_interval - (time.time() - self.__last_update))
def sendDataToProcem(self, values):
"""Sends the data to Procem RTL handler."""
rtl_id = self.__rtl_id
unit = self.__unit
datatype = self.__datatype
name = self.__name
path = self.__path
confidential = self.__confidential
for item in values:
v = item["v"]
ts = item["ts"]
pkt_str = common_utils.getProcemRTLpkt(name, path, v, ts, unit, datatype, rtl_id, confidential)
packet = bytes(pkt_str, "utf-8")
self.__data_queue.put(packet)
| 43.896 | 118 | 0.609714 | 10,224 | 0.931657 | 0 | 0 | 0 | 0 | 0 | 0 | 2,787 | 0.253964 |
47de6098d15918068f7f92c961c579b6396d5610 | 1,222 | py | Python | scripts/acr_make_overview.py | vogelbac/LAB-QA2GO- | be434da7399d396413309f947f4b634d8fae9a17 | [
"BSD-3-Clause"
]
| 14 | 2019-02-07T10:50:58.000Z | 2021-09-03T16:11:00.000Z | scripts/acr_make_overview.py | vogelbac/LAB-QA2GO- | be434da7399d396413309f947f4b634d8fae9a17 | [
"BSD-3-Clause"
]
| 6 | 2019-01-28T09:19:27.000Z | 2021-09-09T06:56:42.000Z | scripts/acr_make_overview.py | vogelbac/LAB-QA2GO | be434da7399d396413309f947f4b634d8fae9a17 | [
"BSD-3-Clause"
]
| 4 | 2019-01-28T09:00:58.000Z | 2021-05-25T13:54:40.000Z | # script to generate an overview file of all measurentoverview files
import os
import sys
def main(result_folder):
######### Main routine
erg_path = result_folder
fobj = open(erg_path+'overview.txt','w')
	# list of value names used as column headers
value_list =['Datum','Geo_loc','Geo_T1sl1hor','Geo_T1sl1ver','Geo_T1sl5hor','Geo_T1sl5ver','Geo_T1sl5+45','Geo_T1sl5-45','Res_maxT1rechts1_1','Res_maxT1links1_1','Res_maxT1rechts1_0','Res_maxT1links1_0','Res_maxT1rechts0_9','Res_maxT1links0_9','Res_maxT2rechts1_1','Res_maxT2links1_1','Res_maxT2rechts1_0','Res_maxT2links1_0','Res_maxT2rechts0_9','Res_maxT2links0_9','Thick_T1','Thick_T2','Pos_T1sl2','Pos_T1sl12','Pos_T2sl2','Pos_T2sl12','PIU_T1','PIU_T2','PSG_T1','PSG_T2','LCOD_sum_T1','LCOD_sum_T2']
for v in value_list:
fobj.write(v+'\t')
fobj.write('\n')
erg_folder = os.listdir(erg_path)
erg_folder.sort()
for i in erg_folder:
if os.path.isdir(erg_path+i):
value_path = erg_path+i+'/overview.txt'
if os.path.exists(value_path):
values = open(value_path,'r')
for k in values:
fobj.write(k+'\t')
fobj.write('\n')
values.close()
fobj.close()
| 31.333333 | 504 | 0.720949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 619 | 0.506547 |
47de90ccb81d9e53366bae7e288742ccd559ea5d | 328 | py | Python | tests/test_protein.py | kalekundert/autosnapgene | cc019b89f7ab8842d95fd268c24987aabbe1c0b6 | [
"MIT"
]
| null | null | null | tests/test_protein.py | kalekundert/autosnapgene | cc019b89f7ab8842d95fd268c24987aabbe1c0b6 | [
"MIT"
]
| null | null | null | tests/test_protein.py | kalekundert/autosnapgene | cc019b89f7ab8842d95fd268c24987aabbe1c0b6 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import pytest
import autosnapgene as snap
def test_getters(parse_and_write):
for dna in parse_and_write('flag_tag.prot'):
assert dna.sequence == 'DYKDDDDK'
assert dna.protein_sequence == 'DYKDDDDK'
def test_setters():
    dna = snap.SnapGene()
    dna.protein_sequence = 'DYKDDDDK'
    assert dna.protein_sequence == 'DYKDDDDK'
| 23.428571 | 49 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.204268 |
47e1291a9d383474886f3b6cb416cfcb840ff9bb | 1,514 | py | Python | containers/ice_block.py | craigtmoore/freezer_escape_room | 813144641c079db9ab73c873e354ffc57200a3dd | [
"MIT"
]
| null | null | null | containers/ice_block.py | craigtmoore/freezer_escape_room | 813144641c079db9ab73c873e354ffc57200a3dd | [
"MIT"
]
| null | null | null | containers/ice_block.py | craigtmoore/freezer_escape_room | 813144641c079db9ab73c873e354ffc57200a3dd | [
"MIT"
]
| null | null | null | from typing import Set, List
from inspectable import Inspectable
from interactable import Interactable
from items import Batteries, Hammer
from usable import Usable
class IceBlock(Inspectable, Interactable):
def __init__(self):
super().__init__()
self.name = 'ice block'
self.description = 'A large block of ice'
self.is_broken = False
def inspect(self) -> Set:
if self.is_broken:
print('Pieces of ice litter the floor where you shattered it earlier.')
else:
print('You look closely at the ice and see a pair of batteries are frozen inside.')
print('You attempt to smash the ice on the floor to get them out, but it is too ')
print('solid to break that way, you\'ll need something to smash it')
return set()
def interact(self, usable: Usable) -> List:
found_items = []
if self.is_broken:
print(f'You attempt to use the {usable.name} on pieces of ice and start to wonder if you\'re going crazy')
elif isinstance(usable, Hammer):
print('You smash the ice with the hammer until it shatters and the batteries fall to the floor.')
print('You collect the batteries and put them in your pocket.')
found_items.append(Batteries())
else:
print(f'You attempt to use the {usable.name} on the block of ice, but it does not look like it will work '
f'so you stop.')
return found_items
| 36.926829 | 118 | 0.640687 | 1,345 | 0.888375 | 0 | 0 | 0 | 0 | 0 | 0 | 669 | 0.441876 |
47e13e680106c821ea95e6afefa8c3825aa0febc | 97 | py | Python | holagit.py | fvenya7/practica1 | 2c7084629f0c5e3788a377f8c9d916c28fe188f4 | [
"MIT"
]
| null | null | null | holagit.py | fvenya7/practica1 | 2c7084629f0c5e3788a377f8c9d916c28fe188f4 | [
"MIT"
]
| null | null | null | holagit.py | fvenya7/practica1 | 2c7084629f0c5e3788a377f8c9d916c28fe188f4 | [
"MIT"
]
| null | null | null | print("aprendiendo git")
a=12
print("ya quedó")
print("actualizacion 1")
print("catualizacion 2") | 19.4 | 24 | 0.742268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.632653 |
47e169f6fbed0c98822c2408dc1e36d39f35b41d | 463 | py | Python | scripts/make_json_dataset.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
]
| 27 | 2018-11-23T21:37:14.000Z | 2021-11-22T08:44:35.000Z | scripts/make_json_dataset.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
]
| 6 | 2019-07-09T16:26:56.000Z | 2021-05-17T17:29:42.000Z | scripts/make_json_dataset.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
]
| 4 | 2019-06-11T06:44:30.000Z | 2021-02-27T14:49:02.000Z | import argparse
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--frames-path", type=str)
parser.add_argument("--output-path", type=str)
args = parser.parse_args()
    frame_paths = sorted(Path(args.frames_path).iterdir())
    with open(args.output_path, "w") as f:
        for p in frame_paths:
            f.write('{"image_path": "%s"}\n' % str(p))
| 33.071429 | 63 | 0.652268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.144708 |
47e20a2e721763c69d92b54d367291736f3e69c7 | 26,979 | py | Python | app/document/routes.py | DCGM/pero_ocr_web | e901027712827278f9ace914f6ccba16d3ac280f | [
"BSD-2-Clause"
]
| 2 | 2020-05-07T13:58:31.000Z | 2021-01-27T09:33:07.000Z | app/document/routes.py | DCGM/pero_ocr_web | e901027712827278f9ace914f6ccba16d3ac280f | [
"BSD-2-Clause"
]
| 47 | 2019-09-17T19:20:07.000Z | 2022-03-20T12:33:28.000Z | app/document/routes.py | DCGM/pero_ocr_web | e901027712827278f9ace914f6ccba16d3ac280f | [
"BSD-2-Clause"
]
| 1 | 2019-10-02T10:42:35.000Z | 2019-10-02T10:42:35.000Z | import _thread
import sqlalchemy
from app.document import bp
from flask_login import login_required, current_user
from flask import render_template, redirect, url_for, request, send_file, flash, jsonify
from flask import current_app
from app.document.general import create_document, check_and_remove_document, save_image, \
get_collaborators_select_data, save_collaborators, is_document_owner, is_user_owner_or_collaborator,\
remove_image, get_document_images, get_page_layout, get_page_layout_text, update_confidences, is_user_trusted,\
is_granted_acces_for_page, is_granted_acces_for_document, get_line_image_by_id, get_sucpect_lines_ids, \
compute_scores_of_doc, skip_textline, get_line, is_granted_acces_for_line, create_string_response, \
update_baselines, make_image_preview, find_textlines, get_documents_with_granted_acces, \
check_and_change_public_document, is_document_public
from werkzeug.exceptions import NotFound
from app.db.general import get_requests
from app.db.general import get_user_documents, get_document_by_id, get_user_by_email, get_all_documents,\
get_previews_for_documents, get_image_by_id, get_public_documents
from app.document.forms import CreateDocumentForm
from app.document.annotation_statistics import get_document_annotation_statistics, get_user_annotation_statistics, get_document_annotation_statistics_by_day
from io import BytesIO
import dateutil.parser
import zipfile
import time
import os
import json
import re
from natsort import natsorted
@bp.route('/documents')
@login_required
def documents():
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
user_documents = sorted(user_documents, key=lambda x: x.created_date)[::-1]
document_ids = [d.id for d in user_documents]
previews = dict([(im.document_id, im) for im in get_previews_for_documents(document_ids)])
for d in user_documents:
if d.id not in previews:
previews[d.id] = ""
return render_template('document/documents.html', documents=user_documents, previews=previews)
@bp.route('/public_documents')
def public_documents():
db_documents = get_public_documents()
document_ids = [d.id for d in db_documents]
previews = dict([(im.document_id, im) for im in get_previews_for_documents(document_ids)])
for d in db_documents:
if d.id not in previews:
previews[d.id] = ""
return render_template('document/public_documents.html', documents=db_documents, previews=previews)
@bp.route('/annotation_statistics/<string:document_id>')
@login_required
def annotation_statistics(document_id):
if not (is_user_owner_or_collaborator(document_id, current_user) or is_user_trusted(current_user)):
flash(u'You do not have sufficient rights to view statistics for this document!', 'danger')
return redirect(url_for('main.index'))
document = get_document_by_id(document_id)
statistics = get_document_annotation_statistics(document)
return render_template('document/annotation_statistics.html', statistics=statistics, header_name=document.name)
@bp.route('/annotation_statistics')
@login_required
def annotation_statistics_global():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to view global statistics!', 'danger')
return redirect(url_for('main.index'))
statistics = get_document_annotation_statistics()
return render_template('document/annotation_statistics.html', statistics=statistics, header_name='All documents')
@bp.route('/user_annotation_statistics/<string:user_email>')
@login_required
def user_annotation_statistics(user_email):
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to view statistics for other users!', 'danger')
return redirect(url_for('main.index'))
user = get_user_by_email(user_email)
statistics = get_user_annotation_statistics(user)
return render_template('document/annotation_statistics.html',
statistics=statistics, header_name=f'{user.first_name} {user.last_name}')
@bp.route('/user_annotation_statistics')
@login_required
def user_annotation_statistics_current_user():
statistics = get_user_annotation_statistics(current_user)
return render_template('document/annotation_statistics.html',
statistics=statistics, header_name=f'{current_user.first_name} {current_user.last_name}')
@bp.route('/user_annotation_statistics_global')
@login_required
def user_annotation_statistics_global():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to view statistics for other users!', 'danger')
return redirect(url_for('main.index'))
statistics = get_user_annotation_statistics()
return render_template('document/annotation_statistics.html', statistics=statistics, header_name='All users')
@bp.route('/requests')
@login_required
def requests():
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
document_ids = [d.id for d in user_documents]
requests = get_requests(document_ids)
return render_template('requests/request_list.html', requests=requests)
@bp.route('/document_history/<string:document_id>')
@login_required
def document_history(document_id):
if not (is_user_owner_or_collaborator(document_id, current_user) or is_user_trusted(current_user)):
flash(u'You do not have sufficient rights to view statistics for this document!', 'danger')
return redirect(url_for('main.index'))
db_requests = get_requests(document_ids=[document_id])
db_document = get_document_by_id(document_id)
ann_stats = get_document_annotation_statistics_by_day(db_document.id)
import altair as alt
data = [{'x': str(date), 'y': count, 'u': f'{user1} {user2}'} for date, user1, user2, count in ann_stats]
data = alt.Data(values=data)
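    # one stacked bar per day, coloured by the annotating user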
chart = alt.Chart(data).mark_bar().encode(
x='x:T', # specify ordinal data
y='sum(y):Q', # specify quantitative data
color='u:N'
).properties(width='container', height=300)
return render_template('document/document_history.html',
requests=db_requests, document=db_document, graph_json=chart.to_json(indent=0))
@bp.route('/new_document', methods=['GET', 'POST'])
@login_required
def new_document():
form = CreateDocumentForm()
if form.validate_on_submit():
document = create_document(form.document_name.data, current_user)
flash(u'Document successfully created!', 'success')
return redirect(url_for('document.upload_images_to_document', document_id=document.id))
else:
return render_template('document/new_document.html', form=form)
@bp.route('/make_public/<string:document_id>')
@login_required
def make_public(document_id):
document_name = check_and_change_public_document(document_id, current_user, True)
if document_name:
flash(f'Document "{document_name}" in now public!', 'success')
return document_id
else:
flash(u'You do not have sufficient rights to make this document public!', 'danger')
return None
@bp.route('/make_private/<string:document_id>')
@login_required
def make_private(document_id):
document_name = check_and_change_public_document(document_id, current_user, False)
if document_name:
flash(f'Document "{document_name}" in now private!', 'success')
return document_id
else:
flash(u'You do not have sufficient rights to make this document public!', 'danger')
return None
@bp.route('/delete_document/<string:document_id>')
@login_required
def delete_document(document_id):
if check_and_remove_document(document_id, current_user):
flash(u'Document successfully deleted!', 'success')
return document_id
else:
flash(u'You do not have sufficient rights to remove this document!', 'danger')
return None
@bp.route('/upload_images_to_document/<string:document_id>', methods=['GET'])
@login_required
def upload_images_to_document(document_id):
if not is_user_owner_or_collaborator(document_id, current_user):
flash(u'You do not have sufficient rights to upload images!', 'danger')
return redirect(url_for('main.index'))
document = get_document_by_id(document_id)
images = get_document_images(document)
return render_template('document/upload_images_to_document.html', document=document, images=images)
@bp.route('/upload_image_to_document/<string:document_id>', methods=['POST'])
@login_required
def upload_image_to_document(document_id):
if not is_user_owner_or_collaborator(document_id, current_user):
flash(u'You do not have sufficient rights to upload images!', 'danger')
return '', 404
if request.method == 'POST':
f = request.files.get('file')
status = save_image(f, document_id)
if status == '':
return '', 200
return status, 409
def image_preview(image_id=None, public_access=False):
if image_id is None:
return send_file('static/img/missing_page.png', cache_timeout=10000000)
try:
db_image = get_image_by_id(image_id)
except (sqlalchemy.exc.StatementError, sqlalchemy.orm.exc.NoResultFound):
return "Image does not exist.", 404
document_id = db_image.document_id
if public_access:
db_document = get_document_by_id(db_image.document_id)
if not db_document.is_public:
return send_file('static/img/missing_page.png', cache_timeout=10000000)
else:
if not is_granted_acces_for_document(document_id, current_user):
return send_file('static/img/missing_page.png', cache_timeout=10000000)
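    # previews are built lazily: rendered on first request, then served from disk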
image_preview_path = os.path.join(current_app.config['PREVIEW_IMAGES_FOLDER'], str(document_id), str(image_id) + '.jpg')
if not os.path.isfile(image_preview_path):
make_image_preview(db_image)
return send_file(image_preview_path, cache_timeout=0)
@bp.route('/get_image_preview/<string:image_id>')
@bp.route('/get_image_preview/')
@login_required
def get_image_preview(image_id=None):
return image_preview(image_id=image_id, public_access=False)
@bp.route('/get_public_image_preview/<string:image_id>')
@bp.route('/get_public_image_preview/')
def get_public_image_preview(image_id=None):
return image_preview(image_id=image_id, public_access=True)
@bp.route('/get_document_image_ids/<string:document_id>')
@login_required
def get_document_image_ids(document_id):
if not is_granted_acces_for_document(document_id, current_user):
flash(u'You do not have sufficient rights to document!', 'danger')
return redirect(url_for('main.index'))
document = get_document_by_id(document_id)
images = natsorted(get_document_images(document).all(), key=lambda x: x.filename)
return jsonify([str(x.id) for x in images])
@bp.route('/get_page_xml_regions/<string:image_id>')
@login_required
def get_page_xml_regions(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download regions!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=True)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_pagexml_string(), minetype='text/xml')
@bp.route('/get_page_xml_lines/<string:image_id>')
@login_required
def get_page_xml_lines(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download xml!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=False)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_pagexml_string(), minetype='text/xml')
@bp.route('/get_annotated_page_xml_lines/<string:image_id>')
@bp.route('/get_annotated_page_xml_lines/<string:image_id>/<string:from_time>/')
@login_required
def get_annotated_page_xml_lines(image_id, from_time=None):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download xml!', 'danger')
return redirect(url_for('main.index'))
if from_time:
try:
from_time = dateutil.parser.parse(from_time)
        except (ValueError, OverflowError):
return 'ERROR: Could not parse from_time argument.', 400
page_layout = get_page_layout(db_image, only_regions=False, only_annotated=True, from_time=from_time,
active_ignoring=True)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_pagexml_string(), minetype='text/xml')
@bp.route('/get_alto_xml/<string:image_id>')
@login_required
def get_alto_xml(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download alto!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=False, only_annotated=False, alto=True)
filename = "{}.xml".format(os.path.splitext(page_layout.id)[0])
return create_string_response(filename, page_layout.to_altoxml_string(page_uuid=image_id), minetype='text/xml')
@bp.route('/get_text/<string:image_id>')
@login_required
def get_text(image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_granted_acces_for_page(image_id, current_user):
flash(u'You do not have sufficient rights to download text!', 'danger')
return redirect(url_for('main.index'))
page_layout = get_page_layout(db_image, only_regions=False, only_annotated=False)
file_name = "{}.txt".format(os.path.splitext(page_layout.id)[0])
return create_string_response(file_name, get_page_layout_text(page_layout), minetype='text/plain')
def get_image_common(image_id, public=False):
try:
image_db = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if public:
if not image_db.document.is_public:
return "Image is not public.", 403
elif not is_granted_acces_for_page(image_id, current_user):
return "You do not have access to the requested images.", 403
image_path = os.path.join(current_app.config['UPLOADED_IMAGES_FOLDER'], image_db.path)
if not os.path.isfile(image_path):
print("ERROR: Could not find image on disk. image id: {}, image path: {}.".format(image_id, image_path))
raise NotFound()
return send_file(image_path, as_attachment=True, attachment_filename=image_db.filename, cache_timeout=10000000)
@bp.route('/get_image/<string:image_id>')
@login_required
def get_image(image_id):
return get_image_common(image_id, False)
@bp.route('/get_public_image/<string:image_id>')
def get_public_image(image_id):
return get_image_common(image_id, True)
@bp.route('/download_document_pages/<string:document_id>')
@login_required
def get_document_pages(document_id):
if not is_granted_acces_for_document(document_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
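        # write one PAGE XML and one plain-text transcript per page image into the zip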
document = get_document_by_id(document_id)
for image in document.images:
page_layout = get_page_layout(image, only_regions=False, only_annotated=False)
page_string = page_layout.to_pagexml_string()
text_string = get_page_layout_text(page_layout)
d_page = zipfile.ZipInfo("{}.xml".format(os.path.splitext(page_layout.id)[0]))
d_page.date_time = time.localtime(time.time())[:6]
d_page.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(d_page, page_string)
d_text = zipfile.ZipInfo("{}.txt".format(os.path.splitext(page_layout.id)[0]))
d_text.date_time = time.localtime(time.time())[:6]
d_text.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(d_text, text_string)
memory_file.seek(0)
return send_file(memory_file, attachment_filename='pages.zip', as_attachment=True)
@bp.route('/get_document_annotated_pages/<string:document_id>')
@bp.route('/download_document_annotated_pages/<string:document_id>')
@login_required
def get_document_annotated_pages(document_id):
if not is_granted_acces_for_document(document_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
memory_file = BytesIO()
with zipfile.ZipFile(memory_file, 'w') as zf:
document = get_document_by_id(document_id)
for image in document.images:
page_layout = get_page_layout(image, only_regions=False, only_annotated=True)
xml_string = page_layout.to_pagexml_string()
d_XML = zipfile.ZipInfo("{}.xml".format(os.path.splitext(page_layout.id)[0]))
d_XML.date_time = time.localtime(time.time())[:6]
d_XML.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(d_XML, xml_string)
memory_file.seek(0)
return send_file(memory_file, attachment_filename='pages.zip', as_attachment=True)
@bp.route('/remove_image/<string:document_id>/<string:image_id>')
@login_required
def remove_image_get(document_id, image_id):
try:
db_image = get_image_by_id(image_id)
except sqlalchemy.exc.StatementError:
return "Image does not exist.", 404
if not is_user_owner_or_collaborator(document_id, current_user):
flash(u'You do not have sufficient rights to get this image!', 'danger')
return redirect(url_for('main.index'))
if remove_image(document_id, image_id):
flash(u'Image successfully removed!', 'success')
return redirect(url_for('document.upload_images_to_document', document_id=document_id))
@bp.route('/collaborators/<string:document_id>', methods=['GET'])
@login_required
def collaborators_get(document_id):
if not is_document_owner(document_id, current_user) and not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to edit collaborators!', 'danger')
return redirect(url_for('main.index'))
else:
document = get_document_by_id(document_id)
collaborators = get_collaborators_select_data(document)
reg = re.compile('@.*')
for collaborator in collaborators:
collaborator.email_an = re.sub(reg, '@...', collaborator.user.email)
return render_template('document/edit_collaborators.html', document=document, collaborators=collaborators)
@bp.route('/collaborators/<string:document_id>', methods=['POST'])
@login_required
def collaborators_post(document_id):
collaborators_ids = request.form.getlist('collaborators')
if not is_document_owner(document_id, current_user) and not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to edit collaborators!', 'danger')
return redirect(url_for('main.index'))
else:
save_collaborators(document_id, collaborators_ids)
flash(u'Collaborators saved successfully.', 'success')
return redirect(url_for('document.collaborators_get', document_id=document_id))
@bp.route('/get_keyboard', methods=['GET'])
@login_required
def get_keyboard():
keyboard_dict = {}
for keyboard_layout in os.listdir(current_app.config['KEYBOARD_FOLDER']):
keyboard_layout_name = os.path.splitext(keyboard_layout)[0]
keyboard_layout_path = os.path.join(current_app.config['KEYBOARD_FOLDER'], keyboard_layout)
with open(keyboard_layout_path) as f:
keyboard_dict[keyboard_layout_name] = json.load(f)
return jsonify(keyboard_dict)
@bp.route('/update_confidences', methods=['POST'])
@login_required
def update_all_confidences():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
file = request.files['data']
content = file.read()
changes = json.loads(content)
update_confidences(changes)
return redirect(url_for('document.documents'))
@bp.route('/update_baselines', methods=['POST'])
@login_required
def update_all_baselines():
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
file = request.files['data']
content = file.read()
changes = json.loads(content)
update_baselines(changes)
return redirect(url_for('document.documents'))
@bp.route('/lines_check', methods=['GET', 'POST'])
@bp.route('/lines_check/<string:document_id>', methods=['GET', 'POST'])
@login_required
def lines_check(document_id=None):
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
selected = [False for _ in user_documents]
if document_id is not None:
for i, document in enumerate(user_documents):
if document_id == str(document.id):
selected[i] = True
if request.method == 'POST':
selected = [False for _ in user_documents]
document_ids = request.form.getlist('documents')
for i, document in enumerate(user_documents):
if document_ids != []:
if str(document.id) in document_ids:
selected[i] = True
return render_template('document/lines_check.html', documents=enumerate(user_documents), selected=selected)
@bp.route('/get_all_lines', methods=['GET'])
@login_required
def get_all_lines():
show_ignored_lines = request.headers.get('show-ignored-lines')
document_ids = json.loads(request.headers.get('documents'))
document_ids = get_documents_with_granted_acces(document_ids, current_user)
if show_ignored_lines == 'true':
show_ignored_lines = True
elif show_ignored_lines == 'false':
show_ignored_lines = False
lines = get_sucpect_lines_ids(document_ids, 'all', show_ignored_lines)
return jsonify(lines)
@bp.route('/get_annotated_lines', methods=['GET'])
@login_required
def get_annotated_lines():
show_ignored_lines = request.headers.get('show-ignored-lines')
document_ids = json.loads(request.headers.get('documents'))
document_ids = get_documents_with_granted_acces(document_ids, current_user)
if show_ignored_lines == 'true':
show_ignored_lines = True
elif show_ignored_lines == 'false':
show_ignored_lines = False
lines = get_sucpect_lines_ids(document_ids, 'annotated', show_ignored_lines)
return jsonify(lines)
@bp.route('/get_not_annotated_lines', methods=['GET'])
@login_required
def get_not_annotated_lines():
show_ignored_lines = request.headers.get('show-ignored-lines')
document_ids = json.loads(request.headers.get('documents'))
document_ids = get_documents_with_granted_acces(document_ids, current_user)
if show_ignored_lines == 'true':
show_ignored_lines = True
elif show_ignored_lines == 'false':
show_ignored_lines = False
lines = get_sucpect_lines_ids(document_ids, 'not_annotated', show_ignored_lines)
return jsonify(lines)
@bp.route('/get_cropped_image/<string:line_id>')
@login_required
def get_cropped_image(line_id):
if not is_granted_acces_for_line(line_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
image = get_line_image_by_id(line_id)
    return send_file(BytesIO(image), attachment_filename='{}.jpeg'.format(line_id), mimetype='image/jpeg', as_attachment=True)
@bp.route('/compute_scores/<string:document_id>')
@login_required
def compute_scores(document_id):
if not is_user_trusted(current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
    _thread.start_new_thread(compute_scores_of_doc, (document_id,))
flash(u'Computing scores!', 'info')
return jsonify('success')
@bp.route('/skip_line/<string:line_id>')
@login_required
def skip_line(line_id):
if not is_granted_acces_for_line(line_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
skip_textline(line_id)
return jsonify({'status': 'success'})
@bp.route('/get_line_info/<string:line_id>')
@login_required
def get_line_info(line_id):
if not is_granted_acces_for_line(line_id, current_user):
flash(u'You do not have sufficient rights to this document!', 'danger')
return redirect(url_for('main.index'))
lines = get_line(line_id)
return jsonify(lines)
@bp.route('/search', methods=['GET', 'POST'])
def search_bar():
query = ""
lines = []
if is_user_trusted(current_user):
user_documents = get_all_documents()
else:
user_documents = get_user_documents(current_user)
selected = [False for _ in user_documents]
if request.method == 'POST':
query = request.form['query']
document_ids = request.form.getlist('documents')
user_document_ids = []
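        # restrict the search to the checked documents; with none checked, search all accessible ones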
for i, document in enumerate(user_documents):
if document_ids != []:
if str(document.id) in document_ids:
selected[i] = True
user_document_ids.append(str(document.id))
else:
user_document_ids.append(str(document.id))
lines = find_textlines(query, current_user, user_document_ids)
return render_template('document/search_lines.html', query=query, lines=lines, documents=enumerate(user_documents), selected=selected)
| 38.376956 | 156 | 0.725305 | 0 | 0 | 0 | 0 | 23,479 | 0.870269 | 0 | 0 | 5,883 | 0.218058 |
47e38cc73a4ca6342b90794377e31733e0fe8cef | 4,898 | py | Python | src/superdatabase3000/packet.py | JeanMax/SuperDatabase3000 | 836395c9b6ea2a5d53f81c22bb126e299f3e1bfc | [
"MIT"
]
| 1 | 2020-03-30T13:49:29.000Z | 2020-03-30T13:49:29.000Z | src/superdatabase3000/packet.py | JeanMax/SuperDatabase3000 | 836395c9b6ea2a5d53f81c22bb126e299f3e1bfc | [
"MIT"
]
| 5 | 2020-03-30T14:32:48.000Z | 2020-03-31T12:01:02.000Z | src/superdatabase3000/packet.py | JeanMax/SuperDatabase3000 | 836395c9b6ea2a5d53f81c22bb126e299f3e1bfc | [
"MIT"
]
| null | null | null | """
This module defines a packet structure
(composed of: canari, payload, payload_size, and eventually an extra payload).
You'll find a 'pack' functions allowing you to create a packet
from a payload (btyes object) you want to send, and an 'unpack' function
that can extract a payload from a packet (as a bytes object too) after
validating the packet structure (canari, checksum, length).
packet[64]: abcd abcdefghabcdefghabcd abcdefgh
^ ^ ^
canari[4] checksum[20] payload_size[8]
payload_size
<------------------------------------------->
abcdefghabcdefghabcdefghabcdefgh [...]
^ ^
payload[32] extra_payload
"""
import collections
import struct
import hashlib
CANARI = 0xdeadbeef
CANARI_SIZE = 4 # unsigned int
CHECKSUM_SIZE = 20 # sha1
INT_SIZE = 8 # unsigned long long
PAYLOAD_MIN_SIZE = 32 # TODO: tweak me based on DbClient requests size: 256-32
PACKET_MIN_SIZE = (
CANARI_SIZE + CHECKSUM_SIZE + INT_SIZE
+ PAYLOAD_MIN_SIZE
) # 64
CHECKSUM_OFFSET = CANARI_SIZE + CHECKSUM_SIZE # we'll start hasing from there
STRUCT_FORMAT = (
"!"
"I" # canari
f"{CHECKSUM_SIZE}s" # checksum
"Q" # payload_size
"{payload_size}s" # payload: complete its size using format
)
Packet = collections.namedtuple(
"Packet",
["canari", "checksum", "payload_size", "payload"]
)
def _checksum(bytes_buf):
"""Return the sha1 digest of the given 'bytes_buf'."""
return hashlib.sha1(bytes_buf[CHECKSUM_OFFSET:]).digest()
def _verify_checksum(ctrl_checksum, bytes_buf):
"""
Return True if the given 'ctrl_checksum' matches the checksum
of 'bytes_buf', otherwise throw a ValueError.
"""
if ctrl_checksum != _checksum(bytes_buf):
raise ValueError("packet: invalid checksum")
return True
def pack(payload, with_checksum=True):
"""
Create a packet from the given 'payload' byte object that you want to send.
If the 'with_checksum' argument is True, the checksum of the payload will
be calculated and inserted in the packet, otherwise the checksum will be
set to zeros.
Returns a bytes object of the created packet (ready to send).
"""
packet = Packet(
canari=CANARI,
checksum=b"\x00" * CHECKSUM_SIZE,
payload_size=len(payload),
payload=payload.ljust(PAYLOAD_MIN_SIZE, b"\x00")
)
payload_size = max(packet.payload_size, PAYLOAD_MIN_SIZE)
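    # pack first with a zeroed checksum; if requested, hash everything after the
    # checksum field (from CHECKSUM_OFFSET on) and pack again with the real digest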
try:
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
except struct.error as e:
raise ValueError(f"packet: {e}")
if with_checksum:
packet = packet._replace(checksum=_checksum(bytes_buf))
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
return bytes_buf
def unpack(bytes_buf, with_checksum=True):
"""
Extract the payload (as a bytes object) from the given 'bytes_buf' packet.
If the 'with_checksum' argument is True, the checksum in the packet will be
checked against a calculated checksum of the packet payload. Otherwise it
will just be ignored.
Returns a bytes object of the extracted payload.
A ValueError will be thrown if an invalid packet is given as 'bytes_buf'
(invalid canari, checksum, payload length)
"""
# first, we try to unpack as if it was a 64 bytes packet
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=PAYLOAD_MIN_SIZE),
bytes_buf[:PACKET_MIN_SIZE]
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if packet.canari != CANARI:
raise ValueError("packet: the canari is dead")
# payload can fit in a 64 bytes packet: just verify checksum, then job done
if packet.payload_size <= PAYLOAD_MIN_SIZE:
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
packet = packet._replace(
payload=packet.payload[:packet.payload_size]
)
return packet
# packet is actually bigger than 64 bytes (extra_payload)
if len(bytes_buf) <= PACKET_MIN_SIZE:
return packet # the payload is incomplete, and checksum not verified
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=packet.payload_size),
bytes_buf
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
return packet # complete packet with extra payload
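# Illustrative round-trip sketch (not part of the original module; it relies only
# on the pack/unpack functions defined above):
if __name__ == "__main__":
    buf = pack(b"hello")                      # 64-byte packet, payload zero-padded
    assert unpack(buf).payload == b"hello"    # canari + sha1 checksum verified
    big = pack(b"x" * 100)                    # payload > 32 bytes -> extra payload
    assert unpack(big).payload == b"x" * 100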
| 32.223684 | 79 | 0.636178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,547 | 0.520008 |
47e43b3b4e3f0031df6f61702eae33c0a872be24 | 1,095 | py | Python | microcosm_flask/swagger/api.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | [
"Apache-2.0"
]
| 11 | 2017-01-30T21:53:20.000Z | 2020-05-29T22:39:19.000Z | microcosm_flask/swagger/api.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | [
"Apache-2.0"
]
| 139 | 2016-03-09T19:09:59.000Z | 2021-09-03T17:14:00.000Z | microcosm_flask/swagger/api.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | [
"Apache-2.0"
]
| 10 | 2016-12-19T22:39:42.000Z | 2021-03-09T19:23:15.000Z | """
API interfaces for swagger operations.
"""
from typing import (
Any,
Iterable,
Mapping,
Tuple,
)
from marshmallow import Schema
from marshmallow.fields import Field
from microcosm_flask.swagger.parameters import Parameters
from microcosm_flask.swagger.schemas import Schemas
def build_schema(schema: Schema, strict_enums: bool = True) -> Mapping[str, Any]:
"""
Build JSON schema from a marshmallow schema.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.build(schema)
def iter_schemas(schema: Schema, strict_enums: bool = True) -> Iterable[Tuple[str, Any]]:
"""
Build zero or more JSON schemas for a marshmallow schema.
Generates: name, schema pairs.
"""
builder = Schemas(build_parameter=build_parameter, strict_enums=strict_enums)
return builder.iter_schemas(schema)
def build_parameter(field: Field, **kwargs) -> Mapping[str, Any]:
"""
Build JSON parameter from a marshmallow field.
"""
builder = Parameters(**kwargs)
return builder.build(field)
| 23.804348 | 89 | 0.717808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.256621 |
47e4a20a59666f230f44fc593c648ca410af9651 | 1,286 | py | Python | converter.py | GuzTech/uart_to_hdmi | b6ea4efa85a06e59406ffc3b034028f00d5a7cbf | [
"MIT"
]
| 1 | 2020-07-04T01:09:00.000Z | 2020-07-04T01:09:00.000Z | converter.py | GuzTech/uart_to_hdmi | b6ea4efa85a06e59406ffc3b034028f00d5a7cbf | [
"MIT"
]
| null | null | null | converter.py | GuzTech/uart_to_hdmi | b6ea4efa85a06e59406ffc3b034028f00d5a7cbf | [
"MIT"
]
| null | null | null | import sys
import struct
if(len(sys.argv) != 5):
print("Usage: python converter.py <num_pixels_x> <num_pixels_y> <input file> <output file>\n")
else:
num_pixels_x = int(sys.argv[1])
num_pixels_y = int(sys.argv[2])
print(num_pixels_x)
has_alpha_channel = False
infile = open(sys.argv[3], "rb")
data = infile.read()
data_len = len(data)
    if ((data_len != (num_pixels_x * num_pixels_y * 4)) and
            (data_len != (num_pixels_x * num_pixels_y * 3))):
        raise AssertionError(
            "File size does not match given resolution, or does not use 8bpp.")
if(data_len == (num_pixels_x * num_pixels_y * 4)):
has_alpha_channel = True
outfile = open(sys.argv[4], "wb")
infile.seek(0)
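    # repack each 8-bit-per-channel pixel into a single byte:
    # bits 7..6 = blue (2 bits), bits 5..3 = green (3 bits), bits 2..0 = red (3 bits)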
for y in range(num_pixels_y):
for x in range(num_pixels_x):
r = (int.from_bytes(infile.read(1), 'little') >> 5) & 0x7
g = (int.from_bytes(infile.read(1), 'little') >> 5) & 0x7
b = (int.from_bytes(infile.read(1), 'little') >> 6) & 0x3
if(has_alpha_channel):
# Alpha channel, we don't use this
_ = infile.read(1)
pixel = (b << 6) | (g << 3) | r
outfile.write(pixel.to_bytes(1, 'little'))
infile.close()
outfile.close()
| 30.619048 | 98 | 0.573872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.176516 |
47e4f4051c67291e2bfd6264123e2f3ba68f0903 | 319 | py | Python | project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py | akash519-gif/Handwritten-letter-detection. | f49240bc3dcea5eb8f53bade66ccb49bf8809be6 | [
"Apache-2.0"
]
| null | null | null | project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py | akash519-gif/Handwritten-letter-detection. | f49240bc3dcea5eb8f53bade66ccb49bf8809be6 | [
"Apache-2.0"
]
| null | null | null | project_handwritten-character-recognition-with-convolutional-neural-network-master/Codes/remove_old_image.py | akash519-gif/Handwritten-letter-detection. | f49240bc3dcea5eb8f53bade66ccb49bf8809be6 | [
"Apache-2.0"
]
| null | null | null | # import require package
import os
def remove_content(directory):
for file in os.scandir(directory):
print(file.path)
os.remove(file.path)
print("Old images has been deleted")
upload_dir = './uploads'
remove_content(upload_dir)
images_dir = './static/images'
remove_content(images_dir)
| 18.764706 | 44 | 0.705329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.253918 |
47e633e7aabb9cbd31dd0cb29459787e531f57cc | 15,271 | py | Python | src/scaffoldfitter/fitterstepfit.py | zekh167/scaffoldfitter | 357a312948464399433f29f19cdac4d7fd6061ef | [
"Apache-2.0"
]
| null | null | null | src/scaffoldfitter/fitterstepfit.py | zekh167/scaffoldfitter | 357a312948464399433f29f19cdac4d7fd6061ef | [
"Apache-2.0"
]
| null | null | null | src/scaffoldfitter/fitterstepfit.py | zekh167/scaffoldfitter | 357a312948464399433f29f19cdac4d7fd6061ef | [
"Apache-2.0"
]
| null | null | null | """
Fit step for gross alignment and scale.
"""
from opencmiss.utils.zinc.field import assignFieldParameters, createFieldsDisplacementGradients
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.field import Field, FieldFindMeshLocation
from opencmiss.zinc.optimisation import Optimisation
from opencmiss.zinc.result import RESULT_OK
from scaffoldfitter.fitterstep import FitterStep
class FitterStepFit(FitterStep):
_jsonTypeId = "_FitterStepFit"
def __init__(self):
super(FitterStepFit, self).__init__()
self._lineWeight = 10.0
self._markerWeight = 1.0
self._strainPenaltyWeight = 0.0
self._curvaturePenaltyWeight = 0.0
self._edgeDiscontinuityPenaltyWeight = 0.0
self._numberOfIterations = 1
self._maximumSubIterations = 1
self._updateReferenceState = False
@classmethod
def getJsonTypeId(cls):
return cls._jsonTypeId
def decodeSettingsJSONDict(self, dctIn : dict):
"""
Decode definition of step from JSON dict.
"""
assert self._jsonTypeId in dctIn
# ensure all new options are in dct
dct = self.encodeSettingsJSONDict()
dct.update(dctIn)
self._lineWeight = dct["lineWeight"]
self._markerWeight = dct["markerWeight"]
self._strainPenaltyWeight = dct["strainPenaltyWeight"]
self._curvaturePenaltyWeight = dct["curvaturePenaltyWeight"]
self._edgeDiscontinuityPenaltyWeight = dct["edgeDiscontinuityPenaltyWeight"]
self._numberOfIterations = dct["numberOfIterations"]
self._maximumSubIterations = dct["maximumSubIterations"]
self._updateReferenceState = dct["updateReferenceState"]
def encodeSettingsJSONDict(self) -> dict:
"""
Encode definition of step in dict.
:return: Settings in a dict ready for passing to json.dump.
"""
return {
self._jsonTypeId : True,
"lineWeight" : self._lineWeight,
"markerWeight" : self._markerWeight,
"strainPenaltyWeight" : self._strainPenaltyWeight,
"curvaturePenaltyWeight" : self._curvaturePenaltyWeight,
"edgeDiscontinuityPenaltyWeight" : self._edgeDiscontinuityPenaltyWeight,
"numberOfIterations" : self._numberOfIterations,
"maximumSubIterations" : self._maximumSubIterations,
"updateReferenceState" : self._updateReferenceState
}
def getLineWeight(self):
return self._lineWeight
def setLineWeight(self, weight):
assert weight >= 0.0
if weight != self._lineWeight:
self._lineWeight = weight
return True
return False
def getMarkerWeight(self):
return self._markerWeight
def setMarkerWeight(self, weight):
assert weight >= 0.0
if weight != self._markerWeight:
self._markerWeight = weight
return True
return False
def getStrainPenaltyWeight(self):
return self._strainPenaltyWeight
def setStrainPenaltyWeight(self, weight):
assert weight >= 0.0
if weight != self._strainPenaltyWeight:
self._strainPenaltyWeight = weight
return True
return False
def getCurvaturePenaltyWeight(self):
return self._curvaturePenaltyWeight
def setCurvaturePenaltyWeight(self, weight):
assert weight >= 0.0
if weight != self._curvaturePenaltyWeight:
self._curvaturePenaltyWeight = weight
return True
return False
def getEdgeDiscontinuityPenaltyWeight(self):
return self._edgeDiscontinuityPenaltyWeight
def setEdgeDiscontinuityPenaltyWeight(self, weight):
assert weight >= 0.0
if weight != self._edgeDiscontinuityPenaltyWeight:
self._edgeDiscontinuityPenaltyWeight = weight
return True
return False
def getNumberOfIterations(self):
return self._numberOfIterations
def setNumberOfIterations(self, numberOfIterations):
assert numberOfIterations > 0
if numberOfIterations != self._numberOfIterations:
self._numberOfIterations = numberOfIterations
return True
return False
def getMaximumSubIterations(self):
return self._maximumSubIterations
def setMaximumSubIterations(self, maximumSubIterations):
assert maximumSubIterations > 0
if maximumSubIterations != self._maximumSubIterations:
self._maximumSubIterations = maximumSubIterations
return True
return False
def isUpdateReferenceState(self):
return self._updateReferenceState
def setUpdateReferenceState(self, updateReferenceState):
if updateReferenceState != self._updateReferenceState:
self._updateReferenceState = updateReferenceState
return True
return False
def run(self, modelFileNameStem=None):
"""
Fit model geometry parameters to data.
:param modelFileNameStem: Optional name stem of intermediate output file to write.
"""
        self._fitter.assignDataWeights(self._lineWeight, self._markerWeight)
fieldmodule = self._fitter._region.getFieldmodule()
optimisation = fieldmodule.createOptimisation()
optimisation.setMethod(Optimisation.METHOD_NEWTON)
optimisation.addDependentField(self._fitter.getModelCoordinatesField())
optimisation.setAttributeInteger(Optimisation.ATTRIBUTE_MAXIMUM_ITERATIONS, self._maximumSubIterations)
#FunctionTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_FUNCTION_TOLERANCE)
#GradientTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_GRADIENT_TOLERANCE)
#StepTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_STEP_TOLERANCE)
MaximumStep = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_MAXIMUM_STEP)
MinimumStep = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_MINIMUM_STEP)
#LinesearchTolerance = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_LINESEARCH_TOLERANCE)
#TrustRegionSize = optimisation.getAttributeReal(Optimisation.ATTRIBUTE_TRUST_REGION_SIZE)
dataScale = self._fitter.getDataScale()
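        # scale the optimiser's step limits by the data extent so convergence
        # behaviour does not depend on the model's absolute size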
#tol_scale = dataScale # *dataScale
#FunctionTolerance *= tol_scale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_FUNCTION_TOLERANCE, FunctionTolerance)
#GradientTolerance /= tol_scale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_GRADIENT_TOLERANCE, GradientTolerance)
#StepTolerance *= tol_scale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_STEP_TOLERANCE, StepTolerance)
MaximumStep *= dataScale
optimisation.setAttributeReal(Optimisation.ATTRIBUTE_MAXIMUM_STEP, MaximumStep)
MinimumStep *= dataScale
optimisation.setAttributeReal(Optimisation.ATTRIBUTE_MINIMUM_STEP, MinimumStep)
#LinesearchTolerance *= dataScale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_LINESEARCH_TOLERANCE, LinesearchTolerance)
#TrustRegionSize *= dataScale
#optimisation.setAttributeReal(Optimisation.ATTRIBUTE_TRUST_REGION_SIZE, TrustRegionSize)
#if self.getDiagnosticLevel() > 0:
# print("Function Tolerance", FunctionTolerance)
# print("Gradient Tolerance", GradientTolerance)
# print("Step Tolerance", StepTolerance)
# print("Maximum Step", MaximumStep)
# print("Minimum Step", MinimumStep)
# print("Linesearch Tolerance", LinesearchTolerance)
# print("Trust Region Size", TrustRegionSize)
dataObjective = None
deformationPenaltyObjective = None
edgeDiscontinuityPenaltyObjective = None
with ChangeManager(fieldmodule):
dataObjective = self.createDataObjectiveField()
result = optimisation.addObjectiveField(dataObjective)
assert result == RESULT_OK, "Fit Geometry: Could not add data objective field"
if (self._strainPenaltyWeight > 0.0) or (self._curvaturePenaltyWeight > 0.0):
deformationPenaltyObjective = self.createDeformationPenaltyObjectiveField()
result = optimisation.addObjectiveField(deformationPenaltyObjective)
assert result == RESULT_OK, "Fit Geometry: Could not add strain/curvature penalty objective field"
if self._edgeDiscontinuityPenaltyWeight > 0.0:
print("WARNING! Edge discontinuity penalty is not supported by NEWTON solver - skipping")
#edgeDiscontinuityPenaltyObjective = self.createEdgeDiscontinuityPenaltyObjectiveField()
#result = optimisation.addObjectiveField(edgeDiscontinuityPenaltyObjective)
#assert result == RESULT_OK, "Fit Geometry: Could not add edge discontinuity penalty objective field"
fieldcache = fieldmodule.createFieldcache()
objectiveFormat = "{:12e}"
        for iteration in range(self._numberOfIterations):
            iterName = str(iteration + 1)
if self.getDiagnosticLevel() > 0:
print("-------- Iteration " + iterName)
if self.getDiagnosticLevel() > 0:
result, objective = dataObjective.evaluateReal(fieldcache, 1)
print(" Data objective", objectiveFormat.format(objective))
if deformationPenaltyObjective:
result, objective = deformationPenaltyObjective.evaluateReal(fieldcache, deformationPenaltyObjective.getNumberOfComponents())
print(" Deformation penalty objective", objectiveFormat.format(objective))
result = optimisation.optimise()
if self.getDiagnosticLevel() > 1:
solutionReport = optimisation.getSolutionReport()
print(solutionReport)
assert result == RESULT_OK, "Fit Geometry: Optimisation failed with result " + str(result)
if modelFileNameStem:
self._fitter.writeModel(modelFileNameStem + "_fit" + iterName + ".exf")
self._fitter.calculateDataProjections(self)
if self.getDiagnosticLevel() > 0:
print("--------")
result, objective = dataObjective.evaluateReal(fieldcache, 1)
print(" END Data objective", objectiveFormat.format(objective))
if deformationPenaltyObjective:
result, objective = deformationPenaltyObjective.evaluateReal(fieldcache, deformationPenaltyObjective.getNumberOfComponents())
print(" END Deformation penalty objective", objectiveFormat.format(objective))
if self._updateReferenceState:
self._fitter.updateModelReferenceCoordinates()
self.setHasRun(True)
def createDataObjectiveField(self):
"""
Get FieldNodesetSum objective for data projected onto mesh, including markers with fixed locations.
Assumes ChangeManager(fieldmodule) is in effect.
:return: Zinc FieldNodesetSum.
"""
fieldmodule = self._fitter.getFieldmodule()
delta = self._fitter.getDataDeltaField()
weight = self._fitter.getDataWeightField()
deltaSq = fieldmodule.createFieldDotProduct(delta, delta)
#dataProjectionInDirection = fieldmodule.createFieldDotProduct(dataProjectionDelta, self._fitter.getDataProjectionDirectionField())
#dataProjectionInDirection = fieldmodule.createFieldMagnitude(dataProjectionDelta)
#dataProjectionInDirection = dataProjectionDelta
#dataProjectionInDirection = fieldmodule.createFieldConstant([ weight/dataScale ]*dataProjectionDelta.getNumberOfComponents()) * dataProjectionDelta
dataProjectionObjective = fieldmodule.createFieldNodesetSum(weight*deltaSq, self._fitter.getActiveDataNodesetGroup())
dataProjectionObjective.setElementMapField(self._fitter.getDataHostLocationField())
return dataProjectionObjective
def createDeformationPenaltyObjectiveField(self):
"""
Only call if (self._strainPenaltyWeight > 0.0) or (self._curvaturePenaltyWeight > 0.0)
:return: Zinc FieldMeshIntegral, or None if not weighted.
Assumes ChangeManager(fieldmodule) is in effect.
"""
numberOfGaussPoints = 3
fieldmodule = self._fitter.getFieldmodule()
mesh = self._fitter.getHighestDimensionMesh()
dataScale = 1.0
dimension = mesh.getDimension()
# future: eliminate effect of model scale
#linearDataScale = self._fitter.getDataScale()
#for d in range(dimension):
# dataScale /= linearDataScale
displacementGradient1, displacementGradient2 = createFieldsDisplacementGradients(self._fitter.getModelCoordinatesField(), self._fitter.getModelReferenceCoordinatesField(), mesh)
deformationTerm = None
if self._strainPenaltyWeight > 0.0:
# future: allow variable alpha components
alpha = fieldmodule.createFieldConstant([ self._strainPenaltyWeight*dataScale ]*displacementGradient1.getNumberOfComponents())
wtSqDeformationGradient1 = fieldmodule.createFieldDotProduct(alpha, displacementGradient1*displacementGradient1)
assert wtSqDeformationGradient1.isValid()
deformationTerm = wtSqDeformationGradient1
if self._curvaturePenaltyWeight > 0.0:
# future: allow variable beta components
beta = fieldmodule.createFieldConstant([ self._curvaturePenaltyWeight*dataScale ]*displacementGradient2.getNumberOfComponents())
wtSqDeformationGradient2 = fieldmodule.createFieldDotProduct(beta, displacementGradient2*displacementGradient2)
assert wtSqDeformationGradient2.isValid()
deformationTerm = (deformationTerm + wtSqDeformationGradient2) if deformationTerm else wtSqDeformationGradient2
        deformationPenaltyObjective = fieldmodule.createFieldMeshIntegral(deformationTerm, self._fitter.getModelReferenceCoordinatesField(), mesh)
deformationPenaltyObjective.setNumbersOfPoints(numberOfGaussPoints)
return deformationPenaltyObjective
def createEdgeDiscontinuityPenaltyObjectiveField(self):
"""
Only call if self._edgeDiscontinuityPenaltyWeight > 0.0
Assumes ChangeManager(fieldmodule) is in effect.
:return: Zinc FieldMeshIntegralSquares, or None if not weighted.
"""
numberOfGaussPoints = 3
fieldmodule = self._fitter.getFieldmodule()
lineMesh = fieldmodule.findMeshByDimension(1)
edgeDiscontinuity = fieldmodule.createFieldEdgeDiscontinuity(self._fitter.getModelCoordinatesField())
dataScale = self._fitter.getDataScale()
weightedEdgeDiscontinuity = edgeDiscontinuity*fieldmodule.createFieldConstant(self._edgeDiscontinuityPenaltyWeight/dataScale)
edgeDiscontinuityPenaltyObjective = fieldmodule.createFieldMeshIntegralSquares(weightedEdgeDiscontinuity, self._fitter.getModelReferenceCoordinatesField(), lineMesh)
edgeDiscontinuityPenaltyObjective.setNumbersOfPoints(numberOfGaussPoints)
return edgeDiscontinuityPenaltyObjective
| 49.26129 | 185 | 0.71017 | 14,861 | 0.973152 | 0 | 0 | 71 | 0.004649 | 0 | 0 | 4,219 | 0.276275 |
47e8c93b45a616423efc06f0610ea0b349b67a78 | 261 | py | Python | src/structlog_to_seq/abs_processor.py | gjedlicska/structlog-to-seq | 44d8eb536db2cf0a5bbd8561a545b39f7584f372 | [
"MIT"
]
| null | null | null | src/structlog_to_seq/abs_processor.py | gjedlicska/structlog-to-seq | 44d8eb536db2cf0a5bbd8561a545b39f7584f372 | [
"MIT"
]
| null | null | null | src/structlog_to_seq/abs_processor.py | gjedlicska/structlog-to-seq | 44d8eb536db2cf0a5bbd8561a545b39f7584f372 | [
"MIT"
]
| null | null | null | from abc import ABCMeta, abstractmethod
from typing import Any
class AbsProcessor(metaclass=ABCMeta):
def __init__(self) -> None:
pass
@abstractmethod
def __call__(self, logger, name, event_dict) -> Any:
raise NotImplementedError
| 21.75 | 56 | 0.708812 | 195 | 0.747126 | 0 | 0 | 106 | 0.40613 | 0 | 0 | 0 | 0 |
47e9940c13fbcbe44daf1d6db6129df93471f1e5 | 2,965 | py | Python | scripts/ch4_correlation_plot.py | mathkann/understanding-random-forests | d2c5e0174d1a778be37a495083d756b2829160ec | [
"BSD-3-Clause"
]
| 353 | 2015-01-03T13:34:03.000Z | 2022-03-25T05:16:30.000Z | scripts/ch4_correlation_plot.py | mathkann/understanding-random-forests | d2c5e0174d1a778be37a495083d756b2829160ec | [
"BSD-3-Clause"
]
| 1 | 2016-06-29T05:43:41.000Z | 2016-06-29T05:43:41.000Z | scripts/ch4_correlation_plot.py | mathkann/understanding-random-forests | d2c5e0174d1a778be37a495083d756b2829160ec | [
"BSD-3-Clause"
]
| 153 | 2015-01-14T03:46:42.000Z | 2021-12-26T10:13:51.000Z | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
blue = (0, 0, 1.0)
green = (0, 0.8, 0)
red = (1.0, 0, 0)
red_alpha = (1.0, 0, 0, 0.001)
gray = (0.7, 0.7, 0.7)
results = [[],[],
["RandomForestRegressor-K=1",3.527128,2.820386,0.706743,0.063868,0.009973,0.286104,0.420639],
["RandomForestRegressor-K=2",3.036291,2.333874,0.702417,0.075537,0.011347,0.314841,0.387576],
["RandomForestRegressor-K=3",2.823907,2.109897,0.714009,0.087809,0.012335,0.349486,0.364523],
["RandomForestRegressor-K=4",2.715613,1.979086,0.736527,0.102472,0.014302,0.391750,0.344778],
["RandomForestRegressor-K=5",2.643232,1.887080,0.756151,0.111790,0.015411,0.421380,0.334772],
["RandomForestRegressor-K=6",2.642354,1.851498,0.790856,0.125342,0.016268,0.466556,0.324300],
["RandomForestRegressor-K=7",2.636296,1.822316,0.813980,0.134200,0.017159,0.495746,0.318234],
["RandomForestRegressor-K=8",2.623646,1.784344,0.839303,0.146081,0.018631,0.531100,0.308202],
["RandomForestRegressor-K=9",2.645439,1.780447,0.864992,0.152977,0.019492,0.558601,0.306390],
["RandomForestRegressor-K=10",2.638901,1.753437,0.885464,0.160371,0.020184,0.583494,0.301970],
["ExtraTreesRegressor-K=1",3.376099,2.723586,0.652514,0.051864,0.009532,0.230752,0.421761],
["ExtraTreesRegressor-K=2",2.801100,2.146534,0.654566,0.060858,0.011926,0.258086,0.396480],
["ExtraTreesRegressor-K=3",2.536644,1.886837,0.649807,0.067322,0.012756,0.273424,0.376383],
["ExtraTreesRegressor-K=4",2.409943,1.745583,0.664360,0.076519,0.016511,0.302962,0.361399],
["ExtraTreesRegressor-K=5",2.330165,1.651706,0.678459,0.086137,0.017063,0.331515,0.346944],
["ExtraTreesRegressor-K=6",2.285386,1.597063,0.688323,0.092147,0.019216,0.349667,0.338655],
["ExtraTreesRegressor-K=7",2.263983,1.553772,0.710211,0.100322,0.020510,0.378116,0.332094],
["ExtraTreesRegressor-K=8",2.246997,1.528167,0.718831,0.107167,0.021703,0.396323,0.322507],
["ExtraTreesRegressor-K=9",2.236845,1.495768,0.741077,0.115699,0.023020,0.423894,0.317183],
["ExtraTreesRegressor-K=10",2.232862,1.469781,0.763081,0.123849,0.024420,0.451778,0.311304]]
max_features = range(1, 10+1)
ax = plt.subplot(1, 2, 1)
plt.plot(max_features, [results[1+k][1] for k in max_features], 'o-', color=blue, label='Random Forest')
plt.plot(max_features, [results[1+k][2] for k in max_features], 'o--', color=blue)
plt.plot(max_features, [results[1+k][3] for k in max_features], 'o:', color=blue)
plt.plot(max_features, [results[11+k][1] for k in max_features], 'o-', color=red, label='Extremely Randomized Trees')
plt.plot(max_features, [results[11+k][2] for k in max_features], 'o--', color=red)
plt.plot(max_features, [results[11+k][3] for k in max_features], 'o:', color=red)
plt.legend(loc="best")
plt.xlabel("$K$")
plt.subplot(1, 2, 2, sharex=ax)
plt.plot(max_features, [results[1+k][4] for k in max_features], 'o-', color=blue)
plt.plot(max_features, [results[11+k][4] for k in max_features], 'o-', color=red)
plt.xlabel("$K$")
plt.ylabel("$\\rho$")
plt.show()
| 57.019231 | 117 | 0.725801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.210455 |
47e9c55cbdb85ad05b1bd86a08b95251624c0eb6 | 4,029 | py | Python | Run_Vfree-Synthetic_Flat.py | Fernandez-Trincado/DataReductionPy | f06eb975067dc80cac038a47d3b9a9dde43bfdb6 | [
"FSFAP"
]
| 1 | 2020-01-25T06:28:40.000Z | 2020-01-25T06:28:40.000Z | Run_Vfree-Synthetic_Flat.py | Fernandez-Trincado/DataReductionPy | f06eb975067dc80cac038a47d3b9a9dde43bfdb6 | [
"FSFAP"
]
| null | null | null | Run_Vfree-Synthetic_Flat.py | Fernandez-Trincado/DataReductionPy | f06eb975067dc80cac038a47d3b9a9dde43bfdb6 | [
"FSFAP"
]
| null | null | null | #!/usr/bin/python
# Created by: Jose G. Fernandez Trincado
# Date: 2013 June 28
# Program: This program corrects the science .fit images using a synthetic flat
# 1 m Reflector telescope, National Astronomical Observatory of Venezuela
# Mode f/5, 21 arcmin x 21 arcmin
# Project: Omega Centauri, Tidal Tails.
# The program Astrometry_V1.py was developed by J. G. Fernandez Trincado at the Centro de Investigaciones de Astronomia "Francisco J. Duarte".
# If you have any problems, please contact J. G. Fernandez Trincado, [email protected] / [email protected]
import numpy as np
import scipy as sc
import pyfits
import sys, os
from pyraf import iraf
# Run the program.
#Example:
# Next program: ./Run_Vfree-Synthetic_Flat.py GrupoX.dat
# >>> GrupoX.dat/XXX.XX.XXX.XX.XXXX.hlv*
location='/home/jfernandez/Escritorio/Tesis_2013-2014_CIDA_ULA/Data_Tesis_2013_2014_CIDA-ULA/Reflector/'
if len(sys.argv[:]) < 2.:
print '***************************************************'
print 'Warning: ./Run_Vfree-Synthetic_Flat.py GrupoX.dat'
print '***************************************************'
else:
#Combine images MEDIAN
#TASK IRAF: images.immatch.imcombine
	#Function to combine images to generate the master flat
def Master_combina(inter_img,filt):
iraf.images.immatch()
iraf.images.immatch.output=filt
iraf.images.immatch.headers=''
iraf.images.immatch.bpmasks=''
iraf.images.immatch.rejmasks=''
iraf.images.immatch.nrejmasks=''
iraf.images.immatch.expmasks=''
iraf.images.immatch.sigmas=''
iraf.images.immatch.logfile='STDOUT'
iraf.images.immatch.combine='median'
iraf.images.immatch.reject='avsigclip'
iraf.images.immatch.project='no'
iraf.images.immatch.outtype='real'
iraf.images.immatch.outlimits=''
iraf.images.immatch.offsets='none'
iraf.images.immatch.masktype='none'
iraf.images.immatch.maskvalue=0.
iraf.images.immatch.blank=1.0
iraf.images.immatch.scale='mode'
iraf.images.immatch.zero='none'
iraf.images.immatch.weight='mode'
iraf.images.immatch.statsec=''
iraf.images.immatch.expname=''
iraf.images.immatch.lthreshold='INDEF'
iraf.images.immatch.hthreshold='INDEF'
iraf.images.immatch.nlow=1.
iraf.images.immatch.nhigh=1.
iraf.images.immatch.nkeep=1.
iraf.images.immatch.mclip='yes'
iraf.images.immatch.lsigma=3.
iraf.images.immatch.hsigma=3.
iraf.images.immatch.rdnoise=7.
iraf.images.immatch.gain=1.68
iraf.images.immatch.snoise=0.
iraf.images.immatch.sigscale=0.1
iraf.images.immatch.pclip=-0.5
iraf.images.immatch.grow=0.
iraf.images.immatch.imcombine(inter_img)
data=sc.genfromtxt(sys.argv[1],dtype=str)
	#Read the lists inside each directory; they contain the paths of the images already classified by filter and exposure time.
for i in np.arange(len(data)):
temp='/Initial_list_Syntethic_flat_'
os.system('ls '+data[i]+temp+'* >temporal_classified.dat')
data_clas=sc.genfromtxt('temporal_classified.dat',dtype=str)
for j in np.arange(len(data_clas)):
if data_clas[j] == data[i]+temp+'I60':
os.system('cat '+data[i]+temp+'I60 >> MasterFlat_I60_Good.dat')
elif data_clas[j] == data[i]+temp+'I90':
os.system('cat '+data[i]+temp+'I90 >> MasterFlat_I90_Good.dat')
elif data_clas[j] == data[i]+temp+'V60':
os.system('cat '+data[i]+temp+'V60 >> MasterFlat_V60_Good.dat')
elif data_clas[j] == data[i]+temp+'V90':
os.system('cat '+data[i]+temp+'V90 >> MasterFlat_V90_Good.dat')
else:
pass
os.system('rm temporal_classified.dat')
os.system('ls MasterFlat_*_Good.dat >list_temp_gen.dat')
data_end=sc.genfromtxt('list_temp_gen.dat',dtype=str)
for k in np.arange(len(data_end)):
print 'Generating Master Flat: '+data_end[k]
print ''
Master_combina('@'+data_end[k],data_end[k]+'.fit')
print 'End of the process'
print ''
for h in np.arange(len(data)):
os.system('cp '+data_end[k]+'.fit '+data[h]+'/')
os.system('rm '+data_end[k]+'.fit')
os.system('rm list_temp_gen.dat MasterFlat_*_Good.dat')
#END
| 28.778571 | 150 | 0.70489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,789 | 0.444031 |
47e9dce96f4661d34f10811dc840595ffed5833b | 829 | py | Python | abing/backend/abing/models/feature.py | dohyungp/abitrary | 4dc3f4c79a433a2debe1f1e151d00400a2225e9c | [
"MIT"
]
| 5 | 2020-12-04T14:15:26.000Z | 2020-12-30T09:11:09.000Z | abing/backend/abing/models/feature.py | dohyungp/abitrary | 4dc3f4c79a433a2debe1f1e151d00400a2225e9c | [
"MIT"
]
| 8 | 2020-12-20T16:33:30.000Z | 2021-01-06T01:56:55.000Z | abing/backend/abing/models/feature.py | dohyungp/abitrary | 4dc3f4c79a433a2debe1f1e151d00400a2225e9c | [
"MIT"
]
| 1 | 2021-01-06T15:25:19.000Z | 2021-01-06T15:25:19.000Z | from typing import TYPE_CHECKING
from sqlalchemy import Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from abing.db.base_class import Base
if TYPE_CHECKING:
from .arm import Arm # noqa: F401
class Feature(Base):
__tablename__ = "features"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
key = Column(String(200), index=True)
value = Column(String)
description = Column(String)
arm_id = Column(Integer, ForeignKey("arms.id"), nullable=False)
arm = relationship("Arm", back_populates="features")
time_created = Column(DateTime(timezone=True), server_default=func.now())
time_updated = Column(
DateTime(timezone=True), server_default=func.now(), onupdate=func.now()
)
| 30.703704 | 79 | 0.731001 | 546 | 0.658625 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.055489 |
47eb8cdb6e6b5599e5209b828d0aacfe3eb4df25 | 555 | py | Python | Utilities/fe8_exp_test.py | Shahrose/lex-talionis | ef7e48124b36269f4212eb0e3a7747caf53bfadd | [
"MIT"
]
| null | null | null | Utilities/fe8_exp_test.py | Shahrose/lex-talionis | ef7e48124b36269f4212eb0e3a7747caf53bfadd | [
"MIT"
]
| null | null | null | Utilities/fe8_exp_test.py | Shahrose/lex-talionis | ef7e48124b36269f4212eb0e3a7747caf53bfadd | [
"MIT"
]
| null | null | null | mlevel = 1
elevel = 1
mclass_bonus_a = 20
eclass_bonus_a = 0
mclass_bonus_b = 60
eclass_bonus_b = 0
mclass_power = 3
eclass_power = 2
def damage_exp():
return (31 + elevel + eclass_bonus_a - mlevel - mclass_bonus_a) / mclass_power
def defeat_exp(mode=1):
return (elevel * eclass_power + eclass_bonus_b) - ((mlevel * mclass_power + mclass_bonus_b) / mode)
def kill_exp():
return damage_exp() + max(0, 20 + (defeat_exp() if defeat_exp() > 0 else defeat_exp(2)))
print(damage_exp())
print(defeat_exp())
print(defeat_exp(2))
print(kill_exp())
| 24.130435 | 103 | 0.715315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
47eb8f87adf534b765a9c50c9659d9424a7c2ade | 1,315 | py | Python | createDB.py | ansh-mehta/COVID-19-Vaccine-Slot-Notifier | b09d163ebee960089edbd8b894e3b956745504df | [
"Apache-2.0"
]
| null | null | null | createDB.py | ansh-mehta/COVID-19-Vaccine-Slot-Notifier | b09d163ebee960089edbd8b894e3b956745504df | [
"Apache-2.0"
]
| 1 | 2021-09-11T18:06:33.000Z | 2021-09-11T18:06:33.000Z | createDB.py | ansh-mehta/COVID-19-Vaccine-Slot-Notifier | b09d163ebee960089edbd8b894e3b956745504df | [
"Apache-2.0"
]
| null | null | null | import requests
import json
from pymongo import MongoClient
client = MongoClient("mongodb://localhost:27017")
database = client["temp"]
states_districts = database["states_districts"]
states_districts.delete_many({})  # clear previous rows (remove() no longer exists in PyMongo 4)
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
}
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/states", headers=headers
)
states = json.loads(response.text)["states"]
custom_state_id = 1
for state in states:
state_id = state["state_id"]
state_name = state["state_name"].strip()
print(state_name)
response = requests.get(
"https://cdn-api.co-vin.in/api/v2/admin/location/districts/" + str(state_id),
headers=headers,
)
    custom_district_id = 1
    districts = json.loads(response.text)["districts"]
    for district in districts:
        district_id = district["district_id"]
        district_name = district["district_name"].strip()
        data = {"state_name": state_name, "custom_state_id": custom_state_id,
                "district_name": district_name, "custom_district_id": custom_district_id,
                "actual_district_id": district_id}
        states_districts.insert_one(data)
        custom_district_id += 1
custom_state_id+=1 | 35.540541 | 175 | 0.719392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 454 | 0.345247 |
47ecac75bfa5b5456323216191e97427a888010b | 3,989 | py | Python | model_conv.py | isn350/e_hir_GAN | 53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4 | [
"MIT"
]
| null | null | null | model_conv.py | isn350/e_hir_GAN | 53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4 | [
"MIT"
]
| null | null | null | model_conv.py | isn350/e_hir_GAN | 53cc7530b1c4bb7ee5250d7fc057b71ceb5726b4 | [
"MIT"
]
| null | null | null | import tensorflow as tf
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    # return tf.random_normal(shape=size, stddev=xavier_stddev)
    # NOTE: only the Xavier stddev is returned; callers draw the samples themselves.
    return xavier_stddev
def conv(x, w, b, stride, name):
with tf.variable_scope('conv'):
tf.summary.histogram('weight', w)
tf.summary.histogram('biases', b)
return tf.nn.conv2d(x,
filter=w,
strides=[1, stride, stride, 1],
padding='SAME',
name=name) + b
def deconv(x, w, b, shape, stride, name):
with tf.variable_scope('deconv'):
tf.summary.histogram('weight', w)
tf.summary.histogram('biases', b)
return tf.nn.conv2d_transpose(x,
filter=w,
output_shape=shape,
strides=[1, stride, stride, 1],
padding='SAME',
name=name) + b
def lrelu(x, alpha=0.2):
with tf.variable_scope('leakyReLU'):
return tf.maximum(x, alpha * x)
def discriminator(X,dim, reuse=False):
with tf.variable_scope('discriminator'):
if reuse:
tf.get_variable_scope().reuse_variables()
K = 64
M = 128
N = 256
W1 = tf.get_variable('D_W1', [4, 4, 1, K], initializer=tf.random_normal_initializer(stddev=0.1))
B1 = tf.get_variable('D_B1', [K], initializer=tf.constant_initializer())
W2 = tf.get_variable('D_W2', [4, 4, K, M], initializer=tf.random_normal_initializer(stddev=0.1))
B2 = tf.get_variable('D_B2', [M], initializer=tf.constant_initializer())
W3 = tf.get_variable('D_W3', [16*16*M, N], initializer=tf.random_normal_initializer(stddev=0.1))
B3 = tf.get_variable('D_B3', [N], initializer=tf.constant_initializer())
W4 = tf.get_variable('D_W4', [N, 1], initializer=tf.random_normal_initializer(stddev=0.1))
B4 = tf.get_variable('D_B4', [1], initializer=tf.constant_initializer())
X = tf.reshape(X, [-1, dim, dim, 1], 'reshape')
conv1 = conv(X, W1, B1, stride=2, name='conv1')
bn1 = tf.contrib.layers.batch_norm(conv1)
conv2 = conv(tf.nn.dropout(lrelu(bn1), 0.4), W2, B2, stride=2, name='conv2')
bn2 = tf.contrib.layers.batch_norm(conv2)
flat = tf.reshape(tf.nn.dropout(lrelu(bn2), 0.4), [-1, 16*16*M], name='flat')
dense = lrelu(tf.matmul(flat, W3) + B3)
logits = tf.matmul(dense, W4) + B4
prob = tf.nn.sigmoid(logits)
return prob, logits
def generator(X, dim, batch_size=64):
with tf.variable_scope('generator'):
K = 256
L = 128
M = 64
W1 = tf.get_variable('G_W1', [100, 16*16*K], initializer=tf.random_normal_initializer(stddev=0.1))
B1 = tf.get_variable('G_B1', [16*16*K], initializer=tf.constant_initializer())
W2 = tf.get_variable('G_W2', [4, 4, M, K], initializer=tf.random_normal_initializer(stddev=0.1))
B2 = tf.get_variable('G_B2', [M], initializer=tf.constant_initializer())
W3 = tf.get_variable('G_W3', [4, 4, 1, M], initializer=tf.random_normal_initializer(stddev=0.1))
B3 = tf.get_variable('G_B3', [1], initializer=tf.constant_initializer())
X = lrelu(tf.matmul(X, W1) + B1)
X = tf.reshape(X, [batch_size, 16, 16, K])
print(X)
deconv1 = deconv(X, W2, B2, shape=[batch_size, 32, 32, M], stride=2, name='deconv1')
bn1 = tf.contrib.layers.batch_norm(deconv1)
deconv2 = deconv(tf.nn.dropout(lrelu(bn1), 0.4), W3, B3, shape=[batch_size, dim, dim, 1], stride=2, name='deconv2')
XX = tf.reshape(deconv2, [-1, dim*dim], 'reshape')
return tf.nn.sigmoid(XX)
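# A minimal TF1-style wiring sketch (illustrative only): placeholder shapes
# assume dim=64 images flattened to 64*64 pixels and a 100-d noise vector,
# matching the layer sizes hard-coded above.
if __name__ == '__main__':
    Z = tf.placeholder(tf.float32, [64, 100], name='Z')
    X = tf.placeholder(tf.float32, [64, 64 * 64], name='X')
    G_sample = generator(Z, dim=64)
    D_real, D_real_logits = discriminator(X, dim=64)
    D_fake, D_fake_logits = discriminator(G_sample, dim=64, reuse=True)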
| 40.292929 | 124 | 0.552018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.073953 |
47ed721213e9d40abe12f67af339888e2d8b6e5e | 12,488 | py | Python | squad_utils.py | ashtonteng/squad_exp | 0cdcb3e41783026e805fedbe671a9a69a90d8a86 | [
"MIT"
]
| 1 | 2019-01-08T16:41:54.000Z | 2019-01-08T16:41:54.000Z | squad_utils.py | ashtonteng/squad_exp | 0cdcb3e41783026e805fedbe671a9a69a90d8a86 | [
"MIT"
]
| null | null | null | squad_utils.py | ashtonteng/squad_exp | 0cdcb3e41783026e805fedbe671a9a69a90d8a86 | [
"MIT"
]
| null | null | null | import numpy as np
# import matplotlib.pyplot as plt
# import pylab
import re
import itertools
import json
import collections
import multiprocessing as mp
import random
import sys
#sys.path.append("./src/")
#from proto import io as protoio
#from utils.multiprocessor_cpu import MultiProcessorCPU
'''
some general pre/post-processing tips:
1. strip the spaces at the beginning and end
2. consider the influence of punctuation at the end
3. be careful about empty strings when using the re library functions
'''
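# Quick illustration of tip 3 (made-up pattern and input): re.split can yield
# empty strings at the boundaries, so they should be filtered out, e.g.
#   re.split("[ ,]+", " a, b ")                          -> ['', 'a', 'b', '']
#   [w for w in re.split("[ ,]+", " a, b ") if w != '']  -> ['a', 'b']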
def LoadJsonData(filePath):
'''
Load the file.
@param filePath: filePath string
'''
with open(filePath) as dataFile:
data = json.load(dataFile)
return data
def LoadProtoData(filePath):
data = protoio.ReadArticles(filePath)
dataDict = dict()
for article in data:
title = article.title
dataDict[title] = article
return dataDict
def DumpJsonPrediction(filePath, predictions):
'''
currently only support top 1 prediction.
the output put goes in the following format:
{id : answer string}
'''
predDict = dict()
for title in predictions.keys():
for pred in predictions[title]:
if len(pred["prediction"] ) == 0:
continue
predDict[pred["id"] ] = pred["prediction"][0]
with open(filePath, "w") as outFile:
json.dump(predDict, outFile)
def StripPunct(sentence):
sentence = sentence.replace("...", "<elli>")
if sentence[-1] == '.'\
or sentence[-1] == '?' \
or sentence[-1] == '!' \
or sentence[-1] == ';' \
or sentence[-1] == ",":
sentence = sentence[:-1]
sentence = sentence.replace("<elli>", "...")
return sentence
def ParseJsonData(data):
'''
@param data is a json object. This is the version before
visualization functionality.
'''
dataPerArticle = dict()
for article in data:
text = ""
# process articles to a list of sentences represented by list of words
for paragraph in article["paragraphs"]:
text += paragraph["context"].strip() + " "
textInSentences = TextToSentence(text)
queries = list()
answers = list()
qaIds = list()
for paragraph in article["paragraphs"]:
for qaPair in paragraph["qas"]:
# turn everything into lower cases
queries.append(StripPunct(qaPair["question"].lower().strip() ) )
answers.append(StripPunct(qaPair["answers"][0]["text"].lower().strip() ) )
qaIds.append(qaPair["id"] )
dataPerArticle[article["title"] ] = { \
"textInSentences": textInSentences,
"queries": queries,
"answers": answers,
"qaIds": qaIds
}
return dataPerArticle
def TextToSentence(text):
'''
    cut a document into sentences using punctuation-based heuristics
    @param text: the document as a single string
    @return sentences: list of sentence strings
'''
caps = "([A-Z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co|Corp)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
numbers = "([-+]?)([0-9]+)(\.)([0-9]+)"
text = " " + text + " "
text = text.replace("\n"," ")
text = re.sub(prefixes,"\\1<prd>",text)
text = re.sub(websites,"<prd>\\1",text)
if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
text = re.sub("\s" + caps + "[.] "," \\1<prd> ",text)
text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
text = re.sub(caps + "[.]" + caps + "[.]","\\1<prd>\\2<prd>",text)
text = re.sub(caps + "[.] " + caps + "[.] " + caps + "[.] ","\\1<prd> \\2<prd> \\3<prd>",text)
text = re.sub(caps + "[.] " + caps + "[.] ","\\1<prd> \\2<prd>",text)
text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
text = re.sub(" " + caps + "[.]"," \\1<prd>",text)
text = re.sub(numbers, "\\g<1>\\g<2><prd>\\g<4>", text)
# # specific to current SQUAD dataset
text = text.lower()
suffixesSupp = "(\.)([a-z]+)"
text = re.sub(suffixesSupp,"<prd>\\2",text)
text = text.replace("...", "<elli>")
text = text.replace("i.e.", "i<prd>e<prd>")
text = text.replace("etc.", "etc<prd>")
text = text.replace("u.s.", "u<prd>s<prd>")
text = text.replace("v.s.", "v<prd>s<prd>")
text = text.replace("vs.", "vs<prd>")
text = text.replace(" v. ", " v<prd> ")
text = text.replace("med.sc.d", "med<prd>sc<prd>d")
text = text.replace("ecl.", "ecl<prd>")
text = text.replace("hma.", "hma<prd>")
text = text.replace("(r.", "(r<prd>") # for some year related staff
text = text.replace("(d.", "(d<prd>")
if "\"" in text: text = text.replace(".\"","\".")
if "!" in text: text = text.replace("!\"","\"!")
if "?" in text: text = text.replace("?\"","\"?")
text = text.replace(".",".<stop>")
text = text.replace("?","?<stop>")
text = text.replace("!","!<stop>")
text = text.replace("<prd>",".")
text = text.replace("<elli>", "...")
sentences = text.split("<stop>")
sentences = [s.strip() \
for s in sentences if s.strip() != '']
return sentences
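# Illustrative call (note that lowercasing is part of the pipeline):
#   TextToSentence("It is raining. However, we left!")
#   -> ["it is raining.", "however, we left!"]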
def SentenceToWord(sentences):
'''
cut sentences to list of words
@param sentences: a list of sentences
@return sentencesInWords: a list containing list of words
'''
delimiters = "[ ,;\"\n\(\)]+"
sentencesInWords = list()
for sentence in sentences:
sentence = StripPunct(sentence)
sentence = sentence.replace("...", " ...")
sentencesInWords.append(re.split(delimiters, sentence) )
# omit the empty word produced by re.split
sentencesInWords[-1] = [s.strip().lower() for s in sentencesInWords[-1] if s.strip() != '']
return sentencesInWords
############### helper to multiprocess per article task with MultiprocessorCPU
def MultipleProcess(agent, titleList, targetFunc, conservative=True, debug=False):
'''
target function is the one we want to execute
for each article. When conservative == True, the num of threads
is equal to the number of cores on the machine
'''
procs = []
manager = mp.Manager()
returnDict = manager.dict()
if debug:
for title in titleList:
targetFunc(agent, title, returnDict)
else:
for title in titleList:
p = mp.Process(target=targetFunc,
args=(agent, title, returnDict) )
procs.append(p)
processor = MultiProcessorCPU(procs)
processor.run(conservative)
return returnDict
################ helpers for protobuf based dataset#################
def ReconstructStrFromSpan(tokens, span=None):
'''
@param tokens: a protobuf object representing a list of tokens
@param span: a pair (beginId, endId). Note endId is excluded.
'''
if span is None:
span = (0, len(tokens))
string = ""
beginId, endId = span
for i in range(beginId, endId):
string += tokens[i].word + tokens[i].after
string = string.strip()
return string
def GetContextBigram(article):
'''
article is an protobuf object for apecific article
'''
bigram = []
for paragraph in article.paragraphs:
bigramByPara = list()
for s in paragraph.context.sentence:
bigramByPara.append(GetBigramBySentence(s.token) )
bigram.append(bigramByPara)
return bigram
def GetContextUnigram(article):
unigram = []
for paragraph in article.paragraphs:
unigramByPara = list()
for s in paragraph.context.sentence:
unigramBySentence = [token.word.lower() for token in s.token]
unigramByPara.append(unigramBySentence)
unigram.append(unigramByPara)
return unigram
def GetBigramBySentence(tokens):
'''
tokens is a list of proto message object tokens
'''
bigram = []
for i in range(len(tokens) - 1):
bigram.append( (tokens[i].word.lower(), tokens[i + 1].word.lower() ) )
return bigram
def GetContextConstituentSpan(article):
'''
@return span: the spans are organized by the following hierarchy
span = [spanByPara1, spanByPara2, ...] Where
spanByPara1 = [spanBySentence1, spanBySentence2, ...]
spanBySentence1 is a list of spans extracted from the parsing tree
'''
span = []
for paragraph in article.paragraphs:
spanByPara = list()
for s in paragraph.context.sentence:
# tokens = [token.word for token in s.token]
spanBySentence = GetConstituentSpanBySentence(s.parseTree)
spanByPara.append(spanBySentence)
span.append(spanByPara)
return span
def GetConstituentSpanBySentence(parseTree):
'''
@param parseTree: a protobuf object
extract span represented by nodes in the parsing trees
'''
def AddSpanToParseTree(parseTree, nextLeaf):
'''
@param parseTree: a protobuf object
fill in the yieldBeginIndex and yieldEndIndex fields for parsing trees
'''
if len(parseTree.child) == 0:
parseTree.yieldBeginIndex = nextLeaf
parseTree.yieldEndIndex = nextLeaf + 1
return parseTree, nextLeaf + 1
else:
for i in range(len(parseTree.child) ):
child, nextLeaf = \
AddSpanToParseTree(parseTree.child[i], nextLeaf)
parseTree.child[i].CopyFrom(child)
parseTree.yieldBeginIndex = parseTree.child[0].yieldBeginIndex
parseTree.yieldEndIndex = parseTree.child[-1].yieldEndIndex
return parseTree, nextLeaf
parseTree, _ = AddSpanToParseTree(parseTree, nextLeaf=0)
spans = list()
visitList = list()
visitList.append(parseTree)
tokenList = list()
while len(visitList) != 0:
node = visitList.pop(0)
spans.append( (node.yieldBeginIndex, node.yieldEndIndex) )
for subTree in node.child:
visitList.append(subTree)
spansUniq = []
[spansUniq.append(span) for span in spans if span not in spansUniq]
return spansUniq
# some functions for debug
def GetCandidateAnsListInStr(candDataPerArticle, origDataPerArtice, ids, predId):
'''
for detailed use browse to prediction function of context rnn
'''
ansList = list()
for idx in ids:
predInfo = candDataPerArticle.candidateAnswers[idx]
predParaId = predInfo.paragraphIndex
predSenId = predInfo.sentenceIndex
predSpanStart = predInfo.spanBeginIndex
predSpanEnd = predInfo.spanBeginIndex + predInfo.spanLength
tokens = origDataPerArticle.paragraphs[predParaId].context.sentence[predSenId].token[predSpanStart:predSpanEnd]
predStr = ReconstructStrFromSpan(tokens, (0, len(tokens) ) )
ansList.append(predStr)
return ansList
# for serializing complex results
def ObjDict(obj):
return obj.__dict__
# display proto tokens
def PrintProtoToken(tokens):
print([t.word for t in tokens])
# remove the and . from tokens
def StandarizeToken(tokens):
if tokens[-1].word == ".":
tokens = tokens[:-1]
if len(tokens) > 0 and (tokens[0].word == "The" or tokens[0].word == "the"):
tokens = tokens[1:]
return tokens
def GetLongestCommonSubList(s1, s2):
    # range() rather than Python 2's xrange, so the helper runs under Python 3
    m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]
    longest, x_longest = 0, 0
    for x in range(1, 1 + len(s1)):
        for y in range(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
def UnkrizeData(data, rate, padId, unkId):
'''
artificially set non-<pad> tokens to <unk>. The portion of
the artificial <unk> is indicated by rate.
'''
mask = np.random.uniform(low=0.0, high=1.0, size=data.shape)
mask = np.logical_and( (data != padId), (mask >= (1 - rate) ) )
data[mask] = unkId
return data
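if __name__ == "__main__":
    # Tiny illustrative smoke test for UnkrizeData: 0 plays the role of <pad>
    # and 1 of <unk>; with rate=0.5 roughly half the non-pad entries flip to 1.
    demo = np.array([[0, 3, 5], [7, 0, 2]])
    print(UnkrizeData(demo.copy(), rate=0.5, padId=0, unkId=1))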
| 32.605744 | 119 | 0.596493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,850 | 0.308296 |
47ee5fdbd7a2fd709f968f5597836efd8e182df3 | 207 | py | Python | software/test/sample.py | technovus-sfu/swarmbots | 6a50193a78056c0359c426b097b96e1c37678a55 | [
"MIT"
]
| null | null | null | software/test/sample.py | technovus-sfu/swarmbots | 6a50193a78056c0359c426b097b96e1c37678a55 | [
"MIT"
]
| 3 | 2018-02-05T23:21:02.000Z | 2018-05-03T02:58:50.000Z | software/test/sample.py | technovus-sfu/swarmbots | 6a50193a78056c0359c426b097b96e1c37678a55 | [
"MIT"
]
| null | null | null | # print("Hello World!")
# sum = 2 + 2
# print(sum)
# for i in range(10,-10,-1):
# if i % 2 == 0:
# print(i)
# else:
# pass
# while(1):
# val = input("Enter ")
# print(val)
| 13.8 | 28 | 0.434783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.932367 |
47f00c4575c588196fb02578a13c75df9196c8ba | 476 | py | Python | super_nft/blueprints/datasprint/datasprint.py | Blockchain-Key/Super-NFT | 3983621127636bf9d4da740a5ac60451a3e5bbe8 | [
"MIT"
]
| 5 | 2021-05-02T00:06:41.000Z | 2021-11-30T10:34:08.000Z | super_nft/blueprints/datasprint/datasprint.py | Blockchain-Key/Super-NFT | 3983621127636bf9d4da740a5ac60451a3e5bbe8 | [
"MIT"
]
| 3 | 2021-05-06T09:31:49.000Z | 2021-05-11T05:14:32.000Z | super_nft/blueprints/datasprint/datasprint.py | Blockchain-Key/Super-NFT | 3983621127636bf9d4da740a5ac60451a3e5bbe8 | [
"MIT"
]
| 1 | 2021-05-06T15:34:24.000Z | 2021-05-06T15:34:24.000Z | # -*- coding: utf-8 -*-
"""User views."""
from flask import Blueprint, render_template, jsonify
from flask_login import login_required
from super_nft.extensions import csrf_protect
datasprint_bp = Blueprint("datasprint", __name__, url_prefix="/datasprint", static_folder="../static")
@csrf_protect.exempt
@datasprint_bp.route("/", methods=["GET", "POST"])
def index():
data = {
"result": "hello, world",
"code": "200"
}
return jsonify(data), 200
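# To serve these routes, the blueprint must be registered on the application
# object - a minimal sketch, assuming the app is created elsewhere:
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(datasprint_bp)  # endpoints live under /datasprint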
| 28 | 102 | 0.682773 | 0 | 0 | 0 | 0 | 189 | 0.397059 | 0 | 0 | 123 | 0.258403 |
47f07bbd0388ba3dd47eb8f252a382985372ec31 | 548 | py | Python | CursoemVideoPython/Desafio 34.py | Beebruna/Python | bdbe10ea76acca1b417f5960db0aae8be44e0af3 | [
"MIT"
]
| null | null | null | CursoemVideoPython/Desafio 34.py | Beebruna/Python | bdbe10ea76acca1b417f5960db0aae8be44e0af3 | [
"MIT"
]
| null | null | null | CursoemVideoPython/Desafio 34.py | Beebruna/Python | bdbe10ea76acca1b417f5960db0aae8be44e0af3 | [
"MIT"
]
| null | null | null | '''
Write a program that asks for an employee's salary and calculates the value of their
raise.
For salaries above R$1,250.00, apply a raise of 10%;
for salaries lower than or equal to that, the raise is 15%.
'''
salario = float(input('Digite o salário: R$ '))
if salario < 0:
print('Valor inválido!')
else:
if salario <= 1250:
print(f'O aumento será de R${salario*0.15} e passará a ser R${salario + salario*0.15}')
else:
print(f'O aumento será de R${salario * 0.10} e passará a ser R${salario + salario * 0.10}') | 34.25 | 99 | 0.669708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.772401 |
47f0f24b25872b88a91afd63b72991904ea663bc | 658 | py | Python | python/AULAS/aula20.py | Robert-Marchinhaki/primeiros-passos-Python | 515c2c418bfb941bd9af14cf598eca7fe2985592 | [
"MIT"
]
| null | null | null | python/AULAS/aula20.py | Robert-Marchinhaki/primeiros-passos-Python | 515c2c418bfb941bd9af14cf598eca7fe2985592 | [
"MIT"
]
| null | null | null | python/AULAS/aula20.py | Robert-Marchinhaki/primeiros-passos-Python | 515c2c418bfb941bd9af14cf598eca7fe2985592 | [
"MIT"
]
| null | null | null | def linhas(cor, txt):
    if cor in cores:  # use the function's parameter rather than the global 'pintar'
        print(cores[cor])
print(txt)
cores = {'vermelho': '\033[31m',
'azul': '\033[34m',
'amarelo': '\033[33m',
'branco': '\033[30m',
'roxo': '\033[35m',
'verde': '\033[32m',
'ciano': '\033[36m',
'limpa': '\033[m',
'preto e branco': '\033[7;30;m'}
pintar = str(input('Deseja pintar o seu texto com qual cor? ')).lower()
while pintar not in cores:
pintar = str(input('Erro! Essa cor não existe. Tente novamente: '))
if pintar in cores:
break
texto = str(input('Digite seu texto: '))
linhas(cor=pintar, txt=texto)
| 26.32 | 71 | 0.542553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.418816 |
47f182f38e59b731af6d6326b1c317ab14b2b7e5 | 992 | py | Python | FatherSon/HelloWorld2_source_code/Listing_20-2.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
]
| 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | FatherSon/HelloWorld2_source_code/Listing_20-2.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
]
| null | null | null | FatherSon/HelloWorld2_source_code/Listing_20-2.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
]
| null | null | null | # Listing_20-2.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Adding an event handler for the button
import sys
from PyQt4 import QtCore, QtGui, uic
form_class = uic.loadUiType("MyFirstGui.ui")[0]
# Class definition for the main window
class MyWindowClass(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.pushButton.clicked.connect(self.button_clicked) # connect the event handler
# the event handler for the button click
def button_clicked(self):
x = self.pushButton.x()
y = self.pushButton.y()
x += 50
y += 50
self.pushButton.move(x, y) # Move the button when we click it
app = QtGui.QApplication(sys.argv)
myWindow = MyWindowClass()
myWindow.show()
app.exec_()
| 31 | 90 | 0.654234 | 513 | 0.517137 | 0 | 0 | 0 | 0 | 0 | 0 | 388 | 0.391129 |
47f231b8a668477769e2a9abd3723ae4eedc3e54 | 1,072 | py | Python | raspberry/serial_stub.py | idf/Robot-In-Maze | 2301021c39f36a01ff97af26c54d41fedbe1608c | [
"MIT"
]
| 16 | 2015-04-04T15:26:01.000Z | 2019-10-15T16:13:03.000Z | raspberry/serial_stub.py | idf/Robot-In-Maze | 2301021c39f36a01ff97af26c54d41fedbe1608c | [
"MIT"
]
| null | null | null | raspberry/serial_stub.py | idf/Robot-In-Maze | 2301021c39f36a01ff97af26c54d41fedbe1608c | [
"MIT"
]
| 7 | 2015-10-12T21:23:12.000Z | 2021-10-13T02:41:25.000Z | from serial_comminication import *
from utils.decorators import Override
__author__ = 'Danyang'
class SerialAPIStub(SerialAPI):
@Override(SerialAPI)
def __init__(self):
super(SerialAPIStub, self).__init__(production=False)
@Override(SerialAPI)
def command_put(self, function, parameter):
        if function == 10:
self.responses_outgoing.put([False, SENSOR, json.dumps({
"sensors": [{"sensor": 0, "value": 20}, {"sensor": 1, "value": 20}, {"sensor": 2, "value": 40},
{"sensor": 10, "value": 10}, {"sensor": 11, "value": 30}, {"sensor": 12, "value": 30}]})])
self.responses_outgoing.put([True, FUNCTION, json.dumps({"function": function, "status": 200})])
else:
self.responses_outgoing.put([True, FUNCTION, json.dumps({"function": function, "status": 200})])
@Override(SerialAPI)
def response_pop(self):
"""
:return: [ack, type_data, data] : [bool, int, json_str]
"""
return super(SerialAPIStub, self).response_pop()
| 38.285714 | 118 | 0.602612 | 971 | 0.905784 | 0 | 0 | 924 | 0.86194 | 0 | 0 | 223 | 0.208022 |
47f249b23a7b5f7230bfbb222ddcb290a2a7adde | 5,259 | py | Python | boto3_type_annotations_with_docs/boto3_type_annotations/mobile/paginator.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
]
| 119 | 2018-12-01T18:20:57.000Z | 2022-02-02T10:31:29.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/mobile/paginator.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
]
| 15 | 2018-11-16T00:16:44.000Z | 2021-11-13T03:44:18.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/mobile/paginator.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
]
| 11 | 2019-05-06T05:26:51.000Z | 2021-09-28T15:27:59.000Z | from typing import Dict
from botocore.paginate import Paginator
class ListBundles(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Mobile.Client.list_bundles`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mobile-2017-07-01/ListBundles>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'bundleList': [
{
'bundleId': 'string',
'title': 'string',
'version': 'string',
'description': 'string',
'iconUrl': 'string',
'availablePlatforms': [
'OSX'|'WINDOWS'|'LINUX'|'OBJC'|'SWIFT'|'ANDROID'|'JAVASCRIPT',
]
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Result structure contains a list of all available bundles with details.
- **bundleList** *(list) --*
A list of bundles.
- *(dict) --*
The details of the bundle.
- **bundleId** *(string) --*
Unique bundle identifier.
- **title** *(string) --*
Title of the download bundle.
- **version** *(string) --*
Version of the download bundle.
- **description** *(string) --*
Description of the download bundle.
- **iconUrl** *(string) --*
Icon for the download bundle.
- **availablePlatforms** *(list) --*
Developer desktop or mobile app or website platforms.
- *(string) --*
Developer desktop or target mobile app or website platform.
- **NextToken** *(string) --*
A token to resume pagination.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListProjects(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Mobile.Client.list_projects`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/mobile-2017-07-01/ListProjects>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'projects': [
{
'name': 'string',
'projectId': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Result structure used for requests to list projects in AWS Mobile Hub.
- **projects** *(list) --*
List of projects.
- *(dict) --*
Summary information about an AWS Mobile Hub project.
- **name** *(string) --*
Name of the project.
- **projectId** *(string) --*
Unique project identifier.
- **NextToken** *(string) --*
A token to resume pagination.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
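# Typical usage of these paginators through a boto3 client (sketch only;
# assumes credentials and region are configured elsewhere):
#
#     import boto3
#     client = boto3.client('mobile')
#     paginator = client.get_paginator('list_projects')
#     for page in paginator.paginate():
#         for project in page.get('projects', []):
#             print(project['projectId'], project['name'])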
| 40.145038 | 224 | 0.487165 | 5,189 | 0.986689 | 0 | 0 | 0 | 0 | 0 | 0 | 4,960 | 0.943145 |
47f2d05914db9e80d9759d21867bc5761abeee91 | 1,550 | py | Python | algorithms/counting_sort.py | ArziPL/Other | 1319ac85b19a5c49fb70e902e3e37f2e7a192d0b | [
"MIT"
]
| null | null | null | algorithms/counting_sort.py | ArziPL/Other | 1319ac85b19a5c49fb70e902e3e37f2e7a192d0b | [
"MIT"
]
| null | null | null | algorithms/counting_sort.py | ArziPL/Other | 1319ac85b19a5c49fb70e902e3e37f2e7a192d0b | [
"MIT"
]
| null | null | null | # Best:  O(n + k)
# Avg:   O(n + k)
# Worst: O(n + k)
# Worst-case extra space: O(k) - can get very large
# k = range of values in the array
# For every number in arr, increment the count stored at that number's index
# in a temporary array; then, for each index, append the index to the final
# array as many times as it was counted - e.g. if count[23] = 3, add 23 23 23.
# The same counting is done for negatives in a separate array.
# 1. The whole approach becomes inefficient if the numbers in arr are large.
# 2. Supporting negatives noticeably increases the time/space cost: negatives
#    are counted by absolute value, so the recovered values are multiplied by
#    -1 and reversed - counts for [1, 5, 10] become [-1, -5, -10] =>
#    [-10, -5, -1] - and that array is prepended to the sorted positives.
to_sort = [52, 63, 12, 6, 631, 6, 24, 637,
64, 421, 74, 124, 0, -5, 523, -10, -529]
def counting_sort(arr: list):
positive_list = [0] * (max(arr)+1)
negative_list = [0] * (-1*(min(arr)-1))
final_positive = []
final_negative = []
for i in arr:
if i < 0:
ti = -i
negative_list[ti] += 1
else:
positive_list[i] += 1
for inx, i in enumerate(positive_list):
final_positive.append(i*[inx])
for inx, i in enumerate(negative_list):
final_negative.append(i*[-inx])
final_negative.reverse()
return [num for sublist in final_negative + final_positive for num in sublist]
print(counting_sort(to_sort))
| 34.444444 | 119 | 0.642581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 822 | 0.530323 |
47f4980d53b9e0ce1e873da3c9bbca1b3052a8de | 5,881 | py | Python | scheduler/notebooks/figures/evaluation/utils.py | akshayka/gavel | 40a22a725f2e70478483e98c9b07c6fc588e0c40 | [
"MIT"
]
| 67 | 2020-09-07T11:50:03.000Z | 2022-03-31T04:09:08.000Z | scheduler/notebooks/figures/evaluation/utils.py | akshayka/gavel | 40a22a725f2e70478483e98c9b07c6fc588e0c40 | [
"MIT"
]
| 7 | 2020-09-27T01:41:59.000Z | 2022-03-25T05:16:43.000Z | scheduler/notebooks/figures/evaluation/utils.py | akshayka/gavel | 40a22a725f2e70478483e98c9b07c6fc588e0c40 | [
"MIT"
]
| 12 | 2020-10-13T14:31:01.000Z | 2022-02-14T05:44:38.000Z | import os
import random
import re
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import sys; sys.path.append("../../..")
from job_table import JobTable
def get_logfile_paths_helper(directory_name):
logfile_paths = []
for root, _, file_names in os.walk(directory_name):
if len(file_names) > 0:
logfile_paths.extend(
[os.path.join(root, file_name)
for file_name in file_names])
return logfile_paths
def get_logfile_paths(directory_name, static_trace=False):
logfile_paths = []
for logfile_path in get_logfile_paths_helper(directory_name):
if static_trace:
m = re.match(
r'.*v100=(\d+)\.p100=(\d+)\.k80=(\d+)/(.*)/seed=(\d+)/'
                r'num_total_jobs=(\d+)\.log', logfile_path)
else:
m = re.match(
r'.*v100=(\d+)\.p100=(\d+)\.k80=(\d+)/(.*)/seed=(\d+)/'
                r'lambda=(\d+\.\d+)\.log', logfile_path)
if m is None: continue
v100s = int(m.group(1))
p100s = int(m.group(2))
k80s = int(m.group(3))
policy = m.group(4)
seed = int(m.group(5))
lambda_or_num_total_jobs = float(m.group(6))
logfile_paths.append((v100s, p100s, k80s, policy, seed,
lambda_or_num_total_jobs, logfile_path))
return logfile_paths
def prune(logfile_paths, v100s, p100s, k80s, policy, seed=None):
if seed is None:
return sorted([(x[5], x[6], x[4]) for x in logfile_paths
if x[0] == v100s and x[1] == p100s and
x[2] == k80s and x[3] == policy])
else:
return sorted([(x[5], x[6]) for x in logfile_paths
if x[0] == v100s and x[1] == p100s and
x[2] == k80s and x[3] == policy and
x[4] == seed])
def average_jct_fn(logfile_path, min_job_id=None, max_job_id=None):
job_completion_times = []
with open(logfile_path, 'r') as f:
lines = f.readlines()
for line in lines[-10000:]:
m = re.match(r'Job (\d+): (\d+\.\d+)', line)
if m is not None:
job_id = int(m.group(1))
job_completion_time = float(m.group(2))
if min_job_id is None or min_job_id <= job_id:
if max_job_id is None or job_id <= max_job_id:
job_completion_times.append(
job_completion_time)
if len(job_completion_times) == 0:
return None
return np.mean(job_completion_times) / 3600
def average_jct_low_priority_fn(logfile_path, min_job_id=None,
max_job_id=None):
job_completion_times = []
with open(logfile_path, 'rb') as f:
f.seek(-8192, os.SEEK_END)
text = f.read().decode('utf-8')
lines = text.split('\n')
for line in lines[-5:]:
m = re.match(r'Average job completion time \(low priority\): (\d+\.\d+) seconds', line)
if m is not None:
return float(m.group(1)) / 3600
return None
def average_jct_high_priority_fn(logfile_path, min_job_id=None,
max_job_id=None):
job_completion_times = []
with open(logfile_path, 'rb') as f:
f.seek(-8192, os.SEEK_END)
text = f.read().decode('utf-8')
lines = text.split('\n')
for line in lines[-5:]:
m = re.match(r'Average job completion time \(high priority\): (\d+\.\d+) seconds', line)
if m is not None:
return float(m.group(1)) / 3600
return None
def makespan_fn(logfile_path):
job_completion_times = []
with open(logfile_path, 'r') as f:
lines = f.readlines()
for line in lines[-10000:]:
m = re.match(r'Total duration: (\d+\.\d+) seconds', line)
if m is not None:
makespan = float(m.group(1)) / 3600.
return makespan
return None
def get_job_durations(seed, generate_multigpu_jobs):
job_generator = random.Random()
job_generator.seed(seed+2)
job_durations = []
    for i in range(5000):
        r = job_generator.uniform(0, 1)
        # GPU demand distribution: 70% of jobs use 1 GPU, 10% use 2,
        # 15% use 4, and 5% use 8
        scale_factor = 1
        if 0.7 <= r <= 0.8:
            scale_factor = 2
        elif 0.8 <= r <= 0.95:
            scale_factor = 4
        elif 0.95 <= r:
            scale_factor = 8
        if not generate_multigpu_jobs:
            scale_factor = 1
        # 20% of jobs are long-running (~17-167 hours);
        # the rest last roughly 0.5-17 hours
        if job_generator.random() >= 0.8:
            job_duration = 60 * (10 ** job_generator.uniform(3, 4))
        else:
            job_duration = 60 * (10 ** job_generator.uniform(1.5, 3))
while True:
job_template = job_generator.choice(JobTable)
if (scale_factor == 1 or
(scale_factor > 1 and job_template.distributed)):
break
job_durations.append((job_duration, job_template, scale_factor))
return job_durations
def get_jcts(logfile_path, seed, min_job_id=None, max_job_id=None):
job_completion_times = []
job_durations = get_job_durations(seed, generate_multigpu_jobs=True)
with open(logfile_path, 'r') as f:
lines = f.readlines()
for line in lines[-10000:]:
m = re.match(r'Job (\d+): (\d+\.\d+)', line)
if m is not None:
job_id = int(m.group(1))
job_completion_time = float(m.group(2))
if min_job_id is None or min_job_id <= job_id:
if max_job_id is None or job_id <= max_job_id:
job_duration, job_template, scale_factor = job_durations[job_id]
job_completion_times.append(
(job_completion_time, job_duration))
return [(x[0] / 3600.0, x[1] / 3600.0) for x in job_completion_times]
| 38.188312 | 100 | 0.550757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.073117 |
47f4fb021dc13ce9ce0d5ff354639ce8927eaf9b | 883 | py | Python | scripts/practice/FB-reRun/ MoveZeroesToEnd.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
]
| null | null | null | scripts/practice/FB-reRun/ MoveZeroesToEnd.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
]
| 8 | 2020-09-05T16:04:31.000Z | 2022-02-27T09:57:51.000Z | scripts/practice/FB-reRun/ MoveZeroesToEnd.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
]
| null | null | null | """
Move Zeroes - https://leetcode.com/problems/move-zeroes/
Given an integer array nums, move all 0's to the end of it while maintaining the relative order of the non-zero elements.
Note that you must do this in-place without making a copy of the array.
Example 1:
Input: nums = [0,1,0,3,12]
Output: [1,3,12,0,0]
Example 2:
Input: nums = [0]
Output: [0]
Constraints:
1 <= nums.length <= 10^4
-2^31 <= nums[i] <= 2^31 - 1
Follow up: Could you minimize the total number of operations done?
"""
from typing import List
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
pos = 0
for idx, val in enumerate(nums):
if val != 0:
                if pos != idx:
                    nums[idx], nums[pos] = nums[pos], nums[idx]
                pos += 1
return
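# Example run (mirrors Example 1 above; the list is mutated in place):
#   nums = [0, 1, 0, 3, 12]
#   Solution().moveZeroes(nums)  # nums is now [1, 3, 12, 0, 0]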
| 20.534884 | 121 | 0.583239 | 374 | 0.423556 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.660249 |
47f6c684577e0c1a7c425c6ef180e8ab456e667e | 1,503 | py | Python | django_stormpath/id_site.py | stormpath/stormpath-django | af60eb5da2115d94ac313613c5d4e6b9f3d16157 | [
"Apache-2.0"
]
| 36 | 2015-01-13T00:21:07.000Z | 2017-11-07T11:45:25.000Z | django_stormpath/id_site.py | stormpath/stormpath-django | af60eb5da2115d94ac313613c5d4e6b9f3d16157 | [
"Apache-2.0"
]
| 55 | 2015-01-07T09:53:50.000Z | 2017-02-07T00:31:20.000Z | django_stormpath/id_site.py | stormpath/stormpath-django | af60eb5da2115d94ac313613c5d4e6b9f3d16157 | [
"Apache-2.0"
]
| 24 | 2015-01-06T16:17:33.000Z | 2017-04-21T14:00:16.000Z | from django.contrib.auth import login as django_login
from django.contrib.auth import logout as django_logout
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url
from django.conf import settings
from .backends import StormpathIdSiteBackend
ID_SITE_STATUS_AUTHENTICATED = 'AUTHENTICATED'
ID_SITE_STATUS_LOGOUT = 'LOGOUT'
ID_SITE_STATUS_REGISTERED = 'REGISTERED'
ID_SITE_AUTH_BACKEND = 'django_stormpath.backends.StormpathIdSiteBackend'
def _get_django_user(account):
backend = StormpathIdSiteBackend()
return backend.authenticate(account=account)
def _handle_authenticated(request, id_site_response):
user = _get_django_user(id_site_response.account)
user.backend = ID_SITE_AUTH_BACKEND
django_login(request, user)
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return HttpResponseRedirect(redirect_to)
def _handle_logout(request, id_site_response):
django_logout(request)
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return HttpResponseRedirect(redirect_to)
_handle_registered = _handle_authenticated
def handle_id_site_callback(request, id_site_response):
if id_site_response:
action = CALLBACK_ACTIONS[id_site_response.status]
return action(request, id_site_response)
else:
return None
CALLBACK_ACTIONS = {
ID_SITE_STATUS_AUTHENTICATED: _handle_authenticated,
ID_SITE_STATUS_LOGOUT: _handle_logout,
ID_SITE_STATUS_REGISTERED: _handle_registered,
}
| 28.358491 | 73 | 0.809714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.056554 |
47f782c40ce2bf55510e810deac00bf9b89ac029 | 445 | py | Python | dp/sequence/perfect-square.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
]
| 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | dp/sequence/perfect-square.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
]
| 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | dp/sequence/perfect-square.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
]
| 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | # https://leetcode.com/problems/perfect-squares/description/
# dp alg; time complexity: O(n * sqrt(n)) (the inner loop runs ~sqrt(i) times)
class Solution(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
dp = [n for _ in range(n+1)]
dp[0] = 0
for i in range(1, n+1):
j = 1
while i-j*j >= 0:
dp[i] = min(dp[i], dp[i-j*j] + 1)
j += 1
return dp[n]
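# Sanity check: 12 = 4 + 4 + 4, so three perfect squares suffice.
#   print(Solution().numSquares(12))  # -> 3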
| 22.25 | 60 | 0.438202 | 347 | 0.779775 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.334831 |
47f8383750414c949b888bd3081dff4a804800b1 | 1,416 | py | Python | Projetos_Pessoais/projeto_sorteio/sorteio.py | thiagomath/Python | dd73154e347c75a65a74e047ba880cc1f7dc1f91 | [
"MIT"
]
| null | null | null | Projetos_Pessoais/projeto_sorteio/sorteio.py | thiagomath/Python | dd73154e347c75a65a74e047ba880cc1f7dc1f91 | [
"MIT"
]
| null | null | null | Projetos_Pessoais/projeto_sorteio/sorteio.py | thiagomath/Python | dd73154e347c75a65a74e047ba880cc1f7dc1f91 | [
"MIT"
]
| null | null | null | # Raffle program
from tkinter import *
'''import PySimpleGUI as sg'''
'''
#Layout
layout = [
[sg.Text('Nome:'), sg.Input()],
[sg.Button('OK')]
]
#Window
janela = sg.Window('Janela teste', layout)
#Interaction
eventos, valores = janela.Read()
#Message
print(f'Olá {valores[0]}, obrigado por usar PySimpleGUI!')
#Closing the window
janela.close()
'''
'''
cont = 0
participantes = dict()
for cont in range(0, 2):
participantes["nome"] = str(input('Digite o nome do participante: '))
participantes["numero"] = int(input('Digite o número do participante: '))
cont += 1
print(f'{cont} pessoas concorrendo ao sorteio!')
print(participantes)
'''
'''theme_name_list = sg.theme_list()
print(theme_name_list)'''
def pegar_cotacoes():
texto = 'xxx'
texto_cotacoes["text"] = texto
# Always start with:
janela = Tk()
janela.title('Sorteio T-force')
janela.geometry("400x400")
# Instruction text:
texto_de_orientacao = Label(janela, text='Clique no botão para ver as cotações das moedas')
# Text position:
texto_de_orientacao.grid(column=0, row=0, padx=10, pady=10)
# Button + function
botao = Button(janela, text="Buscar cotações Dólar, Euro e BTC", command=pegar_cotacoes)
botao.grid(column=0, row=1, padx=10, pady=10)
# Exchange-rate label:
texto_cotacoes = Label(janela, text="")
texto_cotacoes.grid(column=0, row=2, padx=10, pady=10)
# Always end with:
janela.mainloop()
| 24.413793 | 91 | 0.699859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 973 | 0.678049 |
47fa30bd997d1a1670c7fee27600bd53764e519c | 481 | py | Python | flask_tutorial/flask_sqlite3/__init__.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
]
| 1 | 2018-12-19T22:07:56.000Z | 2018-12-19T22:07:56.000Z | flask_tutorial/flask_sqlite3/__init__.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
]
| 12 | 2020-03-14T05:32:26.000Z | 2022-03-12T00:08:49.000Z | flask_tutorial/flask_sqlite3/__init__.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
]
| 1 | 2018-12-19T22:08:00.000Z | 2018-12-19T22:08:00.000Z | """
@author: magician
@file: __init__.py
@date: 2020/9/7
"""
from flask import Flask
from flask_tutorial.flask_sqlite3.flask_sqlite3 import SQLite3
app = Flask(__name__)
app.config.from_pyfile('the-config.cfg')
db = SQLite3(app)
@app.route('/')
def show_all():
"""
show_all
@return:
"""
# cur = db.connection.cursor()
# cur.execute('SELECT 1=1')
with app.app_context():
cur = db.connection.cursor()
cur.execute('SELECT 1=1')
| 17.814815 | 62 | 0.636175 | 0 | 0 | 0 | 0 | 240 | 0.49896 | 0 | 0 | 192 | 0.399168 |
47fd0f1fa3538b0a659731489a61f441085833ad | 516 | py | Python | str_to_ num.py | maiconloure/Learning_Python | 2999508909ace5f8ca0708cdea93b82abaaeafb2 | [
"MIT"
]
| null | null | null | str_to_ num.py | maiconloure/Learning_Python | 2999508909ace5f8ca0708cdea93b82abaaeafb2 | [
"MIT"
]
| null | null | null | str_to_ num.py | maiconloure/Learning_Python | 2999508909ace5f8ca0708cdea93b82abaaeafb2 | [
"MIT"
]
| null | null | null | """Transforming a string of numbers into a list
of number groups separated by \n"""
matrix = "1 2 3 4\n4 5 6 5\n7 8 9 6\n8 7 6 7"
print(matrix)
matrix = matrix.split("\n")
print(matrix)
matrix2 = []
for n in range(len(matrix)):
matrix[n] = matrix[n].split()
    matrix[n] = list(map(int, matrix[n]))  # e.g. turns the string '1' into an integer
    matrix2.append(matrix[n][0])  # take the first element/number of each row
for index in range(4):
print(matrix[index])
print()
print(matrix2) | 28.666667 | 85 | 0.672481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.436047 |
47fd4fd6cf5d22ab4c0d8b28debf25aa2a0236a1 | 517 | py | Python | Programs/__init__.py | el-vida/ITE_Classification_Using_GMMs | 8c1e751fac2c0aa873d41dbae45776b540db0889 | [
"MIT"
]
| null | null | null | Programs/__init__.py | el-vida/ITE_Classification_Using_GMMs | 8c1e751fac2c0aa873d41dbae45776b540db0889 | [
"MIT"
]
| null | null | null | Programs/__init__.py | el-vida/ITE_Classification_Using_GMMs | 8c1e751fac2c0aa873d41dbae45776b540db0889 | [
"MIT"
]
| null | null | null | from A_1_Add_columns_participant_info_multiprocessing_new import *
from A_2_Merge_new_logs import *
from B_1_Add_columns_user_performance_multiprocessing_new import *
from B_2_Merge_new_logs_with_user_groups import *
from C_1_Post_processing_log_new import *
from C_2_Merge_new_logs_post_processed import *
from D_1_Recalculate_new_levenshtein import *
from D_2_Merge_new_logs_new_levenshtein import *
from E_1_Correct_ites_new import *
from E_2_Merge_new_logs_corrected import *
from F_Finalize_dataset_slim import * | 47 | 66 | 0.895551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
47fd66e778b3e447ec3e01b548b142f968b5fb7f | 599 | py | Python | setup.py | xebialabs-community/xld-install-helper | a61baa9fabc6484afa5fd287a25fc6fb88d84670 | [
"MIT"
]
| null | null | null | setup.py | xebialabs-community/xld-install-helper | a61baa9fabc6484afa5fd287a25fc6fb88d84670 | [
"MIT"
]
| null | null | null | setup.py | xebialabs-community/xld-install-helper | a61baa9fabc6484afa5fd287a25fc6fb88d84670 | [
"MIT"
]
| 2 | 2016-12-27T12:12:09.000Z | 2020-09-24T18:06:58.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='xl-helper',
version='1.0.5',
description='XL Deploy helper',
long_description='This tool helps with installation and upgrade of XL Deploy and plugins',
author='Mike Kotsur',
author_email='[email protected]',
url='http://xebialabs.com/',
packages=find_packages(where=".", exclude=["tests*"]),
package_data={'xl_helper': ['deployit.conf', '.xl-helper.defaults']},
include_package_data=True,
install_requires=['jenkinsapi', 'argparse', 'pytz'],
scripts=['xl-helper']
)
| 31.526316 | 94 | 0.684474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.475793 |
47ff4464ecaa8b0b0480823b9cf4bf43b54abcec | 3,520 | py | Python | pdf_poc/search.py | cr0hn/TestingBench | 37975343cf9ccb019e8dc42404b5b321285b04b3 | [
"BSD-3-Clause"
]
| 5 | 2018-05-10T19:50:29.000Z | 2018-05-10T20:07:08.000Z | pdf_poc/search.py | cr0hn/TestingBench | 37975343cf9ccb019e8dc42404b5b321285b04b3 | [
"BSD-3-Clause"
]
| null | null | null | pdf_poc/search.py | cr0hn/TestingBench | 37975343cf9ccb019e8dc42404b5b321285b04b3 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from collections import defaultdict
from PyPDF2 import PdfFileReader
from PyPDF2.pdf import PageObject, ContentStream, TextStringObject, u_, i, b_
def is_continuation(content, item):
if content.operations[item - 1][1] == b_("Tm"):
# Search previous "Tm"
for bef in range(-2, -15, -1):
try:
if content.operations[item - bef][1] == b_("Tm"):
prev_val = content.operations[item - bef][0]
break
except IndexError:
return False
else:
return False
key_1_preve = '{0:.5f}'.format(prev_val[4]).split(".")[1]
key_2_preve = '{0:.5f}'.format(prev_val[5]).split(".")[1]
prev_curr = content.operations[item - 1][0]
key_1_curr = '{0:.5f}'.format(prev_curr[4]).split(".")[1]
key_2_curr = '{0:.5f}'.format(prev_curr[5]).split(".")[1]
# if key_1_curr != key_1_preve or key_2_curr != key_2_preve:
if key_1_curr == key_1_preve:
return True
return False
def is_header(content, item):
if content.operations[item - 1][1] == b_("Td"):
return True
elif content.operations[item - 1][1] == b_("Tm") and \
content.operations[item - 2][1] == b_("Tf"):
if content.operations[item - 3][1] == b_("BT") or \
content.operations[item - 3][1] == b_("scn"):
return True
else:
return False
else:
return False
def extractText_with_separator(self, remove_headers=False):
text = u_("")
content = self["/Contents"].getObject()
if not isinstance(content, ContentStream):
content = ContentStream(content, self.pdf)
# Note: we check all strings are TextStringObjects. ByteStringObjects
# are strings where the byte->string encoding was unknown, so adding
# them to the text here would be gibberish.
for item, (operands, operator) in enumerate(content.operations):
if operator == b_("Tj"):
# Skip headers?
if is_header(content, item):
continue
if not is_continuation(content, item):
text += "\n"
_text = operands[0]
if isinstance(_text, TextStringObject):
text += _text
elif operator == b_("T*"):
text += "\n"
elif operator == b_("'"):
text += "\n"
_text = operands[0]
if isinstance(_text, TextStringObject):
text += operands[0]
elif operator == b_('"'):
_text = operands[2]
if isinstance(_text, TextStringObject):
text += "\n"
text += _text
elif operator == b_("TJ"):
# Skip headers?
if is_header(content, item):
continue
if not is_continuation(content, item):
text += "\n"
for i in operands[0]:
if isinstance(i, TextStringObject):
text += i
# text += "\n"
return text
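# monkey-patch PyPDF2's PageObject so every page exposes extractText_with_separator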
PageObject.extractText_with_separator = extractText_with_separator
KEYWORDS = ["procesos electorales"]
def find_in_pdf(pdf_path, keywords):
"""
Try to find a word list into pdf file.
    .. note::
        The line number is approximate, not exact.
:param pdf_path: path to pdf
:type pdf_path: str
:param keywords: list of keyword to search
:type keywords: list(str)
:return: a structure like this: { PAGE_NUM: { LINE_NUM: TEXT_OF_LINE}
:rtype: dict(str: dict(int: str))
"""
pdf = PdfFileReader(open(pdf_path, 'rb'))
matches = defaultdict(dict)
for page_no, page in enumerate(pdf.pages, 1):
text = page.extractText_with_separator()
line_no = 1
# search
for keyword in keywords:
for line in text.split("\n"):
if not line:
continue
line_no += 1
if keyword in line.lower():
matches["page_%s" % page_no][line_no] = line
return matches
if __name__ == '__main__':
r = find_in_pdf("BOE.pdf", KEYWORDS)
print(r) | 23.311258 | 77 | 0.658523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 864 | 0.245455 |
9a003487767445f7e574b64c73392ed111a08837 | 493 | py | Python | setup.py | hugorodgerbrown/django-netpromoterscore | f0a7ddc32fe942069abacfaa5a3220eaabe9e1db | [
"MIT"
]
| 8 | 2016-06-21T21:56:17.000Z | 2021-10-06T17:28:00.000Z | setup.py | hugorodgerbrown/django-netpromoterscore | f0a7ddc32fe942069abacfaa5a3220eaabe9e1db | [
"MIT"
]
| null | null | null | setup.py | hugorodgerbrown/django-netpromoterscore | f0a7ddc32fe942069abacfaa5a3220eaabe9e1db | [
"MIT"
]
| 1 | 2018-10-19T21:57:54.000Z | 2018-10-19T21:57:54.000Z | from setuptools import setup, find_packages
setup(
name = "django-netpromoterscore",
version = '0.0.2',
description = "Model, Tests, and API for collecting promoter score from users.",
author = "Austin Brennan",
author_email = "[email protected]",
url = "https://github.com/epantry/django-netpromoterscore",
keywords = ["promoter score", "net promoter score", "django"],
install_requires = [],
packages = find_packages(),
include_package_data=True,
) | 32.866667 | 84 | 0.677485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.456389 |
9a0110a8361459c9dacb7bcdc22b39b60eeea30e | 731 | py | Python | DSC_Data_Exchange/dsc-text-node/src/state/configuration_state.py | ai4eu/tutorials | 68eb2208716e655d2aa8b950a0d7d73bf6f20f3a | [
"Apache-2.0"
]
| 8 | 2020-04-21T13:29:04.000Z | 2021-12-13T08:59:09.000Z | DSC_Data_Exchange/dsc-text-node/src/state/configuration_state.py | ai4eu/tutorials | 68eb2208716e655d2aa8b950a0d7d73bf6f20f3a | [
"Apache-2.0"
]
| 3 | 2021-04-27T11:03:04.000Z | 2021-05-24T18:22:57.000Z | DSC_Data_Exchange/dsc-text-node/src/state/configuration_state.py | ai4eu/tutorials | 68eb2208716e655d2aa8b950a0d7d73bf6f20f3a | [
"Apache-2.0"
]
| 6 | 2020-07-06T08:23:25.000Z | 2021-11-24T10:39:34.000Z | import flask
class Configuration:
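    # singleton: __new__ caches a single shared instance on the class and always returns it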
def __new__(cls):
if not hasattr(cls, 'instance'):
cls.instance = super(Configuration, cls).__new__(cls)
return cls.instance
recipient = None
resource_id = None
artifact_id = None
contract = None
custom_dsc = None
use_custom_dsc = False
data_send = False
def get_jsonifyed_configuration():
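    # note: flask.jsonify requires an application/request context, so call this from a route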
return flask.jsonify({
"recipient": Configuration().recipient,
"resource_id": Configuration().resource_id,
"artifact_id": Configuration().artifact_id,
"contract": Configuration().contract,
"use_custom_dsc": Configuration().use_custom_dsc,
"custom_dsc": Configuration().custom_dsc,
})
| 24.366667 | 65 | 0.653899 | 338 | 0.46238 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.116279 |
9a014ede8ba8180a42bf7abbdb2da472afc22d73 | 228 | py | Python | pynemo/core/base/property/string.py | SSripilaipong/pynemo | f4dedd2599ec78b2ffe73f55b1d2b8b5da1b1e7f | [
"MIT"
]
| null | null | null | pynemo/core/base/property/string.py | SSripilaipong/pynemo | f4dedd2599ec78b2ffe73f55b1d2b8b5da1b1e7f | [
"MIT"
]
| null | null | null | pynemo/core/base/property/string.py | SSripilaipong/pynemo | f4dedd2599ec78b2ffe73f55b1d2b8b5da1b1e7f | [
"MIT"
]
| null | null | null | from pynemo.core.base.abstract.property import Property
class StringProperty(Property):
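    # coerces values to str on validation and renders them as quoted Cypher literals via repr()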
@classmethod
def validate(cls, v):
return str(v)
@classmethod
def to_cypher(cls, v):
return repr(str(v))
| 19 | 55 | 0.666667 | 169 | 0.741228 | 0 | 0 | 127 | 0.557018 | 0 | 0 | 0 | 0 |
9a0316a49bbe3e0c8ccbf65e47f3d0ad6d7d1eaf | 6,299 | py | Python | src/folio_migration_tools/folder_structure.py | chadmcinnis/folio_migration_tools | 39ee044a713a34c323324a956e3e8b54ee05c194 | [
"MIT"
]
| 1 | 2022-03-30T07:48:33.000Z | 2022-03-30T07:48:33.000Z | src/folio_migration_tools/folder_structure.py | chadmcinnis/folio_migration_tools | 39ee044a713a34c323324a956e3e8b54ee05c194 | [
"MIT"
]
| 76 | 2022-02-04T16:36:49.000Z | 2022-03-31T11:20:29.000Z | src/folio_migration_tools/folder_structure.py | chadmcinnis/folio_migration_tools | 39ee044a713a34c323324a956e3e8b54ee05c194 | [
"MIT"
]
| 1 | 2022-02-02T17:19:05.000Z | 2022-02-02T17:19:05.000Z | import logging
import sys
from pathlib import Path
import time
from folio_uuid.folio_namespaces import FOLIONamespaces
class FolderStructure:
def __init__(
self,
base_path: Path,
object_type: FOLIONamespaces,
migration_task_name: str,
iteration_identifier: str,
add_time_stamp_to_file_names: bool,
):
logging.info("Setting up folder structure")
self.object_type: FOLIONamespaces = object_type
self.migration_task_name = migration_task_name
self.add_time_stamp_to_file_names = add_time_stamp_to_file_names
self.iteration_identifier = iteration_identifier
self.base_folder = Path(base_path)
if not self.base_folder.is_dir():
logging.critical("Base Folder Path is not a folder. Exiting.")
sys.exit(1)
self.data_folder = self.base_folder / "data"
verify_folder(self.data_folder)
verify_folder(self.data_folder / str(FOLIONamespaces.instances.name).lower())
verify_folder(self.data_folder / str(FOLIONamespaces.holdings.name).lower())
verify_folder(self.data_folder / str(FOLIONamespaces.items.name).lower())
verify_folder(self.data_folder / str(FOLIONamespaces.users.name).lower())
self.archive_folder = self.base_folder / "archive"
        verify_folder(self.archive_folder)
self.results_folder = self.base_folder / "results"
verify_folder(self.results_folder)
self.reports_folder = self.base_folder / "reports"
verify_folder(self.reports_folder)
self.mapping_files_folder = self.base_folder / "mapping_files"
verify_folder(self.mapping_files_folder)
gitignore = self.base_folder / ".gitignore"
verify_git_ignore(gitignore)
def log_folder_structure(self):
logging.info("Mapping files folder is %s", self.mapping_files_folder)
logging.info("Git ignore is set up correctly")
logging.info("Base folder is %s", self.base_folder)
logging.info("Reports and logs folder is %s", self.reports_folder)
logging.info("Results folder is %s", self.results_folder)
logging.info("Data folder is %s", self.data_folder)
logging.info("Source records files folder is %s", self.legacy_records_folder)
logging.info("Log file will be located at %s", self.transformation_log_path)
logging.info("Extra data will be stored at%s", self.transformation_extra_data_path)
logging.info("Data issue reports %s", self.data_issue_file_path)
logging.info("Created objects will be stored at %s", self.created_objects_path)
logging.info("Migration report file will be saved at %s", self.migration_reports_file)
def setup_migration_file_structure(self, source_file_type: str = ""):
time_stamp = f'_{time.strftime("%Y%m%d-%H%M%S")}'
time_str = time_stamp if self.add_time_stamp_to_file_names else ""
file_template = f"{self.iteration_identifier}{time_str}_{self.migration_task_name}"
object_type_string = str(self.object_type.name).lower()
if source_file_type:
self.legacy_records_folder = self.data_folder / source_file_type
elif self.object_type == FOLIONamespaces.other:
self.legacy_records_folder = self.data_folder
else:
self.legacy_records_folder = self.data_folder / object_type_string
verify_folder(self.legacy_records_folder)
self.transformation_log_path = self.reports_folder / (
f"log_{object_type_string}_{file_template}.log"
)
self.failed_recs_path = (
self.results_folder / f"failed_records_{file_template}_{time_stamp}.txt"
)
self.transformation_extra_data_path = (
self.results_folder / f"extradata_{file_template}.extradata"
)
self.data_issue_file_path = (
self.reports_folder / f"data_issues_log_{object_type_string}_{file_template}.tsv"
)
self.created_objects_path = (
self.results_folder / f"folio_{object_type_string}_{file_template}.json"
)
self.failed_bibs_file = (
self.results_folder / f"failed_bib_records_{self.iteration_identifier}{time_str}.mrc"
)
self.failed_mfhds_file = (
self.results_folder / f"failed_mfhd_records_{self.iteration_identifier}{time_str}.mrc"
)
self.migration_reports_file = (
self.reports_folder / f"transformation_report_{object_type_string}_{file_template}.md"
)
self.srs_records_path = (
self.results_folder / f"folio_srs_{object_type_string}_{file_template}.json"
)
self.instance_id_map_path = (
self.results_folder / f"instance_id_map_{self.iteration_identifier}.json"
)
self.holdings_id_map_path = (
self.results_folder / f"holdings_id_map_{self.iteration_identifier}.json"
)
# Mapping files
self.temp_locations_map_path = self.mapping_files_folder / "temp_locations.tsv"
self.material_type_map_path = self.mapping_files_folder / "material_types.tsv"
self.loan_type_map_path = self.mapping_files_folder / "loan_types.tsv"
self.temp_loan_type_map_path = self.mapping_files_folder / "temp_loan_types.tsv"
self.statistical_codes_map_path = self.mapping_files_folder / "statcodes.tsv"
self.item_statuses_map_path = self.mapping_files_folder / "item_statuses.tsv"
def verify_git_ignore(gitignore: Path):
with open(gitignore, "r+") as f:
contents = f.read()
if "results/" not in contents:
f.write("results/\n")
if "archive/" not in contents:
f.write("archive/\n")
if "data/" not in contents:
f.write("data/\n")
if "*.data" not in contents:
f.write("*.data\n")
logging.info("Made sure there was a valid .gitignore file at %s", gitignore)
def verify_folder(folder_path: Path):
if not folder_path.is_dir():
logging.critical("There is no folder located at %s. Exiting.", folder_path)
logging.critical("Create a folder by calling\n\tmkdir %s", folder_path)
sys.exit(1)
else:
logging.info("Located %s", folder_path)
| 42.275168 | 98 | 0.680743 | 5,391 | 0.85585 | 0 | 0 | 0 | 0 | 0 | 0 | 1,541 | 0.244642 |
9a03f4a30283fc5811ab209f5fab981571d780d6 | 6,064 | py | Python | ftrl_noise.py | google-research/DP-FTRL | 513500a8e31e412972a7d457e9c66756e4a48348 | [
"Apache-2.0"
]
| 8 | 2021-04-09T18:00:18.000Z | 2022-03-11T01:13:13.000Z | ftrl_noise.py | google-research/DP-FTRL | 513500a8e31e412972a7d457e9c66756e4a48348 | [
"Apache-2.0"
]
| 1 | 2021-08-18T04:59:42.000Z | 2021-12-08T00:24:24.000Z | ftrl_noise.py | google-research/DP-FTRL | 513500a8e31e412972a7d457e9c66756e4a48348 | [
"Apache-2.0"
]
| 3 | 2021-11-05T15:42:31.000Z | 2022-03-03T07:38:46.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The tree aggregation protocol for noise addition in DP-FTRL."""
import torch
from collections import namedtuple
from absl import app
class CummuNoiseTorch:
@torch.no_grad()
def __init__(self, std, shapes, device, test_mode=False):
"""
:param std: standard deviation of the noise
:param shapes: shapes of the noise, which is basically shape of the gradients
:param device: device for pytorch tensor
:param test_mode: if in test mode, noise will be 1 in each node of the tree
"""
assert std >= 0
self.std = std
self.shapes = shapes
self.device = device
self.step = 0
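        # binary[k] is the k-th bit of the step counter; recorded[k] keeps the noise stored at tree level k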
self.binary = [0]
self.noise_sum = [torch.zeros(shape).to(self.device) for shape in shapes]
self.recorded = [[torch.zeros(shape).to(self.device) for shape in shapes]]
self.test_mode = test_mode
@torch.no_grad()
def __call__(self):
"""
:return: the noise to be added by DP-FTRL
"""
if self.std <= 0 and not self.test_mode:
return self.noise_sum
self.step += 1
idx = 0
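        # binary-counter increment: clear trailing 1-bits and subtract the noise recorded at those levels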
while idx < len(self.binary) and self.binary[idx] == 1:
self.binary[idx] = 0
for ns, re in zip(self.noise_sum, self.recorded[idx]):
ns -= re
idx += 1
if idx >= len(self.binary):
self.binary.append(0)
self.recorded.append([torch.zeros(shape).to(self.device) for shape in self.shapes])
for shape, ns, re in zip(self.shapes, self.noise_sum, self.recorded[idx]):
if not self.test_mode:
n = torch.normal(0, self.std, shape).to(self.device)
else:
n = torch.ones(shape).to(self.device)
ns += n
re.copy_(n)
self.binary[idx] = 1
return self.noise_sum
@torch.no_grad()
def proceed_until(self, step_target):
"""
Proceed until the step_target-th step. This is for the binary tree completion trick.
:return: the noise to be added by DP-FTRL
"""
if self.step >= step_target:
raise ValueError(f'Already reached {step_target}.')
while self.step < step_target:
noise_sum = self.__call__()
return noise_sum
Element = namedtuple('Element', 'height value')
class CummuNoiseEffTorch:
"""
The tree aggregation protocol with the trick in Honaker, "Efficient Use of Differentially Private Binary Trees", 2015
"""
@torch.no_grad()
def __init__(self, std, shapes, device):
"""
:param std: standard deviation of the noise
:param shapes: shapes of the noise, which is basically shape of the gradients
:param device: device for pytorch tensor
"""
self.std = std
self.shapes = shapes
self.device = device
self.step = 0
self.noise_sum = [torch.zeros(shape).to(self.device) for shape in shapes]
self.stack = []
@torch.no_grad()
def get_noise(self):
return [torch.normal(0, self.std, shape).to(self.device) for shape in self.shapes]
@torch.no_grad()
def push(self, elem):
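        # Honaker-style weighting: a node at height h is added with weight 1/(2 - 2**-h)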
for i in range(len(self.shapes)):
self.noise_sum[i] += elem.value[i] / (2.0 - 1 / 2 ** elem.height)
self.stack.append(elem)
@torch.no_grad()
def pop(self):
elem = self.stack.pop()
for i in range(len(self.shapes)):
self.noise_sum[i] -= elem.value[i] / (2.0 - 1 / 2 ** elem.height)
@torch.no_grad()
def __call__(self):
"""
:return: the noise to be added by DP-FTRL
"""
self.step += 1
# add new element to the stack
self.push(Element(0, self.get_noise()))
# pop the stack
while len(self.stack) >= 2 and self.stack[-1].height == self.stack[-2].height:
# create new element
left_value, right_value = self.stack[-2].value, self.stack[-1].value
new_noise = self.get_noise()
new_elem = Element(
self.stack[-1].height + 1,
[x + (y + z) / 2 for x, y, z in zip(new_noise, left_value, right_value)])
# pop the stack, update sum
self.pop()
self.pop()
# append to the stack, update sum
self.push(new_elem)
return self.noise_sum
@torch.no_grad()
def proceed_until(self, step_target):
"""
Proceed until the step_target-th step. This is for the binary tree completion trick.
:return: the noise to be added by DP-FTRL
"""
if self.step >= step_target:
raise ValueError(f'Already reached {step_target}.')
while self.step < step_target:
noise_sum = self.__call__()
return noise_sum
def main(argv):
    # This is a small test. If we set the noise in each node to 1 (by setting
    # test_mode=True), the returned noise should equal the number of 1s in the
    # binary representation of i after cummu_noises has been called i times.
def countSetBits(n):
count = 0
while (n):
n &= (n - 1)
count += 1
return count
cummu_noises = CummuNoiseTorch(1.0, [(1,)], 'cuda', test_mode=True)
for epoch in range(31):
random_noise = cummu_noises()
assert random_noise[0].cpu().numpy()[0] == countSetBits(epoch + 1)
if __name__ == '__main__':
app.run(main) | 32.956522 | 121 | 0.59812 | 4,651 | 0.766985 | 0 | 0 | 4,414 | 0.727902 | 0 | 0 | 2,146 | 0.353892 |
9a04e9a41ace038d4a501f35036632f201b9f71d | 2,782 | py | Python | ladder/tests/test_models.py | jzahedieh/django-tennis-ladder | 03a9fc9ec6d0830ac1d6648428eca11755eabb00 | [
"MIT"
]
| 13 | 2015-04-30T21:07:20.000Z | 2021-01-08T13:52:14.000Z | ladder/tests/test_models.py | jzahedieh/django-tennis-ladder | 03a9fc9ec6d0830ac1d6648428eca11755eabb00 | [
"MIT"
]
| 13 | 2015-04-05T22:48:14.000Z | 2021-12-12T17:29:16.000Z | ladder/tests/test_models.py | jzahedieh/django-tennis-ladder | 03a9fc9ec6d0830ac1d6648428eca11755eabb00 | [
"MIT"
]
| 5 | 2016-10-12T16:24:09.000Z | 2019-11-26T10:16:44.000Z | from django.test import TestCase
from ladder.models import Player, Result, League, Season
from django.db.models import Avg
class PlayerModelTest(TestCase):
def test_player_stats(self):
"""
Tests a player stats is calculated correctly.
"""
# fresh player test
player = Player(first_name='New', last_name='Player')
self.assertEqual(player.player_stats(), {
'played': "-",
'win_rate': "- %",
'average': "-"
})
# player with matches test
player = Player.objects.first()
stats = player.player_stats()
results = Result.objects.filter(player=player)
# assert games played is correct
games_played = results.count()
self.assertEqual(stats['played'], games_played)
# assert completion rate is correct
match_count = 0
for league in League.objects.filter(player=player):
match_count += league.ladder.league_set.count() - 1
self.assertEqual(stats['completion_rate'], "{0:.2f} %".format(games_played / match_count * 100.00))
# assert win rate is correct
won = player.result_player.filter(result=9).count()
self.assertEqual(stats['win_rate'], "{0:.2f} %".format(won / games_played * 100.00))
# assert average is correct
# two points for winning + 1 point for playing
additional_points = ((won * 2) + games_played) / games_played
average = list(player.result_player.aggregate(Avg('result')).values())[0]
average_with_additional = average + additional_points
self.assertEqual(stats['average'], "{0:.2f}".format(average_with_additional))
class SeasonModelTest(TestCase):
def test_season_stats(self):
season = Season.objects.first()
stats = season.get_stats()
player_count = 0
results_count = 0
total_games_count = 0.0
for ladder in season.ladder_set.all():
player_count += ladder.league_set.count()
results_count += ladder.result_set.count() / 2
total_games_count += (ladder.league_set.count() * (ladder.league_set.count() - 1)) / 2
# division stat assertion
self.assertEqual(stats['divisions'], season.ladder_set.count())
# perc played assertion
percentage_played = (results_count / total_games_count) * 100
self.assertEqual(stats['percentage_played'], "{0:.2f}".format(percentage_played))
# total games assertion
self.assertEqual(stats['total_games_count'], total_games_count)
# result count assertion
self.assertEqual(stats['results_count'], results_count)
# player count assertion
self.assertEqual(stats['player_count'], player_count) | 36.605263 | 107 | 0.638749 | 2,654 | 0.95399 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.22358 |
9a06d9877e200d7e5cdcb16fe42f60b4884f0200 | 6,779 | py | Python | src/authub/idp/google.py | fantix/authub | 1f8a30fe32c579e556d2b962f258e0f99527a006 | [
"BSD-3-Clause"
]
| null | null | null | src/authub/idp/google.py | fantix/authub | 1f8a30fe32c579e556d2b962f258e0f99527a006 | [
"BSD-3-Clause"
]
| null | null | null | src/authub/idp/google.py | fantix/authub | 1f8a30fe32c579e556d2b962f258e0f99527a006 | [
"BSD-3-Clause"
]
| null | null | null | """Google OpenID Connect identity provider."""
from uuid import UUID
from fastapi import Depends, status, Request
from pydantic import BaseModel
from .base import IdPRouter, oauth
from ..http import get_edgedb_pool
from ..models import IdPClient, Identity as BaseIdentity, Href, User
from ..orm import ExtendedComputableProperty, ExclusiveConstraint, with_block
class Client(IdPClient):
client_id: str
client_secret: str
class Identity(BaseIdentity):
iss: str # "https://accounts.google.com"
azp: str # client_id
aud: str # client_id
sub: str # "112506503767939677396"
hd: str # "edgedb.com"
email: str
email_verified: bool
at_hash: str # "Gn_Xy8b7J7qdPrAPTSJxqA"
name: str
picture: str # URL
given_name: str
family_name: str
locale: str # "en"
iat: int
exp: int
access_token: str
expires_in: int
scope: str
token_type: str
id_token: str
expires_at: int
# We only need the second, refs edgedb/edgedb#1939
ExtendedComputableProperty("iss_sub", "(.iss, .sub)", exclusive=True)
ExclusiveConstraint("iss", "sub")
idp = IdPRouter("google")
class GoogleClientOut(BaseModel):
name: str
client_id: str
redirect_uri: str
@idp.get(
"/clients/{idp_client_id}",
response_model=GoogleClientOut,
responses={status.HTTP_404_NOT_FOUND: {}},
summary="Get details of the specified Google OIDC client.",
)
async def get_client(
idp_client_id: UUID, request: Request, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
"""
SELECT google::Client {
name,
client_id,
} FILTER .id = <uuid>$id
""",
id=idp_client_id,
)
return GoogleClientOut(
redirect_uri=request.url_for(
f"{idp.name}.authorize", idp_client_id=idp_client_id
),
**Client.from_obj(result).dict(),
)
class GoogleClientIn(BaseModel):
name: str
client_id: str
client_secret: str
@idp.post(
"/clients",
response_model=Href,
status_code=status.HTTP_201_CREATED,
summary="Configure a new Google OIDC client.",
)
async def add_client(
client: GoogleClientIn, request: Request, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
"""
INSERT google::Client {
name := <str>$name,
client_id := <str>$client_id,
client_secret := <str>$client_secret
}
""",
**client.dict(),
)
return Href(
href=request.url_for(f"{idp.name}.get_client", idp_client_id=result.id)
)
async def _get_google_client(db, idp_client_id):
try:
client = getattr(oauth, idp_client_id.hex)
except AttributeError:
result = await db.query_one(
"""
SELECT google::Client {
client_id,
client_secret,
} FILTER .id = <uuid>$id
""",
id=idp_client_id,
)
client = Client.from_obj(result)
client = oauth.register(
name=idp_client_id.hex,
server_metadata_url="https://accounts.google.com/.well-known/openid-configuration",
client_id=client.client_id,
client_secret=client.client_secret,
client_kwargs={"scope": "openid email profile"},
)
return client
@idp.get(
"/clients/{idp_client_id}/login",
summary="Login through the specified Google OIDC client.",
status_code=status.HTTP_307_TEMPORARY_REDIRECT,
)
async def login(
idp_client_id: UUID, request: Request, db=Depends(get_edgedb_pool)
):
google_client = await _get_google_client(db, idp_client_id)
return await google_client.authorize_redirect(
request,
request.url_for(f"{idp.name}.authorize", idp_client_id=idp_client_id),
)
@idp.get(
"/clients/{idp_client_id}/authorize",
summary="Google OIDC redirect URI.",
)
async def authorize(
idp_client_id: UUID, request: Request, db=Depends(get_edgedb_pool)
):
google_client = await _get_google_client(db, idp_client_id)
token = await google_client.authorize_access_token(request)
user = await google_client.parse_id_token(request, token)
identity = Identity.construct(**token, **user)
client = Client.select(filters=".id = <uuid>$client_id")
result = await db.query_one(
"SELECT ("
+ identity.insert(
user=User().insert(),
client=client,
conflict_on=".iss_sub",
conflict_else=identity.update(
exclude={"iss", "sub"}, client=client
),
)
+ ") { id, user: { id }, client: { id } }",
client_id=idp_client_id,
**identity.dict(exclude={"nonce"}, exclude_unset=True),
)
if "client_id" in request.session:
from authub.oauth2 import oauth2_authorized
return await oauth2_authorized(request, User.from_obj(result.user))
else:
identity = Identity(
id=result.id,
user=User.from_obj(result.user),
client=Client.from_obj(result.client),
**identity.dict(exclude_unset=True),
)
return identity.dict()
class IdentityOut(BaseModel):
iss: str # "https://accounts.google.com"
hd: str # "edgedb.com"
email: str
email_verified: bool
name: str
picture: str # URL
given_name: str
family_name: str
locale: str # "en"
@idp.get(
"/identities/{identity_id}",
response_model=IdentityOut,
response_model_exclude_unset=True,
response_model_exclude={"user", "client"},
summary="Get the profile of the specified Google identity.",
)
async def get_identity(identity_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
Identity.select(
*IdentityOut.schema()["properties"],
filters=".id = <uuid>$id",
),
id=identity_id,
)
return IdentityOut(**Identity.from_obj(result).dict())
@idp.patch(
"/identities/{identity_id}/utilize",
response_model=User,
summary="Update the user's profile with the specified Google identity.",
)
async def utilize_identity(identity_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
with_block(
identity=Identity.select(
"user: { id }",
"email",
"name",
filters=".id = <uuid>$identity_id",
)
)
+ "SELECT ("
+ User.construct().update(
filters=".id = identity.user.id",
email="identity.email",
name="identity.name",
)
+ ") { id, email, name }",
identity_id=identity_id,
)
return User.from_obj(result)
| 27.445344 | 95 | 0.621478 | 1,181 | 0.174214 | 0 | 0 | 4,414 | 0.651128 | 4,187 | 0.617643 | 1,661 | 0.245021 |
9a07ac994b1953108c37e98bccdd7052124320ff | 1,192 | py | Python | src/holocron/_processors/import_processors.py | ikalnytskyi/holocron | f0bda50f1aab7d1013fac5bd8fb01f7ebeb7bdc3 | [
"BSD-3-Clause"
]
| 6 | 2016-11-27T11:53:18.000Z | 2021-02-08T00:37:59.000Z | src/holocron/_processors/import_processors.py | ikalnytskyi/holocron | f0bda50f1aab7d1013fac5bd8fb01f7ebeb7bdc3 | [
"BSD-3-Clause"
]
| 25 | 2017-04-12T15:27:55.000Z | 2022-01-21T23:37:37.000Z | src/holocron/_processors/import_processors.py | ikalnytskyi/holocron | f0bda50f1aab7d1013fac5bd8fb01f7ebeb7bdc3 | [
"BSD-3-Clause"
]
| 1 | 2020-11-15T17:49:36.000Z | 2020-11-15T17:49:36.000Z | """Import processors from 3rd party sources."""
import contextlib
import sys
import pkg_resources
from ._misc import parameters
@parameters(
jsonschema={
"type": "object",
"properties": {
"imports": {"type": "array", "items": {"type": "string"}},
"from_": {"type": "string"},
},
}
)
def process(app, items, *, imports, from_=None):
distribution = pkg_resources.get_distribution("holocron")
with contextlib.ExitStack() as exit:
if from_:
sys.path.insert(0, from_)
exit.callback(sys.path.pop, 0)
for import_ in imports:
entry_point = pkg_resources.EntryPoint.parse(import_, distribution)
app.add_processor(entry_point.name, entry_point.resolve())
# Processors are generators, so we must return iterable to be compliant
# with the protocol. The only reason why a top-level 'process' function is
# not a processor itself is because otherwise processors will be imported
# pipeline evaluation time while we need them be imported pipeline creation
# time.
def passgen(app, items):
yield from items
return passgen(app, items)
| 29.073171 | 79 | 0.64849 | 0 | 0 | 847 | 0.71057 | 1,058 | 0.887584 | 0 | 0 | 447 | 0.375 |
9a0859c884c636f6f47e39ee23feff85000d7d1d | 656 | py | Python | 412.fizz-buzz.py | SprintGhost/LeetCode | cdf1a86c83f2daedf674a871c4161da7e8fad17c | [
"Unlicense"
]
| 1 | 2019-03-26T13:49:14.000Z | 2019-03-26T13:49:14.000Z | 412.fizz-buzz.py | SprintGhost/LeetCode | cdf1a86c83f2daedf674a871c4161da7e8fad17c | [
"Unlicense"
]
| 5 | 2020-01-04T15:13:06.000Z | 2020-08-31T14:20:23.000Z | 412.fizz-buzz.py | SprintGhost/LeetCode | cdf1a86c83f2daedf674a871c4161da7e8fad17c | [
"Unlicense"
]
| null | null | null | #
# @lc app=leetcode.cn id=412 lang=python3
#
# [412] Fizz Buzz
#
# Accepted
# 8/8 cases passed (48 ms)
# Your runtime beats 76.37 % of python3 submissions
# Your memory usage beats 25 % of python3 submissions (14.5 MB)
# @lc code=start
class Solution:
def fizzBuzz(self, n: int):
result = list()
for each in range(1,n+1):
if each % 15 == 0:
result.append("FizzBuzz")
elif each % 3 == 0:
result.append("Fizz")
elif each % 5 == 0:
result.append("Buzz")
else:
result.append(str(each))
return result
# @lc code=end
| 22.62069 | 63 | 0.532012 | 399 | 0.608232 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.400915 |
9a08f540ce3f12537d5b6d4be1caf8051f4c1c27 | 5,875 | py | Python | selector/from_model.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
]
| 2 | 2019-04-26T19:40:31.000Z | 2019-10-12T15:18:29.000Z | selector/from_model.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
]
| null | null | null | selector/from_model.py | uberkinder/Robusta-AutoML | 9faee4c17ad9f37b09760f9fffea715cdbf2d1fb | [
"MIT"
]
| null | null | null | import pandas as pd
import numpy as np
from sklearn.model_selection import check_cv
from sklearn.exceptions import NotFittedError
from sklearn.base import clone, is_classifier
from robusta.importance import get_importance
from robusta.crossval import crossval
from .base import _Selector
# Original: sklearn.feature_selection.SelectFromModel
class SelectFromModel(_Selector):
"""Meta-transformer for selecting features based on importance weights.
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
This can be both a fitted (if cv='prefit') or a non-fitted estimator.
The estimator must have either a <feature_importances_> or <coef_>
attribute after fitting.
threshold : string, float, optional (default None)
The threshold value to use for feature selection. Features whose
importance is greater or equal are kept while the others are
discarded. If "median" (resp. "mean"), then the <threshold> value is
the median (resp. the mean) of the feature importances. A scaling
factor (e.g., "1.25*mean") may also be used. If None, drop features
only based on <max_features>.
max_features : int, float or None, optional (default 0.5)
The maximum number of features selected scoring above <threshold>.
If float, interpreted as proportion of all features.
To disable <threshold> and only select based on <max_features>,
set <threshold> to -np.inf.
cv : int, cross-validation generator, iterable or "prefit"
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to disable cross-validation and train single estimator
on whole dataset (default).
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
- "prefit" string constant.
If "prefit" is passed, it is assumed that <estimator> has been
fitted already and <fit> function will raise error.
Attributes
----------
estimator_ : list of fitted estimators, or single fitted estimator
If <cv> is 'prefit'. If <cv> is None, return single estimator.
Otherwise return list of fitted estimators, length (n_folds, ).
feature_importances_ : Series of shape (n_features, )
Feature importances, extracted from estimator(s)
threshold_ : float
The threshold value used for feature selection
max_features_ : int
Maximum number of features for feature selection
use_cols_ : list of str
Columns to select
"""
def __init__(self, estimator, cv=None, threshold=None, max_features=None):
self.estimator = estimator
self.threshold = threshold
self.max_features = max_features
self.cv = cv
def fit(self, X, y, groups=None):
if self.cv is 'prefit':
raise NotFittedError("Since 'cv=prefit', call transform directly")
elif self.cv is None:
self.estimator_ = clone(self.estimator).fit(X, y)
else:
self.estimator_ = []
cv = check_cv(self.cv, y, is_classifier(self.estimator_))
for trn, _ in cv.split(X, y, groups):
X_trn, y_trn = X.iloc[trn], y.iloc[trn]
estimator = clone(self.estimator).fit(X_trn, y_trn)
self.estimator_.append(estimator)
return self
@property
def feature_importances_(self):
imps = []
if self.cv is 'prefit':
estimators = [self.estimator]
elif self.cv is None:
estimators = [self.estimator_]
else:
estimators = self.estimator_
for estimator in estimators:
imp = get_importance(estimator)
imps.append(imp)
return pd.concat(imps, axis=1).mean(axis=1)
def get_features(self):
imp = self.feature_importances_
self.threshold_ = _check_threshold(imp, self.threshold)
threshold_mask = (imp >= self.threshold_)
self.max_features_ = _check_max_features(imp, self.max_features)
ranking_mask = (imp.rank(ascending=False) <= self.max_features_)
use_cols = imp.index[threshold_mask & ranking_mask]
return list(use_cols)
def _check_max_features(importances, max_features):
"""Interpret the max_features value"""
n_features = len(importances)
if max_features is None:
max_features = n_features
elif isinstance(max_features, int):
max_features = min(n_features, max_features)
elif isinstance(max_features, float):
max_features = int(n_features * max_features)
return max_features
def _check_threshold(importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
threshold = -np.inf
elif isinstance(threshold, str):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
| 30.440415 | 79 | 0.637787 | 4,080 | 0.694468 | 0 | 0 | 431 | 0.073362 | 0 | 0 | 2,656 | 0.452085 |
9a0a7b6a486f4199dd2e8181f3e83788c1d07d18 | 1,875 | py | Python | trainer.py | jinxixiang/PC-TMB | c6f2fc62629c7f026865774cdfb9d826464397ea | [
"MIT"
]
| null | null | null | trainer.py | jinxixiang/PC-TMB | c6f2fc62629c7f026865774cdfb9d826464397ea | [
"MIT"
]
| null | null | null | trainer.py | jinxixiang/PC-TMB | c6f2fc62629c7f026865774cdfb9d826464397ea | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
import torch_optimizer as optim
import pandas as pd
# customized libs
import criterions
import models
import datasets
def get_model(conf):
net = getattr(models, conf.Model.base)
return net(**conf.Model.params)
def get_loss(conf):
conf_loss = conf.Loss.base_loss
assert hasattr(nn, conf_loss.name) or hasattr(criterions, conf_loss.name)
loss = None
if hasattr(nn, conf_loss.name):
loss = getattr(nn, conf_loss.name)
elif hasattr(criterions, conf_loss.name):
loss = getattr(criterions, conf_loss.name)
if len(conf_loss.weight) > 0:
weight = torch.Tensor(conf_loss.weight)
conf_loss["weight"] = weight
return loss(**conf_loss.params)
def get_optimizer(conf):
conf_optim = conf.Optimizer
name = conf_optim.optimizer.name
if hasattr(torch.optim, name):
optimizer_cls = getattr(torch.optim, name)
else:
optimizer_cls = getattr(optim, name)
if hasattr(conf_optim, "lr_scheduler"):
scheduler_cls = getattr(torch.optim.lr_scheduler, conf_optim.lr_scheduler.name)
else:
scheduler_cls = None
return optimizer_cls, scheduler_cls
def get_dataset(conf, kfold, mode='train'):
folds_csv = pd.read_csv(conf.General.folds)
if conf.General.cross_validation:
if mode == 'train':
data_idx = folds_csv[folds_csv['fold'] != kfold].index
else:
data_idx = folds_csv[folds_csv['fold'] == kfold].index
else:
data_idx = folds_csv[folds_csv['fold'] == mode].index
name = conf.Data.dataset.name
dataset_cls = getattr(datasets, name)
dataset_ = dataset_cls(folds_csv.loc[data_idx].reset_index(drop=True),
folds_csv.loc[data_idx].reset_index(drop=True)[conf.General.target_col],
conf)
return dataset_ | 29.296875 | 99 | 0.670933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.037867 |
9a0b750755a4f2eb69f71eb1f7890678edaaee12 | 1,733 | py | Python | falmer/search/queries.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | [
"MIT"
]
| 2 | 2017-04-27T19:35:59.000Z | 2017-06-13T16:19:33.000Z | falmer/search/queries.py | sussexstudent/falmer | ae735bd9d6177002c3d986e5c19a78102233308f | [
"MIT"
]
| 975 | 2017-04-13T11:31:07.000Z | 2022-02-10T07:46:18.000Z | falmer/search/queries.py | sussexstudent/services-api | ae735bd9d6177002c3d986e5c19a78102233308f | [
"MIT"
]
| 3 | 2018-05-09T06:42:25.000Z | 2020-12-10T18:29:30.000Z | import graphene
from fuzzywuzzy import process
from falmer.search.types import SearchQuery
from falmer.search.utils import get_falmer_results_for_term, get_msl_results_for_term, \
SearchTermResponseData
def get_item_id(item):
model = item.__class__.__name__ if hasattr(item, '__class__') else 'MSL'
if model == 'Page':
model = 'PageResult'
id = item.pk if hasattr(item, 'pk') else item.uuid
return f'{model}_{id}'
def get_item_title(item):
if hasattr(item, 'title'):
return item.title
if hasattr(item, 'name'):
return item.name
return ''
class Query(graphene.ObjectType):
search = graphene.Field(SearchQuery, query=graphene.String())
def resolve_search(self, info, query):
falmer_results = get_falmer_results_for_term(query)
msl_results = get_msl_results_for_term(query)
all_unsorted = falmer_results.content \
+ falmer_results.groups \
+ falmer_results.events \
+ msl_results.pages \
+ msl_results.news
title_map = {}
for item in all_unsorted:
title_map[get_item_title(item)] = get_item_id(item)
try:
fuzz_sorted = process.extract(query, title_map.keys(), limit=15)
top = [title_map[fuzz_result[0]] for fuzz_result in fuzz_sorted]
except RuntimeError:
top = []
results = SearchTermResponseData(
content=falmer_results.content,
events=falmer_results.events,
groups=falmer_results.groups,
pages=msl_results.pages,
news=msl_results.news,
top=top,
)
return results
| 28.883333 | 88 | 0.623774 | 1,129 | 0.651471 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.039238 |
9a0ef79b4f00de681e34f8ae67dfe78a084e7151 | 700 | py | Python | SYMBOLS/heart.py | charansaim1819/Python_Patterns | 02e636855003346ec84c3d69f2be174dc9e9e3cb | [
"MIT"
]
| null | null | null | SYMBOLS/heart.py | charansaim1819/Python_Patterns | 02e636855003346ec84c3d69f2be174dc9e9e3cb | [
"MIT"
]
| null | null | null | SYMBOLS/heart.py | charansaim1819/Python_Patterns | 02e636855003346ec84c3d69f2be174dc9e9e3cb | [
"MIT"
]
| null | null | null | #Shape of heart:
def for_heart():
"""printing shape of'heart' using for loop"""
for row in range(6):
for col in range(7):
if row-col==2 or row+col==8 or col%3!=0 and row==0 or col%3==0 and row==1:
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_heart():
"""printing shape of'heart' using while loop"""
i=0
while i<6:
j=0
while j<7:
if i-j==2 or i+j==8 or j%3!=0 and i==0 or j%3==0 and i==1:
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
print()
i+=1
| 24.137931 | 87 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.192857 |
9a0f75acc0d453f223e437b74a0cfe99d7909068 | 338 | py | Python | test_core.py | DominikPutz/lecture-spring-2021 | cce0970e261d45cbc16b3955d0659ca295ed8fc2 | [
"Apache-2.0"
]
| null | null | null | test_core.py | DominikPutz/lecture-spring-2021 | cce0970e261d45cbc16b3955d0659ca295ed8fc2 | [
"Apache-2.0"
]
| null | null | null | test_core.py | DominikPutz/lecture-spring-2021 | cce0970e261d45cbc16b3955d0659ca295ed8fc2 | [
"Apache-2.0"
]
| 3 | 2021-03-23T14:48:38.000Z | 2022-01-13T09:45:08.000Z | from core import add
from core import sub
def test_add():
"""Check that `add()` works as expected"""
assert add(2, 3) == 5
def test_add_z():
"""Check that `add()` works as expected"""
assert add(2, 3, 1) == 6
def test_sub():
"""Check that `sub()` works as expected"""
assert sub(3, 1) == 2
| 16.9 | 46 | 0.553254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.372781 |
9a1128153dbcf8a364098445381dc767e17a1621 | 73 | py | Python | setup.py | AnnaUstiuzhanina/flake8_extension | 4d3c4a7ac6b8af4d0ed62bbe42c897edabe93383 | [
"MIT"
]
| null | null | null | setup.py | AnnaUstiuzhanina/flake8_extension | 4d3c4a7ac6b8af4d0ed62bbe42c897edabe93383 | [
"MIT"
]
| null | null | null | setup.py | AnnaUstiuzhanina/flake8_extension | 4d3c4a7ac6b8af4d0ed62bbe42c897edabe93383 | [
"MIT"
]
| null | null | null | from __future__ import annotations
from setuptools import setup
setup()
| 14.6 | 34 | 0.835616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9a11a05b881b07a6c93ac169600004f78ada2754 | 434 | py | Python | exercicios/Lista5/Q9.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
]
| null | null | null | exercicios/Lista5/Q9.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
]
| null | null | null | exercicios/Lista5/Q9.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
]
| null | null | null | #9. Façaumam função que receba a altura e o raio de um cilindro circular e retorne o volume
#do cilindro. O volume de um cilindro circular é calculado por meio da seguinte fórmula:
#V =pi*raio^2 x altura, onde pi = 3.141592.
def volCilindro(raio,altura):
return 3.1415926535*pow(raio,2)*altura
r=float(input("Informe o raio do cilindro: "))
alt=float(input("Informe a altura do cilindro: "))
volume=volCilindro(3,2)
print(volume) | 43.4 | 91 | 0.751152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.658314 |
9a122e1fac70741e43cd706c1bfea367874d0fa7 | 1,714 | py | Python | sachima/publish.py | gitter-badger/Sachima | 76547fb6a21f1fea597994e6ee02c5db080d1e7a | [
"MIT"
]
| null | null | null | sachima/publish.py | gitter-badger/Sachima | 76547fb6a21f1fea597994e6ee02c5db080d1e7a | [
"MIT"
]
| null | null | null | sachima/publish.py | gitter-badger/Sachima | 76547fb6a21f1fea597994e6ee02c5db080d1e7a | [
"MIT"
]
| null | null | null | import requests
from bs4 import BeautifulSoup
from sachima import conf
class Publisher(object):
@classmethod
def get_csrf_token(self, html):
soup = BeautifulSoup(html, "html.parser")
csrf = soup.find(id="csrf_token").attrs["value"]
return csrf
@classmethod
def to_superset(self, name, type_, param):
addr = conf.get("SUPERSET_WEBSERVER_ADDRESS")
port = conf.get("SUPERSET_WEBSERVER_PORT")
user = conf.get("SUPERSET_USERNAME")
pwd = conf.get("SUPERSET_PASSWORD")
bp_post = conf.get("SUPERSET_API_TABLE_BP")
if addr and port:
url = ":".join([addr.rstrip("/"), str(port)])
with requests.session() as s:
# 登陆
r = s.get(url + "/login/")
login_data = dict(
username=user,
password=pwd,
csrf_token=self.get_csrf_token(r.text),
)
r = s.post(url + "/login/", data=login_data)
# 调用接口
if r.url.endswith("welcome"):
r = s.post(
url + bp_post,
headers={
"Content-Type": "application/json; charset=utf-8",
"X-CSRFToken": self.get_csrf_token(r.text),
},
json={
"slice_name": name,
"api": type_,
"params": param,
},
)
print(r.text)
print("publish service to superset")
else:
pass
| 32.961538 | 78 | 0.449242 | 1,651 | 0.956547 | 0 | 0 | 1,616 | 0.936269 | 0 | 0 | 315 | 0.182503 |
9a13bf9dda86cde96d1e704297f9ca1d15b1b6aa | 3,254 | pyw | Python | src/mediator/Main.pyw | fuqinshen/Python-- | aaa5230354258e1bba761e483c8b9fb6be00402a | [
"MIT"
]
| 31 | 2018-10-19T15:28:36.000Z | 2022-02-14T03:01:25.000Z | src/mediator/Main.pyw | fuqinshen/Python-- | aaa5230354258e1bba761e483c8b9fb6be00402a | [
"MIT"
]
| null | null | null | src/mediator/Main.pyw | fuqinshen/Python-- | aaa5230354258e1bba761e483c8b9fb6be00402a | [
"MIT"
]
| 10 | 2019-01-10T04:02:12.000Z | 2021-11-17T01:52:15.000Z | import tkinter
class Main(tkinter.Frame):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.pack()
self.checkValue = tkinter.StringVar()
self.checkGuest = tkinter.Radiobutton(self, text="Guest", variable=self.checkValue, value="Guest",
anchor=tkinter.W)
self.checkLogin = tkinter.Radiobutton(self, text="Login", variable=self.checkValue, value="Login",
anchor=tkinter.W)
usernameLabel = tkinter.Label(self, text="Username:", anchor=tkinter.W, underline=0)
self.textUser = tkinter.Entry(self, width=10, state="disable")
passwordLabel = tkinter.Label(self, text="Password:", anchor=tkinter.W, underline=0)
self.textPassword = tkinter.Entry(self, width=10, show='*', state="disable")
self.buttonOk = tkinter.Button(self, text="OK", state="normal")
self.buttonCancel = tkinter.Button(self, text="Cancel", command=self.quit)
self.checkGuest.select()
self.checkLogin.deselect()
self.checkGuest.grid(row=0, column=0, padx=2, pady=2,
sticky=tkinter.W)
self.checkLogin.grid(row=0, column=1, padx=2, pady=2,
sticky=tkinter.EW)
usernameLabel.grid(row=1, column=0, padx=2, pady=2,
sticky=tkinter.W)
self.textUser.grid(row=1, column=1, padx=2, pady=2,
sticky=tkinter.EW)
passwordLabel.grid(row=2, column=0, padx=2, pady=2,
sticky=tkinter.W)
self.textPassword.grid(row=2, column=1, padx=2, pady=2,
sticky=tkinter.EW)
self.buttonOk.grid(row=3, column=0, padx=2, pady=2,
sticky=tkinter.EW)
self.buttonCancel.grid(row=3, column=1, padx=2, pady=2,
sticky=tkinter.EW)
self.checkGuest.focus_set()
self.checkGuest.bind("<Button-1>", self.checkChange)
self.checkLogin.bind("<Button-1>", self.checkChange)
self.textUser.bind("<Key>", self.checkChange)
self.textPassword.bind("<Key>", self.checkChange)
def checkChange(self, event):
if self.checkValue == "Guest":
self.textUser["state"] = "disable"
self.textPassword["state"] = "disable"
self.buttonOk["state"] = "normal"
else:
self.textUser["state"] = "normal"
self.userpassChanged()
def userpassChanged(self):
if len(self.textUser.get()) > 0:
self.textPassword["state"] = "normal"
if len(self.textPassword.get()) > 0:
self.buttonOk["state"] = "normal"
else:
self.buttonOk["state"] = "disable"
else:
self.textPassword["state"] = "disable"
self.buttonOk["state"] = "disable"
def quit(self, event=None):
self.parent.destroy()
if __name__ == '__main__':
application = tkinter.Tk()
application.title("Mediator Sample")
window = Main(application)
application.protocol("WM_DELETE_WINDOW", window.quit)
application.mainloop()
| 39.682927 | 106 | 0.567609 | 3,019 | 0.927781 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.098648 |
9a14a2d004a0836d3daffc7ee2ad09d95986fb4d | 2,190 | py | Python | runtests.py | ojii/django-statictemplate | 73a541b19ff39e92b02de5d2ee74e4df7d486d81 | [
"BSD-3-Clause"
]
| 4 | 2015-09-28T10:06:45.000Z | 2019-09-20T05:53:03.000Z | runtests.py | ojii/django-statictemplate | 73a541b19ff39e92b02de5d2ee74e4df7d486d81 | [
"BSD-3-Clause"
]
| 8 | 2015-06-15T13:06:43.000Z | 2018-12-23T13:37:20.000Z | runtests.py | ojii/django-statictemplate | 73a541b19ff39e92b02de5d2ee74e4df7d486d81 | [
"BSD-3-Clause"
]
| 2 | 2015-09-23T05:07:00.000Z | 2015-10-20T15:43:19.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
urlpatterns = [
]
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'statictemplate',
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
},
LANGUAGES=(
('en-us', 'English'),
('it', 'Italian'),
),
ROOT_URLCONF='runtests',
SITE_ID=1,
MIDDLEWARE_CLASSES=[
'django.middleware.http.ConditionalGetMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
],
)
def runtests():
import django
from django.conf import settings
DEFAULT_SETTINGS['TEMPLATES'] = [{
'NAME': 'django',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
}
}]
# Compatibility with Django 1.7's stricter initialization
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
if hasattr(django, 'setup'):
django.setup()
from django.test.runner import DiscoverRunner
test_args = ['statictemplate']
failures = DiscoverRunner(
verbosity=1, interactive=True, failfast=False
).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests()
| 28.815789 | 70 | 0.622374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,111 | 0.507306 |
9a1583710b1d1ad4cc13f28020664d7f22387e1e | 585 | py | Python | Courses/YandexAlgo/1/petya_the_inventor.py | searayeah/sublime-snippets | deff53a06948691cd5e5d7dcfa85515ddd8fab0b | [
"MIT"
]
| null | null | null | Courses/YandexAlgo/1/petya_the_inventor.py | searayeah/sublime-snippets | deff53a06948691cd5e5d7dcfa85515ddd8fab0b | [
"MIT"
]
| null | null | null | Courses/YandexAlgo/1/petya_the_inventor.py | searayeah/sublime-snippets | deff53a06948691cd5e5d7dcfa85515ddd8fab0b | [
"MIT"
]
| null | null | null | x = input()
z = input()
splitter = [x[i:] for i in range(len(x))]
found_splitter = False
next_z = ""
for i in range(1, len(x) + 1):
if z[:i] in splitter:
found_splitter = True
next_z = z[i:]
if next_z[: len(x)] == x:
break
if i == len(z):
break
if next_z == "":
if found_splitter is False:
print(z)
else:
if found_splitter is True:
while True:
if next_z[0 : len(x)] == x:
next_z = next_z.replace(x, "", 1)
else:
print(next_z)
break
| 19.5 | 49 | 0.471795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.010256 |
9a163185c3befcd4de02a4c3e143213f59c12c77 | 117 | py | Python | first_request.py | sgriffith3/2021_05_10_pyna | d732e1dd0fa03f1cef8f72fc9dcc09ec947f31a5 | [
"MIT"
]
| null | null | null | first_request.py | sgriffith3/2021_05_10_pyna | d732e1dd0fa03f1cef8f72fc9dcc09ec947f31a5 | [
"MIT"
]
| null | null | null | first_request.py | sgriffith3/2021_05_10_pyna | d732e1dd0fa03f1cef8f72fc9dcc09ec947f31a5 | [
"MIT"
]
| null | null | null | import urllib.request
url = "https://google.com"
data = urllib.request.urlopen(url)
print(data)
print(data.read())
| 14.625 | 34 | 0.726496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.17094 |
9a1676b9866c375100521ac48277fdcc219264ce | 1,592 | py | Python | datawinners/blue/urls.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
]
| 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/blue/urls.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
]
| null | null | null | datawinners/blue/urls.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
]
| null | null | null | from django.conf.urls.defaults import patterns, url
from datawinners.blue import view
from datawinners.blue.view import new_xform_submission_post, edit_xform_submission_post, get_attachment, attachment_download, guest_survey, public_survey
from datawinners.blue.view import ProjectUpload, ProjectUpdate
from datawinners.blue.view import new_xform_submission_get
from datawinners.project.views.submission_views import edit_xform_submission_get
urlpatterns = patterns('',
url(r'^guest_survey/(?P<link_uid>.+?)/$', guest_survey, name='guest_survey'),
url(r'^survey/(?P<org_id>.+?)/(?P<anonymous_link_id>.+?)/*$', public_survey, name='public_survey'),
url(r'^xlsform/upload/$', ProjectUpload.as_view(), name="import_project"),
url(r'^xlsform/download/$', view.project_download),
url(r'^xlsform/upload/update/(?P<project_id>\w+?)/$', ProjectUpdate.as_view(), name="update_project"),
url(r'^xlsform/(?P<project_id>.+?)/web_submission/(?P<survey_response_id>[^\\/]+?)/$', edit_xform_submission_get, name="edit_xform_submission"),
url(r'^xlsform/(?P<project_id>\w+?)/web_submission/$', new_xform_submission_get, name="xform_web_questionnaire"),
url(r'^xlsform/web_submission/(?P<survey_response_id>.+?)/$', edit_xform_submission_post, name="update_web_submission"),
url(r'^xlsform/web_submission/$', new_xform_submission_post, name="new_web_submission"),
url(r'^attachment/(?P<document_id>.+?)/(?P<attachment_name>[^\\/]+?)/$', get_attachment),
url(r'^download/attachment/(?P<document_id>.+?)/(?P<attachment_name>[^\\/]+?)/$', attachment_download)
)
| 61.230769 | 153 | 0.741834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.435302 |
9a16fe83f8b00c1ae4b19a89510efa6538193e44 | 95 | py | Python | test2.py | marionettenspieler/pyneta | 56a2dba736daf57464b06978c80383787a736ced | [
"Apache-2.0"
]
| null | null | null | test2.py | marionettenspieler/pyneta | 56a2dba736daf57464b06978c80383787a736ced | [
"Apache-2.0"
]
| null | null | null | test2.py | marionettenspieler/pyneta | 56a2dba736daf57464b06978c80383787a736ced | [
"Apache-2.0"
]
| null | null | null | print('hello man')
print('hello man')
print('hello man')
print('hello man')
print('hello man')
| 15.833333 | 18 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.578947 |
9a1837e6b67fec245ab3af4f52d7d449ca21cff5 | 4,013 | py | Python | nvtabular/ds_writer.py | benfred/NVTabular | 5ab6d557868ac01eda26e9725a1a6e5bf7eda007 | [
"Apache-2.0"
]
| null | null | null | nvtabular/ds_writer.py | benfred/NVTabular | 5ab6d557868ac01eda26e9725a1a6e5bf7eda007 | [
"Apache-2.0"
]
| null | null | null | nvtabular/ds_writer.py | benfred/NVTabular | 5ab6d557868ac01eda26e9725a1a6e5bf7eda007 | [
"Apache-2.0"
]
| null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import cudf
import numpy as np
import pyarrow.parquet as pq
try:
import cupy as cp
except ImportError:
import numpy as cp
class FileIterator:
def __init__(self, path, nfiles, shuffle=True, **kwargs):
self.path = path
self.nfiles = nfiles
self.shuffle = shuffle
self.ind = 0
self.inds = np.arange(self.nfiles)
if self.shuffle:
np.random.shuffle(self.inds)
def __iter__(self):
self.ind = 0
self.inds = np.arange(self.nfiles)
if self.shuffle:
np.random.shuffle(self.inds)
return self
def __next__(self):
if self.ind >= self.nfiles:
raise StopIteration
self.ind += 1
# if self.name, return that naming convention.
return "%s/ds_part.%d.parquet" % (self.path, self.ind - 1)
class DatasetWriter:
def __init__(self, path, nfiles=1, **kwargs):
self.path = path
self.nfiles = nfiles
self.writers = {fn: None for fn in FileIterator(path, nfiles)}
self.shared_meta_path = str(path) + "/_metadata"
self.metadata = None
self.new_metadata = {fn: [] for fn in FileIterator(path, nfiles)}
# Check for _metadata
metafile = glob.glob(self.shared_meta_path)
if metafile:
self.metadata = pq.ParquetDataset(metafile[0]).metadata
def write(self, gdf, shuffle=True):
# Shuffle the dataframe
gdf_size = len(gdf)
if shuffle:
sort_key = "__sort_index__"
arr = cp.arange(gdf_size)
cp.random.shuffle(arr)
gdf[sort_key] = cudf.Series(arr)
gdf = gdf.sort_values(sort_key).drop(columns=[sort_key])
# Write to
chunk_size = int(gdf_size / self.nfiles)
for i, fn in enumerate(FileIterator(self.path, self.nfiles)):
s1 = i * chunk_size
s2 = (i + 1) * chunk_size
if i == (self.nfiles - 1):
s2 = gdf_size
chunk = gdf[s1:s2]
pa_table = chunk.to_arrow()
if self.writers[fn] is None:
self.writers[fn] = pq.ParquetWriter(
fn, pa_table.schema, metadata_collector=self.new_metadata[fn],
)
self.writers[fn].write_table(pa_table)
def write_metadata(self):
self.close_writers() # Writers must be closed to get metadata
fns = [fn for fn in FileIterator(self.path, self.nfiles, shuffle=False)]
if self.metadata is not None:
_meta = self.metadata
i_start = 0
else:
_meta = self.new_metadata[fns[0]]
if _meta:
_meta = _meta[0]
i_start = 1
for i in range(i_start, len(fns)):
_meta_new = self.new_metadata[fns[i]]
if _meta_new:
_meta.append_row_groups(_meta_new[0])
with open(self.shared_meta_path, "wb") as fil:
_meta.write_metadata_file(fil)
self.metadata = _meta
return
def close_writers(self):
for fn, writer in self.writers.items():
if writer is not None:
writer.close()
# Set row-group file paths
self.new_metadata[fn][0].set_file_path(os.path.basename(fn))
writer = None
def __del__(self):
self.close_writers()
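# Minimal usage sketch (not part of the original module): writes a shuffled
# cuDF frame out as `nfiles` parquet parts plus a shared _metadata file;
# assumes a GPU environment with cudf available, and the paths are illustrative.
if __name__ == "__main__":
    os.makedirs("/tmp/ds_out", exist_ok=True)  # the writer expects an existing directory
    gdf = cudf.DataFrame({"a": list(range(100)), "b": list(range(100))})
    writer = DatasetWriter("/tmp/ds_out", nfiles=4)
    writer.write(gdf, shuffle=True)
    writer.write_metadata()  # closes the part writers and emits /tmp/ds_out/_metadata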
| 32.104 | 82 | 0.595315 | 3,260 | 0.81236 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.198854 |
9a18608a6d3310b926afa6ca71ff25504d52035f | 481 | py | Python | example/my_hook.py | Globidev/github-docker-hook | 716de2f79ca30221edd2b70f3f7c85e5d033bae9 | [
"MIT"
]
| 2 | 2015-09-24T07:38:07.000Z | 2015-11-05T18:33:43.000Z | example/my_hook.py | Globidev/github-docker-hook | 716de2f79ca30221edd2b70f3f7c85e5d033bae9 | [
"MIT"
]
| 2 | 2015-11-04T17:34:14.000Z | 2015-11-09T02:05:31.000Z | example/my_hook.py | Globidev/github-docker-hook | 716de2f79ca30221edd2b70f3f7c85e5d033bae9 | [
"MIT"
]
| null | null | null | ROUTE = '/push'
PORT = 4242
IMAGE_NAME = 'globidocker/github-hook'
import docker
cli = docker.Client()
from lib.git import clone_tmp
def on_push(data, logger):
url = data['repository']['html_url']
logger.info('Cloning repository: "{}"...'.format(url))
with clone_tmp(url) as repo:
logger.info('Building image...')
cli.build(repo.path, IMAGE_NAME)
logger.info('Pushing image...')
cli.push(IMAGE_NAME)
logger.info('done')
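# Local smoke-test sketch with a hypothetical payload mirroring the shape of
# GitHub's push webhook (the hook server normally supplies `data` and `logger`):
#
#   import logging
#   on_push({'repository': {'html_url': 'https://github.com/Globidev/github-docker-hook'}},
#           logging.getLogger('my_hook'))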
| 19.24 | 58 | 0.634096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.261954 |
9a1861ac2df97b1bcfbdb3654e5d9c31f32e9e49 | 12,403 | py | Python | scripts/TestSuite/run_tests.py | ghorwin/MasterSim | 281b71e228435ca8fa02319bf2ce86b66b8b2b45 | [
"BSD-3-Clause"
]
| 5 | 2021-11-17T07:12:54.000Z | 2022-03-16T15:06:39.000Z | scripts/TestSuite/run_tests.py | ghorwin/MasterSim | 281b71e228435ca8fa02319bf2ce86b66b8b2b45 | [
"BSD-3-Clause"
]
| 25 | 2021-09-09T07:39:13.000Z | 2022-01-23T13:00:19.000Z | scripts/TestSuite/run_tests.py | ghorwin/MasterSim | 281b71e228435ca8fa02319bf2ce86b66b8b2b45 | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python3
# Solver test suite runner script, used for
# * regression tests (default)
# * test-init runs (with --test-init option)
# * performance evaluation (with --performance option)
#
# 1. Regression tests (the default)
# - runs set of projects and compares physical results and solver stats
# - meant to be run with either sequential or parallel solver
# - performance is monitored, but not so important (very short tests!)
# - expects jobs to have reference result directory, otherwise warning is issued
#   and simulation is skipped (with --run-all option all simulations are done even without
# reference result dirs)
# - result of script:
# for each job show old/new stats and metrics
# show summary table with timings for all successful jobs
#
# 2. Initialization tests
# - checks if solver can initialize set of project files
# - script parses directory structure, generates list of test-init jobs
# and executes test initialization
# - result of script:
# for each job result status and time needed for test init (only for information)
#
# 3. Performance tests
# - collects list of jobs, runs each job 3 times and stores timings for all cases
# - result of script:
#   for each job print individual timings and best evaluation time in a table
#
# License:
# BSD License
#
# Authors:
# Andreas Nicolai <[email protected]>
#
# Syntax:
# > python run_tests.py --path <path/to/testsuite> --solver <path/to/solver/binary> --extension <project file extension>
#
# Example:
# > python run_tests.py --path ../../data/tests --solver ./DelphinSolver --extension d6p
# > python run_tests.py -p ../../data/tests -s ./DelphinSolver -e d6p
#
# Returns:
# 0 - if all tests could be simulated successfully and if all solver results/metrics match those of reference results
# 1 - if anything failed
#
# Note: if run with --run-all option, test cases without reference results will always be accepted.
#
import subprocess # import the module for calling external programs (creating subprocesses)
import sys
import os
import os.path
import shutil
import filecmp # for result file comparison
import argparse
import platform # to detect current OS
from colorama import *
from SolverStats import *
from print_funcs import *
from config import USE_COLORS
def configCommandLineArguments():
"""
This method sets the available input parameters and parses them.
Returns a configured argparse.ArgumentParser object.
"""
parser = argparse.ArgumentParser("run_tests.py")
parser.description = '''
Runs the regression test suite. Can be used for init-tests (--test-init)
or performance evaluation (--performance) as well.'''
parser.add_argument('-p', '--path', dest='path', required=True, type=str,
help='Path to test suite root directory.')
parser.add_argument('-s', '--solver', dest='solver', required=True, type=str,
help='Path to solver binary.')
parser.add_argument('-e', '--extension', dest="extension", required=True, type=str,
help='Project file extension.')
parser.add_argument('--no-colors', dest="no_colors", action='store_true',
help='Disables colored console output.')
parser.add_argument('--test-init', dest="test_init", action='store_true',
help='Enables test-initialization mode (runs solvers with --test-init argument and '
'skips result evaluation).')
parser.add_argument('--performance', dest="performance", action='store_true',
help='Enables performance evaluation mode (runs solvers three times '
'without result evaluation and dumps timings of all cases and best-of-three timings).')
parser.add_argument('--run-all', dest="run_all", action='store_true',
help='If set (in regression test mode), also the test cases without reference results '
'are simulated (can be used to generate reference results for all cases).')
return parser.parse_args()
def checkResults(dir1, dir2, evalTimes):
"""
Compares two result directories for equal contents.
Compared are:
- physical results
- solver counters (/log/summary.txt)
This function uses IBK.SolverStats
Arguments:
* dir1 (reference results) and dir2 (computed results)
* evalTimes is a dictionary with filepath (key) and wall clock time (value),
new entries are always added to the dictionary
Returns: True on success, False on error
"""
try:
# open stat files and compare them
stats1 = SolverStats()
if not stats1.read(dir1 + "/log/summary.txt"):
return False
stats2 = SolverStats()
if not stats2.read(dir2 + "/log/summary.txt"):
return False
if not SolverStats.compareStats(stats1, stats2, []):
printError("Mismatching statistics.")
return False
# compare all result files (d60, tsv), if any reference result files exist
if os.path.exists(dir1 + "/results"):
if not SolverStats.compareResults(dir1 + "/results", dir2 + "/results"):
printError("Mismatching values.")
return False
evalTimes[dir2] = stats2.timers['WallClockTime']
    except Exception as e:
        printError("Error comparing simulation results, error: {}".format(e))
        return False
    return True
def run_performance_evaluation(args, projects):
# we basically do the same as the main script, but this time we run all test cases
# whether they have reference results or not and simply remember the run times
# we store evaluation times in a dictionary, key is the path to the project file,
# value is a list of evaluation times
eval_times = dict()
failed_projects = []
ITERATIONS = 3
for iter in range(ITERATIONS):
for project in projects:
print(project)
path, fname = os.path.split(project)
#print "Path : " + path
#print "Project : " + fname
cmdline = [args.solver, project]
# try to read commandline file
cmdlineFilePath = project + ".cmdline"
if os.path.exists(cmdlineFilePath):
fobj = open(cmdlineFilePath)
cmdlineAddOn = fobj.readline()
del fobj
cmdline.append(cmdlineAddOn)
print("Applying cmdline addon: " + cmdlineAddOn)
try:
# run solver
FNULL = open(os.devnull, 'w')
if platform.system() == "Windows":
cmdline.append("-x")
cmdline.append("--verbosity-level=0")
retcode = subprocess.call(cmdline, creationflags=subprocess.CREATE_NEW_CONSOLE)
else:
retcode = subprocess.call(cmdline, stdout=FNULL, stderr=subprocess.STDOUT)
# check return code
if retcode == 0:
# read summary file
resultsFolder = project[:-(1+len(args.extension))]
# open stat files and compare them
stats1 = SolverStats()
if stats1.read(resultsFolder + "/log/summary.txt"):
                        if project not in eval_times:
eval_times[project] = []
eval_times[project].append(stats1.timers['WallClockTime'])
else:
# mark project as failed
failed_projects.append(project)
# and print error message
printError("Simulation failed, see screenlog file {}".format(os.path.join(os.getcwd(),
resultsFolder+"/log/screenlog.txt" ) ) )
except OSError as e:
printError("Error starting solver executable '{}', error: {}".format(args.solver, e))
exit(1)
print("\nSuccessful projects:\n")
print("{:60s} {}".format("Project path", "Wall clock times [s], last column is min of all runs"))
    filenames = sorted(eval_times.keys())
perfstats = open(os.path.join(args.path, "performance_stats.txt"), 'w')
for filename in filenames:
fname = os.path.basename(filename)
onedir = os.path.join(os.path.basename(os.path.dirname(filename)), os.path.basename(filename))
s = "{:65s}".format(onedir)
        minVal = 1e20
for t in range(len(eval_times[filename])):
duration = eval_times[filename][t]
s = s + (" {:>10.3f}".format(duration))
minVal = min(minVal, duration)
        s = s + ("  {:>10.3f}".format(minVal))
printNotification(s)
perfstats.write(s + '\n')
del perfstats
if len(failed_projects) > 0:
print("\nFailed projects:")
for p in failed_projects:
printError(p)
print("\n")
printError("*** Failure ***")
exit(1)
return 0
# *** main script ***
args = configCommandLineArguments()
if not args.no_colors:
init() # init ANSI code filtering for windows
config.USE_COLORS = True
printNotification("Enabling colored console output")
if args.test_init and args.performance:
printError("Either use --test-init or --performance, but not both together.")
exit(1)
# process all directories under test suite directory
currentOS = platform.system()
compilerID = None
if currentOS == "Linux" :
compilerID = "gcc_linux"
elif currentOS == "Windows" :
compilerID = "VC14_win64"
elif currentOS == "Darwin" :
compilerID = "gcc_mac"
if compilerID is None:
printError("Unknown/unsupported platform")
exit(1)
else:
print("Compiler ID : " + compilerID)
print("Test suite : " + args.path)
print("Solver : " + args.solver)
print("Project file extension : " + args.extension)
# walk all subdirectories (except .svn) within testsuite and collect project file names
projects = []
for root, dirs, files in os.walk(args.path, topdown=False):
for name in files:
if name.endswith('.'+args.extension):
projectFilePath = os.path.join(root, name)
projects.append(projectFilePath)
projects.sort()
print("Number of projects : {}\n".format(len(projects)))
# performance tests?
if args.performance:
res = run_performance_evaluation(args, projects)
exit(res)
failed_projects = []
eval_times = dict() # key - file path to project, value - eval time in [s]
for project in projects:
print(project)
path, fname = os.path.split(project)
#print("Path : " + path)
#print ("Project : " + fname)
# compose path of result folder
resultsFolder = project[:-(1+len(args.extension))]
# remove entire directory with previous results
if os.path.exists(resultsFolder):
shutil.rmtree(resultsFolder)
cmdline = [args.solver, project]
# if in test-init mode, append --test-init to command line
if args.test_init:
cmdline.append("--test-init")
skipResultCheck = True
args.run_all = True
else:
skipResultCheck = False
referenceFolder = resultsFolder + "." + compilerID
if not os.path.exists(referenceFolder):
if not args.run_all:
failed_projects.append(project)
printError("Missing reference data directory '{}'".format(os.path.split(referenceFolder)[1]))
continue
else:
skipResultCheck = True
try:
# run solver
FNULL = open(os.devnull, 'w')
if platform.system() == "Windows":
cmdline.append("-x")
cmdline.append("--verbosity-level=0")
retcode = subprocess.call(cmdline, creationflags=subprocess.CREATE_NEW_CONSOLE)
else:
if args.test_init:
# in test-init mode we want to see the output
retcode = subprocess.call(cmdline)
else:
retcode = subprocess.call(cmdline, stdout=FNULL, stderr=subprocess.STDOUT)
# check return code
if retcode == 0:
# successful run
if not skipResultCheck:
# now check against reference results
if not checkResults(referenceFolder, resultsFolder, eval_times):
                    if project not in failed_projects:
failed_projects.append(project) # mark as failed
printError("Mismatching results.")
else:
# mark project as failed
failed_projects.append(project)
# and print error message
printError("Simulation failed, see screenlog file {}".format(os.path.join(os.getcwd(),
resultsFolder+"/log/screenlog.txt" ) ) )
except OSError as e:
printError("Error starting solver executable '{}', error: {}".format(args.solver, e))
exit(1)
print("\nSuccessful projects:\n")
print("{:80s} {}".format("Project path", "Wall clock time [s]"))
filenames = eval_times.keys()
filenames = sorted(filenames)
for filename in filenames:
fname = os.path.basename(filename)
onedir = os.path.join(os.path.basename(os.path.dirname(filename)), os.path.basename(filename))
printNotification("{:80s} {:>10.3f}".format(onedir, eval_times[filename]))
if len(failed_projects) > 0:
print("\nFailed projects:")
for p in failed_projects:
printError(p)
print("\n")
printError("*** Failure ***")
exit(1)
printNotification("*** Success ***")
exit(0)
| 33.252011 | 120 | 0.689511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,021 | 0.485447 |
9a1b2360ed8259c0fd8d46c53ed3e0ed659879cf | 4,815 | py | Python | sigr/Anechoic.py | JerameyATyler/sigR | 25c895648c5f90f57baa95f2cdd097cd33259a07 | [
"MIT"
]
| null | null | null | sigr/Anechoic.py | JerameyATyler/sigR | 25c895648c5f90f57baa95f2cdd097cd33259a07 | [
"MIT"
]
| null | null | null | sigr/Anechoic.py | JerameyATyler/sigR | 25c895648c5f90f57baa95f2cdd097cd33259a07 | [
"MIT"
]
| null | null | null | from torch.utils.data import Dataset
class Anechoic(Dataset):
def __init__(self, root, ttv, download=False, transform=None, target_transform=None, columns=None,
output_path=None):
from pathlib import Path
import os
ttvs = ['train', 'test', 'validate']
assert ttv in ttvs, f'Acceptable values for ttv are {", ".join(ttvs)}'
self.ttv = ttv
self.transform = transform
self.target_transform = target_transform
self.root = Path(root).__str__()
self.data_path = (Path(self.root) / self.ttv).__str__()
self.label_path = f'{self.data_path}_recipe'
self.output_path = output_path
if download:
self.download()
else:
assert os.path.isdir(self.root), f'Root directory {self.root} must exist if download=False'
assert os.path.isdir(self.data_path), f'Data directory {self.data_path} must exist if download=False'
assert os.path.isdir(self.label_path), f'Label directory {self.label_path} must exist if download=False'
self.labels = self.set_labels(columns)
def download(self):
from pathlib import Path
import requests
import zipfile
import io
import shutil
import os
if not os.path.isdir(self.root):
os.mkdir(self.root)
_download_url = 'https://reflections.speakeasy.services'
print(f'Downloading dataset at {_download_url}/{self.ttv}.zip')
r = requests.get(f'{_download_url}/{self.ttv}.zip', stream=True)
z = zipfile.ZipFile(io.BytesIO(r.content))
print(f'Finished downloading')
if not os.path.isdir(self.data_path):
os.mkdir(self.data_path)
if not os.path.isdir(self.label_path):
os.mkdir(self.label_path)
print('Extracting dataset')
for f in z.namelist():
filename = Path(f).name
if not filename:
continue
source = z.open(f)
if filename.endswith('.zip'):
target = open((Path(self.root) / filename).__str__(), 'wb')
else:
target = open((Path(self.data_path) / filename).__str__(), 'wb')
print(f'\tExtracting file: {filename}')
with source, target:
shutil.copyfileobj(source, target)
assert os.path.isfile(f'{self.label_path}.zip'), f'{self.label_path}.zip missing'
z = zipfile.ZipFile(f'{self.label_path}.zip')
z.extractall(self.label_path)
def set_labels(self, columns):
from data_loader import read_recipe
if columns is not None:
            if not isinstance(columns, list):
columns = [columns]
if 'filepath' not in columns:
columns.append('filepath')
return read_recipe(self.label_path)[columns]
return read_recipe(self.label_path)
def __len__(self):
return self.labels.shape[0]
def __getitem__(self, item):
from pydub import AudioSegment
from pathlib import Path
from utils import audiosegment_to_array
labels = self.labels.iloc[item]
audio = AudioSegment.from_wav((Path(self.data_path) / f"{labels['filepath']}.wav").__str__())
if self.transform:
audio = self.transform(audio)
else:
audio = audiosegment_to_array(audio)
if self.target_transform:
labels = self.target_transform(labels)
return audio, labels
def play_sample(self, item):
from pathlib import Path
from pydub import AudioSegment
from utils import play_audio
from IPython.display import display
import os
filepath = f'{(Path(self.data_path) / self.labels.iloc[item]["filepath"]).__str__()}.wav'
assert os.path.isfile(filepath), f'{filepath} does not exist'
audio = AudioSegment.from_wav(filepath)
return display(play_audio(audio))
def get_ttv(root, download=False, transform=None, target_transform=None, columns=None, batch_size=60):
from torch.utils.data import DataLoader
train = DataLoader(
Anechoic(root, 'train', download=download, transform=transform, target_transform=target_transform,
columns=columns), batch_size=batch_size, shuffle=True)
test = DataLoader(Anechoic(root, 'test', download=download, transform=transform, target_transform=target_transform,
columns=columns), batch_size=batch_size, shuffle=False)
validate = DataLoader(
Anechoic(root, 'validate', download=download, transform=transform, target_transform=target_transform,
columns=columns), batch_size=batch_size, shuffle=True)
return train, test, validate
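# Minimal usage sketch, assuming network access for the download and that a
# target_transform is supplied so the default DataLoader collate can batch the
# pandas label rows (the constant target below is a placeholder; real use may
# also need audio padding/transforms for variable-length clips).
if __name__ == "__main__":
    train, test, validate = get_ttv(
        "./anechoic_data",
        download=True,
        target_transform=lambda labels: 0,  # hypothetical placeholder target
        batch_size=8,
    )
    audio_batch, targets = next(iter(train))
    print(len(targets))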
| 37.038462 | 119 | 0.627414 | 3,972 | 0.824922 | 0 | 0 | 0 | 0 | 0 | 0 | 759 | 0.157632 |
9a1b3e2dbb66fc996ec081ab5ef13e302246dd49 | 1,410 | py | Python | scripts/update_covid_tracking_data.py | TomGoBravo/covid-data-public | 76cdf384f4e6b5088f0a8105a4fabc37c899015c | [
"MIT"
]
| null | null | null | scripts/update_covid_tracking_data.py | TomGoBravo/covid-data-public | 76cdf384f4e6b5088f0a8105a4fabc37c899015c | [
"MIT"
]
| null | null | null | scripts/update_covid_tracking_data.py | TomGoBravo/covid-data-public | 76cdf384f4e6b5088f0a8105a4fabc37c899015c | [
"MIT"
]
| null | null | null | import logging
import datetime
import pathlib
import pytz
import requests
import pandas as pd
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
_logger = logging.getLogger(__name__)
class CovidTrackingDataUpdater(object):
"""Updates the covid tracking data."""
HISTORICAL_STATE_DATA_URL = "http://covidtracking.com/api/states/daily"
COVID_TRACKING_ROOT = DATA_ROOT / "covid-tracking"
@property
def output_path(self) -> pathlib.Path:
return self.COVID_TRACKING_ROOT / "covid_tracking_states.csv"
@property
def version_path(self) -> pathlib.Path:
return self.COVID_TRACKING_ROOT / "version.txt"
@staticmethod
def _stamp():
# String of the current date and time.
# So that we're consistent about how we mark these
pacific = pytz.timezone('US/Pacific')
d = datetime.datetime.now(pacific)
return d.strftime('%A %b %d %I:%M:%S %p %Z')
def update(self):
_logger.info("Updating Covid Tracking data.")
response = requests.get(self.HISTORICAL_STATE_DATA_URL)
data = response.json()
df = pd.DataFrame(data)
df.to_csv(self.output_path, index=False)
version_path = self.version_path
version_path.write_text(f"Updated at {self._stamp()}\n")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
CovidTrackingDataUpdater().update()
| 30 | 75 | 0.685816 | 1,104 | 0.782979 | 0 | 0 | 512 | 0.363121 | 0 | 0 | 342 | 0.242553 |
9a1e2ae93bc1197259db93644405545d0b6670ce | 1,100 | py | Python | src/chatty/auth/auth.py | CITIZENSHIP-CHATTY/backend | 8982a0f3cff8ba2efe6a903bb4ab47f9c6044487 | [
"MIT"
]
| null | null | null | src/chatty/auth/auth.py | CITIZENSHIP-CHATTY/backend | 8982a0f3cff8ba2efe6a903bb4ab47f9c6044487 | [
"MIT"
]
| 4 | 2020-04-19T09:25:46.000Z | 2020-05-07T20:20:04.000Z | src/chatty/auth/auth.py | CITIZENSHIP-CHATTY/backend | 8982a0f3cff8ba2efe6a903bb4ab47f9c6044487 | [
"MIT"
]
| null | null | null | from chatty import utils
from aiohttp import web
from chatty.auth.models import User
async def registration(request):
auth_data = await request.json()
if not await utils.validation_credentials(auth_data):
return web.json_response({"message": "Invalid credentials"}, status=400)
await User.create(auth_data)
return web.json_response({"message": "Registration successfully"}, status=200)
async def login(request):
auth_data = await request.json()
try:
username = auth_data['username']
password = auth_data['password']
except KeyError:
return web.json_response({"message": "Username or password were not provided"}, status=400)
user = await User.get_by_username(username)
if not user:
return web.json_response({"message": "User with such name does not exists"}, status=400)
if not await utils.check_password(password, user['password']):
return web.json_response({'message': "Password incorrect"})
token = await utils.generate_jwt_token(user)
return web.json_response({"token": token}, status=200)
| 32.352941 | 99 | 0.708182 | 0 | 0 | 0 | 0 | 0 | 0 | 1,008 | 0.916364 | 227 | 0.206364 |
9a21fd4ed5ae1c86fb6e590a1edd2f37df8e132c | 1,220 | py | Python | CustomerProfiles/delete-customer-profile.py | adavidw/sample-code-python | e02f8856c11439cebd67d98fb43431cd4b95316e | [
"MIT"
]
| 36 | 2015-11-18T22:35:39.000Z | 2022-03-21T10:13:23.000Z | CustomerProfiles/delete-customer-profile.py | adavidw/sample-code-python | e02f8856c11439cebd67d98fb43431cd4b95316e | [
"MIT"
]
| 23 | 2016-02-02T06:09:16.000Z | 2020-03-06T22:54:55.000Z | CustomerProfiles/delete-customer-profile.py | adavidw/sample-code-python | e02f8856c11439cebd67d98fb43431cd4b95316e | [
"MIT"
]
| 82 | 2015-11-22T11:46:33.000Z | 2022-03-18T02:46:48.000Z | import os, sys
import imp
from authorizenet import apicontractsv1
from authorizenet.apicontrollers import *
constants = imp.load_source('modulename', 'constants.py')
def delete_customer_profile(customerProfileId):
merchantAuth = apicontractsv1.merchantAuthenticationType()
merchantAuth.name = constants.apiLoginId
merchantAuth.transactionKey = constants.transactionKey
deleteCustomerProfile = apicontractsv1.deleteCustomerProfileRequest()
deleteCustomerProfile.merchantAuthentication = merchantAuth
deleteCustomerProfile.customerProfileId = customerProfileId
controller = deleteCustomerProfileController(deleteCustomerProfile)
controller.execute()
response = controller.getresponse()
    if response.messages.resultCode == "Ok":
print("Successfully deleted customer with customer profile id %s" % deleteCustomerProfile.customerProfileId)
else:
print(response.messages.message[0]['text'].text)
print("Failed to delete customer profile with customer profile id %s" % deleteCustomerProfile.customerProfileId)
return response
if os.path.basename(__file__) == os.path.basename(sys.argv[0]):
delete_customer_profile(constants.customerProfileId)
| 38.125 | 120 | 0.788525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.129508 |
9a25b6f7b3f250cb0ca3c95cee4acba5e53203f1 | 3,659 | py | Python | python/xskipper/indexbuilder.py | guykhazma/xskipper | 058712e744e912bd5b22bc337b9d9ff2fc6b1036 | [
"Apache-2.0"
]
| 31 | 2021-01-27T15:03:18.000Z | 2021-12-13T11:09:58.000Z | python/xskipper/indexbuilder.py | guykhazma/xskipper | 058712e744e912bd5b22bc337b9d9ff2fc6b1036 | [
"Apache-2.0"
]
| 20 | 2021-02-01T16:42:17.000Z | 2022-01-26T10:48:59.000Z | python/xskipper/indexbuilder.py | guykhazma/xskipper | 058712e744e912bd5b22bc337b9d9ff2fc6b1036 | [
"Apache-2.0"
]
| 12 | 2021-01-27T14:50:11.000Z | 2021-08-10T22:13:46.000Z | # Copyright 2021 IBM Corp.
# SPDX-License-Identifier: Apache-2.0
from pyspark.sql.dataframe import DataFrame
from py4j.java_collections import MapConverter
class IndexBuilder:
"""
Helper class for building indexes
:param sparkSession: SparkSession object
:param uri: the URI of the dataset / the identifier of the hive table on which the index is defined
:param xskipper: the xskipper instance associated with this IndexBuilder
"""
def __init__(self, spark, uri, xskipper):
self._jindexBuilder = spark._jvm.io.xskipper.index.execution.IndexBuilder(spark._jsparkSession, uri,
xskipper.xskipper)
self.xskipper = xskipper
self.spark = spark
def addMinMaxIndex(self, col, keyMetadata=None):
"""
Adds a MinMax index for the given column
:param col: the column to add the index on
:param keyMetadata: optional key metadata
"""
if keyMetadata:
self._jindexBuilder.addMinMaxIndex(col, keyMetadata)
else:
self._jindexBuilder.addMinMaxIndex(col)
return self
def addValueListIndex(self, col, keyMetadata=None):
"""
Adds a ValueList index on the given column
:param col: the column to add the index on
:param keyMetadata: optional key metadata
"""
if keyMetadata:
self._jindexBuilder.addValueListIndex(col, keyMetadata)
else:
self._jindexBuilder.addValueListIndex(col)
return self
def addBloomFilterIndex(self, col, keyMetadata=None):
"""
Adds a BloomFilter index on the given column
:param col: the column to add the index on
:param keyMetadata: optional key metadata
"""
if keyMetadata:
self._jindexBuilder.addBloomFilterIndex(col, keyMetadata)
else:
self._jindexBuilder.addBloomFilterIndex(col)
return self
def addCustomIndex(self, indexClass, cols, params, keyMetadata=None):
"""
Adds a Custom index on the given columns
        :param indexClass: the custom index implementation to use
        :param cols: a sequence of cols
:param params: a map of index specific parameters
:param keyMetadata: optional key metadata
"""
gateway = self.spark.sparkContext._gateway
jmap = MapConverter().convert(params, gateway._gateway_client)
objCls = gateway.jvm.String
colsArr = gateway.new_array(objCls, len(cols))
for i in range(len(cols)):
colsArr[i] = cols[i]
if keyMetadata:
self._jindexBuilder.addCustomIndex(indexClass, colsArr, jmap, keyMetadata)
else:
self._jindexBuilder.addCustomIndex(indexClass, colsArr, jmap)
return self
def build(self, reader=None):
"""
Builds the index
        :param reader: if the uri in the xskipper instance is not a table identifier,
        a DataFrameReader instance to enable reading the URI as a DataFrame
Note: The reader is assumed to have all of the parameters configured.
`reader.load(Seq(<path>))` will be used by the indexing code to read each
object separately
:return: dataFrame object containing statistics about the build operation
"""
if reader:
return DataFrame(self._jindexBuilder.build(reader._jreader), self.spark._wrapped)
else:
# build for tables
return DataFrame(self._jindexBuilder.build(), self.spark._wrapped)
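# Builder-style usage sketch, assuming a SparkSession with the xskipper jars on
# the classpath and an already-constructed xskipper instance for the dataset URI
# (kept as comments since this module is a library):
#
#   builder = IndexBuilder(spark, "/path/to/dataset", xskipper_instance)
#   stats_df = (builder.addMinMaxIndex("temperature")
#                      .addValueListIndex("city")
#                      .build(spark.read.format("parquet")))
#   stats_df.show()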
| 38.114583 | 108 | 0.625854 | 3,500 | 0.956546 | 0 | 0 | 0 | 0 | 0 | 0 | 1,643 | 0.44903 |
9a25e9fa72dd391d4676f6e0a6bb06f9710db5d6 | 1,837 | py | Python | matilda/data_pipeline/data_streaming/consumer.py | AlainDaccache/Quantropy | 6cfa06ed2b764471382ebf94d40af867f10433bb | [
"MIT"
]
| 45 | 2021-01-28T04:12:21.000Z | 2022-02-24T13:15:50.000Z | matilda/data_pipeline/data_streaming/consumer.py | AlainDaccache/Quantropy | 6cfa06ed2b764471382ebf94d40af867f10433bb | [
"MIT"
]
| 32 | 2021-03-02T18:45:16.000Z | 2022-03-12T00:53:10.000Z | matilda/data_pipeline/data_streaming/consumer.py | AlainDaccache/Quantropy | 6cfa06ed2b764471382ebf94d40af867f10433bb | [
"MIT"
]
| 10 | 2020-12-25T15:02:40.000Z | 2021-12-30T11:40:15.000Z | from kafka.consumer import KafkaConsumer
from json import loads
from mongoengine import *
from matilda.data_pipeline import object_model
consumer = KafkaConsumer(
'numtest', # kafka topic
bootstrap_servers=['localhost:9092'], # same as our producer
# It handles where the consumer restarts reading after breaking down or being turned off and can be set either
# to earliest or latest. When set to latest, the consumer starts reading at the end of the log.
# When set to earliest, the consumer starts reading at the latest committed offset.
auto_offset_reset='earliest',
enable_auto_commit=True, # makes sure the consumer commits its read offset every interval.
# join a consumer group for dynamic partition assignment and offset commits
# a consumer needs to be part of a consumer group to make the auto commit work.
# otherwise, need to do it manually i.e. consumer.assign([TopicPartition('foobar', 2)]); msg = next(consumer)
group_id='my-group',
# deserialize encoded values
value_deserializer=lambda x: loads(x.decode('utf-8')))
def get_atlas_db_url(username, password, dbname):
return f"mongodb+srv://{username}:{password}@cluster0.ptrie.mongodb.net/{dbname}?retryWrites=true&w=majority&" \
f"ssl=true"
atlas_url = get_atlas_db_url(username='AlainDaccache', password='qwerty98', dbname='matilda-db')
db = connect(host=atlas_url)
# The consumer iterator returns ConsumerRecords, which are simple namedtuples
# that expose basic message attributes: topic, partition, offset, key, and value:
for message in consumer:
message = message.value
print(message)
object_model.Test(number=message['number']).save()
print('{} added to db'.format(message))
# # Then to check whats in it:
# for doc in object_model.Test.objects:
# print(doc._data)
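# Producer counterpart sketch ("same as our producer" above), assuming the
# kafka-python package; JSON-encodes values to match the deserializer used here:
#
# from kafka import KafkaProducer
# from json import dumps
# producer = KafkaProducer(
#     bootstrap_servers=['localhost:9092'],
#     value_serializer=lambda v: dumps(v).encode('utf-8'))
# for n in range(10):
#     producer.send('numtest', value={'number': n})
# producer.flush()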
| 42.72093 | 116 | 0.740338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,155 | 0.628743 |
9a26ea77dac1512349aaac759f21f3e326122e27 | 746 | py | Python | src/graphs/python/bfs/src/bfs.py | djeada/GraphAlgorithms | 0961303ec20430f90053a4efb9074185f96dfddc | [
"MIT"
]
| 2 | 2021-05-31T13:01:33.000Z | 2021-12-20T19:48:18.000Z | src/graphs/python/bfs/src/bfs.py | djeada/GraphAlgorithms | 0961303ec20430f90053a4efb9074185f96dfddc | [
"MIT"
]
| null | null | null | src/graphs/python/bfs/src/bfs.py | djeada/GraphAlgorithms | 0961303ec20430f90053a4efb9074185f96dfddc | [
"MIT"
]
| null | null | null | from graph import Graph
def bfs(graph, source, destination):
if not (graph.contains(source) and graph.contains(destination)):
return float("inf")
distances = dict()
visited = dict()
for vertex in graph.vertices():
distances[vertex] = float("inf")
visited[vertex] = False
queue = [source]
distances[source] = 0
visited[source] = True
while queue:
u = queue.pop(0)
for edge in graph.edges_from_vertex(u):
v = edge.destination
if not visited[v]:
visited[v] = True
distances[v] = distances[u] + edge.distance
queue.append(v)
return distances[destination]
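# Usage sketch with a hypothetical construction API (the Graph interface used
# above only guarantees contains/vertices/edges_from_vertex, with edges exposing
# .destination and .distance):
#
#   g = Graph()
#   g.add_edge("a", "b", 1)  # hypothetical helper
#   g.add_edge("b", "c", 1)
#   print(bfs(g, "a", "c"))  # -> 2 when every edge has distance 1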
| 21.314286 | 69 | 0.548257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013405 |
9a2736448f820e4e81087e8a5353235f998513f8 | 55,584 | py | Python | fhir/resources/tests/test_claim.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
]
| 144 | 2019-05-08T14:24:43.000Z | 2022-03-30T02:37:11.000Z | fhir/resources/tests/test_claim.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
]
| 82 | 2019-05-13T17:43:13.000Z | 2022-03-30T16:45:17.000Z | fhir/resources/tests/test_claim.py | cstoltze/fhir.resources | 52f99738935b7313089d89daf94d73ce7d167c9d | [
"BSD-3-Clause"
]
| 48 | 2019-04-04T14:14:53.000Z | 2022-03-30T06:07:31.000Z | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Claim
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import claim
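# The tests below depend on a pytest fixture named `base_settings`; a minimal
# sketch of its assumed shape (the real fixture lives in the package's conftest):
#
#   import pathlib
#   import pytest
#
#   @pytest.fixture
#   def base_settings():
#       return {"unittest_data_dir": pathlib.Path(__file__).parent / "data"}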
def impl_claim_1(inst):
assert inst.accident.date == fhirtypes.Date.validate("2014-07-09")
assert inst.accident.locationAddress.text == "Grouse Mountain Ski Hill"
assert inst.accident.type.coding[0].code == "SPT"
assert inst.accident.type.coding[0].display == "Sporting Accident"
assert (
inst.accident.type.coding[0].system
== "http://terminology.hl7.org/CodeSystem/v3-ActIncidentCode"
)
assert inst.billablePeriod.end == fhirtypes.DateTime.validate(
"2014-08-16T12:09:24+00:06"
)
assert inst.billablePeriod.start == fhirtypes.DateTime.validate(
"2014-08-15T12:09:24+00:06"
)
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].qualification.coding[0].code == "physician"
assert (
inst.careTeam[0].qualification.coding[0].system
== "http://example.org/fhir/CodeSystem/provider-qualification"
)
assert inst.careTeam[0].responsible is True
assert inst.careTeam[0].role.coding[0].code == "primary"
assert (
inst.careTeam[0].role.coding[0].system
== "http://example.org/fhir/CodeSystem/claim-careteamrole"
)
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "654456"
assert inst.diagnosis[0].packageCode.coding[0].code == "400"
assert inst.diagnosis[0].packageCode.coding[0].display == "Head trauma - concussion"
assert inst.diagnosis[0].packageCode.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/ex-" "diagnosisrelatedgroup"
)
assert inst.diagnosis[0].sequence == 1
assert inst.diagnosis[0].type[0].coding[0].code == "admitting"
assert (
inst.diagnosis[0].type[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-diagnosistype"
)
assert (
inst.enterer.identifier.system
== "http://jurisdiction.org/facilities/HOSP1234/users"
)
assert inst.enterer.identifier.value == "UC1234"
assert inst.facility.identifier.system == "http://jurisdiction.org/facilities"
assert inst.facility.identifier.value == "HOSP1234"
assert inst.id == "960151"
assert inst.identifier[0].system == "http://happyhospital.com/claim"
assert inst.identifier[0].value == "96123451"
assert inst.insurance[0].businessArrangement == "BA987123"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].preAuthRef[0] == "PA2014G56473"
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(125.0)
assert inst.item[0].productOrService.coding[0].code == "exam"
assert (
inst.item[0].productOrService.coding[0].system
== "http://hl7.org/fhir/ex-serviceproduct"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(125.0)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.identifier.system == "http://npid.org/providerid"
assert inst.provider.identifier.value == "NJ12345"
assert inst.status == "active"
assert inst.supportingInfo[0].category.coding[0].code == "employmentimpacted"
assert inst.supportingInfo[0].category.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/claiminformationcatego" "ry"
)
assert inst.supportingInfo[0].sequence == 1
assert inst.supportingInfo[0].timingPeriod.end == fhirtypes.DateTime.validate(
"2014-08-16T12:09:24+00:06"
)
assert inst.supportingInfo[0].timingPeriod.start == fhirtypes.DateTime.validate(
"2014-08-16T12:09:24+00:06"
)
assert inst.supportingInfo[1].category.coding[0].code == "hospitalized"
assert inst.supportingInfo[1].category.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/claiminformationcatego" "ry"
)
assert inst.supportingInfo[1].sequence == 2
assert inst.supportingInfo[1].timingPeriod.end == fhirtypes.DateTime.validate(
"2014-08-16T12:09:24+00:06"
)
assert inst.supportingInfo[1].timingPeriod.start == fhirtypes.DateTime.validate(
"2014-08-15T12:09:24+00:06"
)
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Claim</div>"
)
assert inst.text.status == "generated"
assert inst.total.currency == "USD"
assert float(inst.total.value) == float(125.0)
assert inst.type.coding[0].code == "institutional"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
assert inst.use == "claim"
def test_claim_1(base_settings):
"""No. 1 tests collection for Claim.
Test File: claim-example-institutional-rich.json
"""
filename = (
base_settings["unittest_data_dir"] / "claim-example-institutional-rich.json"
)
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_1(inst)
    # testing reverse by generating data from itself and creating it again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_1(inst2)
def impl_claim_2(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "654456"
assert inst.diagnosis[0].sequence == 1
assert inst.id == "860150"
assert inst.identifier[0].system == "http://happypdocs.com/claim"
assert inst.identifier[0].value == "8612345"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(75.0)
assert inst.item[0].productOrService.coding[0].code == "exam"
assert (
inst.item[0].productOrService.coding[0].system
== "http://hl7.org/fhir/ex-serviceproduct"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(75.0)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Claim</div>"
)
assert inst.text.status == "generated"
assert inst.type.coding[0].code == "professional"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
assert inst.use == "claim"
def test_claim_2(base_settings):
"""No. 2 tests collection for Claim.
Test File: claim-example-professional.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-professional.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_2(inst)
    # testing reverse by generating data from itself and creating it again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_2(inst2)
def impl_claim_3(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "123456"
assert inst.diagnosis[0].sequence == 1
assert inst.id == "100150"
assert inst.identifier[0].system == "http://happyvalley.com/claim"
assert inst.identifier[0].value == "12345"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].identifier.system == "http://happyvalley.com/claim"
assert inst.insurance[0].identifier.value == "12345"
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(135.57)
assert inst.item[0].productOrService.coding[0].code == "1200"
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(135.57)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Oral Health Claim</div>"
)
assert inst.text.status == "generated"
assert inst.type.coding[0].code == "oral"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
assert inst.use == "claim"
def test_claim_3(base_settings):
"""No. 3 tests collection for Claim.
Test File: claim-example.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_3(inst)
    # testing reverse by generating data from itself and creating it again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_3(inst2)
def impl_claim_4(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "654321"
assert inst.diagnosis[0].sequence == 1
assert inst.id == "660150"
assert inst.identifier[0].system == "http://happysight.com/claim"
assert inst.identifier[0].value == "6612345"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(80.0)
assert inst.item[0].productOrService.coding[0].code == "exam"
assert (
inst.item[0].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(80.0)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Vision Claim</div>"
)
assert inst.text.status == "generated"
assert inst.type.coding[0].code == "vision"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
assert inst.use == "claim"
def test_claim_4(base_settings):
"""No. 4 tests collection for Claim.
Test File: claim-example-vision.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-vision.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_4(inst)
    # testing reverse by generating data from itself and creating it again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_4(inst2)
def impl_claim_5(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.contained[0].id == "device-frame"
assert inst.contained[1].id == "device-lens"
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "654321"
assert inst.diagnosis[0].sequence == 1
assert inst.id == "660152"
assert inst.identifier[0].system == "http://happysight.com/claim"
assert inst.identifier[0].value == "6612347"
assert inst.insurance[0].claimResponse.reference == "ClaimResponse/R3500"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is False
assert inst.insurance[0].preAuthRef[0] == "PR7652387237"
assert inst.insurance[0].sequence == 1
assert inst.insurance[1].coverage.reference == "Coverage/9876B1"
assert inst.insurance[1].focal is True
assert inst.insurance[1].preAuthRef[0] == "AB543GTD7567"
assert inst.insurance[1].sequence == 2
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].category.coding[0].code == "F6"
assert inst.item[0].category.coding[0].display == "Vision Coverage"
assert (
inst.item[0].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert inst.item[0].detail[0].category.coding[0].code == "F6"
assert inst.item[0].detail[0].category.coding[0].display == "Vision Coverage"
assert (
inst.item[0].detail[0].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert float(inst.item[0].detail[0].factor) == float(1.1)
assert inst.item[0].detail[0].modifier[0].coding[0].code == "rooh"
assert (
inst.item[0].detail[0].modifier[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/modifiers"
)
assert inst.item[0].detail[0].net.currency == "USD"
assert float(inst.item[0].detail[0].net.value) == float(110.0)
assert inst.item[0].detail[0].productOrService.coding[0].code == "frame"
assert (
inst.item[0].detail[0].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert inst.item[0].detail[0].revenue.coding[0].code == "0010"
assert inst.item[0].detail[0].revenue.coding[0].display == "Vision Clinic"
assert (
inst.item[0].detail[0].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].detail[0].sequence == 1
assert inst.item[0].detail[0].udi[0].reference == "#device-frame"
assert inst.item[0].detail[0].unitPrice.currency == "USD"
assert float(inst.item[0].detail[0].unitPrice.value) == float(100.0)
assert inst.item[0].detail[1].category.coding[0].code == "F6"
assert inst.item[0].detail[1].category.coding[0].display == "Vision Coverage"
assert (
inst.item[0].detail[1].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert inst.item[0].detail[1].net.currency == "USD"
assert float(inst.item[0].detail[1].net.value) == float(110.0)
assert inst.item[0].detail[1].productOrService.coding[0].code == "lens"
assert (
inst.item[0].detail[1].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert inst.item[0].detail[1].programCode[0].coding[0].code == "none"
assert (
inst.item[0].detail[1].programCode[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-programcode"
)
assert float(inst.item[0].detail[1].quantity.value) == float(2)
assert inst.item[0].detail[1].revenue.coding[0].code == "0010"
assert inst.item[0].detail[1].revenue.coding[0].display == "Vision Clinic"
assert (
inst.item[0].detail[1].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].detail[1].sequence == 2
assert inst.item[0].detail[1].subDetail[0].category.coding[0].code == "F6"
assert (
inst.item[0].detail[1].subDetail[0].category.coding[0].display
== "Vision Coverage"
)
assert (
inst.item[0].detail[1].subDetail[0].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert float(inst.item[0].detail[1].subDetail[0].factor) == float(1.1)
assert inst.item[0].detail[1].subDetail[0].modifier[0].coding[0].code == "rooh"
assert (
inst.item[0].detail[1].subDetail[0].modifier[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/modifiers"
)
assert inst.item[0].detail[1].subDetail[0].net.currency == "USD"
assert float(inst.item[0].detail[1].subDetail[0].net.value) == float(66.0)
assert inst.item[0].detail[1].subDetail[0].productOrService.coding[0].code == "lens"
assert (
inst.item[0].detail[1].subDetail[0].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert inst.item[0].detail[1].subDetail[0].programCode[0].coding[0].code == "none"
assert (
inst.item[0].detail[1].subDetail[0].programCode[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-programcode"
)
assert float(inst.item[0].detail[1].subDetail[0].quantity.value) == float(2)
assert inst.item[0].detail[1].subDetail[0].revenue.coding[0].code == "0010"
assert (
inst.item[0].detail[1].subDetail[0].revenue.coding[0].display == "Vision Clinic"
)
assert (
inst.item[0].detail[1].subDetail[0].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].detail[1].subDetail[0].sequence == 1
assert inst.item[0].detail[1].subDetail[0].udi[0].reference == "#device-lens"
assert inst.item[0].detail[1].subDetail[0].unitPrice.currency == "USD"
assert float(inst.item[0].detail[1].subDetail[0].unitPrice.value) == float(30.0)
assert inst.item[0].detail[1].subDetail[1].category.coding[0].code == "F6"
assert (
inst.item[0].detail[1].subDetail[1].category.coding[0].display
== "Vision Coverage"
)
assert (
inst.item[0].detail[1].subDetail[1].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert float(inst.item[0].detail[1].subDetail[1].factor) == float(1.1)
assert inst.item[0].detail[1].subDetail[1].modifier[0].coding[0].code == "rooh"
assert (
inst.item[0].detail[1].subDetail[1].modifier[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/modifiers"
)
assert inst.item[0].detail[1].subDetail[1].net.currency == "USD"
assert float(inst.item[0].detail[1].subDetail[1].net.value) == float(33.0)
assert (
inst.item[0].detail[1].subDetail[1].productOrService.coding[0].code
== "hardening"
)
assert (
inst.item[0].detail[1].subDetail[1].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert float(inst.item[0].detail[1].subDetail[1].quantity.value) == float(2)
assert inst.item[0].detail[1].subDetail[1].revenue.coding[0].code == "0010"
assert (
inst.item[0].detail[1].subDetail[1].revenue.coding[0].display == "Vision Clinic"
)
assert (
inst.item[0].detail[1].subDetail[1].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].detail[1].subDetail[1].sequence == 2
assert inst.item[0].detail[1].subDetail[1].unitPrice.currency == "USD"
assert float(inst.item[0].detail[1].subDetail[1].unitPrice.value) == float(15.0)
assert inst.item[0].detail[1].subDetail[2].category.coding[0].code == "F6"
assert (
inst.item[0].detail[1].subDetail[2].category.coding[0].display
== "Vision Coverage"
)
assert (
inst.item[0].detail[1].subDetail[2].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert float(inst.item[0].detail[1].subDetail[2].factor) == float(1.1)
assert inst.item[0].detail[1].subDetail[2].modifier[0].coding[0].code == "rooh"
assert (
inst.item[0].detail[1].subDetail[2].modifier[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/modifiers"
)
assert inst.item[0].detail[1].subDetail[2].net.currency == "USD"
assert float(inst.item[0].detail[1].subDetail[2].net.value) == float(11.0)
assert (
inst.item[0].detail[1].subDetail[2].productOrService.coding[0].code
== "UV coating"
)
assert (
inst.item[0].detail[1].subDetail[2].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert float(inst.item[0].detail[1].subDetail[2].quantity.value) == float(2)
assert inst.item[0].detail[1].subDetail[2].revenue.coding[0].code == "0010"
assert (
inst.item[0].detail[1].subDetail[2].revenue.coding[0].display == "Vision Clinic"
)
assert (
inst.item[0].detail[1].subDetail[2].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].detail[1].subDetail[2].sequence == 3
assert inst.item[0].detail[1].subDetail[2].unitPrice.currency == "USD"
assert float(inst.item[0].detail[1].subDetail[2].unitPrice.value) == float(5.0)
assert inst.item[0].detail[1].unitPrice.currency == "USD"
assert float(inst.item[0].detail[1].unitPrice.value) == float(55.0)
assert inst.item[0].detail[2].category.coding[0].code == "F6"
assert inst.item[0].detail[2].category.coding[0].display == "Vision Coverage"
assert (
inst.item[0].detail[2].category.coding[0].system
== "http://example.org/fhir/CodeSystem/benefit-subcategory"
)
assert float(inst.item[0].detail[2].factor) == float(0.07)
assert inst.item[0].detail[2].net.currency == "USD"
assert float(inst.item[0].detail[2].net.value) == float(15.4)
assert inst.item[0].detail[2].productOrService.coding[0].code == "fst"
assert (
inst.item[0].detail[2].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert inst.item[0].detail[2].revenue.coding[0].code == "0010"
assert inst.item[0].detail[2].revenue.coding[0].display == "Vision Clinic"
assert (
inst.item[0].detail[2].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].detail[2].sequence == 3
assert inst.item[0].detail[2].unitPrice.currency == "USD"
assert float(inst.item[0].detail[2].unitPrice.value) == float(220.0)
assert inst.item[0].modifier[0].coding[0].code == "rooh"
assert (
inst.item[0].modifier[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/modifiers"
)
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(235.4)
assert inst.item[0].productOrService.coding[0].code == "glasses"
assert (
inst.item[0].productOrService.coding[0].system
== "http://example.org/fhir/CodeSystem/ex-visionservice"
)
assert inst.item[0].programCode[0].coding[0].code == "none"
assert (
inst.item[0].programCode[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-programcode"
)
assert inst.item[0].revenue.coding[0].code == "0010"
assert inst.item[0].revenue.coding[0].display == "Vision Clinic"
assert (
inst.item[0].revenue.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-revenue-center"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(235.4)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.prescription.reference == "http://www.optdocs.com/prescription/12345"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Vision Claim for Glasses</div>"
)
assert inst.text.status == "generated"
assert inst.type.coding[0].code == "vision"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
    assert inst.use == "claim"


def test_claim_5(base_settings):
"""No. 5 tests collection for Claim.
Test File: claim-example-vision-glasses-3tier.json
"""
filename = (
base_settings["unittest_data_dir"] / "claim-example-vision-glasses-3tier.json"
)
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_5(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_5(inst2)
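# Each test_claim_N below repeats one pattern: parse the example JSON,
# run the assertion bundle, serialize with .dict(), re-create the
# resource, and re-check it. A shared helper is sketched here; it is
# hypothetical (not part of the generated suite) and assumes the same
# ``claim`` import and ``base_settings`` fixture used in this file.
def _roundtrip_claim(base_settings, filename, impl):
    inst = claim.Claim.parse_file(
        base_settings["unittest_data_dir"] / filename,
        content_type="application/json",
        encoding="utf-8",
    )
    assert "Claim" == inst.resource_type
    impl(inst)
    # round-trip: dump to a plain dict, re-create, and validate again
    data = inst.dict()
    assert "Claim" == data["resourceType"]
    impl(claim.Claim(**data))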


def impl_claim_6(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "654456"
assert inst.diagnosis[0].sequence == 1
assert (
inst.enterer.identifier.system
== "http://jurisdiction.org/facilities/HOSP1234/users"
)
assert inst.enterer.identifier.value == "UC1234"
assert inst.facility.identifier.system == "http://jurisdiction.org/facilities"
assert inst.facility.identifier.value == "HOSP1234"
assert inst.id == "960150"
assert inst.identifier[0].system == "http://happyhospital.com/claim"
assert inst.identifier[0].value == "9612345"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].encounter[0].reference == "Encounter/example"
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(125.0)
assert inst.item[0].productOrService.coding[0].code == "exam"
assert (
inst.item[0].productOrService.coding[0].system
== "http://hl7.org/fhir/ex-serviceproduct"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(125.0)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.procedure[0].date == fhirtypes.DateTime.validate(
"2014-08-16T11:15:33+10:00"
)
assert inst.procedure[0].procedureCodeableConcept.coding[0].code == "SDI9901"
assert (
inst.procedure[0].procedureCodeableConcept.text
== "Subcutaneous diagnostic implant"
)
assert inst.procedure[0].sequence == 1
assert inst.procedure[0].type[0].coding[0].code == "primary"
assert inst.procedure[0].udi[0].reference == "Device/example"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.subType.coding[0].code == "emergency"
assert (
inst.subType.coding[0].system
== "http://terminology.hl7.org/CodeSystem/ex-claimsubtype"
)
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Claim</div>"
)
assert inst.text.status == "generated"
assert inst.total.currency == "USD"
assert float(inst.total.value) == float(125.0)
assert inst.type.coding[0].code == "institutional"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
    assert inst.use == "claim"


def test_claim_6(base_settings):
"""No. 6 tests collection for Claim.
Test File: claim-example-institutional.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-institutional.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_6(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
    impl_claim_6(inst2)


def impl_claim_7(inst):
assert inst.careTeam[0].provider.reference == "#provider-1"
assert inst.careTeam[0].sequence == 1
assert inst.contained[0].id == "org-insurer"
assert inst.contained[1].id == "org-org"
assert inst.contained[2].id == "provider-1"
assert inst.contained[3].id == "patient-1"
assert inst.contained[4].id == "coverage-1"
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "123456"
assert inst.diagnosis[0].sequence == 1
assert inst.id == "100152"
assert inst.identifier[0].system == "http://happyvalley.com/claim"
assert inst.identifier[0].value == "12347"
assert inst.insurance[0].coverage.reference == "#coverage-1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "#org-insurer"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(135.57)
assert inst.item[0].productOrService.coding[0].code == "1200"
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(135.57)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "#patient-1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "#org-org"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Oral Health Claim</div>"
)
assert inst.text.status == "generated"
assert inst.type.coding[0].code == "oral"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
    assert inst.use == "claim"


def test_claim_7(base_settings):
"""No. 7 tests collection for Claim.
Test File: claim-example-oral-contained.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-oral-contained.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_7(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
    impl_claim_7(inst2)


def impl_claim_8(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2014-08-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "654456"
assert inst.diagnosis[0].sequence == 1
assert inst.id == "760151"
assert inst.identifier[0].system == "http://happypharma.com/claim"
assert inst.identifier[0].value == "7612345"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].detail[0].net.currency == "USD"
assert float(inst.item[0].detail[0].net.value) == float(45.0)
assert inst.item[0].detail[0].productOrService.coding[0].code == "drugcost"
assert (
inst.item[0].detail[0].productOrService.coding[0].system
== "http://hl7.org/fhir/ex-pharmaservice"
)
assert inst.item[0].detail[0].sequence == 1
assert inst.item[0].detail[1].net.currency == "USD"
assert float(inst.item[0].detail[1].net.value) == float(9.0)
assert inst.item[0].detail[1].productOrService.coding[0].code == "markup"
assert (
inst.item[0].detail[1].productOrService.coding[0].system
== "http://hl7.org/fhir/ex-pharmaservice"
)
assert inst.item[0].detail[1].sequence == 2
assert inst.item[0].detail[2].net.currency == "USD"
assert float(inst.item[0].detail[2].net.value) == float(36.0)
assert inst.item[0].detail[2].productOrService.coding[0].code == "dispensefee"
assert (
inst.item[0].detail[2].productOrService.coding[0].system
== "http://hl7.org/fhir/ex-pharmaservice"
)
assert inst.item[0].detail[2].sequence == 3
assert inst.item[0].informationSequence[0] == 1
assert inst.item[0].informationSequence[1] == 2
assert inst.item[0].informationSequence[2] == 3
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(90.0)
assert inst.item[0].productOrService.coding[0].code == "562721"
assert (
inst.item[0].productOrService.coding[0].display == "Alprazolam 0.25mg (Xanax)"
)
assert (
inst.item[0].productOrService.coding[0].system
== "http://www.nlm.nih.gov/research/umls/rxnorm"
)
assert inst.item[0].quantity.code == "TAB"
assert inst.item[0].quantity.system == "http://unitsofmeasure.org"
assert inst.item[0].quantity.unit == "TAB"
assert float(inst.item[0].quantity.value) == float(90)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert (
inst.originalPrescription.reference
== "http://pharmacy.org/MedicationRequest/AB1202B"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert (
inst.prescription.reference == "http://pharmacy.org/MedicationRequest/AB1234G"
)
assert inst.priority.coding[0].code == "stat"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.supportingInfo[0].category.coding[0].code == "pharmacyrefill"
assert inst.supportingInfo[0].code.coding[0].code == "new"
assert (
inst.supportingInfo[0].code.coding[0].system
== "http://example.org/fhir/CodeSystem/pharmacy-refill"
)
assert inst.supportingInfo[0].sequence == 1
assert inst.supportingInfo[1].category.coding[0].code == "pharmacyinformation"
assert inst.supportingInfo[1].code.coding[0].code == "refillsremaining"
assert (
inst.supportingInfo[1].code.coding[0].system
== "http://example.org/fhir/CodeSystem/pharmacy-information"
)
assert inst.supportingInfo[1].sequence == 2
assert float(inst.supportingInfo[1].valueQuantity.value) == float(2)
assert inst.supportingInfo[2].category.coding[0].code == "pharmacyinformation"
assert inst.supportingInfo[2].code.coding[0].code == "dayssupply"
assert (
inst.supportingInfo[2].code.coding[0].system
== "http://example.org/fhir/CodeSystem/pharmacy-information"
)
assert inst.supportingInfo[2].sequence == 3
assert float(inst.supportingInfo[2].valueQuantity.value) == float(90)
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Pharmacy Claim</div>"
)
assert inst.text.status == "generated"
assert inst.total.currency == "USD"
assert float(inst.total.value) == float(90.0)
assert inst.type.coding[0].code == "pharmacy"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
    assert inst.use == "claim"


def test_claim_8(base_settings):
"""No. 8 tests collection for Claim.
Test File: claim-example-pharmacy-medication.json
"""
filename = (
base_settings["unittest_data_dir"] / "claim-example-pharmacy-medication.json"
)
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_8(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
    impl_claim_8(inst2)


def impl_claim_9(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.created == fhirtypes.DateTime.validate("2015-03-16T12:09:24+00:06")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "123457"
assert (
inst.diagnosis[0].diagnosisCodeableConcept.coding[0].system
== "http://hl7.org/fhir/sid/icd-10"
)
assert inst.diagnosis[0].sequence == 1
assert inst.fundsReserve.coding[0].code == "provider"
assert inst.id == "100153"
assert inst.identifier[0].system == "http://happyvalley.com/claim"
assert inst.identifier[0].value == "12355"
assert inst.insurance[0].coverage.reference == "Coverage/9876B1"
assert inst.insurance[0].focal is True
assert inst.insurance[0].sequence == 1
assert inst.insurer.reference == "Organization/2"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].detail[0].net.currency == "USD"
assert float(inst.item[0].detail[0].net.value) == float(1000.0)
assert inst.item[0].detail[0].productOrService.coding[0].code == "ORTHOEXAM"
assert (
inst.item[0].detail[0].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[0].detail[0].sequence == 1
assert inst.item[0].detail[0].unitPrice.currency == "USD"
assert float(inst.item[0].detail[0].unitPrice.value) == float(1000.0)
assert inst.item[0].detail[1].net.currency == "USD"
assert float(inst.item[0].detail[1].net.value) == float(1500.0)
assert inst.item[0].detail[1].productOrService.coding[0].code == "ORTHODIAG"
assert (
inst.item[0].detail[1].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[0].detail[1].sequence == 2
assert inst.item[0].detail[1].unitPrice.currency == "USD"
assert float(inst.item[0].detail[1].unitPrice.value) == float(1500.0)
assert inst.item[0].detail[2].net.currency == "USD"
assert float(inst.item[0].detail[2].net.value) == float(500.0)
assert inst.item[0].detail[2].productOrService.coding[0].code == "ORTHOINITIAL"
assert (
inst.item[0].detail[2].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[0].detail[2].sequence == 3
assert inst.item[0].detail[2].unitPrice.currency == "USD"
assert float(inst.item[0].detail[2].unitPrice.value) == float(500.0)
assert inst.item[0].detail[3].productOrService.coding[0].code == "ORTHOMONTHS"
assert (
inst.item[0].detail[3].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert float(inst.item[0].detail[3].quantity.value) == float(24)
assert inst.item[0].detail[3].sequence == 4
assert inst.item[0].detail[4].net.currency == "USD"
assert float(inst.item[0].detail[4].net.value) == float(250.0)
assert inst.item[0].detail[4].productOrService.coding[0].code == "ORTHOPERIODIC"
assert (
inst.item[0].detail[4].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert float(inst.item[0].detail[4].quantity.value) == float(24)
assert inst.item[0].detail[4].sequence == 5
assert inst.item[0].detail[4].unitPrice.currency == "USD"
assert float(inst.item[0].detail[4].unitPrice.value) == float(250.0)
assert inst.item[0].diagnosisSequence[0] == 1
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(9000.0)
assert inst.item[0].productOrService.coding[0].code == "ORTHPLAN"
assert (
inst.item[0].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2015-05-16")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(9000.0)
assert inst.item[1].bodySite.coding[0].code == "21"
assert (
inst.item[1].bodySite.coding[0].system == "http://fdi.org/fhir/oraltoothcodes"
)
assert inst.item[1].careTeamSequence[0] == 1
assert inst.item[1].net.currency == "USD"
assert float(inst.item[1].net.value) == float(105.0)
assert inst.item[1].productOrService.coding[0].code == "21211"
assert (
inst.item[1].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[1].sequence == 2
assert inst.item[1].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[1].subSite[0].coding[0].code == "L"
assert (
inst.item[1].subSite[0].coding[0].system
== "http://fdi.org/fhir/oralsurfacecodes"
)
assert inst.item[1].unitPrice.currency == "USD"
assert float(inst.item[1].unitPrice.value) == float(105.0)
assert inst.item[2].bodySite.coding[0].code == "36"
assert (
inst.item[2].bodySite.coding[0].system == "http://fdi.org/fhir/oraltoothcodes"
)
assert inst.item[2].careTeamSequence[0] == 1
assert inst.item[2].detail[0].net.currency == "USD"
assert float(inst.item[2].detail[0].net.value) == float(750.0)
assert inst.item[2].detail[0].productOrService.coding[0].code == "27211"
assert (
inst.item[2].detail[0].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[2].detail[0].sequence == 1
assert inst.item[2].detail[0].unitPrice.currency == "USD"
assert float(inst.item[2].detail[0].unitPrice.value) == float(750.0)
assert inst.item[2].detail[1].net.currency == "USD"
assert float(inst.item[2].detail[1].net.value) == float(350.0)
assert inst.item[2].detail[1].productOrService.coding[0].code == "lab"
assert (
inst.item[2].detail[1].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[2].detail[1].sequence == 2
assert inst.item[2].detail[1].unitPrice.currency == "USD"
assert float(inst.item[2].detail[1].unitPrice.value) == float(350.0)
assert inst.item[2].net.currency == "USD"
assert float(inst.item[2].net.value) == float(1100.0)
assert inst.item[2].productOrService.coding[0].code == "27211"
assert (
inst.item[2].productOrService.coding[0].system
== "http://example.org/fhir/oralservicecodes"
)
assert inst.item[2].sequence == 3
assert inst.item[2].servicedDate == fhirtypes.Date.validate("2014-08-16")
assert inst.item[2].unitPrice.currency == "USD"
assert float(inst.item[2].unitPrice.value) == float(1100.0)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "Patient/1"
assert inst.payee.type.coding[0].code == "provider"
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of the Oral Health Claim</div>"
)
assert inst.text.status == "generated"
assert inst.type.coding[0].code == "oral"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
    assert inst.use == "preauthorization"


def test_claim_9(base_settings):
"""No. 9 tests collection for Claim.
Test File: claim-example-oral-orthoplan.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-oral-orthoplan.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_9(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
    impl_claim_9(inst2)


def impl_claim_10(inst):
assert inst.careTeam[0].provider.reference == "Practitioner/example"
assert inst.careTeam[0].sequence == 1
assert inst.contained[0].id == "patient-1"
assert inst.contained[1].id == "coverage-1"
assert inst.created == fhirtypes.DateTime.validate("2015-10-16T00:00:00-07:00")
assert inst.diagnosis[0].diagnosisCodeableConcept.coding[0].code == "M96.1"
assert (
inst.diagnosis[0].diagnosisCodeableConcept.coding[0].display
== "Postlaminectomy syndrome"
)
assert (
inst.diagnosis[0].diagnosisCodeableConcept.coding[0].system
== "http://hl7.org/fhir/sid/icd-10"
)
assert inst.diagnosis[0].sequence == 1
assert inst.diagnosis[1].diagnosisCodeableConcept.coding[0].code == "G89.4"
assert (
inst.diagnosis[1].diagnosisCodeableConcept.coding[0].display
== "Chronic pain syndrome"
)
assert (
inst.diagnosis[1].diagnosisCodeableConcept.coding[0].system
== "http://hl7.org/fhir/sid/icd-10"
)
assert inst.diagnosis[1].sequence == 2
assert inst.diagnosis[2].diagnosisCodeableConcept.coding[0].code == "M53.88"
assert inst.diagnosis[2].diagnosisCodeableConcept.coding[0].display == (
"Other specified dorsopathies, sacral and sacrococcygeal " "region"
)
assert (
inst.diagnosis[2].diagnosisCodeableConcept.coding[0].system
== "http://hl7.org/fhir/sid/icd-10"
)
assert inst.diagnosis[2].sequence == 3
assert inst.diagnosis[3].diagnosisCodeableConcept.coding[0].code == "M47.816"
assert inst.diagnosis[3].diagnosisCodeableConcept.coding[0].display == (
"Spondylosis without myelopathy or radiculopathy, lumbar " "region"
)
assert (
inst.diagnosis[3].diagnosisCodeableConcept.coding[0].system
== "http://hl7.org/fhir/sid/icd-10"
)
assert inst.diagnosis[3].sequence == 4
assert inst.id == "MED-00050"
assert inst.identifier[0].system == "http://CedarArmsMedicalCenter.com/claim"
assert inst.identifier[0].value == "MED-00050"
assert inst.insurance[0].coverage.reference == "#coverage-1"
assert inst.insurance[0].focal is True
assert (
inst.insurance[0].identifier.system == "http://CedarArmsMedicalCenter.com/claim"
)
assert inst.insurance[0].identifier.value == "MED-00050"
assert inst.insurance[0].sequence == 1
assert inst.insurer.display == "Humana Inc."
assert inst.insurer.identifier.system == "http://www.bindb.com/bin"
assert inst.insurer.identifier.value == "123456"
assert inst.item[0].careTeamSequence[0] == 1
assert inst.item[0].diagnosisSequence[0] == 2
assert inst.item[0].diagnosisSequence[1] == 4
assert inst.item[0].informationSequence[0] == 1
assert inst.item[0].locationCodeableConcept.coding[0].code == "24"
assert (
inst.item[0].locationCodeableConcept.coding[0].display
== "Ambulatory Surgical Center"
)
assert inst.item[0].locationCodeableConcept.coding[0].system == (
"https://www.cms.gov/medicare/coding/place-of-service-"
"codes/place_of_service_code_set.html"
)
assert inst.item[0].net.currency == "USD"
assert float(inst.item[0].net.value) == float(12500.0)
assert inst.item[0].productOrService.coding[0].code == "62264"
assert (
inst.item[0].productOrService.coding[0].display
== "Surgical Procedures on the Spine and Spinal Cord"
)
assert (
inst.item[0].productOrService.coding[0].system
== "http://www.ama-assn.org/go/cpt"
)
assert inst.item[0].sequence == 1
assert inst.item[0].servicedDate == fhirtypes.Date.validate("2015-10-13")
assert inst.item[0].unitPrice.currency == "USD"
assert float(inst.item[0].unitPrice.value) == float(12500.0)
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.patient.reference == "#patient-1"
assert inst.payee.party.reference == "Organization/1"
assert inst.payee.type.coding[0].code == "provider"
assert (
inst.payee.type.coding[0].system
== "http://terminology.hl7.org/CodeSystem/payeetype"
)
assert inst.priority.coding[0].code == "normal"
assert inst.provider.reference == "Organization/1"
assert inst.status == "active"
assert inst.subType.coding[0].code == "831"
assert inst.subType.coding[0].system == "https://www.cms.gov/codes/billtype"
assert inst.supportingInfo[0].category.coding[0].code == "hospitalized"
assert inst.supportingInfo[0].category.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/claiminformationcatego" "ry"
)
assert inst.supportingInfo[0].sequence == 1
assert inst.supportingInfo[0].timingPeriod.end == fhirtypes.DateTime.validate(
"2015-10-05T00:00:00-07:00"
)
assert inst.supportingInfo[0].timingPeriod.start == fhirtypes.DateTime.validate(
"2015-10-01T00:00:00-07:00"
)
assert inst.supportingInfo[1].category.coding[0].code == "discharge"
assert inst.supportingInfo[1].category.coding[0].system == (
"http://terminology.hl7.org/CodeSystem/claiminformationcatego" "ry"
)
assert inst.supportingInfo[1].code.coding[0].code == "01"
assert (
inst.supportingInfo[1].code.coding[0].display
== "Discharge to Home or Self Care"
)
assert inst.supportingInfo[1].code.coding[0].system == (
"https://www.cms.gov/Outreach-and-Education/Medicare-"
"Learning-Network-MLN/MLNMattersArticles/downloads/SE0801.pdf"
)
assert inst.supportingInfo[1].sequence == 2
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">A human-readable'
" rendering of a CMS 1500 Claim</div>"
)
assert inst.text.status == "generated"
assert inst.total.currency == "USD"
assert float(inst.total.value) == float(12500.0)
assert inst.type.coding[0].code == "institutional"
assert (
inst.type.coding[0].system == "http://terminology.hl7.org/CodeSystem/claim-type"
)
    assert inst.use == "claim"


def test_claim_10(base_settings):
"""No. 10 tests collection for Claim.
Test File: claim-example-cms1500-medical.json
"""
filename = base_settings["unittest_data_dir"] / "claim-example-cms1500-medical.json"
inst = claim.Claim.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "Claim" == inst.resource_type
impl_claim_10(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "Claim" == data["resourceType"]
inst2 = claim.Claim(**data)
impl_claim_10(inst2)
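# With the _roundtrip_claim sketch above, each case would collapse to a
# single call (illustrative rewrite only), e.g.:
#
#   def test_claim_10(base_settings):
#       _roundtrip_claim(
#           base_settings, "claim-example-cms1500-medical.json", impl_claim_10
#       )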
| 43.527016 | 88 | 0.664904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,726 | 0.246942 |
9a27eb495106ade83e880e4a8a449d48c322f96d | 2,708 | py | Python | bin/main.py | ubern-mia/point-cloud-segmentation-miccai2019 | b131b62dc5016de53611f3a743c56cc0061e050f | ["MIT"] | 20 | 2019-10-14T06:03:10.000Z | 2022-02-04T04:44:38.000Z | bin/main.py | ubern-mia/point-cloud-segmentation-miccai2019 | b131b62dc5016de53611f3a743c56cc0061e050f | ["MIT"] | 11 | 2019-06-10T12:31:23.000Z | 2022-03-12T00:04:28.000Z | bin/main.py | fabianbalsiger/point-cloud-segmentation-miccai2019 | b131b62dc5016de53611f3a743c56cc0061e050f | ["MIT"] | 3 | 2019-11-06T14:06:44.000Z | 2021-08-11T18:46:25.000Z |
import argparse
import os.path
import sys
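# Make the repository root importable so the ``pc`` package below resolves
# when this script is run directly from bin/.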
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

import pymia.deeplearning.logging as log
import tensorflow as tf
import pc.configuration.config as cfg
import pc.data.handler as hdlr
import pc.data.split as split
import pc.model.point_cnn as net
import pc.utilities.filesystem as fs
import pc.utilities.seeding as seed
import pc.utilities.training as train


def main(config_file: str):
config = cfg.load(config_file, cfg.Configuration)
# set up directories and logging
model_dir, result_dir = fs.prepare_directories(config_file, cfg.Configuration,
lambda: fs.get_directory_name(config))
config.model_dir = model_dir
config.result_dir = result_dir
print(config)
# set seed before model instantiation
print('Set seed to {}'.format(config.seed))
seed.set_seed(config.seed, config.cudnn_determinism)
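    # seed.set_seed lives in pc.utilities.seeding (project code); a
    # plausible sketch of such a helper, assuming the TF1-style API
    # implied by tf.Session below, seeds every relevant RNG at once:
    #
    #   import random
    #   import numpy as np
    #   random.seed(config.seed)
    #   np.random.seed(config.seed)
    #   tf.set_random_seed(config.seed)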
# load train and valid subjects from split file
subjects_train, subjects_valid, _ = split.load_split(config.split_file)
print('Train subjects:', subjects_train)
print('Valid subjects:', subjects_valid)
# set up data handling
data_handler = hdlr.PointCloudDataHandler(config, subjects_train, subjects_valid, None)
with tf.Session() as sess:
# extract a sample for model initialization
data_handler.dataset.set_extractor(data_handler.extractor_train)
data_handler.dataset.set_transform(data_handler.extraction_transform_train)
sample = data_handler.dataset[0]
model = net.PointCNN(sess, sample, config)
logger = log.TensorFlowLogger(config.model_dir, sess,
model.epoch_summaries(),
model.batch_summaries(),
model.visualization_summaries())
# trainer = train.AssemblingTester(data_handler, logger, config, model, sess)
trainer = train.SegmentationTrainer(data_handler, logger, config, model, sess)
tf.get_default_graph().finalize() # to ensure that no ops are added during training, which would lead to
# a growing graph
trainer.train()
        logger.close()


if __name__ == '__main__':
"""The program's entry point.
Parse the arguments and run the program.
"""
parser = argparse.ArgumentParser(description='Deep learning for shape learning on point clouds')
parser.add_argument(
'--config_file',
type=str,
default='./bin/config.json',
help='Path to the configuration file.'
)
args = parser.parse_args()
main(args.config_file)
| 33.85 | 113 | 0.679838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.223412 |
9a285d7173b98f84f370605c57bfb8c26d5b2158 | 1,586 | py | Python | spynoza/unwarping/topup/nodes.py | spinoza-centre/spynoza | d71d69e3ea60c9544f4e63940f053a2d1b3ac65f | ["MIT"] | 7 | 2016-06-21T11:51:07.000Z | 2018-08-10T15:41:37.000Z | spynoza/unwarping/topup/nodes.py | spinoza-centre/spynoza | d71d69e3ea60c9544f4e63940f053a2d1b3ac65f | ["MIT"] | 12 | 2017-07-05T09:14:31.000Z | 2018-09-13T12:19:14.000Z | spynoza/unwarping/topup/nodes.py | spinoza-centre/spynoza | d71d69e3ea60c9544f4e63940f053a2d1b3ac65f | ["MIT"] | 8 | 2016-09-26T12:35:59.000Z | 2021-06-05T05:50:23.000Z |
from nipype.interfaces.utility import Function


def topup_scan_params(pe_direction='y', te=0.025, epi_factor=37):
import numpy as np
import os
import tempfile
scan_param_array = np.zeros((2, 4))
scan_param_array[0, ['x', 'y', 'z'].index(pe_direction)] = 1
scan_param_array[1, ['x', 'y', 'z'].index(pe_direction)] = -1
scan_param_array[:, -1] = te * epi_factor
    # np.savetxt writes the two encoding rows directly; the last column
    # (readout time) was set above as te * epi_factor
    fn = os.path.join(tempfile.gettempdir(), 'scan_params.txt')
    np.savetxt(fn, scan_param_array, fmt='%1.3f')
    return fn


Topup_scan_params = Function(function=topup_scan_params,
input_names=['pe_direction', 'te', 'epi_factor'],
                             output_names=['fn'])


def apply_scan_params(pe_direction='y', te=0.025, epi_factor=37, nr_trs=1):
import numpy as np
import os
import tempfile
scan_param_array = np.zeros((nr_trs, 4))
scan_param_array[:, ['x', 'y', 'z'].index(pe_direction)] = 1
scan_param_array[:, -1] = te * epi_factor
    # one identical row per TR; np.savetxt writes the matrix directly
    fn = os.path.join(tempfile.gettempdir(), 'scan_params_apply.txt')
    np.savetxt(fn, scan_param_array, fmt='%1.3f')
    return fn


Apply_scan_params = Function(function=apply_scan_params,
input_names=['pe_direction', 'te', 'epi_factor',
'nr_trs'],
output_names=['fn'])
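# Usage sketch (hypothetical; the workflow, node, and downstream field
# names are assumptions): a nipype Function interface re-executes the
# wrapped function's source in a clean namespace, which is why numpy,
# os, and tempfile are imported inside the function bodies above.
#
#   import nipype.pipeline.engine as pe
#   scan_params = pe.Node(Topup_scan_params, name='topup_scan_params')
#   scan_params.inputs.pe_direction = 'y'
#   scan_params.inputs.te = 0.025
#   scan_params.inputs.epi_factor = 37
#   workflow.connect(scan_params, 'fn', topup_node, 'encoding_file')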
| 33.041667 | 78 | 0.592686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.12169 |
9a287484855658cc91349375e1c4b8e475ab1fe0 | 1,317 | py | Python | manage_env.py | sandeep-gh/OpenBSDRemoteIT | 1690e67b6e2eb106c5350c75915065457fb1b9b2 | ["MIT"] | null | null | null | manage_env.py | sandeep-gh/OpenBSDRemoteIT | 1690e67b6e2eb106c5350c75915065457fb1b9b2 | ["MIT"] | null | null | null | manage_env.py | sandeep-gh/OpenBSDRemoteIT | 1690e67b6e2eb106c5350c75915065457fb1b9b2 | ["MIT"] | null | null | null |
import os
import pickle
from deployConfig import workDir
env_fp = f"{workDir}/env.pickle"


def add_to_env(varname, path):
with open(env_fp, "rb") as fh:
envvars = pickle.load(fh)
if varname in envvars.keys():
if path not in envvars[varname]:
envvars[varname].append(path)
else:
envvars[varname] = []
envvars[varname].append(path)
with open(env_fp, "wb") as fh:
        pickle.dump(envvars, fh)


def build_env():
with open(env_fp, "rb") as fh:
envvars = pickle.load(fh)
# env_str = "\n".join(
# [f"""export {key}={":".join(envvars[key])}:${key}""" for key in envvars.keys()])
env_str = ""
for key in envvars.keys():
suffix = ""
if key in ["PATH", "LD_LIBRARY_PATH"]:
suffix = f":${key}:"
env_str = f"""{env_str}\nexport {key}={":".join(envvars[key])}{suffix}"""
    return env_str


if not os.path.exists(env_fp):
env = {}
with open(env_fp, "wb") as fh:
pickle.dump(env, fh)
# add_to_env("LD_LIBRARY_PATH", "/usr/local/lib/eopenssl11/")
# add_to_env("LD_LIBRARY_PATH", f"{project_root}/Builds/Python-3.10.0/")
# add_to_env("PATH", f"{project_root}/Builds/Python-3.10.0/bin")
# add_to_env("PATH", f"{project_root}/Builds/postgresql-14.0/bin")
| 28.630435 | 90 | 0.59757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.39104 |