blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
215c25ff82445f20287bdd8bbd15f05641fd6053 | 1201e90015f2e068fe6ee9fb2540687578847ae8 | /hostel/docs/views.py | c1c47819f01ce59b9d779d248751fb0c9940a89d | [
"Apache-2.0"
] | permissive | sincere32/hostel | ad33f814bc9f5614d5202dc431615fadfb776bdd | 34408e969cedde247b9121934cd531f224ddb1ae | refs/heads/master | 2023-01-02T04:54:58.063968 | 2020-10-21T15:04:15 | 2020-10-21T15:04:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,920 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404, redirect, reverse
import hostel.common.helper_functions as h
from hostel.clients.models import Client
from hostel.companies.models import Company
from hostel.settings import LOGIN_URL
from hostel.spy.models import Spy
from .forms import AgreementForm, ApplicationForm
from .models import Application, Agreement, ApplicationSearch, AgreementSearch
@login_required(login_url=LOGIN_URL)
@permission_required('docs.delete_agreement')
def delete_agreement(request):
    """Delete an agreement (POST 'id') together with its attached files.

    On failure the user is sent back to the agreement page with an error
    message; on success a Spy audit record is written and the user is
    redirected to the agreement listing.
    """
    agreement = get_object_or_404(Agreement, pk=request.POST.get('id'))
    try:
        agreement.delete()
    except Exception:  # narrowed from a bare except: keep best-effort behavior without masking SystemExit/KeyboardInterrupt
        messages.add_message(request, messages.ERROR, "Ошибка при удалении документа.")
        # delete() failed, so agreement.pk is still valid here
        return redirect(view_agreement, agreement_id=agreement.pk)
    Spy().log(object=agreement, action=Spy.DELETE, user=request.user)
    messages.add_message(request, messages.SUCCESS, "Договор и все сопутствующие файлы удалены")
    return redirect(view_agreements)
@login_required(login_url=LOGIN_URL)
@permission_required('docs.change_agreement')
def delete_agreement_file(request):
    """Remove the scanned file attached to an agreement (POST 'id')."""
    doc = get_object_or_404(Agreement, pk=request.POST.get('id'))
    doc.delete_file()
    messages.add_message(request, messages.SUCCESS, "Скан договора удален.")
    return redirect(view_agreement, agreement_id=doc.pk)
@login_required(login_url=LOGIN_URL)
@permission_required('docs.add_agreement')
def add_agreement(request):
    """Create a new agreement, optionally storing an uploaded scan.

    GET renders an empty form (pre-filling the company from ?company=<id>);
    POST validates, saves, writes a single Spy CREATE record and redirects
    to the new agreement's page. On an invalid form the client dropdown
    choices are rebuilt before re-rendering.
    """
    if request.method == 'POST':
        form = AgreementForm(request.POST)
        if form.is_valid():
            try:
                form.instance.creator = request.user
                form.save()
                Spy().log(object=form.instance, form=form, action=Spy.CREATE, user=request.user)
            except Exception:  # narrowed from a bare except; DB integrity errors land here
                messages.add_message(request, messages.ERROR, 'Ошибка целостности БД.')
                context = {'form': form, 'app': 'docs', 'tab': 'add'}
                return render(request, 'bs3/docs/add_agreement.html', context)
            agreement = form.instance
            # Trying to handle file if exists
            if request.FILES.get('file'):
                result = agreement.handle_file(request.FILES['file'])
                if result:
                    messages.add_message(request, messages.SUCCESS, 'Файл "%s" успешно загружен' % result)
                else:
                    messages.add_message(request, messages.ERROR, "Ошибка сохранения файла")
            else:
                messages.add_message(request, messages.ERROR, "Загрузите скан документа")
            # The CREATE action is already logged right after form.save();
            # a second Spy().log here used to produce a duplicate audit
            # record and was removed.
            return redirect(view_agreement, agreement_id=agreement.pk)
        else:
            # Invalid form: rebuild the "<clientname> — <netname>" choice
            # list so the re-rendered form still offers the full dropdown.
            choices = [('', '')]
            clients = Client.objects.filter(clientname__isnull=False).order_by("clientname")
            for client in clients:
                choice = (client.pk, "%s — %s" % (client.clientname, client.netname))
                choices.append(choice)
            form = h.inject_choices(form=form, field_name='client',
                                    choices=choices, required=False)
            messages.add_message(request, messages.ERROR, 'Форма содержит ошибки')
            context = {'form': form, 'app': 'docs', 'tab': 'add'}
            return render(request, 'bs3/docs/add_agreement.html', context)
    else:
        initial = {}
        company_id = request.GET.get('company')
        if company_id:
            initial['company'] = get_object_or_404(Company, pk=company_id)
        form = AgreementForm(initial=initial)
        context = {'form': form, 'app': 'docs', 'tab': 'add'}
        return render(request, 'bs3/docs/add_agreement.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('docs.add_application')
def add_application(request, agreement_id):
    """Attach a new application document to an existing agreement.

    GET renders an empty ApplicationForm; POST validates, saves the
    application bound to the agreement, optionally stores an uploaded
    scan, and redirects back to the agreement page.
    """
    context = {'app': 'docs'}
    agreement = get_object_or_404(Agreement, pk=agreement_id)
    context['agreement'] = agreement
    form = ApplicationForm()
    context['form'] = form
    if request.method == 'POST':
        form = ApplicationForm(request.POST, request.FILES)
        context['form'] = form
        if form.is_valid():
            agreement = Agreement.objects.get(pk=agreement_id)
            try:
                form.instance.agreement = agreement
                form.instance.creator = request.user
                # NOTE(review): the Spy CREATE record is written *before*
                # form.save(), i.e. while the instance may not have a pk yet —
                # confirm this ordering is intentional (delete views log after).
                Spy().log(object=form.instance, form=form, action=Spy.CREATE, user=request.user)
                form.save()
            except Exception as e:
                messages.add_message(request, messages.ERROR, 'Ошибка целостности БД: %s' % e)
                context = {'form': form, 'app': 'docs'}
                return render(request, 'bs3/docs/add_application.html', context)
            application = form.instance
            # Re-binding the agreement and saving again after form.save()
            # looks redundant — presumably defensive; verify before removing.
            application.agreement = agreement
            application.save()
            # Trying to handle file if exists
            if request.FILES.get('file'):
                result = application.handle_file(request.FILES['file'])
                if result:
                    messages.add_message(request, messages.SUCCESS, 'Файл "%s" успешно загружен' % result)
                else:
                    messages.add_message(request, messages.ERROR, "Ошибка сохранения файла")
            else:
                messages.add_message(request, messages.ERROR, "Загрузите скан документа")
            return redirect(view_agreement, agreement_id=agreement.pk)
    return render(request, 'bs3/docs/add_application.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('docs.change_agreement')
def update_agreement(request, agreement_id):
    """Edit an existing agreement and optionally replace its scan file.

    GET renders the bound AgreementForm; POST saves valid changes with a
    Spy CHANGE record, handles an uploaded file, and redirects to the
    agreement page.
    """
    context = {'app': 'docs'}
    agreement = get_object_or_404(Agreement, pk=agreement_id)
    context['agreement'] = agreement
    form = AgreementForm(instance=agreement)
    context['form'] = form
    if request.method == 'POST':
        form = AgreementForm(data=request.POST, instance=agreement)
        if form.is_valid():
            form.save()
            Spy().log(object=agreement, form=form, action=Spy.CHANGE, user=request.user)
            messages.add_message(request, messages.SUCCESS, 'Информация обновлена')
        # Trying to handle file if exists
        if request.FILES.get('file'):
            result = agreement.handle_file(request.FILES['file'])
            if result:
                messages.add_message(request, messages.SUCCESS, 'Файл "%s" успешно загружен' % result)
            else:
                messages.add_message(request, messages.ERROR, "Ошибка сохранения файла")
        # NOTE(review): the redirect below runs even when the form is
        # invalid, so validation errors are silently discarded — confirm.
        return redirect(view_agreement, agreement_id=agreement.pk)
    return render(request, 'bs3/docs/update_agreement.html', context)
@login_required(login_url=LOGIN_URL)
def view_agreement(request, agreement_id):
    """Default agreement page: delegate to the "applications" tab view."""
    return view_agreement_applications(request, agreement_id)
@login_required(login_url=LOGIN_URL)
def view_agreement_applications(request, agreement_id):
    """Render the "applications" tab of an agreement page, with an
    optional search filter over the agreement's applications."""
    agreement = get_object_or_404(Agreement, pk=agreement_id)
    query = request.GET.get('search_string')
    audit_trail = Spy.objects.filter(object_name='agreement', object_id=agreement.pk).order_by('-time')
    items = agreement.applications.all()
    if query:
        items = ApplicationSearch(queryset=items).search(query)
    context = {
        'app': 'docs',
        'tab': 'applications',
        'agreement': agreement,
        'search_string': query,
        'logs': audit_trail,
        'applications': items,
    }
    return render(request, 'bs3/docs/view_agreement.html', context)
@login_required(login_url=LOGIN_URL)
def view_agreement_orders(request, agreement_id):
    """Render the "orders" tab: group the agreement's orders/applications
    with their matching akts by shared order_number.
    """
    context = {'app': 'docs', 'tab': 'orders'}
    agreement = get_object_or_404(Agreement, pk=agreement_id)
    context['agreement'] = agreement
    logs = Spy.objects.filter(object_name='agreement', object_id=agreement.pk).order_by('-time')
    context['logs'] = logs
    # NOTE(review): search_string is passed to the template but never
    # applied to the queryset below — confirm whether filtering is missing.
    search_string = request.GET.get('search_string')
    context['search_string'] = search_string
    orders = agreement.applications.filter(application_type__in=[Application.APP_TYPE_ORDER,
                                                                 Application.APP_TYPE_APPLICATION])
    orders = orders.order_by('order_number')
    # generating dict with orders: order_number -> {'orders': [...], 'akts': [...]}
    order_pairs = {}
    for order in orders:
        if order.order_number in order_pairs:
            order_pairs[order.order_number]['orders'].append(order)
        else:
            order_pairs[order.order_number] = {'orders': [order], 'akts': []}
    # Attach akts to the order group with the same number; akts without a
    # matching order get their own group with an empty 'orders' list.
    for akt in agreement.applications.filter(application_type=Application.APP_TYPE_AKT):
        if akt.order_number in order_pairs:
            order_pairs[akt.order_number]['akts'].append(akt)
        else:
            pair = {'orders': [], 'akts': [akt]}
            order_pairs[akt.order_number] = pair
    # Flatten for the template: one entry per order_number.
    groups = [{'order_number': key, 'data': value}
              for key, value in order_pairs.items()]
    context['groups'] = groups
    return render(request, 'bs3/docs/view_agreement.html', context)
@login_required(login_url=LOGIN_URL)
def view_agreement_preview(request, agreement_id):
    """Render the "preview" tab of an agreement; a POST with
    action='delete_file' clears the stored scan filename (permission
    checked explicitly since this view is not decorator-protected).
    """
    context = {'app': 'docs', 'tab': 'preview'}
    agreement = get_object_or_404(Agreement, pk=agreement_id)
    context['agreement'] = agreement
    logs = Spy.objects.filter(object_name='agreement', object_id=agreement.pk).order_by('-time')
    context['logs'] = logs
    # Fix: check request.method instead of the truthiness of request.POST —
    # a POST with an empty body has a falsy request.POST and was silently
    # ignored; this also matches the sibling views' convention.
    if request.method == 'POST':
        if not request.user.has_perm('docs.change_agreement'):
            messages.error(request, 'Недостаточно прав для изменения договоров')
            return redirect(reverse('agreement_preview', args=[agreement_id]))
        action = request.POST.get('action')
        if action == 'delete_file':
            # NOTE(review): only the filename reference is cleared; the file
            # itself is not removed (cf. Agreement.delete_file()).
            agreement.filename = ''
            agreement.save()
            messages.success(request, 'Скан договора удален')
    return render(request, 'bs3/docs/view_agreement.html', context)
@login_required(login_url=LOGIN_URL)
def view_application(request, application_id):
    """Show a single application; POST actions delete its attached file
    ('delete_file') or the application itself ('delete_application').
    """
    context = {'app': 'docs'}
    application = get_object_or_404(Application, pk=application_id)
    context['application'] = application
    tab = request.GET.get('tab', 'document')
    context['tab'] = tab
    # Fix: check request.method, not the truthiness of request.POST
    # (an empty POST body made the old check fail silently).
    if request.method == 'POST':
        action = request.POST.get('action')
        if action == 'delete_file':
            application.delete_file()
            messages.success(request, 'Файл приложения удален')
            return redirect(view_application, application_id=application.pk)
        if action == 'delete_application':
            # Fix: capture the parent agreement's pk *before* deleting the
            # application instead of dereferencing the FK afterwards.
            agreement_id = application.agreement.pk
            application.delete()
            messages.success(request, 'Приложение удалено')
            return redirect(view_agreement, agreement_id=agreement_id)
    logs = Spy.objects.filter(object_name='application', object_id=application.pk).order_by('-time')
    context['logs'] = logs
    return render(request, 'bs3/docs/view_application.html', context)
@login_required(login_url=LOGIN_URL)
@permission_required('docs.change_application')
def update_application(request, application_id):
    """Edit an existing application and optionally replace its scan file.

    GET renders the bound ApplicationForm; POST saves valid changes with a
    Spy CHANGE record, handles an uploaded file, and redirects to the
    application page.
    """
    context = {'app': 'docs', 'mode': 'edit'}
    application = get_object_or_404(Application, pk=application_id)
    context['application'] = application
    form = ApplicationForm(instance=application)
    context['form'] = form
    if request.method == 'POST':
        form = ApplicationForm(data=request.POST, instance=application)
        context['form'] = form
        if form.is_valid():
            form.save()
            Spy().log(object=form.instance, form=form, action=Spy.CHANGE, user=request.user)
            messages.add_message(request, messages.SUCCESS, 'Информация обновлена')
        # Trying to handle file if exists
        if request.FILES.get('file'):
            result = application.handle_file(request.FILES['file'])
            if result:
                messages.add_message(request, messages.SUCCESS, 'Файл "%s" успешно загружен' % result)
            else:
                messages.add_message(request, messages.ERROR, "Ошибка сохранения файла")
        # NOTE(review): the redirect below runs even when the form is
        # invalid, so validation errors are silently discarded — confirm.
        return redirect(view_application, application_id=application.pk)
    return render(request, 'bs3/docs/update_application.html', context)
@login_required(login_url=LOGIN_URL)
def view_agreements(request):
    """List agreements: paginated newest-first by default, or an
    unpaginated search result when ?search= is given.
    """
    context = {'app': 'docs'}
    agreements = Agreement.objects.all().prefetch_related('client', 'applications', 'client__manager')
    search_string = request.GET.get('search')
    context['search_string'] = search_string
    if search_string:
        agreements = AgreementSearch(queryset=agreements).search(search_string)
        context['listing'] = agreements
    else:
        agreements = agreements.order_by('-pk')
        p = request.GET.get('page', 1)
        # Page size comes from the user's profile setting.
        # NOTE(review): paginator.page(p) raises on an out-of-range or
        # non-numeric page parameter — confirm a handler exists upstream.
        paginator = Paginator(agreements, request.user.pagination_count)
        page = paginator.page(p)
        context['p'] = page
        context['listing'] = page
    return render(request, 'bs3/docs/view_agreements.html', context)
@login_required(login_url=LOGIN_URL)
def delete_application(request):
    """Delete an application (POST 'id'); on failure return to its page,
    on success log a Spy DELETE record and go back to the agreement.
    """
    application = get_object_or_404(Application, pk=request.POST.get('id'))
    # Remember the parent agreement before the application disappears.
    agreement = application.agreement
    try:
        application.delete()
    except Exception:  # narrowed from a bare except: don't mask SystemExit/KeyboardInterrupt
        messages.add_message(request, messages.ERROR, "Ошибка при удалении документа.")
        return redirect(view_application, application_id=application.pk)
    Spy().log(object=application, action=Spy.DELETE, user=request.user)
    messages.add_message(request, messages.SUCCESS, "Документ удален")
    return redirect(view_agreement, agreement_id=agreement.pk)
@login_required(login_url=LOGIN_URL)
def search_agreement(request):
    """Full-text search over agreements (GET 'search')."""
    query = request.GET.get('search', None)
    found = AgreementSearch().search(query)
    context = dict(agreements=found, search_string=query, tab="search")
    return render(request, 'bs3/docs/view_agreements.html', context)
| [
"[email protected]"
] | |
5730b69a0802e196a75c473d7b494b184765e4a9 | 20952210ba89eac3e7d74a8a4c35eef48037c659 | /eventex/subscriptions/tests/test_admin.py | 33e893e5549911f60d871cb733fd6d11f5a3025b | [] | no_license | cpatrickalves/eventex | 181491f6ec978597704cc66dbc8f897cb3f9869b | dd8233ef6d9a6b3278d2388d254d8f512c3c0819 | refs/heads/master | 2020-09-28T05:53:16.635145 | 2017-01-16T18:17:04 | 2017-01-16T18:17:04 | 67,431,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | from unittest.mock import Mock
from django.test import TestCase
from eventex.subscriptions.admin import SubscriptionModelAdmin, Subscription, admin
class SubscriptionModelAdminTest(TestCase):
    """Tests for the mark_as_paid admin action on SubscriptionModelAdmin."""
    def setUp(self):
        # Seed the DB with one unpaid subscription and build the ModelAdmin.
        Subscription.objects.create(name='Patrick alves', cpf='12345678901',
                                    email='[email protected]', phone='91-00003333')
        self.model_admin = SubscriptionModelAdmin(Subscription, admin.site)
    def test_has_action(self):
        ''' Action mark_as_paid should be installed '''
        self.assertIn('mark_as_paid', self.model_admin.actions)
    def test_mark_all(self):
        ''' It should mark all selected subscriptions as paid '''
        self.call_action()
        self.assertEqual(1, Subscription.objects.filter(paid=True).count())
    def test_message(self):
        '''It should send a message to the user'''
        # Run the action with message_user mocked out and inspect the call.
        mock = self.call_action()
        mock.assert_called_once_with(None, '1 inscrição foi marcada como paga.')
    def call_action(self):
        # Build a queryset over every subscription.
        queryset = Subscription.objects.all()
        mock = Mock()
        # Patch message_user so the action's user feedback can be asserted.
        old_message_user = SubscriptionModelAdmin.message_user
        SubscriptionModelAdmin.message_user = mock
        # Invoke the admin action (request arg unused by the mock, hence None).
        self.model_admin.mark_as_paid(None, queryset)
        # Restore the original method so the mock doesn't leak across tests.
        SubscriptionModelAdmin.message_user = old_message_user
        return mock
"[email protected]"
] | |
b56b95773ecf1e085ce8d479c4276dc62671e7b1 | 5d77027f93fe251388bfc52d6da553a7f23cafa6 | /umm-python/app/domain/composite_solution_map.py | 35d93302bb202be074b3e1dc66929b7eab7b14fc | [
"Apache-2.0"
] | permissive | suomitek/cubeai | d86e1d57fec4cb30ac5de38f613d1d0de9bd2808 | cc4c0f5f445a552d239910da63944307c1f06e37 | refs/heads/master | 2022-12-08T13:12:57.279914 | 2020-08-29T18:12:40 | 2020-08-29T18:12:40 | 291,183,681 | 0 | 0 | Apache-2.0 | 2020-08-29T02:11:57 | 2020-08-29T02:11:56 | null | UTF-8 | Python | false | false | 272 | py | class CompositeSolutionMap:
    def __init__(self):
        # Primary key of the mapping row.
        self.id = None
        # UUID of the parent (composite) solution.
        self.parentUuid = None
        # UUID of the child solution contained in the composite.
        self.childUuid = None
    def from_record(self, record):
        # Populate fields from an indexable record in the order
        # (id, parentUuid, childUuid) — presumably a DB row; verify caller.
        self.id = record[0]
        self.parentUuid = record[1]
        self.childUuid = record[2]
| [
"[email protected]"
] | |
b6d0c2267d52b5f791315417a324c2657e86e1db | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-dataform/samples/generated_samples/dataform_v1beta1_generated_dataform_list_workflow_invocations_async.py | dde7ff28eefb1bca3666f553e8ef38cb254977af | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,967 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListWorkflowInvocations
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataform
# [START dataform_v1beta1_generated_Dataform_ListWorkflowInvocations_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataform_v1beta1
async def sample_list_workflow_invocations():
    """List workflow invocations under a parent resource (generated sample)."""
    # Create a client
    client = dataform_v1beta1.DataformAsyncClient()
    # Initialize request argument(s)
    request = dataform_v1beta1.ListWorkflowInvocationsRequest(
        parent="parent_value",
    )
    # Make the request (returns an async pager; pages are fetched lazily)
    page_result = client.list_workflow_invocations(request=request)
    # Handle the response
    async for response in page_result:
        print(response)
# [END dataform_v1beta1_generated_Dataform_ListWorkflowInvocations_async]
| [
"[email protected]"
] | |
cec1ff8ea345d786e98431b3d65d0e89ae236165 | 5cbf2ddb902eb7fb0da3c8b91de768d0848c06be | /pages/pages_project/urls.py | cb52bf5d771a4354bc949b2e398f5dbf824970f3 | [] | no_license | conectis360/django-begginer2.0 | 60b031594ba4f9c359916e3912518bc1e2a3335c | 429d352084787d7dd0aca4fd310c75478e497852 | refs/heads/master | 2022-04-26T18:57:53.591800 | 2019-08-17T05:02:28 | 2019-08-17T05:02:28 | 202,827,107 | 0 | 0 | null | 2022-04-22T22:19:24 | 2019-08-17T02:50:36 | Python | UTF-8 | Python | false | false | 829 | py | """pages_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('pages.urls')),  # delegate all remaining routes to the pages app
]
| [
"[email protected]"
] | |
d608a0cf8b8a9c849b83ab55e2264e12e37f0771 | f8a010ac8a9f9689b22d18f50a3e23d5d695b0e1 | /Modeling/FitSeqPredModel.py | fcd4e4d709f422b2fb3120cce5f4dabcff0b0948 | [] | no_license | janesunflower/SpatioTemporalPredictiveModeling | 79cb81931075165e32d131d3e00681adcdea6785 | f28a481550ca54b79ebc124ca77adfcf4ec28b7f | refs/heads/master | 2020-03-22T19:08:55.835039 | 2017-08-09T22:02:48 | 2017-08-09T22:02:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,671 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 10 09:43:54 2017
@author: xiaomuliu
"""
import numpy as np
#from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn import preprocessing
#NOTE: scikit learn v0.18 changed 'cross_validation' module and 'grid_search' module to 'model_selection'
from sklearn.model_selection import GridSearchCV
#def _SeqData_scaler(X, scaling='standardize', mean=None, sd=None):
# # scaling Structured sequential data for LSTM models
# # input X is of shape (#samples, time steps, #features)
# # output y is of shape (#samples,)
# # scaling applied w.r.t the 3rd dimension
# if scaling=='standardize':
# if mean is None:
# mean = np.apply_over_axes(np.mean, X, [0,1])
# if sd is None:
# sd = np.apply_over_axes(np.std, X, [0,1])
# elif scaling=='minmax':
# if mean is None:
# mean = np.apply_over_axes(np.min, X, [0,1])
# if sd is None:
# sd = (np.apply_over_axes(np.max, X, [0,1])-np.apply_over_axes(np.min, X, [0,1]))
# sd[sd==0] = 1 # if min and max values are the same
#
# X_scaled = (X-mean) / sd
# return X_scaled, mean, sd
#def fit_pred(X_train, y_train, X_test, clf, clf_name, tune_param_dict, CV_obj, scaling='standardize'):
# """
# Tune model('clf')'s parameters in tune_param_dicts
# using training data and cross validation. Then fit the entire training data
# using the optimal parameters and predict the test data
# """
# if scaling is not None:
# X_train, m, sd = _SeqData_scaler(X_train,scaling)
# X_test, _, _ = _SeqData_scaler(X_test,scaling, m, sd)
#
# pipe = Pipeline(steps=[(clf_name, clf)])
# estimator = GridSearchCV(pipe, tune_param_dict, cv=CV_obj, scoring='roc_auc', n_jobs=1)
# estimator.fit(X_train, y_train);
# if hasattr(clf,'predict_proba'):
# y_scores = estimator.best_estimator_.predict_proba(X_test)
# elif hasattr(clf,'decision_function'):
# y_scores = estimator.best_estimator_.decision_function(X_test)
#
# if y_scores.ndim>1:
# y_scores = y_scores[:,1] # Some classifier return scores for both classes
#
# return dict(pred_score=y_scores, CV_result=estimator.cv_results_, best_param=estimator.best_params_)
def model_fit(X_train, y_train, clf, clf_name, tune_param_dict, CV_obj, scaling='standardize'):
    """
    Tune model('clf')'s parameters in tune_param_dict using training data
    and cross validation (grid search, ROC-AUC scoring), then fit the
    entire training data using the optimal parameters.

    :param X_train: training feature matrix
    :param y_train: training labels
    :param clf: estimator to tune
    :param clf_name: pipeline step name; tune_param_dict keys must be
        prefixed with '<clf_name>__'
    :param tune_param_dict: grid-search parameter grid
    :param CV_obj: cross-validation splitter
    :param scaling: 'standardize', 'minmax', or None for no scaling
    :return: dict with the fitted best estimator, full CV results and the
        best parameter setting
    """
    scaler = None
    if scaling is not None:
        if scaling == 'standardize':
            scaler = preprocessing.StandardScaler()
        elif scaling == 'minmax':
            scaler = preprocessing.MinMaxScaler()
    # Bug fix: the scaler pipeline used to be unconditionally overwritten by
    # a second `pipe = Pipeline(steps=[(clf_name, clf)])` assignment, so the
    # requested scaling was never applied.
    pipe = Pipeline(steps=[('standardize', scaler), (clf_name, clf)]) if scaler is not None else \
           Pipeline(steps=[(clf_name, clf)])
    estimator = GridSearchCV(pipe, tune_param_dict, cv=CV_obj, scoring='roc_auc', n_jobs=1)
    estimator.fit(X_train, y_train);
    return dict(model=estimator.best_estimator_, CV_result=estimator.cv_results_, best_param=estimator.best_params_)
def model_pred(X_test, model):
    """Score X_test with a fitted model and return the positive-class scores.

    Uses predict_proba when available, otherwise decision_function; when
    the model returns one column per class, only the second (positive)
    column is kept.
    """
    if hasattr(model, 'predict_proba'):
        scores = model.predict_proba(X_test)
    elif hasattr(model, 'decision_function'):
        scores = model.decision_function(X_test)
    # Some classifiers return scores for both classes; keep the positive one.
    return scores[:, 1] if scores.ndim > 1 else scores
if __name__ == '__main__':
    # Command-line driver: parse file paths and hyper-parameters from the
    # arguments, fit the model on the training region, then score the test
    # chunks and save prediction scores + sample masks to disk.
    import re
    import time
    import cPickle as pickle
    from ModelSpec import get_model_params, get_cv_obj
    import sys
    sys.path.append('..')
    import StructureData.LoadData as ld
    from Misc.ComdArgParse import ParseArg
    args = ParseArg()
    infiles = args['input']
    outpath = args['output']
    params = args['param']
    # Pull individual file paths out of the concatenated 'input' string.
    group_pkl = re.search('(?<=group=)([\w\./]+)',infiles).group(1)
    train_data = re.search('(?<=traindata=)([\w\./]+)',infiles).group(1)
    test_data = re.search('(?<=testdata=)([\w\./\s]+)(?=cluster)',infiles).group(1) # File names are seperated by whitespace
    test_data_list = np.array(test_data.rstrip().split('\n'))
    # sort test data list by chunk No (parsed from the 'chunk<N>' file-name part)
    chunkNo_list = np.zeros(len(test_data_list)).astype(int)
    for idx, fn in enumerate(test_data_list):
        chunkNo_list[idx] = int(re.search('(?<=chunk)(\d+)',fn).group(1))
    new_idx = np.argsort(chunkNo_list)
    test_data_list = test_data_list[new_idx]
    cluster_pkl = re.search('(?<=cluster=)([\w\./]+)',infiles).group(1)
    district_pkl = re.search('(?<=district=)([\w\./]+)',infiles).group(1)
    filePath_save = outpath if outpath is not None else './SharedData/ModelData/'
    # Assign parameters
    target_crime = re.search('(?<=targetcrime=)(\w+)',params).group(1)
    clf_name = re.search('(?<=model=)(\w+)',params).group(1)
    kfolds = int(re.search('(?<=kfolds=)(\d+)',params).group(1))
    r_seed = int(re.search('(?<=rseed=)(\d+)',params).group(1))
    train_region = re.search('(?<=trainregion=)([A-Za-z]+)',params).group(1)
    test_region = re.search('(?<=testregion=)([A-Za-z]+)',params).group(1)
    # If more than one cluster/district is provided (connected by '_'), the union of their regions will be assumed
    cluster_Nos = {'train':None,'test':None}
    district_Nos = {'train':None,'test':None}
    if train_region != 'city':
        train_region_num_str = re.search('(?<=trainregionNo=)([\d_]+)',params).group(1)
        if train_region == 'cluster':
            cluster_Nos['train'] = map(int,train_region_num_str.split('_'))
        elif train_region == 'district':
            district_Nos['train'] = train_region_num_str.split('_')
    else:
        train_region_num_str = ''
    if test_region != 'city':
        test_region_num_str = re.search('(?<=testregionNo=)([\d_]+)',params).group(1)
        if test_region == 'cluster':
            cluster_Nos['test'] = map(int,test_region_num_str.split('_'))
        elif test_region == 'district':
            district_Nos['test'] = test_region_num_str.split('_')
    else:
        test_region_num_str = ''
    CV_skf = get_cv_obj(kfolds,r_seed)
    scaling = 'minmax'
    filename_dict = dict(group=group_pkl)
    if cluster_pkl != 'NA':
        filename_dict['cluster'] = cluster_pkl
    if district_pkl != 'NA':
        filename_dict['district'] = district_pkl
    #----------------------------------------#
    mask_sample_region = {'train':train_region,'test':test_region}
    start = time.time()
    # NOTE: a large commented-out branch that handled non-'city' train/test
    # regions via ld.load_paired_train_test_seq_data was removed here; only
    # the 'city'/'city' path below is active.
    sample_mask = {}
    if train_region=='city':
        filename_dict['train'] = train_data
        X_train, y_train = ld.load_struct_data_h5(filename_dict['train'],target_crime,'Label',split='train')
        clf = get_model_params(clf_name,rand_seed=r_seed,X=X_train)['model']
        tuning_params = get_model_params(clf_name,rand_seed=r_seed,X=X_train)['tuning_params']
        tuning_param_dicts = dict([(clf_name+'__'+key, val) for key,val in tuning_params.items()])
        # train models
        fitting = model_fit(X_train, y_train, clf, clf_name, tuning_param_dicts, CV_skf, scaling)
        model, cv_results, best_params = fitting['model'], fitting['CV_result'], fitting['best_param']
        print('CV results ('+train_region+train_region_num_str+'):')
        print(cv_results)
        print('Best_parameters ('+train_region+train_region_num_str+'):')
        print(best_params)
        sample_mask['train']=np.ones(len(X_train)).astype(bool)
    if test_region=='city':
        # test models by test sample chunks
        pred_scores_stacked = []
        for fn in test_data_list:
            filename_dict['test'] = fn
            X_test, y_test = ld.load_struct_data_h5(filename_dict['test'],target_crime,'Label', split='test')
            pred_scores_stacked.append(model_pred(X_test,model))
        pred_scores_stacked = np.hstack(pred_scores_stacked)
        sample_mask['test']=np.ones(len(pred_scores_stacked)).astype(bool)
    end = time.time()
    print('Elapsed time: %.1f' % (end - start))
    # save prediction scores
    score_save = filePath_save+'PredScore_'+train_region+train_region_num_str+'_'+test_region+test_region_num_str+'.csv'
    np.savetxt(score_save,pred_scores_stacked,delimiter=',')
    mask_save = filePath_save+'SampleMask_'+train_region+train_region_num_str+'_'+test_region+test_region_num_str+'.pkl'
    with open(mask_save,'wb') as out_file:
        pickle.dump(sample_mask, out_file, pickle.HIGHEST_PROTOCOL)
"[email protected]"
] | |
fdc0f41cf2ed69d4d148067fd373352e36deb85e | a1e94de283e5fbb5d7546cf3673101f6df4aee21 | /python/tf_seg_model.py | b88f1faec83677ef7b7a653b26cb3b8354d80508 | [] | no_license | gongxijun/tfqieci | 81a35eb53e928b409459fbbb99083b69cc54eb01 | ae9abb200427382b0c3ef09d8eef0ecc1c5538a3 | refs/heads/master | 2020-04-16T17:31:27.238862 | 2019-01-15T04:41:23 | 2019-01-15T04:41:23 | 165,778,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,739 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
----------------------------------
Version : ??
File Name : tf_seg_model.py
Description :
Author : xijun1
Email : [email protected]
Date : 2018/11/13
-----------------------------------
Change Activiy : 2018/11/13
-----------------------------------
"""
__author__ = 'xijun1'
import tensorflow as tf
from tfmodel import TfModel
from sentence_breaker import SentenceBreaker
import math
import codecs
import numpy as np
from viterbi_decode import *
import esmre
import esm
FLAGS = tf.app.flags.FLAGS
# Graph node names used to pull tensors out of the frozen segmentation model,
# plus embedding-table sharding/size settings and the word2vec file path.
tf.app.flags.DEFINE_string("transition_node_name", "transitions", "the transitions node in graph model")
tf.app.flags.DEFINE_string("scores_node_name", "Reshape_7", "the final emission node in graph model")
tf.app.flags.DEFINE_string("input_node_name", "input_placeholder", "the input placeholder node in graph model")
tf.app.flags.DEFINE_integer("num_shards" ,4 ,"number of vecs ")
tf.app.flags.DEFINE_integer("embedding_size",200, "embedding size")
tf.app.flags.DEFINE_string("w2v_path","/data0/xijun1/tfqieci/vec.txt", "word2vector path")
# Default paths to the vocabulary file and the trained-model directory.
basic_vocab = '/data0/xijun1/tfqieci/models/bbasic_vocab.txt'
model_dir = '/data0/xijun1/tfqieci/logs/'
class CaseInsensitiveDict(dict):
    """A dict whose string keys are matched case-insensitively.

    Keys are normalized to lower case on every access, so stores, lookups,
    membership tests and ``get`` treat 'Word' and 'word' as the same entry.
    """

    def __setitem__(self, key, value):
        super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)

    def __getitem__(self, key):
        return super(CaseInsensitiveDict, self).__getitem__(key.lower())

    def __contains__(self, key):
        # Bug fix: `in` previously used dict.__contains__ with the raw key,
        # so mixed-case membership tests (e.g. in load_vocab's
        # `word not in pVocab_map`) missed existing entries.
        return super(CaseInsensitiveDict, self).__contains__(key.lower())

    def get(self, key, default=None):
        # Keep .get() consistent with the case-insensitive __getitem__.
        return super(CaseInsensitiveDict, self).get(key.lower(), default)
def load_vocab(path, pVocab_map):
    """
    Load a "<word>\\t<id>" vocabulary file into pVocab_map.

    Lines whose word is the sentence-boundary token "</s>" are skipped and
    existing entries are never overwritten. On a malformed line the problem
    is logged and None is returned (aborting the load), matching the
    original contract; otherwise the populated mapping is returned.

    :param path: path to the tab-separated vocabulary file (UTF-8)
    :param pVocab_map: mapping filled in place (word -> int id)
    :return: pVocab_map on success, None on a malformed line
    """
    for line in codecs.open(path, mode="rb", encoding="utf-8"):
        line = line.strip("\n")
        line = line.strip("\r")
        arr_words = line.split("\t")
        if len(arr_words) != 2:
            # Bug fix: the message used '%s:%d' placeholders with str.format,
            # which left them unfilled; use %-interpolation instead.
            print("line len not comformed to dimension:%s:%d\n" % (line, len(arr_words)))
            return
        word = arr_words[0]
        if word in ["</s>"]:
            continue
        if word not in pVocab_map:
            pVocab_map[word] = int(arr_words[1])
    return pVocab_map
class TfSegModel:
def __init__(self):
"""
@todo 暂时不增加自定义字典功能
"""
self.max_sentence_len_ = None
self.breaker_ = None
self.model = None
self.transitions_ = list()
self.vocab_ = CaseInsensitiveDict()
self.scores_ = None
self.bp_ = None
self.scanner_ = esm.Index()
self.num_tags_ = 0
self.tagger_ = None
def loadUserDict(self, userDictPath):
"""
自定义字典
:param userDictPath:
:return:
"""
for line in codecs.open(userDictPath, mode="rb", encoding="utf-8"):
line = line.strip("\n")
line = line.strip("\r")
arr_words = line.split("\t")
if len(arr_words) != 2:
print "line len not comformed to dimension:%s:%d\n".format(line, len(arr_words))
return
word = arr_words[0]
if word in ["</s>"]:
continue
def load_w2v(self,num_shards, path, expectDim):
with open(path, "r") as fp:
print("load data from:", path)
line = next(fp)
line =line.strip()
ss = line.split(" ")
total = int(ss[0])
dim = int(ss[1])
assert (dim == expectDim),"dim:%d , expectDim: %d".format(dim,expectDim)
ws = []
mv = np.zeros(dim , dtype=np.float) #[0 for i in range(dim)]
second = -1
for t,line in enumerate(fp):#@todo total
if ss[0] in [ '<UNK>',"<unk>"]:
second = t
line = line.strip()
ss = line.split(" ")
assert (len(ss) == (dim + 1))
vals = map(float , ss[1:])
#for i in range(1, dim + 1):
# fv = float(ss[i])
# mv[i - 1] += fv
# vals.append(fv)
mv+= vals
ws.append(vals)
if len(ws)%50000==0:
print ("wordtvec data loading :",len(ws))
#if len(ws) > 50000 :
# break
mv /= total
#second = len(ws)
while len(ws)%num_shards != 0:
ws.append(mv)
#?~^?~J| ?~@个UNK?~M置
#mv /=total
assert (second != -1)
# append one more token , maybe useless
#ws.append(mv)
if second != 1:
t = ws[1]
ws[1] = ws[second]
ws[second] = t
print ("loading commpleted .....")
print("make array 2d to 3d")
total = len(ws)
range_size = total / num_shards
begin_ = 0
ends_ = range_size
ws = np.asarray(ws, dtype=np.float32)
sub_ws= []
for i in xrange(0 , num_shards ):
begin_ = i*range_size
if (i+1)*range_size < total :
ends_ = (i+1)*range_size
else:
ends_ = total
assert ends_ - begin_ == range_size
sub_ws.append( ws[ int(begin_) : int(ends_) , ])
return np.array(sub_ws ,dtype = np.float32)
def LoadModel(self, modelPath, vocabPath, maxSentenceLen, userDictPath):
self.max_sentence_len_ = maxSentenceLen
self.breaker_ = SentenceBreaker(maxSentenceLen);
self.model = TfModel()
self.model.restore(modelPath,"best_model-50000")
self.w2v = self.load_w2v( FLAGS.num_shards , FLAGS.w2v_path, FLAGS.embedding_size)
x_tensor = self.model.session.graph.get_tensor_by_name(name= FLAGS.transition_node_name + ":0")
print "Reading from layer", x_tensor.name
self.words =[]
with tf.device("/cpu:0"):
for i in range( 0 , FLAGS.num_shards):
words_tensor_i = self.model.session.graph.get_tensor_by_name(name="words-%02d:0" % i)
self.words.append( words_tensor_i )
sess = self.model.session;
flat_tensor = tf.reshape(x_tensor, [-1]) # 将多维的数据转换成一维
count = sess.run(tf.size(flat_tensor))
self.num_tags_ = int(math.sqrt(count))
print "got num tag:", self.num_tags_
self.transitions_ = sess.run(x_tensor)
#print self.transitions_
# load vocab
self.vocab_ = load_vocab(vocabPath, self.vocab_)
print " Total word: ", len(self.vocab_)
self.scores_ = np.zeros([2, self.num_tags_], dtype=np.float32)
self.bp_ = np.zeros([self.max_sentence_len_ , self.num_tags_], dtype=np.float32)
# @todo add dict function
def Segment(self, sentences, pTopResults):
"""
切分句子
:param sentences:
:param pTopResults:
:return:
"""
assert (sentences is not None and len(sentences) > 0), "sentence can not be empty"
input_tensor = self.model.session.graph.get_tensor_by_name(name=FLAGS.input_node_name + ":0")
sess = self.model.session;
input_tensor_mapped = np.zeros(shape=[len(sentences), self.max_sentence_len_], dtype=np.int32)
for (k, words) in enumerate(sentences):
len_words = len(words)
print words.encode('utf8')
if len_words <= 0:
print "zero length str"
return
if len_words > self.max_sentence_len_:
len_words = self.max_sentence_len_
for i in range(0, len_words, 1):
word = words[i]
print word.encode("utf8") ,self.vocab_["<UNK>"]
if word in self.vocab_:
print k,i,self.vocab_[word]
input_tensor_mapped[k][i] = self.vocab_[word]
else:
input_tensor_mapped[k][i] = self.vocab_["<UNK>"]
for i in range(len_words, self.max_sentence_len_, 1):
input_tensor_mapped[k][i] = 0
print "----- sentence tensor: {}".format(input_tensor_mapped[k])
# tf.assign(input_tensor, input_tensor_mapped)
pre_output_tensors = self.model.session.graph.get_tensor_by_name(name=FLAGS.scores_node_name + ":0")
feed_dict_p = {input_tensor:input_tensor_mapped};
for i in xrange(0, FLAGS.num_shards ):
feed_dict_p[ self.words[ i ] ] = self.w2v[i];
predictions = sess.run(pre_output_tensors, feed_dict=feed_dict_p);
for (k, words) in enumerate(sentences):
len_words = len(words)
if len_words <= 0:
print "zero length str"
return
# @todo 需要增加自定义字典功能
resultTags = list()
self.scores_=np.zeros([2, self.num_tags_], dtype=np.float32)
self.bp_ = np.zeros([ self.max_sentence_len_ , self.num_tags_], dtype=np.float32)
self.bp_, self.scores_, resultTags = get_best_path(predictions, k, len_words, self.transitions_,
self.bp_,self.scores_, resultTags, self.num_tags_)
#print self.scores_
#print self.bp_
assert len_words == len(resultTags), "num tag should equals setence len"
resEle = list()
start = 0
for j in xrange(0, len_words, 1):
if resultTags[len_words - j - 1] == 0:
if start < j:
resEle.append((start, j - start))
resEle.append((j, 1))
start = j + 1
elif resultTags[len_words - j - 1] == 1:
if start < j:
resEle.append((start, j - start))
start = j
elif resultTags[len_words - j - 1] == 2:
continue
elif resultTags[len_words - j - 1] == 3:
resEle.append((start, j - start + 1))
start = j + 1
else:
print "Unkonw tag:", resultTags[len_words - j - 1]
if start < len_words:
resEle.append((start, len_words - start))
pTopResults.append(resEle)
return pTopResults
def SegmentpTags(self, sentence, pTopResults, pTags):
"""
:param sentence:
:param pTopResults:
:param pTags:
:return:
"""
assert (sentence is not None and len(sentence.decode('utf8')) > 0), "sentence can not be empty"
sentences = list()
sentences = self.breaker_.breakSentences(sentence, sentences)
if len(sentences) < 1:
return None
topResults = list()
topResults = self.Segment(sentences, topResults)
for (k, words) in enumerate(sentences):
len_nn = len(topResults[k])
todo = list()
print words.encode("utf8")
print topResults[k]
for i in xrange(0, len_nn, 1):
pTopResults.append(words[int(topResults[k][i][0]): int(topResults[k][i][0] + topResults[k][i][1])])
todo.append(words[int(topResults[k][i][0]): int(topResults[k][i][0] + topResults[k][i][1])])
# if ( pTags is not None ) && self.
return pTopResults
if __name__ == '__main__':
dstr = "安装好tensorflow,切换到kcws代码目录";
print dstr.decode('utf8')[0:3].encode('utf8')
tfmodel_ = TfSegModel();
tfmodel_.LoadModel(model_dir, basic_vocab, 80, None)
pTopResults = list()
pTopResults = tfmodel_.SegmentpTags("中华人民共和国", pTopResults, None)
for seg in pTopResults:
print seg
| [
"[email protected]"
] | |
e33d27a85a6cb1e15658fad8e903d6c2f9211338 | fd3750f3830329b10ef20e3b24272cbdbd3b025f | /servidor1.py | 50d682394dfd8d8c4809f8178ec3eab4d6ec63d3 | [] | no_license | AlvaroCS1023/Cliente-Servidor | 208f94d0c8b772ac9a12f63dc8e10d3b64ef957c | 7527b427f2041974c5c271aa6277ec7aac277994 | refs/heads/main | 2023-01-24T19:50:53.147026 | 2020-12-01T06:05:43 | 2020-12-01T06:05:43 | 317,441,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | import socket
import threading
import sys
import pickle
class Servidor():
"""docstring for Servidor"""
def _init_(self, host="localhost", port=9000):
self.clientes = []
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((str(host), int(port)))
self.sock.listen(10)
self.sock.setblocking(False)
aceptar = threading.Thread(target=self.aceptarCon)
procesar = threading.Thread(target=self.procesarCon)
aceptar.daemon = True
aceptar.start()
procesar.daemon = True
procesar.start()
while True:
msg = input('->')
if msg == 'salir':
self.sock.close()
sys.exit()
else:
pass
def msg_to_all(self, msg, cliente):
for c in self.clientes:
try:
if c != cliente:
c.send(msg)
except:
self.clientes.remove(c)
def aceptarCon(self):
print("aceptarCon iniciado")
while True:
try:
conn, addr = self.sock.accept()
conn.setblocking(False)
self.clientes.append(conn)
except:
pass
def procesarCon(self):
print("Procesar Con iniciado")
while True:
if len(self.clientes) > 0:
for c in self.clientes:
try:
data = c.recv(1024)
if data:
self.msg_to_all(data,c)
except:
pass
s = Servidor()
| [
"[email protected]"
] | |
1ca728aa0a5257ab2f309ad100c10869fffcc888 | f272656b91e70f0e514009d70d566dcb390bc277 | /ozgun_liste.py | 172bdc9e796f305b747145f43ed0102e18354f3e | [] | no_license | mfarukoz/8.hafta_odevler-Fonksiyonlar | 65a7024e847b828629add3e4c3d81559099cb3ae | 1046062340f40dbb4db314c016d2b8423fee8b7d | refs/heads/master | 2020-06-29T13:36:41.096497 | 2019-08-09T01:16:07 | 2019-08-09T01:16:07 | 200,552,385 | 0 | 0 | null | 2019-08-04T23:38:43 | 2019-08-04T23:38:43 | null | UTF-8 | Python | false | false | 293 | py | # coding=utf-8
def ozgun_liste(*arg):
#fonksiyon ismi ve sınırsız parametre tanımlama
liste=set(arg)
#tupel'i set'e dnusturme(tekrarlı elemanlar otomatikman alınmamıs olacak
return print(list(liste))
#return ile sonucun listeye donusturulup ekrana yazdırılması
| [
"[email protected]"
] | |
f928e54f0dac7709a51fa243d1d8ea979e3bc8fd | 188f5f04e4c904213727872ea9a2e3f772d9b3af | /constant_variable_example/1509906484_constant_variable_example.py | adfb3d001f0144a65995ca2c54c02ec8ee73eb2e | [] | no_license | KatarzynaSzlachetka/Individual_project | 43b2379de96a19bc1bde34cc386928bd61700094 | 722dc6d7bd882558b50d7d0837a7f53c7b4709a6 | refs/heads/master | 2021-09-04T06:37:47.141300 | 2018-01-16T20:02:37 | 2018-01-16T20:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | def constant(a,b):
c = "mut"
a = 8.393460901275843
if a<b:
b = 1.7542498485112656
else:
return 9.268545497053575
if c == "mut":
return False
constant(6,1) | [
"[email protected]"
] | |
460a5f4a032121c254f1e60a432315653fa13d15 | 10aa9f2d34080a61e26afabde4fd9a6c0a269fed | /ticketbot.py | 6e0fb8db840041148f3849755054ff495397ab0a | [] | no_license | chetan1728/ticketbot.py2 | cc8e8283ccfac7fa1331707fe1caa4254d93ec4f | 3129838a1e8a54dbc1f700a224b2849b25d8dee1 | refs/heads/master | 2020-04-13T13:36:45.365785 | 2018-12-27T02:47:40 | 2018-12-27T02:47:40 | 163,236,316 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 40,060 | py | import discord
from discord.ext.commands import Bot
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
import asyncio
import platform
import colorsys
import random
import os
import time
from discord.voice_client import VoiceClient
from discord import Game, Embed, Color, Status, ChannelType
Forbidden= discord.Embed(title="Permission Denied", description="1) Please check whether you have permission to perform this action or not. \n2) Please check whether my role has permission to perform this action in this channel or not. \n3) Please check my role position.", color=0x00ff00)
client = Bot(description="DarkBot Bot is best", command_prefix="d!", pm_help = True)
client.remove_command('help')
async def status_task():
while True:
await client.change_presence(game=discord.Game(name='for d!help'))
await asyncio.sleep(5)
await client.change_presence(game=discord.Game(name='with '+str(len(set(client.get_all_members())))+' users'))
await asyncio.sleep(5)
await client.change_presence(game=discord.Game(name='in '+str(len(client.servers))+' servers'))
await asyncio.sleep(5)
@client.event
async def on_ready():
print('Logged in as '+client.user.name+' (ID:'+client.user.id+') | Connected to '+str(len(client.servers))+' servers | Connected to '+str(len(set(client.get_all_members())))+' users')
print('--------')
print('--------')
print('Started Dark BOT')
print('Created by Utkarsh')
client.loop.create_task(status_task())
def is_owner(ctx):
return ctx.message.author.id == "420525168381657090, 395535610548322326"
def is_dark(ctx):
return ctx.message.author.id == "420525168381657090"
def is_shreyas(ctx):
return ctx.message.author.id == "376602841625919488"
def is_gameworld(ctx):
return ctx.message.author.id == "402075464694366211"
def is_ranger(ctx):
return ctx.message.author.id == "304911836460089345"
@client.command(pass_context = True)
@commands.check(is_owner)
async def restart():
await client.logout()
@client.event
async def on_message(message):
await client.process_commands(message)
@client.event
async def on_member_join(member):
print("In our server" + member.name + " just joined")
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(color = discord.Color((r << 16) + (g << 8) + b))
embed.set_author(name='Welcome message')
embed.add_field(name = '__Welcome to Our Server__',value ='**Hope you will be active here. Check Our server rules and never try to break any rules. ',inline = False)
embed.set_image(url = 'https://media.giphy.com/media/OkJat1YNdoD3W/giphy.gif')
await client.send_message(member,embed=embed)
print("Sent message to " + member.name)
channel = discord.utils.get(client.get_all_channels(), server__name='DarkBot Official Server', name='darkbot-servers-join-leave-log')
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title=f'Welcome {member.name} to {member.server.name}', description='Do not forget to check <#474572305192845312> and never try to break any one of them', color = discord.Color((r << 16) + (g << 8) + b))
embed.add_field(name='__Thanks for joining__', value='**Hope you will be active here.**', inline=True)
embed.add_field(name='Your join position is', value=member.joined_at)
embed.set_image(url = 'https://media.giphy.com/media/OkJat1YNdoD3W/giphy.gif')
embed.set_thumbnail(url=member.avatar_url)
await client.send_message(channel, embed=embed)
@client.command(pass_context=True)
@commands.has_permissions(kick_members=True)
async def unbanall(ctx):
server=ctx.message.server
ban_list=await client.get_bans(server)
await client.say('Unbanning {} members'.format(len(ban_list)))
for member in ban_list:
await client.unban(server,member)
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
@commands.cooldown(rate=5,per=86400,type=BucketType.user)
async def access(ctx, member: discord.Member):
role = discord.utils.get(member.server.roles, name='Access')
await client.add_roles(member, role)
embed=discord.Embed(title="User Got Access!", description="**{0}** got access from **{1}**!".format(member, ctx.message.author), color=0xff00f6)
await client.say(embed=embed)
await asyncio.sleep(45*60)
await client.remove_roles(member, role)
@client.command(pass_context=True)
@commands.has_permissions(kick_members=True)
async def getuser(ctx, role: discord.Role = None):
if role is None:
await client.say('There is no "STAFF" role on this server!')
return
empty = True
for member in ctx.message.server.members:
if role in member.roles:
await client.say("{0.name}: {0.id}".format(member))
empty = False
if empty:
await client.say("Nobody has the role {}".format(role.mention))
@client.command(pass_context = True)
async def play(ctx, *, url):
author = ctx.message.author
voice_channel = author.voice_channel
try:
vc = await client.join_voice_channel(voice_channel)
msg = await client.say("Loading...")
player = await vc.create_ytdl_player("ytsearch:" + url)
player.start()
await client.say("Succesfully Loaded ur song!")
await client.delete_message(msg)
except Exception as e:
print(e)
await client.say("Reconnecting")
for x in client.voice_clients:
if(x.server == ctx.message.server):
await x.disconnect()
nvc = await client.join_voice_channel(voice_channel)
msg = await client.say("Loading...")
player2 = await nvc.create_ytdl_player("ytsearch:" + url)
player2.start()
@client.command(pass_context = True)
@commands.check(is_dark)
async def dmall(ctx, *, msg: str):
for server_member in ctx.message.server.members:
await client.send_message(server_member, msg)
await client.delete_message(ctx.message)
@client.command(pass_context = True)
async def stop(ctx):
for x in client.voice_clients:
if(x.server == ctx.message.server):
return await x.disconnect()
return await client.say("I am not playing anyting???!")
@client.command(pass_context=True)
@commands.has_permissions(kick_members=True)
async def joinvoice(ctx):
author = ctx.message.author
channel = author.voice_channel
await client.join_voice_channel(channel)
@client.command(pass_context=True, aliases=['em', 'e'])
async def modmail(ctx, *, msg=None):
channel = discord.utils.get(client.get_all_channels(), name='📬mod-mails📬')
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
color = discord.Color((r << 16) + (g << 8) + b)
if not msg:
await client.say("Please specify a message to send")
else:
await client.send_message(channel, embed=discord.Embed(color=color, description=msg + '\n Message From-' + ctx.message.author.id))
await client.delete_message(ctx.message)
return
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
async def userinfo(ctx, user: discord.Member):
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title="{}'s info".format(user.name), description="Here's what I could find.", color = discord.Color((r << 16) + (g << 8) + b))
embed.add_field(name="Name", value=user.name, inline=True)
embed.add_field(name="ID", value=user.id, inline=True)
embed.add_field(name="Status", value=user.status, inline=True)
embed.add_field(name="Highest role", value=user.top_role)
embed.add_field(name="Joined", value=user.joined_at)
embed.set_thumbnail(url=user.avatar_url)
await client.say(embed=embed)
@client.command(pass_context = True)
@commands.check(is_dark)
async def iamdark(ctx):
author = ctx.message.author
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Utkarsh Kumar')
await client.add_roles(ctx.message.author, role)
print('Added Dark role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context = True)
@commands.check(is_shreyas)
async def iamshreyas(ctx):
author = ctx.message.author
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='ShreyasMF')
await client.add_roles(ctx.message.author, role)
print('Added SHREYAS role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context = True)
@commands.check(is_ranger)
async def iamgameworld(ctx):
author = ctx.message.author
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Gameworld')
await client.add_roles(ctx.message.author, role)
print('Added GAMEWORLD role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context = True)
@commands.check(is_ranger)
async def iamnotranger(ctx):
author = ctx.message.author
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Dark Ranger')
await client.remove_roles(ctx.message.author, role)
print('Removed DarkRanger role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context=True)
async def registerme(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title="Successfully added", description="REGISTERED role", color = discord.Color((r << 16) + (g << 8) + b))
embed.set_image(url = 'https://preview.ibb.co/e3iyap/ezgif_3_7dcc4d6bec.gif')
embed.add_field(name="Enjoy! ", value="Thanks for registering in Mini Militia Tournament", inline=True)
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='W4w tourney')
await client.add_roles(ctx.message.author, role)
print('Added REGISTERED role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context=True)
async def iamcoder(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title="Successfully added", description="Programmer role", color = discord.Color((r << 16) + (g << 8) + b))
embed.add_field(name="Enjoy! ", value="Happy Coding :-). Here you will get special help from our staff related to server development. ", inline=True)
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Programmer')
await client.add_roles(ctx.message.author, role)
print('Added codies role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context=True)
async def iamnotcoder(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title="Successfully removed", description="Programmer role", color = discord.Color((r << 16) + (g << 8) + b))
embed.add_field(name="Enjoy! ", value="Hope you will try our other features as well", inline=True)
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Programmer')
await client.remove_roles(ctx.message.author, role)
print('Removed codies role from ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context=True)
async def iamnotserverdeveloper(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title="Successfully removed", description="Server developer role", color = discord.Color((r << 16) + (g << 8) + b))
embed.add_field(name="Enjoy! ", value="Hope you will try our other features as well", inline=True)
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Server Developer')
await client.remove_roles(ctx.message.author, role)
print('Removed server developer role from ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context=True)
async def iamserverdeveloper(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title="Successfully added", description="Server Developer role", color = discord.Color((r << 16) + (g << 8) + b))
embed.add_field(name="Enjoy! ", value="Happy Server Development. Here you will get special support from our support team related to server development", inline=True)
await client.delete_message(ctx.message)
role = discord.utils.get(ctx.message.server.roles, name='Server Developer')
await client.add_roles(ctx.message.author, role)
print('Added codies role in ' + (ctx.message.author.name))
await client.send_message(author, embed=embed)
@client.command(pass_context = True)
@commands.has_permissions(manage_roles=True)
async def role(ctx, user: discord.Member, *, role: discord.Role = None):
if role is None:
return await client.say("You haven't specified a role! ")
if role not in user.roles:
await client.add_roles(user, role)
return await client.say("{} role has been added to {}.".format(role, user))
if role in user.roles:
await client.remove_roles(user, role)
return await client.say("{} role has been removed from {}.".format(role, user))
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
async def warn(ctx, userName: discord.User, *, message:str):
await client.send_message(userName, "You have been warned for: **{}**".format(message))
await client.say(":warning: __**{0} Has Been Warned!**__ :warning: ** Reason:{1}** ".format(userName,message))
pass
@client.command(pass_context=True)
async def ownerinfo(ctx):
embed = discord.Embed(title="Information about owner", description="Bot Name- DarkBot", color=0x00ff00)
embed.set_footer(text="Copyright@UK Soft")
embed.set_author(name=" Bot Owner Name- DarkLegend#3807,|Sunny Singh|™✓#4856,Tag<!--Back-->#1488\nID:420525168381657090,395535610548322326,399274658027012098")
embed.add_field(name="Site- https://bit.ly/darkbotauth", value="Thanks for adding our bot", inline=True)
await client.say(embed=embed)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def setup(ctx):
author = ctx.message.author
server = ctx.message.server
mod_perms = discord.Permissions(manage_messages=True, kick_members=True, manage_nicknames =True,mute_members=True)
admin_perms = discord.Permissions(ADMINISTRATOR=True)
await client.create_role(author.server, name="Owner", permissions=admin_perms)
await client.create_role(author.server, name="Admin", permissions=admin_perms)
await client.create_role(author.server, name="Senior Moderator", permissions=mod_perms)
await client.create_role(author.server, name="G.O.H")
await client.create_role(author.server, name="Moderator", permissions=mod_perms)
await client.create_role(author.server, name="Muted")
await client.create_role(author.server, name="Friend of Owner")
await client.create_role(author.server, name="Verified")
everyone_perms = discord.PermissionOverwrite(send_messages=False, read_messages=True)
everyone = discord.ChannelPermissions(target=server.default_role, overwrite=everyone_perms)
user_perms = discord.PermissionOverwrite(read_messages=True)
user = discord.ChannelPermissions(target=server.default_role, overwrite=user_perms)
private_perms = discord.PermissionOverwrite(read_messages=False)
private = discord.ChannelPermissions(target=server.default_role, overwrite=private_perms)
await client.create_channel(server, '🎉welcome🎉',everyone)
await client.create_channel(server, '🎯rules🎯',everyone)
await client.create_channel(server, '🎥featured-content🎥',everyone)
await client.create_channel(server, '📢announcements📢',everyone)
await client.create_channel(server, '📢vote_polls📢',everyone)
await client.create_channel(server, 'private_chat',private)
await client.create_channel(server, '🎮general_chat🎮',user)
await client.create_channel(server, '🎮general_media🎮',user)
await client.create_channel(server, '👍bots_zone👍',user)
await client.create_channel(server, '🎥youtube_links🎥',user)
await client.create_channel(server, '🎥giveaway_links🎥',user)
await client.create_channel(server, '🎥other_links🎥',user)
await client.create_channel(server, '🔥Music Zone🔥', type=discord.ChannelType.voice)
await client.create_channel(server, '🔥music_command🔥s',user)
await client.create_channel(server, '🔥Chill Zone🔥', type=discord.ChannelType.voice)
@client.command(pass_context = True)
@commands.has_permissions(manage_nicknames=True)
async def setnick(ctx, user: discord.Member, *, nickname):
await client.change_nickname(user, nickname)
await client.delete_message(ctx.message)
@client.command(pass_context=True)
async def poll(ctx, question, *options: str):
if len(options) <= 1:
await client.say('You need more than one option to make a poll!')
return
if len(options) > 10:
await client.say('You cannot make a poll for more than 10 things!')
return
if len(options) == 2 and options[0] == 'yes' and options[1] == 'no':
reactions = ['👍', '👎']
else:
reactions = ['1\u20e3', '2\u20e3', '3\u20e3', '4\u20e3', '5\u20e3', '6\u20e3', '7\u20e3', '8\u20e3', '9\u20e3', '\U0001f51f']
description = []
for x, option in enumerate(options):
description += '\n {} {}'.format(reactions[x], option)
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(title=question, description=''.join(description), color = discord.Color((r << 16) + (g << 8) + b))
react_message = await client.say(embed=embed)
for reaction in reactions[:len(options)]:
await client.add_reaction(react_message, reaction)
embed.set_footer(text='Poll ID: {}'.format(react_message.id))
await client.edit_message(react_message, embed=embed)
@client.command(pass_context = True)
async def googlefy(ctx, *, msg = None):
if msg.content == "@everyone":
return
if msg.content == "@here":
return
if not msg: await client.say("Please specify a string")
else:
await client.say('http://lmgtfy.com/?q=' + msg)
return
@client.command(pass_context = True)
async def help(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(color = discord.Color((r << 16) + (g << 8) + b))
embed.set_author(name='Help')
embed.set_image(url = 'https://image.ibb.co/caM2BK/help.gif')
embed.add_field(name = '``Our Help Server Link`` ',value ='https://discord.gg/vMvv5rr',inline = False)
embed.add_field(name = 'd!modhelp ',value ='Explaines all the commands which are only usable by Those who has moderation permissions. Like- Manage Nicknames, Manage Messages, Kick/Ban Members,etc.',inline = False)
embed.add_field(name = 'd!generalhelp ',value ='Explaines all the commands which are usable by everyone.',inline = False)
await client.send_message(author,embed=embed)
await client.say('📨 Check DMs For Information')
@client.command(pass_context = True)
async def modhelp(ctx):
author = ctx.message.author
r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
embed = discord.Embed(color = discord.Color((r << 16) + (g << 8) + b))
embed.set_author(name='Moderation Commands Help')
embed.set_image(url = 'https://image.ibb.co/caM2BK/help.gif')
embed.add_field(name = 'd!say(Admin permission required) ',value ='Use it like ``d!say <text>``',inline = False)
embed.add_field(name = 'd!embed(Admin permission required) ',value ='Use it like ``d!embed <text>``',inline = False)
embed.add_field(name = 'd!membercount(Kick members Permission Required) ',value ='Use it like ``d!membercount`` to get membercount',inline = False)
embed.add_field(name = 'd!removemod(Admin Permission Required)',value ='Use it like ``d!removemod @user`` to remove him from mod. Note-You need Moderator role in your server below darkbot to use it.',inline = False)
embed.add_field(name = 'd!makemod(Admin Permission Required)',value ='Use it like ``d!makemod @user`` to make him mod. Note-You need Moderator role in your server below darkbot to use it.',inline = False)
embed.add_field(name = 'd!setup(Admin Permission Required)',value ='Use it to add channels, voice channels and roles if your server is not developed currently and you have just 1-2 channels. Note- Use it only 1 time. If you will use same command again then it will do same thing again .i.e It will add true copy of previous channels + true copy of roles that made in previous command use. So be careful.',inline = False)
embed.add_field(name = 'd!friend(Admin Permission Required) ',value ='Use it like ``d!friend @user`` to give anyone Friend of Owner role',inline = False)
embed.add_field(name = 'd!role(Manage Roles Permission Required)',value ='Use it like ``d!role @user <rolename>``.',inline = False)
embed.add_field(name = 'd!setnick(Manage nickname permission required)',value ='Use it like ``d!setnick @user <New nickname>`` to change the nickname of tagged user.',inline = False)
embed.add_field(name = 'd!english(Kick members Permission Required)',value ='Use it like ``d!english @user`` when someone speaks languages other than English.',inline = False)
embed.add_field(name = 'd!serverinfo(Kick members Permission Required) ',value ='Use it like ``d!serverinfo`` to get server info',inline = False)
embed.add_field(name = 'd!userinfo(Kick members Permission Required) ',value ='Use it like ``d!userinfo @user`` to get some basic info of tagged user',inline = False)
embed.add_field(name = 'd!kick(Kick members Permission Required)',value ='Use it like ``d!kick @user`` to kick any user',inline = False)
embed.add_field(name = 'd!roles(Kick members Permission Required) ',value ='Use it to check roles present in server',inline = False)
embed.add_field(name = 'd!clear(Manage Messages Permission Required)',value ='Use it like ``d!clear <number>`` to clear any message',inline = False)
embed.add_field(name = 'd!mute(Mute members Permission Required)',value ='Use it like ``d!mute @user <time>`` to mute any user',inline = False)
embed.add_field(name = 'd!unmute(Mute members Permission Required) ',value ='Use it like ``d!unmute @user`` to unmute anyone',inline = False)
embed.add_field(name = 'd!ban(Ban members Permission Required) ',value ='Use it like ``d!ban @user`` to ban any user',inline = False)
embed.add_field(name = 'd!rules(Kick members Permission Required)',value ='Use it like ``d!rules @user <violation type>`` to warn user',inline = False)
embed.add_field(name = 'd!warn(Kick members Permission Required)',value ='Use it like ``d!warn @user <violation type>`` to warn any user',inline = False)
embed.add_field(name = 'd!norole(Kick members Permission Required) ',value ='Use it like ``d!norole @user`` to warn anyone if he/she asks for promotion',inline = False)
embed.add_field(name = 'd!getuser(Kick members Permission Required) ',value ='Use it like ``d!getuser @rolename`` to get list of all users having a particular role',inline = False)
await client.send_message(author,embed=embed)
await client.say('📨 Check DMs For Information')
@client.command(pass_context = True)
async def generalhelp(ctx):
    """DM the invoking user an embed listing the bot's general-purpose commands."""
    author = ctx.message.author
    # Random fully-saturated hue for the embed accent colour.
    r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
    embed = discord.Embed(color = discord.Color((r << 16) + (g << 8) + b))
    help_fields = (
        ('d!poll ', 'Use it like ``d!poll "Question" "Option1" "Option2" ..... "Option9"``.'),
        ('d!guess ', 'To play guess game use ``d!guess <number> and number should be between 1-10``'),
        ('d!github ', 'Use it like- ``d!github uksoftworld/DarkBot``'),
        ('d!bottutorial ', 'Use it like ``d!bottutorial <tutorial name by darklegend>``'),
        ('d!dyno ', 'Use it like ``d!d!dyno <dyno command name>``'),
        ('d!donate ', 'Use it to donate us and get a special post on Official DarkBot server.'),
        ('d!ownerinfo ', 'To get basic information about owner.'),
        ('d!sourcecode ', 'Use it to see darkbot sourcecode.'),
        ('d!upvote ', 'Use it to Upvote our bot and help us to grow'),
        ('d!authlink ', 'Use it to get authorizing link to authorize this bot to your server.'),
        ('d!happybirthday @user ', 'To wish someone happy birthday'),
        ('d!technews ', 'Use it to get tech news'),
        ('d!googlefy ', 'Use it like ``d!googlefy <string>``.'),
        ('d!spacenews ', 'Use it to get space news'),
        ('d!phynews ', 'Use it to get physycs'),
        ('d!verify ', 'Use it to get verified role. Note- It needs proper setup.'),
        ('d!flipcoin ', 'Flipps coin'),
        ('d!rolldice ', 'Rolls dice'),
        ('d!avatar @user ', 'Shows avatar'),
    )
    for field_name, field_value in help_fields:
        embed.add_field(name = field_name, value = field_value, inline = False)
    await client.send_message(author, embed=embed)
    await client.say('📨 Check DMs For Information')
@client.command(pass_context=True)
@commands.has_permissions(kick_members=True)
async def kick(ctx,user:discord.Member):
    """Kick *user* from the server, refusing to touch other mods/admins."""
    # Anyone who can kick others is staff; never kick staff.
    if user.server_permissions.kick_members:
        await client.say('**He is mod/admin and i am unable to kick him/her**')
        return
    try:
        await client.kick(user)
        await client.say('{0} was kicked. Good bye {0}!'.format(user.name))
        await client.delete_message(ctx.message)
    except discord.Forbidden:
        await client.say('Permission denied.')
        return
@client.command(pass_context = True)
@commands.has_permissions(manage_messages=True)
async def clear(ctx, number):
    """Bulk-delete the last *number* messages in this channel (plus the command itself)."""
    if ctx.message.author.server_permissions.manage_messages:
        # Validate the argument; int('abc') previously raised an uncaught ValueError.
        try:
            number = int(number)
        except ValueError:
            await client.say('Please give a whole number of messages to delete.')
            return
        mgs = []  # messages collected from the channel log
        # limit = number + 1 so the invoking d!clear message is removed too.
        async for x in client.logs_from(ctx.message.channel, limit = number+1):
            mgs.append(x)
        try:
            await client.delete_messages(mgs)
            await client.say(str(number)+' messages deleted')
        except discord.Forbidden:
            # NOTE(review): ``Forbidden`` is assumed to be an Embed defined
            # earlier in this file — confirm it exists.
            await client.say(embed=Forbidden)
            return
        except discord.HTTPException:
            await client.say('clear failed.')
            return
        # BUG FIX: the original called delete_messages(mgs) a second time here,
        # re-deleting already-deleted messages and raising HTTPException.
@client.command(pass_context=True)
@commands.has_permissions(ban_members=True)
async def ban(ctx,user:discord.Member):
    """Ban *user* from the server, refusing to touch other mods/admins."""
    # Anyone who can ban others is staff; never ban staff.
    if user.server_permissions.ban_members:
        await client.say('**He is mod/admin and i am unable to ban him/her**')
        return
    try:
        await client.ban(user)
        await client.say('{0} was banned. Good bye {0}!'.format(user.name))
    except discord.Forbidden:
        await client.say('Permission denied.')
        return
    except discord.HTTPException:
        await client.say('ban failed.')
        return
@client.command(pass_context=True)
@commands.has_permissions(ban_members=True)
async def unban(ctx):
    """Show the server's ban list and lift the most recent ban.

    NOTE(review): the "Ban list" message is sent before the empty-list
    check, so an empty list prints both messages — presumably intended.
    """
    ban_list = await client.get_bans(ctx.message.server)
    # Show banned users
    await client.say("Ban list:\n{}".format("\n".join([user.name for user in ban_list])))
    # Unban last banned user
    if not ban_list:
        await client.say('Ban list is empty.')
        return
    try:
        # get_bans returns users in API order; the last entry is treated as
        # the most recently banned — TODO confirm ordering guarantee.
        await client.unban(ctx.message.server, ban_list[-1])
        await client.say('Unbanned user: `{}`'.format(ban_list[-1].name))
    except discord.Forbidden:
        await client.say('Permission denied.')
        return
    except discord.HTTPException:
        await client.say('unban failed.')
        return
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def say(ctx, *, msg = None):
    """Repeat *msg* as the bot after deleting the invoking message."""
    await client.delete_message(ctx.message)
    if msg:
        await client.say(msg)
    else:
        await client.say("Please specify a message to send")
    return
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
async def rules(ctx, *, msg = None):
    """Warn the mentioned user to re-read the server rules."""
    await client.delete_message(ctx.message)
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no user was given.
    if not msg:
        await client.say("Please specify a user to warn")
        return
    # Never let the warning ping everyone.
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say(msg + ', Please Read Rules again and never break any one of them again otherwise i will mute/kick/ban you next time.')
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def bans(ctx):
    '''Gets A List Of Users Who Are No Longer With us'''
    banned_users = await client.get_bans(ctx.message.server)
    names = '\n'.join(member.name for member in banned_users)
    listing = discord.Embed(title = "List of The Banned Idiots", description = names, color = 0xFFFFF)
    return await client.say(embed = listing)
@client.command(pass_context=True)
@commands.has_permissions(kick_members=True)
async def serverinfo(ctx):
    '''Displays Info About The Server!'''
    server = ctx.message.server
    roles = [x.name for x in server.role_hierarchy]
    role_length = len(roles)
    if role_length > 50:  # Just in case there are too many roles...
        roles = roles[:50]
        # BUG FIX: show the real total; the original used len() of the
        # already-truncated list, which always printed "50/50".
        roles.append('>>>> Displaying[50/%s] Roles' % role_length)
    roles = ', '.join(roles)
    channelz = len(server.channels)
    # created_at stringifies as 'YYYY-MM-DD HH:MM:SS...'; keep the date part only.
    time = str(server.created_at).split(' ')[0]
    # Random fully-saturated hue for the embed accent colour.
    r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
    join = discord.Embed(description= '%s '%(str(server)),title = 'Server Name', color = discord.Color((r << 16) + (g << 8) + b))
    join.set_thumbnail(url = server.icon_url)
    join.add_field(name = '__Owner__', value = str(server.owner) + '\n' + server.owner.id)
    join.add_field(name = '__ID__', value = str(server.id))
    join.add_field(name = '__Member Count__', value = str(server.member_count))
    join.add_field(name = '__Text/Voice Channels__', value = str(channelz))
    join.add_field(name = '__Roles (%s)__'%str(role_length), value = roles)
    join.set_footer(text ='Created: %s'%time)
    return await client.say(embed = join)
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
async def norole(ctx, *, msg = None):
    """Warn the mentioned user not to ask for promotions."""
    await client.delete_message(ctx.message)
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no user was given.
    if not msg:
        await client.say("Please specify a user to warn")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say(msg + ', Please Do not ask for promotions check Rules again.')
@client.command(pass_context = True)
async def happybirthday(ctx, *, msg = None):
    """Wish the mentioned user a happy birthday with a card image."""
    # BUG FIX: guard against a missing argument first — ``'@here' in None``
    # raised TypeError, and the original also fell through after prompting
    # and crashed concatenating None into the greeting.
    if not msg:
        await client.say("Please specify a user to wish")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say('Happy birthday ' + msg + ' \nhttps://asset.holidaycardsapp.com/assets/card/b_day399-22d0564f899cecd0375ba593a891e1b9.png')
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
async def english(ctx, *, msg = None):
    """Warn the mentioned user to speak English."""
    await client.delete_message(ctx.message)
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no user was given.
    if not msg:
        await client.say("Please specify a user to warn")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say(msg + ', Please do not use language other than **English.**')
@client.command(pass_context = True)
@commands.has_permissions(kick_members=True)
async def hindi(ctx, *, msg = None):
    """Jokingly warn the mentioned user to speak Hindi."""
    await client.delete_message(ctx.message)
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no user was given.
    if not msg:
        await client.say("Please specify a user to warn")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say(msg + ' abe oo angrez ke bacche chup chap hindi me baat kar nahi to mai pagla jaunga')
@client.command(pass_context = True)
async def htmltutorial(ctx, *, msg = None):
    """Point the mentioned user at the HTML programming tutorial site."""
    await client.delete_message(ctx.message)
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no user was given.
    if not msg:
        await client.say("Please specify a user")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    # MESSAGE FIX: the original printed "Welcome@user" with no space.
    await client.say('Welcome ' + msg + ', Please check http://uksoft.000webhostapp.com/Programming-Tutorials/index.html')
@client.command(pass_context = True)
async def github(ctx, *, msg = None):
    """Link to a GitHub repository given as ``owner/repo``."""
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no repo was given.
    if not msg:
        await client.say("Please specify respo. ``Format- https://github.com/uksoftworld/DarkBot``")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say('https://github.com/' + msg)
@client.command(pass_context = True)
async def reactionroles(ctx, *, msg = None):
    """Point the caller at the YAGPDB reaction-roles setup video."""
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when called with no argument.
    if not msg:
        await client.say("Check this video to setup YAGPDB BOT- https://www.youtube.com/watch?v=icAqiw6txRQ")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say('Check this video to setup YAGPDB BOT- https://www.youtube.com/watch?v=icAqiw6txRQ ' + msg)
@client.command(pass_context = True)
async def bottutorial(ctx, *, msg = None):
    """Link to a named tutorial file in the discord.py-tutorial repo."""
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no name was given.
    if not msg:
        await client.say("Tutorial not found or maybe you have mistyped it")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say('https://github.com/uksoftworld/discord.py-tutorial/blob/master/' + msg + '.py')
@client.command(pass_context = True)
async def dyno(ctx, *, msg = None):
    """Link to a named Dyno custom-command file in the dynoCC repo."""
    # BUG FIX: check for a missing argument before substring tests;
    # ``'@here' in None`` raised TypeError when no name was given.
    if not msg:
        await client.say("Command name not found or maybe you have mistyped it")
        return
    if '@here' in msg or '@everyone' in msg:
        return
    await client.say('https://github.com/uksoftworld/dynoCC/blob/master/' + msg)
@client.command(pass_context=True)
async def unverify(ctx):
    """Give the caller the 'Unverified' role (self-service)."""
    await client.delete_message(ctx.message)
    # NOTE(review): returns None if no role named 'Unverified' exists on this
    # server; add_roles would then fail — needs server setup, confirm.
    role = discord.utils.get(ctx.message.server.roles, name='Unverified')
    await client.add_roles(ctx.message.author, role)
@client.command(pass_context=True)
async def verify(ctx):
    """Give the caller the 'Verified' role (self-service)."""
    await client.delete_message(ctx.message)
    # NOTE(review): returns None if no role named 'Verified' exists on this
    # server; add_roles would then fail — needs server setup, confirm.
    role = discord.utils.get(ctx.message.server.roles, name='Verified')
    await client.add_roles(ctx.message.author, role)
@client.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def friend(ctx, user:discord.Member,):
    """Give the mentioned user the 'Friend of Owner' role."""
    await client.delete_message(ctx.message)
    role = discord.utils.get(ctx.message.server.roles, name='Friend of Owner')
    # BUG FIX: use the converted ``user`` argument instead of re-reading
    # ctx.message.mentions[0], which could be a different mention (or crash
    # with IndexError if the argument was given by name/id without a ping).
    await client.add_roles(user, role)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def makemod(ctx, user: discord.Member):
    """Promote *user*: prefix their nickname, add the Moderator role and DM a card."""
    # Prefix the nickname with the Scorpio glyph to mark moderators visually.
    nickname = '♏' + user.name
    await client.change_nickname(user, nickname=nickname)
    # NOTE(review): returns None if no 'Moderator' role exists — confirm setup.
    role = discord.utils.get(ctx.message.server.roles, name='Moderator')
    await client.add_roles(user, role)
    # Random fully-saturated hue for the embed accent colour.
    r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
    embed = discord.Embed(color = discord.Color((r << 16) + (g << 8) + b))
    embed.set_author(name='Congratulations Message')
    embed.add_field(name = '__Congratulations__',value ='**Congratulations for mod.Hope you will be more active here. Thanks for your help and support.**',inline = False)
    embed.set_image(url = 'https://preview.ibb.co/i1izTz/ezgif_5_e20b665628.gif')
    await client.send_message(user,embed=embed)
    await client.delete_message(ctx.message)
@client.command(pass_context = True)
@commands.has_permissions(administrator=True)
async def removemod(ctx, user: discord.Member):
    """Demote *user*: restore the plain nickname and remove the Moderator role."""
    # Drop the '♏' moderator prefix by resetting to the account name.
    nickname = user.name
    await client.change_nickname(user, nickname=nickname)
    role = discord.utils.get(ctx.message.server.roles, name='Moderator')
    await client.remove_roles(user, role)
    await client.delete_message(ctx.message)
@client.command(pass_context = True)
async def botwarncode(ctx):
    """Post the hastebin link to the bot's warn-command source snippet."""
    await client.say('https://hastebin.com/ibogudoxot.py')
    return
@client.command(pass_context=True)
async def guess(ctx, number):
    """Guessing game: the caller guesses 1-10 and the bot reveals the answer."""
    try:
        # BUG FIX: validate the player's input. Previously ``number`` was
        # ignored and randint was wrapped instead, so the ValueError branch
        # was unreachable and any garbage argument was silently accepted.
        int(number)
    except ValueError:
        await client.say("Invalid number")
    else:
        arg = random.randint(1, 10)
        await client.say('The correct answer is ' + str(arg))
@client.command(pass_context=True)
@commands.has_permissions(kick_members=True)
async def roles(context):
    """Displays all of the roles with their ids"""
    roles = context.message.server.roles
    result = "The roles are "
    for role in roles:
        # NOTE(review): role.id is a str in this discord.py generation, so
        # plain concatenation works; servers with many roles may exceed the
        # 2000-character message limit — confirm.
        result += '``' + role.name + '``' + ": " + '``' + role.id + '``' + "\n "
    await client.say(result)
@client.command(pass_context=True, aliases=['server'])
@commands.has_permissions(kick_members=True)
async def membercount(ctx, *args):
    """
    Shows stats and information about current guild.
    ATTENTION: Please only use this on your own guilds or with explicit
    permissions of the guilds administrators!
    """
    if ctx.message.channel.is_private:
        # BUG FIX: this referenced the undefined name ``bot`` and raised
        # NameError whenever the command was used in a DM.
        await client.delete_message(ctx.message)
        return
    g = ctx.message.server
    membs = str(len(g.members))
    membs_on = str(len([m for m in g.members if not m.status == Status.offline]))
    users = str(len([m for m in g.members if not m.bot]))
    users_on = str(len([m for m in g.members if not m.bot and not m.status == Status.offline]))
    bots = str(len([m for m in g.members if m.bot]))
    bots_on = str(len([m for m in g.members if m.bot and not m.status == Status.offline]))
    created = str(g.created_at)
    em = Embed(title="Membercount")
    # Monospace block: total (online) counts per category.
    em.description = "```\n" \
                     "Members: %s (%s)\n" \
                     " Users: %s (%s)\n" \
                     " Bots: %s (%s)\n" \
                     "Created: %s\n" \
                     "```" % (membs, membs_on, users, users_on, bots, bots_on, created)
    await client.send_message(ctx.message.channel, embed=em)
    await client.delete_message(ctx.message)
@client.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def embed(ctx, *args):
    """
    Send the given words back as an embed with a random accent colour
    (title, footer and fields may be added later).
    """
    argstr = " ".join(args)
    # Random fully-saturated hue for the embed accent colour.
    r, g, b = tuple(int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1))
    text = argstr
    color = discord.Color((r << 16) + (g << 8) + b)
    await client.send_message(ctx.message.channel, embed=Embed(color = color, description=text))
    await client.delete_message(ctx.message)
# The bot token is read from the environment so it never lives in source control.
client.run(os.getenv('Token'))
| [
"[email protected]"
] | |
e0e558727efb272b2f35cc42380b49cd5cb301c2 | 7dd2bac7616b295e0a95538007879595051c8f5c | /DocParser/docparser.py | 5819382f43936e594bb4d91ca2f52294c25ef5f5 | [
"MIT"
] | permissive | mrchocoborider/smallprojects | 40a2b6c338b8a0174d1188e3670ae368fda63441 | 2335484e1644a8d9ec76e62f1c3b8c6b1d9db93a | refs/heads/master | 2021-05-04T20:25:03.533585 | 2019-01-29T04:58:22 | 2019-01-29T04:58:22 | 119,809,625 | 0 | 0 | MIT | 2019-01-29T04:58:23 | 2018-02-01T08:57:37 | Python | UTF-8 | Python | false | false | 6,349 | py | import re
import docx
from docx.shared import Pt
import copy
import sys
reg = re.compile(r'[a-zA-Z]+')
#This was written specifically to help my fiancee who had huge documents of text in
#Chinese with English translation, but wanted to remove empty lines, change English font size to 11
#and also change the order of English and Chinese lines.
#Most of these methods will have to be customized to new documents
#Be aware of that fact!
def reOrder(doc1, doc2):
    """Read *doc1* (.docx path) and write *doc2* with each Chinese/English
    pair swapped so the English line(s) come first.

    Lines are classified with ``reg``: any ASCII letter means English,
    otherwise Chinese. The first two paragraphs are copied unchanged.

    NOTE(review): Chinese/English paragraphs still buffered when the document
    ends are never flushed, and an English line arriving before any Chinese
    line is dropped — this matched the author's one-off document; confirm
    before reusing on other files.
    """
    doc = docx.Document(doc1)
    fullText = []
    ctxt = []   # Chinese paragraphs waiting to be emitted
    etxt = []   # English paragraphs waiting to be emitted
    # Whether the previous content line was "eng" or "chi" — consecutive
    # lines of one language must be kept together.
    lastLine = ""
    for k, i in enumerate(doc.paragraphs):
        # First two lines (title block) should be appended without change.
        if k == 0 or k == 1:
            fullText.append(i)
        if k != 0 and k != 1:
            # English line
            if reg.search(i.text) != None:
                thisLine = "eng"
                if lastLine == "chi" and thisLine == "eng":
                    # Emit English first, then the Chinese that was on hold.
                    fullText.append(i)
                    if len(ctxt) > 0:
                        for c in ctxt:
                            fullText.append(c)
                    # BUG FIX: a dead ``if k <= 10:`` whose only body was a
                    # commented-out print made this module a SyntaxError;
                    # it has been removed.
                    etxt = []
                    ctxt = []
                    lastLine = "eng"
                # Still not sure if there will be more English!
                elif lastLine == "eng" and thisLine == "eng":
                    etxt.append(i)
                    lastLine = "eng"
            # Chinese line
            if reg.search(i.text) == None:
                thisLine = "chi"
                # Very first content line: start buffering Chinese.
                if lastLine == "" and thisLine == "chi":
                    ctxt.append(i)
                    lastLine = "chi"
                # Language switched English -> Chinese: flush queued English,
                # then queued Chinese, before buffering this line.
                elif thisLine == "chi" and lastLine == "eng":
                    if len(etxt) > 0:
                        for e in etxt:
                            fullText.append(e)
                    if len(ctxt) > 0:
                        for c in ctxt:
                            fullText.append(c)
                    etxt = []
                    ctxt = []
                    ctxt.append(i)
                    lastLine = "chi"
                # More Chinese: keep buffering until the language changes.
                elif thisLine == "chi" and lastLine == "chi":
                    ctxt.append(i)
                    lastLine = "chi"
    new = docx.Document()
    for j in fullText:
        new.add_paragraph(j.text)
    new.save(doc2)
#this is for changing font size of English only lines
def fontSize(doc1, doc2):
    """Set every run containing Latin letters (English text) to 11 pt.

    Reads *doc1* and saves the modified document as *doc2* (both paths).
    """
    document = docx.Document(doc1)
    for paragraph in document.paragraphs:
        # Font size lives on run objects, not on the paragraph itself;
        # some documents split one visual line across several runs.
        for run in paragraph.runs:
            if reg.search(run.text) is not None:
                run.font.size = Pt(11)
    document.save(doc2)
#delete all blank lines in the document
def delEmpty(doc1, doc2):
    """Copy *doc1* to *doc2* with blank lines inside each paragraph removed.

    This document keeps all its text in a handful of large paragraphs, so
    blank "lines" are embedded newlines rather than empty paragraphs.
    """
    doc = docx.Document(doc1)
    new = docx.Document()
    for paragraph in doc.paragraphs:
        text = paragraph.text
        # BUG FIX: the original ``replace('\n\n', '\n')`` only halved runs of
        # blank lines (and missed \n\n\n etc.); drop every whitespace-only
        # line instead, as the function's name promises.
        text = '\n'.join(line for line in text.split('\n') if line.strip())
        new.add_paragraph(text)
    new.save(doc2)
#get arguments so we can say which method to call from command line
#first arg should be del, order, or font, to call the method
#second and third args should be
def main():
    """Interactive entry point: ask which transform to run and on which files."""
    #check for python 3 or 2.x
    py3 = sys.version_info[0] > 2
    # raw_input only exists on Python 2; pick the right prompt function once
    # instead of duplicating the whole body per version (as the original did).
    ask = input if py3 else raw_input
    #message prompt for method
    mtdmsg = """If you would like to reorder the text, type: order;
    to remove blank lines, type: del;
    to change font size, type: font. \n"""
    mtd = ask(mtdmsg)
    doc1 = ask("what is the name of the file you would like to change?\n")
    doc2 = ask("what should the name of the file be after changing?\n")
    if mtd == 'order':
        reOrder(doc1, doc2)
        print('finished!')
    elif mtd == 'del':
        delEmpty(doc1, doc2)
        print('finished!')
    elif mtd == 'font':
        fontSize(doc1, doc2)
        print('finished!')
    else:
        print('something went wrong, please try again and make sure you choose one of the 3 commands.')
# Run interactively only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
d0421b44a25e1116f523a219dbfed2dc534f5518 | f5485d955fa942711f337286a86f05374ea58a72 | /migrations/versions/cbe0f844650d_.py | 95626d4f1467e756a549807f7e637093f64f7ef7 | [] | no_license | bingfengjiyu/flask_demo | f6245d7e8696b3bc492ed3f922948bd01319be30 | 3feef9fcee6e3c8f8fae46fb0fb5f5a8bdb82f4d | refs/heads/master | 2020-03-24T11:04:03.012486 | 2018-07-28T12:16:01 | 2018-07-28T12:16:01 | 142,674,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | """empty message
Revision ID: cbe0f844650d
Revises:
Create Date: 2018-07-07 12:10:18.303153
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cbe0f844650d'  # this migration's id
down_revision = None  # None: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add a nullable 64-char ``email`` column to ``tbl_authors``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tbl_authors', sa.Column('email', sa.String(length=64), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the ``email`` column from ``tbl_authors``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('tbl_authors', 'email')
    # ### end Alembic commands ###
| [
"[email protected]"
] | |
3d63334af90eb146e2448b01450835ace0a19062 | 037969c85708a1964490a6445a5d616c362000e1 | /py-misc/prepare_categories.py | f5ed2d69a466f469cc3b7e8fdd40c43a0d582649 | [
"MIT"
] | permissive | thehemen/dc-contest-2021-r2 | a2e6fe76097b28325137439e606edbca3715a1bd | 601ec2dbdf410f91b573e9ad9aee6188b588ec6a | refs/heads/main | 2023-09-03T08:19:27.475523 | 2021-05-02T15:01:57 | 2021-05-02T15:01:57 | 429,071,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,804 | py | import sys
import json
import pprint
import random
import argparse
from PySide6 import QtCore, QtWidgets, QtGui
from PySide6.QtCore import Qt as qt
from data_record import DataRecord
def fix_by_newline(s, threshold=110):
    """Hard-wrap *s* so no output line exceeds *threshold* characters.

    Every (possibly empty) input line is preserved; lines longer than
    *threshold* are split into threshold-sized chunks. The result always
    ends with a newline.
    """
    wrapped = []
    for line in s.split('\n'):
        # BUG FIX: the old ``len(line) // threshold + 1`` chunk count emitted
        # a spurious empty chunk (a blank line) whenever the line length was
        # a positive multiple of threshold. Use a ceiling division instead,
        # with a minimum of one chunk so empty lines survive.
        chunks = max(1, -(-len(line) // threshold))
        for i in range(chunks):
            wrapped.append(line[i * threshold:(i + 1) * threshold])
    return '\n'.join(wrapped) + '\n'
class TgChannelManager:
    """Drives the two-step (meta-category, then category) labelling session.

    The user first picks a meta-category, then one of its categories; only
    the second pick is committed. Each committed label is appended to the
    log file as "<index> <category_name>" so a session can be resumed.
    """
    def __init__(self, start_index, category_dict, tg_channels, log_name):
        self.__category_dict = category_dict      # meta-category -> [categories]
        self.__tg_channels = tg_channels          # channels to annotate
        self.__is_main_category_used = True       # True: next pick is a meta-category
        self.__index = start_index                # current channel position
        self.__category_indices = []              # picks for the current channel
        self.__log_name = log_name
        self.__last_category_name = ''
        self.__loaded_indices = set()             # indices already labelled (from log)
        # Create the file if it doesn't exist
        with open(log_name, 'a') as f:
            pass
        with open(log_name, 'r') as f:
            for line in f.readlines():
                idx = int(line.split(' ')[0])
                self.__loaded_indices.add(idx)
    def forward(self, category_index):
        """Record one pick; on the second pick, log it and advance.

        Returns False when the pick is out of range or the channel list is
        exhausted, True otherwise.
        """
        # Bounds check against the currently relevant category list.
        if not self.__is_main_category_used:
            meta_category_name = list(self.__category_dict.keys())[self.__category_indices[0]]
            if category_index >= len(self.__category_dict[meta_category_name]):
                return False
        elif category_index >= len(self.__category_dict):
            return False
        self.__category_indices.append(category_index)
        if not self.__is_main_category_used:
            # Second pick: commit the label and move to the next channel.
            self.__is_main_category_used = True
            self.__log()
            if self.__index < len(self.__tg_channels) - 1:
                self.__index += 1
                return True
            else:
                return False
        else:
            # First pick: wait for the sub-category next.
            self.__is_main_category_used = False
            return True
    def back(self):
        """Step back to the previous channel; returns False at the start."""
        self.__is_main_category_used = True
        if self.__index > 0:
            self.__index -= 1
            return True
        else:
            return False
    def reset(self):
        """Discard the current channel's partial picks."""
        self.__is_main_category_used = True
        self.__category_indices.clear()
    def skip(self):
        """Skip the current channel without logging; returns False at the end."""
        self.reset()
        if self.__index < len(self.__tg_channels) - 1:
            self.__index += 1
            return True
        else:
            return False
    def get_index(self):
        return self.__index
    def get_channel(self):
        return self.__tg_channels[self.__index]
    def is_main_category_used(self):
        return self.__is_main_category_used
    def get_last_category_name(self):
        return self.__last_category_name
    def get_all_index_count(self):
        return len(self.__loaded_indices)
    def __log(self):
        # Resolve the two stored picks to a category name and append it to
        # the log; dict insertion order makes the keys() indexing stable.
        meta_category_name = list(self.__category_dict.keys())[self.__category_indices[0]]
        category_name = self.__category_dict[meta_category_name][self.__category_indices[1]]
        self.__last_category_name = category_name
        with open(self.__log_name, 'a') as f:
            f.write(f'{self.__index} {category_name}\n')
        self.__loaded_indices.add(self.__index)
        self.__category_indices.clear()
    def __len__(self):
        return len(self.__tg_channels)
class MyWidget(QtWidgets.QWidget):
    """Main annotation window: channel preview on the left, category picker
    on the right, driven entirely by digit-key presses."""
    # Digit keys 1..9,0 map positionally onto category slots below.
    digit_keys = [qt.Key_1, qt.Key_2, qt.Key_3, qt.Key_4, qt.Key_5, qt.Key_6, qt.Key_7, qt.Key_8, qt.Key_9, qt.Key_0]
    # Labels shown next to each category (1-9 then 0, matching the keyboard row).
    category_indices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    def __init__(self, categoryDict, tgChannelManager):
        super().__init__()
        self.channelTitle = QtWidgets.QLabel('', alignment=QtCore.Qt.AlignLeft)
        self.channelDescription = QtWidgets.QLabel('', alignment=QtCore.Qt.AlignLeft)
        self.channelRecentPosts = QtWidgets.QListWidget()
        # Keep keyboard focus on the widget so keyPressEvent always fires.
        self.channelRecentPosts.setFocusPolicy(qt.NoFocus)
        self.channelRecentPosts.setHorizontalScrollBarPolicy(qt.ScrollBarAlwaysOff);
        self.leftDescriptionLayout = QtWidgets.QVBoxLayout()
        self.leftDescriptionLayout.addWidget(self.channelTitle)
        self.leftDescriptionLayout.addWidget(self.channelDescription)
        self.leftDescriptionLayout.addStretch()
        self.leftChannelLayout = QtWidgets.QVBoxLayout()
        self.leftChannelLayout.addLayout(self.leftDescriptionLayout, 1)
        self.leftChannelLayout.addWidget(self.channelRecentPosts, 12)
        self.leftLayout = QtWidgets.QVBoxLayout()
        self.leftLayout.addLayout(self.leftChannelLayout, 3)
        # Page 0: meta-categories; pages 1..N: categories of each meta-category.
        self.rightStackedLayout = QtWidgets.QStackedLayout()
        self.rightStackedLayout.addWidget(self.get_category_layout(categoryDict.keys()))
        for categoryKey in categoryDict.keys():
            self.rightStackedLayout.addWidget(self.get_category_layout(categoryDict[categoryKey]))
        self.rightLayout = QtWidgets.QVBoxLayout()
        self.rightLayout.addLayout(self.rightStackedLayout)
        self.rightLayout.addStretch()
        self.contentLayout = QtWidgets.QHBoxLayout()
        self.contentLayout.addLayout(self.leftLayout, 1)
        self.contentLayout.addLayout(self.rightLayout, 1)
        self.statusBar = QtWidgets.QStatusBar()
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addLayout(self.contentLayout)
        self.layout.addWidget(self.statusBar)
        self.setLayout(self.layout)
        self.tgChannelManager = tgChannelManager
        self.update_channel()
    def keyPressEvent(self, event):
        """Digits pick a (meta-)category; Backspace goes back, Space skips,
        Escape cancels the current half-finished pick."""
        keyPressed = event.key()
        if keyPressed in self.digit_keys:
            keyIndex = self.digit_keys.index(keyPressed)
            status = self.tgChannelManager.forward(keyIndex)
            if status:
                # After a meta-category pick, show its sub-category page;
                # after a committed label, return to the meta-category page.
                if not self.tgChannelManager.is_main_category_used():
                    self.rightStackedLayout.setCurrentIndex(keyIndex + 1)
                else:
                    self.rightStackedLayout.setCurrentIndex(0)
                self.update_channel()
        elif keyPressed == qt.Key_Backspace:
            self.tgChannelManager.back()
            self.rightStackedLayout.setCurrentIndex(0)
            self.update_channel()
        elif keyPressed == qt.Key_Space:
            self.tgChannelManager.skip()
            self.rightStackedLayout.setCurrentIndex(0)
            self.update_channel()
        elif keyPressed == qt.Key_Escape:
            self.tgChannelManager.reset()
            self.rightStackedLayout.setCurrentIndex(0)
    def get_category_layout(self, categories):
        """Build one stacked page: a label per category with its hotkey."""
        container = QtWidgets.QWidget()
        layout = QtWidgets.QVBoxLayout(container)
        for i, category in enumerate(categories):
            category_index = self.category_indices[i]
            label = QtWidgets.QLabel(f'[{category_index}] {category}', alignment=QtCore.Qt.AlignLeft)
            layout.addWidget(label)
        return container
    def update_channel(self):
        """Refresh status bar and left pane for the manager's current channel."""
        index = self.tgChannelManager.get_index()
        statusBarText = f'{index} / {len(self.tgChannelManager)}'
        all_index_count = self.tgChannelManager.get_all_index_count()
        statusBarText += f' ({all_index_count})'
        if len(self.tgChannelManager.get_last_category_name()) > 0:
            statusBarText += f' {self.tgChannelManager.get_last_category_name()}'
        self.statusBar.showMessage(statusBarText)
        channel = self.tgChannelManager.get_channel()
        self.channelTitle.setText(channel.title)
        self.channelDescription.setText(fix_by_newline(channel.description))
        self.channelRecentPosts.clear()
        for i in range(len(channel.recent_posts)):
            recentPost = fix_by_newline(channel.recent_posts[i])
            self.channelRecentPosts.addItem(QtWidgets.QListWidgetItem(recentPost))
self.channelRecentPosts.addItem(QtWidgets.QListWidgetItem(recentPost))
if __name__ == '__main__':
    # Category hierarchy: {meta-category: [category, ...]}.
    with open('categories.json', 'r') as f:
        categoryDict = json.load(f)
    parser = argparse.ArgumentParser()
    parser.add_argument('--start_index', type=int, default=0)
    parser.add_argument('--dataset_name', default='../../preprocessed/dc-concat-ar-1k-translated.txt')
    parser.add_argument('--out_log_name', default='../../outputs/dc-concat-ar-1k-ground-truth.txt')
    args = parser.parse_args()
    pp = pprint.PrettyPrinter(indent=4)
    # One DataRecord (channel) per dataset line.
    tgChannels = []
    with open(args.dataset_name, 'r') as f:
        for line in f.readlines():
            tgChannel = DataRecord(line, pp)
            tgChannels.append(tgChannel)
    tgChannelManager = TgChannelManager(args.start_index, categoryDict, tgChannels, args.out_log_name)
    app = QtWidgets.QApplication([])
    widget = MyWidget(categoryDict, tgChannelManager)
    widget.setWindowTitle('Data Clustering Contest 2021 - Markup')
    widget.show()
    # External Qt stylesheet keeps styling out of the code.
    with open('style.qss', 'r') as f:
        _style = f.read()
    app.setStyleSheet(_style)
    sys.exit(app.exec_())
| [
"[email protected]"
] | |
dd103ca7787849a3bf82828cff7fb9713f864637 | 1ac3359142adf0f508f1f526e90d4d5ac917ebda | /scripts/evaluation_script.py | 4da0ce576fbd14c3b3a3a80951b1afe2cf2519d9 | [] | no_license | yasminsarkhosh/fyp2021p04g04 | 6cc5b11a796ddc6d5df3e200f677aa8e58655d7e | cbc0290b091ccd400fb4c27b81111e0ced31668b | refs/heads/main | 2023-06-29T11:01:34.287728 | 2021-06-06T17:38:20 | 2021-06-06T17:38:20 | 362,124,047 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,670 | py | # usage: evaluaton_script.py [-h] [--tweeteval_path TWEETEVAL_PATH]
# [--predictions_path PREDICTIONS_PATH] [--task TASK]
# optional arguments:
# -h, --help: show this help message and exit
# --tweeteval_path: Path to TweetEval dataset
# --predictions_path: Path to predictions files
# --task: Use this to get single task detailed results
# (emoji|emotion|hate|irony|offensive|sentiment|stance)
#
from sklearn.metrics import classification_report
import argparse
import os
# The seven TweetEval sub-tasks; gold labels and predictions are laid out
# on disk per task name.
TASKS = [
    'emoji',
    'emotion',
    'hate',
    'irony',
    'offensive',
    'sentiment',
    'stance']
# Stance detection is further split into five per-target sub-datasets,
# which are concatenated and scored as a single task.
STANCE_TASKS = [
    'abortion',
    'atheism',
    'climate',
    'feminist',
    'hillary']
def _read_labels(path):
    """Return one label per line from *path* (the trailing blank entry after
    the final newline is dropped, matching the original parsing)."""
    # BUG FIX: files were opened with bare open().read() and never closed;
    # use a context manager so handles are released deterministically.
    with open(path) as f:
        return f.read().split("\n")[:-1]

def load_gold_pred(args):
    """Return (gold, pred) label lists for ``args.task``.

    Stance is special-cased: its five per-target sub-datasets are read and
    concatenated in STANCE_TASKS order so they can be scored as one task.
    """
    tweeteval_path = args.tweeteval_path
    predictions_path = args.predictions_path
    task = args.task
    if 'stance' in task:
        gold = []
        pred = []
        for stance_t in STANCE_TASKS:
            gold.append(_read_labels(os.path.join(tweeteval_path, task, stance_t, 'test_labels.txt')))
            pred.append(_read_labels(os.path.join(predictions_path, task, stance_t + '.txt')))
        # flatten lists of lists
        gold = [p for each_target in gold for p in each_target]
        pred = [p for each_target in pred for p in each_target]
    else:
        gold = _read_labels(os.path.join(tweeteval_path, task, 'test_labels.txt'))
        pred = _read_labels(os.path.join(predictions_path, task + '.txt'))
    return gold, pred
def single_task_results(args):
    """Score ``args.task`` with its official TweetEval metric.

    Returns (score, full classification report dict); the score is -1 when
    the task's files are missing or scoring fails for any reason.
    """
    task = args.task
    tweeteval_result = -1
    results = {}
    # Official metric per task; order matters and mirrors the original
    # if/elif chain (substring match against the task name).
    metric_by_task = (
        ('emoji', lambda r: r['macro avg']['f1-score']),      # Macro f1
        ('emotion', lambda r: r['macro avg']['f1-score']),    # Macro f1
        ('hate', lambda r: r['macro avg']['f1-score']),       # Macro f1
        ('irony', lambda r: r['1']['f1-score']),              # Irony class f1
        ('offensive', lambda r: r['macro avg']['f1-score']),  # Macro f1
        ('sentiment', lambda r: r['macro avg']['recall']),    # Macro Recall
        # Macro F1 of 'favor' and 'against' classes
        ('stance', lambda r: (r['1']['f1-score'] + r['2']['f1-score']) / 2),
    )
    try:
        gold, pred = load_gold_pred(args)
        results = classification_report(gold, pred, output_dict=True)
        for task_key, metric in metric_by_task:
            if task_key in task:
                tweeteval_result = metric(results)
                break
    except Exception as ex:
        print(f"Issues with task {task}: {ex}")
    return tweeteval_result, results
def is_all_good(all_tweeteval_results):
return all([r != -1 for r in all_tweeteval_results.values()])
if __name__=="__main__":
parser = argparse.ArgumentParser(description='TweetEval evaluation script.')
parser.add_argument('--tweeteval_path', default="./datasets/", type=str, help='Path to TweetEval datasets')
parser.add_argument('--predictions_path', default="./predictions/", type=str, help='Path to predictions files')
parser.add_argument('--task', default="", type=str, help='Indicate this parameter to get single task detailed results')
args = parser.parse_args()
if args.task == "":
all_tweeteval_results = {}
# Results for each task
for t in TASKS:
args.task = t
all_tweeteval_results[t], _ = single_task_results(args)
# Print results (score=-1 if some results are missing)
print(f"{'-'*30}")
if is_all_good(all_tweeteval_results):
tweeteval_final_score = sum(all_tweeteval_results.values())/len(all_tweeteval_results.values())
else:
tweeteval_final_score = -1
for t in TASKS:
# Each score
print(f"{t}: {all_tweeteval_results[t]}")
# Final score
print(f"{'-'*30}\nTweetEval Score: {tweeteval_final_score}")
else:
# Detailed results of one single task (--task parameter)
tweeteval_resut, results = single_task_results(args)
for k in results:
print(k, results[k])
print(f"{'-'*30}\nTweetEval Score ({args.task}): {tweeteval_resut}")
| [
"[email protected]"
] | |
49a688770364efb8e0684b72da81da98ad348e05 | c53770c8e3012fa78b1c2c98ce68b192cc432f0c | /crypto-exploitation/padding-oracle-attack/POCs/oracle-demo-all-blocks.py | b45017efafd5e11f284758d4f6d41067c0d85263 | [] | no_license | 1m6h0st/exploitation-training | 2cf100be50f37aa0f71adae8ea6acf1af197a447 | 0375284ec19b19dd278ab344e6a3031c94608303 | refs/heads/master | 2023-08-20T17:03:52.727037 | 2021-04-05T15:37:46 | 2021-04-05T15:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,553 | py |
"""
In this demo, we demonstrate a Padding Oracle Attacking, guessing the the plaintext from the ciphertext.
For decrypting the very first block, we also need the IV, which in this demo assume that we have it
"""
import random
from copy import copy
from Crypto.Cipher import AES
from Crypto.Util import Padding
BLOCK_SIZE = 16
KEY = b'0123456789ABCDEF'
IV = b'\x00'*BLOCK_SIZE
assert(len(KEY) == BLOCK_SIZE)
assert(len(IV) == BLOCK_SIZE)
PLAINTEXT = """Divinity: Original Sin 2 is one of the most beautiful looking isometric RPGs and that hasn't changed.
When most RPGs let you push on and experience almost everything in a single playthrough, it is impossible to experience
all that this one has to offer in one play, or maybe even two or three. The staff of PC Gamer voted it as their game of the year
for 2017, where it was also nominated for the "Best Co-Op Game" award. The staff of GameSpot voted it as their fifth best,
while Eurogamer ranked it 11th on their list of the "Top 50 Games of 2017".
Readers and staff of Game Informer gave it the "Best PC Exclusive", "Best Turn-Based Combat", and "Best Side-Quests" awards,
and also placed it second for the "Best Co-op Multiplayer" award.
The game was also nominated for "Role-Playing Game of the Year" at the D.I.C.E. Awards, for "Game Engineering" and
"Game, Franchise Role Playing" at the NAVGTR Awards, and for "Best Sound Design for an Indie Game" and
"Best Music for an Indie Game" at the Game Audio Network Guild Awards; and won the award for "Multiplayer" at the
14th British Academy Games Awards. It was also nominated for "Music Design" and "Writing or Narrative Design" at the
2018 Develop Awards. The PlayStation 4 and Xbox One versions were nominated for "Best RPG" at the 2018 Game Critics Awards,
and won the award for "Best Role-Playing Game" at Gamescom 2018, whereas its other nomination was for "Best Strategy Game".
"""
PLAINTEXT = bytearray(PLAINTEXT, encoding='utf-8')
def encrypt(plaintext):
cipher = AES.new(KEY, AES.MODE_CBC, IV)
return cipher.encrypt(Padding.pad(plaintext, BLOCK_SIZE, style='pkcs7'))
def decrypt(ciphertext):
cipher = AES.new(KEY, AES.MODE_CBC, IV)
return Padding.unpad(cipher.decrypt(ciphertext), BLOCK_SIZE, style='pkcs7')
def blockify(buffer):
return [buffer[i:i+BLOCK_SIZE] for i in range(0, len(buffer), BLOCK_SIZE)]
def printBlokcs(blocks, header=None, footer=None):
if header is not None:
print(header)
block_sizes = [len(block) for block in blocks]
print('Index i: ' + ''.join([' %02d ' % i for i in range(0, max(block_sizes))]))
for i in range(0, len(blocks)):
block = blocks[i]
print("Block {}: ".format(i) + ''.join(['0x%02x ' % b for b in block]))
if footer is not None:
print(footer)
ciphertext = encrypt(PLAINTEXT)
assert(decrypt(ciphertext) == PLAINTEXT)
ciphertext_blocks = blockify(ciphertext)
ciphertext_blocks.insert(0, IV) # Prepend the IV so that we can decrypt the first block without treating it as a special case
printBlokcs(blockify(Padding.pad(PLAINTEXT, BLOCK_SIZE, style='pkcs7')), "\nPadded Plaintext blocks", "")
printBlokcs(ciphertext_blocks, "Ciphertext blocks", "")
print("Attempting decryption of Ciphertext")
decrypted_blocks = [ [0xffffffff for _ in range(BLOCK_SIZE)] for _ in ciphertext_blocks ]
for ctb_idx in range(len(ciphertext_blocks)-1, 0, -1): #ctb_idx == ciphertext block index
proccessed_block = [ 0xffffffff for _ in range(BLOCK_SIZE)] # Block which holds the values for the Decrypt(KEY, ciphertext_blocks[ctb_idx])
for padding in range(1, BLOCK_SIZE+1):
n = (BLOCK_SIZE-1) - (padding-1) # index of the first element in the c0_prime block that IS associated with the padding attack
r = random.randint(1, 255)
for g in range(0, 256):
# We remove 0 from the iteration space, sine it is the zero elemenet in the XOR operator, i.e. a^0 = a.
# We want the first n "irrelevant-random" elements of c0_prime to be different than c0
# If there are consequtive elements with the same value, we also want them in c0_prime to have different values
c0_prime = bytearray([ciphertext_blocks[ctb_idx-1][i] ^ (1 + ((r+i) % 255)) for i in range(0, n)])
c0_prime = c0_prime + bytes([ (ciphertext_blocks[ctb_idx-1][n] ^ g ^ padding) ])
c0_prime = c0_prime + bytes([ (ciphertext_blocks[ctb_idx-1][n+i] ^ proccessed_block[n+i] ^ padding) for i in range(1, padding) ])
c1 = ciphertext_blocks[ctb_idx]
assert(len(c0_prime) == BLOCK_SIZE)
try:
# Here, we do not need to send all the previous ciphertext blocks up until ciphertext_blocks[ctb_idx-2] in order to
# achieve our attack.
plaintext_prime = decrypt(c0_prime+c1)
plaintext_prime_blocks = blockify(plaintext_prime)
if padding < BLOCK_SIZE:
assert(len(plaintext_prime_blocks) == 2)
elif padding == BLOCK_SIZE:
assert(len(plaintext_prime_blocks) == 1)
else:
assert(False and "Unreachable Statement")
# All the decrypted blocks are garbage. We are only interested in the fact that the decryption was successful,
# i.e. we did not get a padding error. This means that our current guess g satisfies the condition
# g == plaintext_blocks[ctb_idx][n], i.e. g equals to the Byte#(ctb_idx*BLOCK_SIZE)+n of the plaintext.
# Note that plaintext_blocks is not the same with plaintext_prime_blocks.
# plaintext_blocks = blockify(pad(PLAINTEXT))
proccessed_block[n] = g
break
except ValueError as err:
assert(str(err) == "Padding is incorrect." or str(err) == "PKCS#7 padding is incorrect.")
print("Successful decryption of block %02d!" % ctb_idx)
decrypted_blocks[ctb_idx] = proccessed_block
decrypted_blocks = decrypted_blocks[1:] # Discard the first block, which was inserted due to the addition of the IV
printBlokcs(decrypted_blocks, "\nSuccessful decryption of all blocks! Decrypted data are: ", "")
printBlokcs(blockify(Padding.pad(PLAINTEXT, BLOCK_SIZE, style='pkcs7')), "\nExpected blocks", "")
flat_plaintext = b''
for block in decrypted_blocks:
flat_plaintext += bytearray(block)
flat_plaintext = Padding.unpad(flat_plaintext, BLOCK_SIZE, style='pkcs7').decode('ascii')
print(flat_plaintext)
| [
"[email protected]"
] | |
66084b78d97cf842d8272e1d96df1f910dffd84d | 07df38b4ab8733c12611397b8de7b9d51015351b | /bienesmuebles/views.py | d52ed2aa57074af35ad3ff70293ba949f57d58af | [] | no_license | bitia1/nuevo | f60bf4985617924d398e9961d97e832f0c2955dc | 24d700f6ad866facd4b390925e8246a6b6f14645 | refs/heads/master | 2020-03-21T08:42:33.801157 | 2018-06-23T00:49:47 | 2018-06-23T00:49:47 | 138,360,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.views.generic import View
from .forms import MuebleForm
# Create your views here.
def homeview(request):
return render(request, 'homeBM.html')
class ArtuculoView(View):
''' OrganizacionEstudio '''
form_class= MuebleForm
initial=''
errores=[]
template_name = 'register.html'
cont=0
respuesta=0
def get(self, request):
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
usuario=request.user
if form.is_valid():
form=form.save()
form.usuario=usuario
form.save()
cont=Contador(form)
respuesta=valores(0,cont)
print("contando los no ",cont, "Respuesta ",respuesta)
form.diagnostico=respuesta
form.save(update_fields=["diagnostico"])
print("guardado")
return HttpResponseRedirect("/anexo13/gracias/")
else:
self.errores.append(form.errors)
print ("No es valido el formulario")
print(self.errores)
return render(request, self.template_name, {'form': form})
| [
"[email protected]"
] | |
e8fcc78c1879609c22a8e380e5cb5530520e3343 | c724a87ccb758398ac333577c637ed6ae9419efb | /DataMining/group_project/main.py | 83463d5d78062c8d40ef8bb6c026e490965079dd | [] | no_license | Inkozi/School | 9b33028cd2b6d5dffafc652fd4866b77130fed50 | e3a878e4f00b9066c2ae9432d7030d3ca3692b12 | refs/heads/master | 2021-10-23T23:28:37.057769 | 2018-03-25T02:07:55 | 2018-03-25T02:07:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,491 | py |
import Statistics
import numpy as np
import pandas as pd
import operator
import copy
def writeToFile(df,s):
subs = ['COCAINE_REF', 'ETHANOL_REF', 'HEROINE_REF', 'METHAMPHETAMINE_REF', 'MORPHINE_REF', 'NICOTINE_REF']
for i in range(len(s.pValues)):
f = open(subs[i], 'w+')
for j in range(len(s.pValues[i])):
if (s.pValues[i][j] < .001/46000): #and s.pValues[i][j] > 0.000001):
f.write(str(j) + '\n')
f.close()
def writeToFile2(df,s):
subs = ['COCAINE', 'ETHANOL', 'HEROINE', 'METHAMPHETAMINE', 'MORPHINE', 'NICOTINE']
for i in range(len(s.pValues)):
f = open(subs[i], 'w+')
for j in range(len(s.pValues[i])):
if (s.pValues[i][j] < .001/46000): #and s.pValues[i][j] > 0.000001):
f.write(df['geneID'][j] + '\n')
f.close()
'''
def cleanLabels(df):
labels = list(df)
labels.remove('geneID')
remov = []
for label in labels:
delete = True
for i in label:
if (i == '1' or i == '8'):
delete = False
if (delete == True):
remov.append(label)
for i in remov:
labels.remove(i)
labels = labels[:-24]#cutout controls
return labels
'''
def sort(results, df):
sg = []
tempSG = []
acc = 0
for i in range(len(results)):
for j in range(len(results)):
sg.append('geneID')
c = 'COCAINE'
h = 'HEROINE'
m = 'METHAMPHETAMINE'
a = 'ETHANOL'
mo = 'MORPHINE'
n = 'NICOTINE'
features = [c,a,h,m,mo,n]
tempRes = []
for i in range(len(results[0])):
tempRes.append(results[0][i])
sg.append(df['geneID'][i])
for i in range(200):
max_index, max_value = max(enumerate(tempRes), key=operator.itemgetter(1))
tempSG.append(sg[max_index])
del tempRes[max_index]
del sg[max_index]
f = open('FEATURES','r')
for line in f:
line = line[:-1]
correct = False
for i in tempSG:
if (line == i):
correct = True
if (correct):
acc += 1.0
#print('ACCURACY = ' + str(acc))
def test(df, stats):
results = np.empty([6, len(df)])
for i in range(6):
results[i] = abs(stats.avgs[i][0] - stats.avgs[i][3])
#print(results)
return results
def generateFeatures():
c = 'COCAINE_REF'
h = 'HEROINE_REF'
m = 'METHAMPHETAMINE_REF'
a = 'ETHANOL_REF'
mo = 'MORPHINE_REF'
n = 'NICOTINE_REF'
features = [c,h,m,a,mo,n]
temp = set()
for i in features:
f = open(i, 'r')
for line in f:
line = line[:-1]
if (line not in temp):
temp.add(line)
f.close()
f = open('FEATURES_REF', 'w+')
for i in temp:
f.write(i + '\n')
f.close()
def generateFeatures2():
c = 'COCAINE'
h = 'HEROINE'
m = 'METHAMPHETAMINE'
a = 'ETHANOL'
mo = 'MORPHINE'
n = 'NICOTINE'
features = [c,h,m,a,mo,n]
temp = set()
for i in features:
f = open(i, 'r')
for line in f:
line = line[:-1]
if (line not in temp):
temp.add(line)
f.close()
f = open('FEATURES', 'w+')
for i in temp:
f.write(i + '\n')
f.close()
def writeFeatures(df):
f = open('FEATURES_REF', 'r')
fn = open('FEATURES', 'w+')
for line in f:
line = line[:-1]
line = int(line)
fn.write(df['geneID'][line] + '\n')
def main():
fn = 'MiceAddictionExpressionProfile.csv'
df = pd.read_csv(fn)
s = Statistics.Statistics(df)
print("AVERAGES")
print(s.avgs)
print("Standard Deviations")
print(s.stds)
print("t-Values")
print(s.tValues)
print("p-Values")
print(s.pValues)
#test(df,s)
#sigGenes, Nums = sort(test(df,s), df)
#print(Nums)
#print(sigGenes)
#s.Output()
writeToFile(df,s)
writeToFile2(df,s)
generateFeatures()
generateFeatures2()
writeFeatures(df)
sort(test(df,s), df)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ac9cb79b3dcf49071e853fea387f1c293579dd72 | 22ad9f0e83cbbe8dc8382d701a6c744a3219b98b | /ls8/cpu.py | 207f1649d52e573c09756de2d07747541ac1da70 | [] | no_license | Jrive204/Sprint-Challenge--Computer-Architecture | cd9feabb80d86abc077ee764bec0cc1df178d948 | 9a3dd803b85c32b9b36cee5390fc536d6630cc73 | refs/heads/master | 2022-11-05T18:43:31.244393 | 2020-06-19T17:40:06 | 2020-06-19T17:40:06 | 273,552,685 | 0 | 0 | null | 2020-06-19T17:38:44 | 2020-06-19T17:38:43 | null | UTF-8 | Python | false | false | 8,365 | py | """CPU functionality."""
import sys
import re
# instance = {
# "LDI": 0b10000010, # Sets the Value of a reg to an int
# "HLT": 0b00000001, # halts the program, "ends it"/"stops it"
# "PRN": 0b01000111, # Prints the value at the next reg
# "MUL": 0b10100010, # multiply reg at +1 with reg at +2
# }
class CPU:
"""Main CPU class."""
def __init__(self):
"""Construct a new CPU."""
self.ram = [0] * 256
self.reg = [0] * 8
self.pc = 0
self.is_run = False
self.fl = 0b00000000
self.sp = 0xF4
def ram_read(self, mar):
print("MAR: ", mar)
return self.reg[mar]
def ram_write(self, mdr, value):
self.reg[mdr] = value
def load(self, program_file):
"""Load a program into memory."""
address = 0
file = open(program_file, "r")
for line in file.readlines():
# load a line into memory (not including comments)
try:
x = line[:line.index("#")]
except ValueError:
x = line
try:
# convert binary to decimal
y = int(x, 2)
self.ram[address] = y
except ValueError:
continue
address += 1
def alu(self, op, reg_a, reg_b):
"""ALU operations."""
if op == "ADD":
self.reg[reg_a] += self.reg[reg_b]
# elif op == "SUB": etc
else:
raise Exception("Unsupported ALU operation")
def trace(self):
"""
Handy function to print out the CPU state. You might want to call this
from run() if you need help debugging.
"""
print(f"TRACE: %02X | %02X %02X %02X |" % (
self.pc,
# self.fl,
# self.ie,
self.ram_read(self.pc),
self.ram_read(self.pc + 1),
self.ram_read(self.pc + 2)
), end='')
for i in range(8):
print(" %02X" % self.reg[i], end='')
print()
def LDI(self):
reg_index = self.ram[self.pc + 1]
value = self.ram[self.pc + 2]
self.reg[reg_index] = value
self.pc += 3
def HLT(self):
self.is_run = False
self.pc += 1
def PRN(self):
index = self.ram[self.pc + 1]
value = self.reg[index]
# print(f"Value: {value}, Register Index : {index}")
print(f"Value: {value}")
self.pc += 2
def MUL(self):
num1 = self.ram_read(self.ram[self.pc + 1])
num2 = self.ram_read(self.ram[self.pc + 2])
self.reg[self.ram[self.pc + 1]] = num1 * num2
self.pc += 3
def POP(self):
# take from the stack and add it to the reg location
# weird cause they both are in reg
# top parts of reg is stack
value = self.ram[self.sp]
self.reg[self.ram[self.pc + 1]] = value
self.sp += 1
self.pc += 2
def PSH(self):
# Decrement the SP
self.sp -= 1
# write the value in ram at pc to the stack (top parts of RAM)
# save ram value to stack
reg_num = self.ram[self.pc + 1]
value = self.reg[reg_num]
top_of_stack_addr = self.sp
self.ram[top_of_stack_addr] = value
self.pc += 2
def ADD(self):
reg_a = self.ram[self.pc + 1]
reg_b = self.ram[self.pc + 2]
self.alu("ADD", reg_a, reg_b)
self.pc += 3
def AND(self):
pass
def CALL(self):
"""
push current pc to stack, so we can return later
we do not want to inc the pc while pushing
set pc to the address in the given register...
register will hold a location that points to
somewhere in ram, we will go there and do the things
.....value is stored in a reg
"""
self.sp -= 1
self.ram[self.sp] = self.pc
self.pc = self.reg[self.ram[self.pc + 1]]
def CMP(self):
"""
comepare 2 given regs
set flags according to the out put
reg_a === reg_b = flag E to 1 or 0
reg_a < reg_b = flag L to 1 or 0
reg_a > reg_b = flag G to 1 or 0
"""
reg_a = self.reg[self.ram[self.pc + 1]]
reg_b = self.reg[self.ram[self.pc + 2]]
if reg_a == reg_b:
self.fl = 1
else:
self.fl = 0
self.pc += 3
def DEC(self):
pass
def DIV(self):
pass
def INC(self):
pass
def INT(self):
pass
def IRET(self):
pass
def JEQ(self):
"""
if the Equal flag is true (0b00000001)
then jump to the register
"""
a = self.fl
if a == 1:
self.pc = self.reg[self.ram[self.pc + 1]]
else:
self.pc += 2
def JGE(self):
pass
def JGT(self):
pass
def JLE(self):
pass
def JLT(self):
pass
def JMP(self):
self.pc = self.reg[self.ram[self.pc + 1]]
return True
def JNE(self):
"""
jump to given register if E flag is false
"""
a = self.fl
if a == 0:
self.pc = self.reg[self.ram[self.pc + 1]]
else:
self.pc += 2
pass
def LD(self):
pass
def MOD(self):
pass
def NOP(self):
pass
def NOT(self):
pass
def OR(self):
pass
def PRA(self):
pass
def RET(self):
"""
retrive are saved location, should be in stack
and set the to pc
inc pc 2 so we dont run call again.
dec sp because we are poppin lockin
"""
self.pc = self.ram[self.sp]
self.pc += 2
self.sp -= 1
pass
def SHL(self):
pass
def SHR(self):
pass
def ST(self):
pass
def SUB(self):
pass
def XLA(self):
pass
def call_func(self, n):
func_stack = {
0b10000010: self.LDI,
0b00000001: self.HLT,
0b01000111: self.PRN,
0b10100010: self.MUL,
0b01000110: self.POP,
0b01000101: self.PSH,
0b10100000: self.ADD,
0b10101000: self.AND,
0b01010000: self.CALL,
0b10100111: self.CMP,
0b01100110: self.DEC,
0b10100011: self.DIV,
0b01100101: self.INC,
0b01010010: self.INT,
0b00010011: self.IRET,
0b01010101: self.JEQ,
0b01011010: self.JGE,
0b01010111: self.JGT,
0b01011001: self.JLE,
0b01011000: self.JLT,
0b01010100: self.JMP,
0b01010110: self.JNE,
0b10000011: self.LD,
0b10100100: self.MOD,
0b00000000: self.NOP,
0b01101001: self.NOT,
0b10101010: self.OR,
0b01001000: self.PRA,
0b00010001: self.RET,
0b10101100: self.SHL,
0b10101101: self.SHR,
0b10000100: self.ST,
0b10100001: self.SUB,
0b10101011: self.XLA
}
if n in func_stack:
func_stack[n]()
else:
print(f"No instruction found! IR: {n}")
sys.exit(1)
def run(self):
"""Run the CPU."""
self.is_run = True
while self.is_run:
ir = self.ram[self.pc] # the instruction or code to run
self.call_func(ir)
# if ir == instance["LDI"]:
# self.ram_write(self.ram[self.pc + 1], self.ram[self.pc + 2])
# self.pc += 3
# elif ir == instance["PRN"]:
# reg_num = self.ram[self.pc + 1]
# print(self.reg[reg_num])
# self.pc += 2
# elif ir == instance["HLT"]:
# self.is_run = False
# self.pc += 1
# elif ir == instance["MUL"]:
# num1 = self.ram_read(self.ram[self.pc + 1])
# num2 = self.ram_read(self.ram[self.pc + 2])
# print("MUL answere: ", num1 * num2)
# self.pc += 3
# else:
# print(f'Unknown instruction {ir} at address {self.pc}')
# sys.exit(1)
| [
"[email protected]"
] | |
7570a7e69cff44f1cc225814c4f9dba828edaecb | 0ba78125ea7ee061511f7dab58bbf2b9284c7fec | /formsfive/tests/forms.py | 8e6a539e422af2c55c1dd3845631648ccd3df517 | [
"BSD-3-Clause"
] | permissive | iamjstates/django-formsfive | 2f8deab8aff736a67fb19edd1e0a5878e1fc1e6f | 6eab78ef921724a2be5ce3e8aceb82e4f49d3ff1 | refs/heads/master | 2021-01-13T02:29:59.727273 | 2012-07-19T20:00:53 | 2012-07-19T20:00:53 | 2,867,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | #!/usr/bin/env python
from django.utils.translation import ugettext_lazy as _
from formsfive.tests.models import Todo
import formsfive as forms
class TodoForm(forms.HTML5ModelForm):
body = forms.CharField(label=_(u'Post or Story Body'), placeholder=_(u'listed'), required=True)
sample = forms.IntegerField(max=25, min=10, step=5)
class Meta:
model = Todo
exclude = ('slug', 'date')
def __init__(self, *args, **kwargs):
super(TodoForm, self).__init__(*args, **kwargs)
self.fields['task'].widget.placeholder = _(u'A Task to do')
self.fields['units'].widget.min = 0
self.fields['units'].widget.max = 100
self.fields['units'].widget.step = 5
| [
"[email protected]"
] | |
0105ae6a0f432e906d8c0f3a428f8f6209277bd8 | d5cfc0b983e3f3304dd465eda010c7549e33bcf5 | /enneagram/skeleton.py | 87723fce65645dca2cc33a1d5c0575278c012218 | [] | no_license | jmhossler/enneagram | b83a034099fc064b32274147e396f355b06b0e65 | de39c6d1588db6e0110e81c82e75c6b80ba0b957 | refs/heads/master | 2023-04-01T16:54:47.902439 | 2020-10-29T03:59:53 | 2020-11-27T08:56:30 | 62,271,349 | 1 | 0 | null | 2023-03-16T06:50:31 | 2016-06-30T02:03:12 | Python | UTF-8 | Python | false | false | 2,871 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.cfg:
console_scripts =
fibonacci = enneagram.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from enneagram import __version__
__author__ = "John Hossler"
__copyright__ = "John Hossler"
__license__ = "none"
_logger = logging.getLogger(__name__)
def fib(n):
"""Fibonacci example function
Args:
n (int): integer
Returns:
int: n-th Fibonacci number
"""
assert n > 0
a, b = 1, 1
for i in range(n-1):
a, b = b, a+b
return a
def parse_args(args):
"""Parse command line parameters
Args:
args ([str]): command line parameters as list of strings
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="Just a Fibonnaci demonstration")
parser.add_argument(
'--version',
action='version',
version='enneagram {ver}'.format(ver=__version__))
parser.add_argument(
dest="n",
help="n-th Fibonacci number",
type=int,
metavar="INT")
parser.add_argument(
'-v',
'--verbose',
dest="loglevel",
help="set loglevel to INFO",
action='store_const',
const=logging.INFO)
parser.add_argument(
'-vv',
'--very-verbose',
dest="loglevel",
help="set loglevel to DEBUG",
action='store_const',
const=logging.DEBUG)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(level=loglevel, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting crazy calculations...")
print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n)))
_logger.info("Script ends here")
def run():
"""Entry point for console_scripts
"""
main(sys.argv[1:])
if __name__ == "__main__":
run()
| [
"[email protected]"
] | |
11321f1540a39b69acdb058cfd8d55e5558bdb84 | 21b2614a048da03f860710399a9c871f1eb2052e | /classes/Guest.py | 3f5d4526c07df5b13abc83b48ecb563f489c9a7c | [] | no_license | Naydire007/codeclan_caraoke | 72e88b1d5f2df29337b41ade852e24c644ba8121 | 9832059c09f9b930f18e3b77ea57ddffefdc51f3 | refs/heads/main | 2023-06-04T19:48:55.882883 | 2021-06-28T02:46:16 | 2021-06-28T02:46:16 | 380,885,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | class Guest:
def __init__(self,name,wallet,favorite_song):
self.name = name
self.wallet = wallet
favorite_song = favorite_song
def pay_entry(self,amount):
self.wallet -= amount
| [
"[email protected]"
] | |
fa0b4aecc20679a3ec3655aa71d64f4b62bbf0f4 | 6afd2c59870217ec93d539f812ddfc70c13d5d3a | /src/apps/calificacion/models.py | 6ac811ff70282daa27bb0e9d774213785c0d59f5 | [] | no_license | heraldmatias/acajef | f03972f09121a05d09e4ca3d6f407d4bf2920ad2 | 0a47a565110dcc60f79fd7c9e62472c6acf8c38c | refs/heads/master | 2021-01-25T12:07:23.878398 | 2012-04-16T06:49:22 | 2012-04-16T06:49:22 | 2,861,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from django.db import models
from curso.models import CursoDocente
from boleta.models import Boleta
from campus.models import AlumnoCampus
class Nota(models.Model):
curso_docente = models.ForeignKey(CursoDocente)
alumno_campus = models.ForeignKey(AlumnoCampus)
nota = models.IntegerField('Nota')
def __unicode__(self):
return u'%s' % self.nota
class Recuperacion(models.Model):
nota = models.ForeignKey(Nota)
boleta = models.ForeignKey(Boleta)
new_nota = models.IntegerField('Nueva nota', default=0)
old_nota = models.IntegerField('Vieja nota', default=0)
| [
"[email protected]"
] | |
331941ba245a3efd4bff501824095597344e2597 | 8b808179d023896c520996abe1e6eae35da1673f | /data/mapper.py | d3b5565403db9308b363ac6e905c7e9832e7b5a2 | [
"MIT"
] | permissive | emily-barnes/Barnes_E_DataViz | 4d7fa3bfb18bf60247472d3a32e707967ecfe7f7 | 4e6cdffde1772964ac173b444b68e03198daa9b1 | refs/heads/main | 2023-02-01T15:21:33.517214 | 2020-12-20T04:42:49 | 2020-12-20T04:42:49 | 320,649,778 | 0 | 0 | MIT | 2020-12-20T04:41:58 | 2020-12-11T18:13:21 | HTML | UTF-8 | Python | false | false | 326 | py | import matplotlib.pyplot as plt
years = [1924, 1936, 1952, 1964, 1976, 1988, 1998, 2002, 2010, 2014]
medals =[9, 13, 17, 7, 3, 6, 49, 76, 93, 90]
plt.plot(years, medals, linewidth=3.0)
plt.ylabel("Total Medals Awarded")
plt.xlabel("Years")
plt.title("Total Medals Won by Canada Over the Years" , pad="20")
plt.show()
| [
"[email protected]"
] | |
9c77d1de75a3f39cbe795c825a8d1208e277e1fe | c8b3454cee3ac3107cbd50665db49e1ce5edbe22 | /schedules/migrations/0002_auto_20190622_0958.py | d76818f0df4623a361356f02573fbb7ff46dd669 | [] | no_license | FerchoGD/Gestion-Monitorias | e1d631f31390b724366badc30b726193af208670 | 0f85e595268ef9dd358fdc3c2b73ee7498f197a8 | refs/heads/master | 2020-03-27T01:56:17.000965 | 2019-08-20T00:56:23 | 2019-08-20T00:56:23 | 145,755,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # Generated by Django 2.2.2 on 2019-06-22 14:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedules', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='timeslot',
name='day',
field=models.CharField(choices=[('Lunes', 'Lunes'), ('Martes', 'Martes'), ('Miércoles', 'Miércoles'), ('Jueves', 'Jueves'), ('Viernes', 'Viernes'), ('Sábado', 'Sábado')], max_length=30, null=True, verbose_name='Día'),
),
]
| [
"[email protected]"
] | |
d29b8d8c7da8d36517a6fa5b1025972204040926 | 84576eeaa849b622735f8310bec2857885608cf8 | /game/test_client_message_handler.py | 93b5600933378e39be58924e213f7454d64e696e | [] | no_license | Escario/arduino-milestag | 2c334a43594c639bade48ede41ab75ab9e89df93 | 56c9ea23f31360a642d22e773847dd886de8d30e | refs/heads/master | 2020-03-20T18:32:41.217270 | 2018-05-28T21:06:53 | 2018-05-28T21:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | "Test how the client handles messages from the server"
from client import Client, Main
import proto
def noop():
"No op function for use as a stub"
pass
def test_ping(mocker):
"Test handling of PING message"
mocker.patch.object(Client, '_openConnection', autospec=True)
client_obj = Client(mocker.MagicMock())
mocker.patch.object(client_obj, 'queueMessage', autospec=True)
assert client_obj.handleMsg("E(123def,1516565652,Ping())")
client_obj.queueMessage.assert_called_once_with("Pong(1516565652,1)")
def test_simple_pong(mocker):
"Test handling of PONG message which doesn't need a response"
mocker.patch.object(Client, '_openConnection', autospec=True)
client_obj = Client(mocker.MagicMock(), timeProvider=lambda: 1516566052)
mocker.patch.object(client_obj, 'queueMessage', autospec=True)
assert client_obj.handleMsg("E(123def,1516565852,Pong(1516565652,0))")
def test_reply_pong(mocker):
"Test handling of PONG message which requests a response"
mocker.patch.object(Client, '_openConnection', autospec=True)
client_obj = Client(mocker.MagicMock(), timeProvider=lambda: 1516566052)
mocker.patch.object(client_obj, 'queueMessage', autospec=True)
assert client_obj.handleMsg("E(123def,1516565852,Pong(1516565652,1))")
client_obj.queueMessage.assert_called_once_with("Pong(1516565852,0)")
| [
"[email protected]"
] | |
2afd63762f350a46cd3d44a1180b6360ef64572a | 30aba5246cd3c0fb5fe3299cfffd301e4b51db3b | /importData.py | fb15296ccd9d923b55c36088ef227df6b8e9e085 | [] | no_license | Brisk-Rui/SpectraReconstruction | bb0b67e6637075186234c20392188d70702acfe4 | a40265c8138267f9cfa4ac541fffdf60497668b3 | refs/heads/main | 2023-07-29T03:14:08.879811 | 2021-08-31T07:50:20 | 2021-08-31T07:51:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,452 | py | """
SPECTRA PROCESSING
Copyright (C) 2020 Josef Brandt, University of Gothenburg.
<[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program, see COPYING.
If not, see <https://www.gnu.org/licenses/>.
"""
import os
import numpy as np
from typing import List, Tuple
import h5py
from functions import getNMostDifferentSpectra, reduceSpecsToNWavenumbers, remapSpectrumToWavenumbers
def load_microFTIR_spectra(specLength: int, maxCorr: float = 1.0) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Loads the spectra from the MicroFTIR Spectra directory.
    Each measured (noisy) spectrum is paired with the clean reference spectrum of its
    assigned polymer; pairs whose Pearson correlation exceeds maxCorr are discarded.
    :param specLength: Number of wavenumbers the spectra are reduced to
    :param maxCorr: Highest correlation between clean and noisy spectrum to keep.
        The default of 1.0 keeps every pair (correlations are always <= 1.0).
    :return: (NoisyPolymers, CleanPolymers, polymNames, wavenumbers) — the first three
        arrays are aligned row-for-row.
    """
    path: str = "MicroFTIRSpectra"
    allSpecs: np.ndarray = np.load(os.path.join(path, "polymers.npy"))
    allSpecs = reduceSpecsToNWavenumbers(allSpecs, specLength)
    wavenumbers = allSpecs[:, 0].copy()  # first column is the shared wavenumber axis
    polyms_noisy_all = np.transpose(allSpecs[:, 1:])  # one row per measured spectrum
    allNames = np.genfromtxt(os.path.join(path, "polymerNames.txt"), dtype=str)

    # Load each unique clean reference spectrum once and remap it onto the common axis.
    uniqueAssignments: List[str] = list(np.unique(allNames))
    uniqueSpectra: List[np.ndarray] = []
    for assignment in uniqueAssignments:
        cleanSpec = np.loadtxt(os.path.join(path, assignment + ".txt"), delimiter=',')
        cleanSpec = remapSpectrumToWavenumbers(cleanSpec, wavenumbers)
        uniqueSpectra.append(cleanSpec)

    polyms_noisy: List[np.ndarray] = []
    polyms_Clean: List[np.ndarray] = []
    keptNames: List[str] = []
    for i, assignment in enumerate(allNames):
        specIndex = uniqueAssignments.index(assignment)
        cleanSpec = uniqueSpectra[specIndex][:, 1]  # column 1 holds intensities
        noisySpec = polyms_noisy_all[i, :]
        if np.corrcoef(cleanSpec, noisySpec)[0, 1] <= maxCorr:
            polyms_noisy.append(noisySpec)
            polyms_Clean.append(cleanSpec)
            # Bugfix: keep the assignment as well, so the returned names stay aligned
            # with the filtered spectra. Previously the FULL name list was returned,
            # which misaligned names and spectra whenever maxCorr < 1.0 dropped pairs.
            keptNames.append(assignment)

    return np.array(polyms_noisy), np.array(polyms_Clean), np.array(keptNames), wavenumbers
def load_reference_Raman_spectra() -> np.ndarray:
    """
    Load the Raman reference spectra from the HDF5 database.

    :return: array of shape (numWavenumbers, numSpectra + 1); column 0 holds
        the (estimated) wavenumber axis, the remaining columns the min-max
        normalized spectra.
    """
    specs = []
    # FIX: open read-only and via context manager so the file handle is always
    # released (the original relied on the default mode and never closed it).
    with h5py.File(r"RamanReferenceSpectra/Raman reference spectra.h5", "r") as file:
        for i, sampleName in enumerate(file['Samples'].keys()):
            sample = file['Samples'][str(sampleName)]
            specGroup = sample['Spectra']
            data = specGroup[str(list(specGroup.keys())[0])]
            origSpec = data['SpectralData<p:ArbitrarySpacedOriginalSpectrum>']
            spec = np.array(origSpec)
            spec = (spec - spec.min()) / (spec.max() - spec.min())  # min-max normalize
            if i == 0:
                # Estimate wavenumbers; the anchor peak positions were read
                # manually from a PET spectrum (the last one in the file).
                peak1, peak2 = 1750, 3100
                ind1, ind2 = 690, 1311
                wavenums = np.arange(len(spec))
                wavenums -= ind1
                wavenums = wavenums * (peak2 - peak1) / (ind2 - ind1)
                wavenums += peak1
                specs.append(wavenums)
            specs.append(spec)
    return np.array(specs).transpose()
def load_specCSVs_from_directory(path: str, fixName: str = None, maxSpectra=1e6) -> Tuple[List[str], np.ndarray]:
    """
    Read spectra from the CSV files in *path*. If given, a fix name is
    assigned to each spectrum.

    :param path: directory to scan for .csv files
    :param fixName: if None, each spectrum is named after its file, otherwise
        every spectrum gets this fixed name
    :param maxSpectra: maximum number of spectra to keep
    :return: (assignment list, spectra array); column 0 of the array is the
        wavenumber axis taken from the first CSV file
    """
    spectra: np.ndarray = None
    names: list = []
    for fileName in os.listdir(path):
        if not fileName.lower().endswith('.csv'):
            continue
        names.append(fixName if fixName is not None else fileName.lower().split('.csv')[0])
        curSpec: list = []
        with open(os.path.join(path, fileName), 'r') as fp:
            if spectra is None:
                # First file: its wavenumber axis becomes the reference axis.
                wavenumbers = []
                for line in fp.readlines():
                    wavenum, intensity = get_numbers_from_line(line)
                    curSpec.append(intensity)
                    wavenumbers.append(wavenum)
                spectra = np.array(wavenumbers)
            else:
                # Later files: remap intensities onto the reference axis by
                # picking the nearest wavenumber for each reference point.
                tmpSpec, tmpWavenumber = [], []
                for line in fp.readlines():
                    wavenum, intensity = get_numbers_from_line(line)
                    tmpSpec.append(intensity)
                    tmpWavenumber.append(wavenum)
                tmpSpec = np.array(tmpSpec)
                tmpWavenumber = np.array(tmpWavenumber)
                for number in spectra[:, 0]:
                    curSpec.append(tmpSpec[np.argmin(np.abs(tmpWavenumber - number))])
        # Append the freshly read spectrum as a new column.
        if len(spectra.shape) == 1:
            spectra = np.append(spectra[:, np.newaxis], np.array(curSpec)[:, np.newaxis], axis=1)
        else:
            spectra = np.append(spectra, np.array(curSpec)[:, np.newaxis], axis=1)

    numSpectra = spectra.shape[1] - 1
    if numSpectra > maxSpectra:
        names, spectra = getNMostDifferentSpectra(names, spectra, maxSpectra)
    return names, spectra
def get_numbers_from_line(line: str) -> Tuple[float, float]:
    """
    Parse one line of a csv/txt document into exactly two floats, auto-detecting
    the delimiter (';' or ',') and the decimal separator (',' or '.').

    :param line: raw text line
    :return: the two numbers as a tuple
    :raises AssertionError: if the line does not split into exactly two fields
    :raises ValueError: if a field cannot be converted to float
    """
    # FIX: the original had two `except AssertionError` clauses on one try;
    # the second (`as e`) was unreachable dead code.  Restructured as two
    # explicit parse attempts with identical observable behaviour.
    stripped = line.strip()
    parts = stripped.split(';')
    if len(parts) == 2:
        # ';'-delimited files use ',' as the decimal separator.
        return float(parts[0].replace(',', '.')), float(parts[1].replace(',', '.'))
    parts = stripped.split(',')
    assert len(parts) == 2, 'cannot parse line: %r' % line
    return float(parts[0]), float(parts[1])
| [
"[email protected]"
] | |
02b8c59f8dd6c7884da2b597847a776eb586f9c6 | 330cb5350c330887fef9f5a1c9381b60e2e9204b | /todo/test_forms.py | 870dcbcdc5722d9f34beb65db05ab457ab8cfaf6 | [] | no_license | nealbrophy/CI_hello-django_new | c62b098aaf5f0de538719ac715cf8fbc85631b4b | 61498d5710898ad87bd69c6f32f334db99e58899 | refs/heads/master | 2023-08-12T10:31:43.522740 | 2021-06-11T15:05:53 | 2021-06-11T15:05:53 | 267,389,249 | 0 | 0 | null | 2021-09-22T19:07:30 | 2020-05-27T17:54:20 | HTML | UTF-8 | Python | false | false | 626 | py | from django.test import TestCase
from .forms import ItemForm
class TestItemForm(TestCase):
    """Unit tests for ItemForm field configuration and validation."""

    def test_item_name_is_required(self):
        # An empty name must make the form invalid with a field error.
        form = ItemForm({'name': ''})
        self.assertFalse(form.is_valid())
        self.assertIn('name', form.errors.keys())
        self.assertEqual(form.errors['name'][0], 'This field is required.')

    def test_done_field_is_not_required(self):
        # 'done' may be omitted; a name alone makes a valid form.
        form = ItemForm({'name': 'Test Todo Item'})
        # FIX: the original asserted the truthiness of the bound method
        # (form.is_valid), which is always True; the method must be called.
        self.assertTrue(form.is_valid())

    def test_fields_are_explicit_in_form_metaclass(self):
        form = ItemForm()
        self.assertEqual(form.Meta.fields, ['name', 'done'])
| [
"[email protected]"
] | |
f91c694e6c4343a359d8011596dbf65dddee08e7 | 0f5fadb3b735ff8de20f6c068a2cc52a018c5f4b | /Solicitud/models.py | 74acca60d0c98219f3b790791769cf1d9ba55d7b | [] | no_license | NoelChaparro/Siobicx | 193c8ed92f905f46f645f5caaa82f0bea31bd608 | 0d655de3ad6d2c7e3f245ff09d4ea25deb706b7d | refs/heads/master | 2021-01-15T23:50:41.930456 | 2014-12-10T20:05:06 | 2014-12-10T20:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,305 | py | #encoding:utf-8
from ConexosAgropecuarios.models import Persona
from Programas.models import Programa
from django.db import models
# Create your models here.
class Solicitud(models.Model):
    """Model that manages a member's insurance application (solicitud)."""
    DECLARACION_SOLICITUD = (
        ('ANUAL', 'ANUAL'),
        ('A DECLARACIÓN', 'A DECLARACIÓN'),
    )
    IdSolicitud = models.AutoField(primary_key=True)
    FolioSolicitud = models.CharField(max_length=20, null=True, blank=True, verbose_name='Folio de Solicitud')
    FechaSolicitud = models.DateTimeField(verbose_name='Fecha de Solicitud', null=True, blank=True)
    PersonaSolicitante = models.ForeignKey(Persona, related_name='PersonaSolicitante')
    PersonaAsegurada = models.ForeignKey(Persona, related_name='PersonaAsegurada')
    PersonaContratante = models.ForeignKey(Persona, related_name='PersonaContratante')
    Programa = models.ForeignKey(Programa)
    Unidades = models.IntegerField(null=True, blank=True)
    ValorUnidad = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
    DeclaracionSolicitud = models.CharField(max_length=13, null=True, blank=True, choices=DECLARACION_SOLICITUD)
    Observaciones = models.TextField(null=True, blank=True, verbose_name="Observaciones de la Solicitud")
    Estatus = models.NullBooleanField(null=True, blank=True, verbose_name="Aceptado o Rechazado")

    class Meta:
        verbose_name = 'Solicitud'
        verbose_name_plural = 'Solicitudes'
        ordering = ('IdSolicitud',)
        db_table = 'Solicitud'

    def __unicode__(self):
        # FIX: __unicode__ must return a unicode string; the original returned
        # the raw AutoField integer.
        return u'%s' % self.IdSolicitud
class Beneficiario(models.Model):
    """Beneficiary of an application, with an assigned payout percentage."""
    IdBeneficiario = models.AutoField(primary_key=True)
    Solicitud = models.ForeignKey(Solicitud)
    PersonaBeneficiario = models.ForeignKey(Persona)
    Porcentaje = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)

    class Meta:
        verbose_name = 'Beneficiario'
        verbose_name_plural = 'Beneficiarios'
        ordering = ('IdBeneficiario',)
        db_table = 'Beneficiario'

    def __unicode__(self):
        # FIX: __unicode__ must return a unicode string, not the raw integer.
        return u'%s' % self.IdBeneficiario
class RelacionAnexaSolicitud(models.Model):
    """Annex to an application: location and description of the insured asset."""
    IdRelacionAnexaSolicitud = models.AutoField(primary_key=True)
    Solicitud = models.ForeignKey(Solicitud, verbose_name="The related Solicitud")
    UbicacionBienLat = models.CharField(max_length=15, null=True, blank=True, verbose_name="Latitud del Bien")
    UbicacionBienLng = models.CharField(max_length=15, null=True, blank=True, verbose_name="Longitud del Bien")
    CP = models.CharField(max_length=5, null=True, blank=True, verbose_name="Codigo Postal")
    DescripcionBienAsegurado = models.TextField(null=True, blank=True, verbose_name="Descripcion del Bien Asegurado")
    ObservacionesSolicitante = models.TextField(null=True, blank=True, verbose_name="Observaciones del Solicitante")
    FechaRelacionAnexa = models.DateField(null=True, blank=True, verbose_name="Fecha de ELaboracion Relacion Anexa")

    class Meta:
        verbose_name = 'RelacionAnexaSolicitud'
        verbose_name_plural = 'RelacionAnexaSolicitudes'
        ordering = ('Solicitud',)
        db_table = 'RelacionAnexaSolicitud'

    def __unicode__(self):
        # FIX: the field is nullable; returning None from __unicode__ breaks
        # rendering, so fall back to an empty string.
        return self.DescripcionBienAsegurado or u''
class DescripcionDetalladaBienSolicitado(models.Model):
    """Detailed description of one asset listed in an application annex."""
    # NOTE: despite its name, this tuple holds the accepted valuation
    # document types (kept for schema/migration compatibility).
    DECLARACION_SOLICITUD = (
        ('FACTURA', 'FACTURA'),
        ('PEDIMENTO', 'PEDIMENTO'),
        ('MANIFESTACIÓN', 'MANIFESTACIÓN'),
        ('AVALÚO', 'AVALÚO'),
        ('DECLARACIÓN DEL SOCIO', 'DECLARACIÓN DEL SOCIO'),
        ('OTROS', 'OTROS'),
    )
    IdDescripcionDetalladaBienSolicitado = models.AutoField(primary_key=True)
    RelacionAnexaSolicitud = models.ForeignKey(RelacionAnexaSolicitud, verbose_name="The related RelacionAnexaSolicitud")
    NombreEquipo = models.CharField(max_length=50, null=True, blank=True, verbose_name="Nombre del Equipo")
    Marca = models.CharField(max_length=30, null=True, blank=True, verbose_name="Marca del Equipo")
    Modelo = models.CharField(max_length=10, null=True, blank=True, verbose_name="Modelo del Equipo")
    Serie = models.CharField(max_length=20, null=True, blank=True, verbose_name="Serie del Equipo")
    FechaBien = models.DateField(null=True, blank=True, verbose_name="Fecha Elaboracion del Bien")
    DocumentacionEvaluacion = models.CharField(max_length=30, null=True, blank=True, choices=DECLARACION_SOLICITUD, verbose_name="Documentacion para su Evaluacion")
    Cantidad = models.IntegerField(null=True, blank=True, verbose_name="Cantidad")
    ValorUnitario = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True, verbose_name="Valor Unitario")

    class Meta:
        verbose_name = 'DescripcionDetalladaBienSolicitado'
        verbose_name_plural = 'DescripcionDetalladaBienesSolicitados'
        ordering = ('IdDescripcionDetalladaBienSolicitado',)
        db_table = 'DescripcionDetalladaBienSolicitado'

    def __unicode__(self):
        # FIX: NombreEquipo is nullable; never return None from __unicode__.
        return self.NombreEquipo or u''
class ActaVerificacionSolicitud(models.Model):
    """Verification report (acta) attached to an application."""
    IdActaVerificacionSolicitud = models.AutoField(primary_key=True)
    Solicitud = models.ForeignKey(Solicitud, verbose_name="The related Solicitud")
    FechaPrellenada = models.DateField(null=True, blank=True, verbose_name="Fecha Elaboracion de Acta Prellenada")
    FechaCampo = models.DateField(null=True, blank=True, verbose_name="Fecha Elaboracion Acta de Campo")
    DictamenInspeccion = models.TextField(null=True, blank=True, verbose_name="Dictamen de la Inspeccion")

    class Meta:
        verbose_name = 'ActaVerificacionSolicitud'
        verbose_name_plural = 'ActaVerificacionSolicitudes'
        ordering = ('IdActaVerificacionSolicitud',)
        db_table = 'ActaVerificacionSolicitud'

    def __unicode__(self):
        # FIX: __unicode__ must return a unicode string, not the raw integer.
        return u'%s' % self.IdActaVerificacionSolicitud
class MedidaSeguridadActaVerificacion(models.Model):
    """Safety measure recorded for an asset during verification."""
    MEDIDAS_SEGURIDAD = (
        ('EXTINTORES', 'EXTINTORES'),
        ('HIDRANTES', 'HIDRANTES'),
    )
    IdMedidaSeguridad = models.AutoField(primary_key=True)
    ActaVerificacionSolicitud = models.ForeignKey(ActaVerificacionSolicitud, verbose_name="The related ActaVerificacionSolicitud")
    MedidasSeguridad = models.CharField(max_length=50, null=True, blank=True, choices=MEDIDAS_SEGURIDAD, verbose_name="Medidas de Seguridad")

    class Meta:
        verbose_name = "MedidaSeguridadActaVerificacion"
        verbose_name_plural = "MedidasSeguridadActaVerificacion"
        ordering = ("IdMedidaSeguridad",)
        db_table = 'MedidaSeguridadActaVerificacion'

    def __unicode__(self):
        # FIX: __unicode__ must return a unicode string, not the raw integer.
        return u'%s' % self.IdMedidaSeguridad
class RelacionAnexaActaVerificacion(models.Model):
    """Annex to the verification report: location/description of the asset."""
    IdRelacionAnexaActaVerificacion = models.AutoField(primary_key=True)
    Solicitud = models.ForeignKey(Solicitud, verbose_name="The related Solicitud")
    UbicacionBienLat = models.CharField(max_length=15, null=True, blank=True, verbose_name="Latitud del Bien")
    UbicacionBienLng = models.CharField(max_length=15, null=True, blank=True, verbose_name="Longitud del Bien")
    CP = models.CharField(max_length=5, null=True, blank=True, verbose_name="Codigo Postal")
    DescripcionBienAsegurado = models.TextField(null=True, blank=True, verbose_name="Descripcion del Bien Asegurado")
    FechaRelacionAnexaActaVerificacion = models.DateField(null=True, blank=True, verbose_name="Fecha de Elaboracion Relacion Anexa Acta Verificacion")

    class Meta:
        verbose_name = 'RelacionAnexaActaVerificacion'
        verbose_name_plural = 'RelacionAnexaActasVerificacion'
        ordering = ('Solicitud',)
        db_table = 'RelacionAnexaActaVerificacion'

    def __unicode__(self):
        # FIX: the field is nullable; fall back to an empty string.
        return self.DescripcionBienAsegurado or u''
class DescripcionBienActaVerificacion(models.Model):
    """Detailed description of one asset in the verification-report annex."""
    # NOTE: despite its name, this tuple holds the accepted valuation
    # document types (kept for schema/migration compatibility).
    DECLARACION_SOLICITUD = (
        ('FACTURA', 'FACTURA'),
        ('PEDIMENTO', 'PEDIMENTO'),
        ('MANIFESTACIÓN', 'MANIFESTACIÓN'),
        ('AVALÚO', 'AVALÚO'),
        ('DECLARACIÓN DEL SOCIO', 'DECLARACIÓN DEL SOCIO'),
        ('OTROS', 'OTROS'),
    )
    IdDescripcionBienActaVerificacion = models.AutoField(primary_key=True)
    RelacionAnexaActaVerificacion = models.ForeignKey(RelacionAnexaActaVerificacion, verbose_name="The related RelacionAnexaActaVerificacion")
    NombreEquipo = models.CharField(max_length=50, null=True, blank=True, verbose_name="Nombre del Equipo")
    Marca = models.CharField(max_length=30, null=True, blank=True, verbose_name="Marca del Equipo")
    Modelo = models.CharField(max_length=10, null=True, blank=True, verbose_name="Modelo del Equipo")
    Serie = models.CharField(max_length=20, null=True, blank=True, verbose_name="Serie del Equipo")
    FechaBien = models.DateField(null=True, blank=True, verbose_name="Fecha Elaboracion del Bien")
    DocumentacionEvaluacion = models.CharField(max_length=30, null=True, blank=True, choices=DECLARACION_SOLICITUD, verbose_name="Documentacion para su Evaluacion")
    Cantidad = models.IntegerField(null=True, blank=True, verbose_name="Cantidad")
    ValorUnitario = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True, verbose_name="Valor Unitario")

    class Meta:
        verbose_name = 'DescripcionBienActaVerificacion'
        verbose_name_plural = 'DescripcionBienesActaVerificacion'
        ordering = ('IdDescripcionBienActaVerificacion',)
        db_table = 'DescripcionBienActaVerificacion'

    def __unicode__(self):
        # FIX: NombreEquipo is nullable; never return None from __unicode__.
        return self.NombreEquipo or u''
"[email protected]"
] | |
c97200edd3289bc742f4771a37ae118ebe0817da | 18aca150e55480c15aa49bbcc2e18f1a2d1c91ff | /polls/views.py | 2851477e79ae56689b8ea120c9ce38beeaa5b170 | [] | no_license | CassioSalvador/Django2.0Tutorial | 86bf00c871be96d434f39e735914fcb0f91f5188 | 50e1771af4eda3a7870756062fed8a9bd9d9e17a | refs/heads/master | 2021-05-10T10:00:36.102985 | 2018-01-25T17:36:23 | 2018-01-25T17:36:23 | 118,945,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,158 | py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
# Create your views here.
class IndexView(generic.ListView):
    """Landing page listing the five most recently published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """
        Return the last five published questions (not including those set to
        be published in the future).
        """
        now = timezone.now()
        return Question.objects.filter(pub_date__lte=now).order_by('-pub_date')[:5]
#def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# context = {
# 'latest_question_list': latest_question_list,
# }
# return render(request, 'polls/index.html', context)
class DetailView(generic.DetailView):
    """Detail page for a single question."""
    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """Exclude any questions that aren't published yet."""
        return Question.objects.filter(pub_date__lte=timezone.now())
#def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
class ResultsView(generic.DetailView):
    """Results page showing the vote counts for a question."""
    model = Question
    template_name = 'polls/results.html'
#def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
    """
    Record a vote for one of *question_id*'s choices and redirect to results.

    Re-renders the detail page with an error message when no choice was
    submitted or the choice does not exist.
    """
    from django.db.models import F  # local import to leave module imports untouched

    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    # FIX: increment in the database with an F() expression instead of a
    # read-modify-write on the Python object, avoiding the race condition
    # when two requests vote at the same time.
    selected_choice.votes = F('votes') + 1
    selected_choice.save()
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
"[email protected]"
] | |
533876a381911e6ccb0850999d0c0075332f05f4 | c3711aa405fb1ad731888ed9b041ef7d287833c8 | /app.py | 36da1a19e02dadd105c0f1a6b67718189ed14a56 | [] | no_license | kims99/VTM_site | d5e2b30fe6fdef9dee1035b222dcc52ada561755 | 264696fd7523b26195d75eded5ea94b35adda92f | refs/heads/master | 2021-07-14T09:54:54.216815 | 2020-02-18T18:26:04 | 2020-02-18T18:26:04 | 241,269,907 | 0 | 0 | null | 2021-03-20T02:55:31 | 2020-02-18T04:13:13 | HTML | UTF-8 | Python | false | false | 1,274 | py | from flask import Flask
from flask import render_template, request
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html', pageTitle='Flask Server Home Page')
@app.route('/about')
def about():
return render_template('about.html', pageTitle='About VTM')
@app.route('/kim', methods=['GET', 'POST'])
def kim():
if request.method == 'POST':
form = request.form
radius = float(form['tankRad'])
height = float(form['tankHt'])
print(radius)
print(height)
PI = 3.14
MATERIALCOST = 25
LABORCOST = 15
topArea = PI * radius**2
sideArea = (2 * (PI * (radius * height)))
totalAreaSqFt = (topArea + sideArea)/144 #convert from square inches to square feet
# print(topArea)
# print(sideArea)
# print(totalAreaSqFt)
totalMaterialCost = totalAreaSqFt * MATERIALCOST
totalLaborCost = totalAreaSqFt * LABORCOST
# print(totalMaterialCost)
# print(totalLaborCost)
totalBidPrice = totalMaterialCost + totalLaborCost
print(totalBidPrice)
return render_template('kim.html', pageTitle='Tank Painting Estimate')
if __name__ == '__main__':
app.run(debug=True)
| [
"[email protected]"
] | |
a9b443fd0f2e781a1c90b3b0906673a657fdb5ff | b4776512598925029ed2784a016f748955318bea | /tests/api_tests/test_api_language.py | 0c921cd2c60f1804ed2f5df6ad80453222219b9c | [
"Apache-2.0"
] | permissive | kvtb/dedoc | e95f1b3968c2c1f8cad5a3e015af33c725b7348f | 34e4f7011cdcc58bcf37ec90ef07c4841d43b841 | refs/heads/master | 2023-06-16T19:05:56.134453 | 2021-07-15T15:11:58 | 2021-07-15T15:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,359 | py | from tests.api_tests.abstrac_api_test import AbstractTestApiDocReader
class TestApiDocReader(AbstractTestApiDocReader):
    """Checks that the English test document parses identically from doc/docx/odt."""

    def _assert_english_doc(self, file_name):
        """Send *file_name* to the API and verify title, list items and table.

        FIX: the three test methods below were byte-identical except for the
        file name; the shared body is extracted here to remove the duplication.
        """
        result = self._send_request(file_name, dict(language="eng", structure_type="tree"))
        content = result["content"]
        structure = content["structure"]
        self.assertEqual("THE GREAT ENGLISH DOCUMENT", structure["subparagraphs"][0]["text"])
        list_elements = structure["subparagraphs"][1]["subparagraphs"]
        # The typos below ("Fisrst", "Speek") are present in the fixture document.
        self.assertEqual("1) Fisrst item", list_elements[0]["text"])
        self.assertEqual("2) Second item", list_elements[1]["text"])
        table = content["tables"][0]
        self.assertListEqual(['London', 'The capital of Great Britain'], table["cells"][0])
        self.assertListEqual(['Speek', 'From my heart'], table["cells"][1])

    def test_en_doc(self):
        self._assert_english_doc("english_doc.doc")

    def test_en_docx(self):
        self._assert_english_doc("english_doc.docx")

    def test_en_odt(self):
        self._assert_english_doc("english_doc.odt")
| [
"[email protected]"
] | |
5d59554ebd3eeeb3cc7d1a44287dc99666250f1d | 2388cf1916978e4356061182c89226e69546b64f | /mt9001_driver/scripts/runAndShow.py | 7d3754a2400354c4f3747c7e90823662b71a93ae | [] | no_license | cnping/BBBcam | dcf1e225e8ee535136c0588d4f4bc8e6a4389cfa | ef94222a1ea79fd31ed442f329df2c3566bbbc48 | refs/heads/10bit | 2021-01-18T08:53:10.261389 | 2016-02-18T02:11:00 | 2016-02-18T02:11:00 | 52,163,465 | 0 | 1 | null | 2016-02-20T16:40:18 | 2016-02-20T16:40:18 | null | UTF-8 | Python | false | false | 470 | py | import os
import numpy as np
import sys
from matplotlib import cm
prefix = sys.argv[1]
name = prefix + 'sum.dat'


def main(prefix):
    """Load the summed 1024x1280 uint32 frame, save it as a PNG and show it."""
    # FIX: the original used `plt` without ever importing matplotlib.pyplot.
    import matplotlib.pyplot as plt

    arr = np.reshape(np.fromfile(name, dtype='uint32'), (1024, 1280))
    plt.imshow(arr, interpolation='none', cmap=cm.Greys_r)
    plt.savefig('images/' + name[:-3] + 'png')
    plt.show()


# NOTE(review): the original command string ran '-r 0 255' directly followed
# by the data file name with no separating space; a space is inserted here —
# confirm against run_mt9m001's expected arguments.
os.system('time sudo ./run_mt9m001 1 -o ' + prefix + ' -n 40 -r 0 255 ' + name + ' .')
main(prefix)
| [
"debian@beaglebone.(none)"
] | debian@beaglebone.(none) |
8c8ebf1b96236c4c10d7bab4940997e6a9ff6d30 | 3fe31c7f38a785d71d51917020c3022bdd466937 | /cpxButtons.py | 33a2f2f4a8472604ef5809bd5324132c1b7361d1 | [] | no_license | mkiser71/python | 8191b063527288d2626f116a08b0414c02de36c2 | f485fcfd2f569683acc002cf4ae5a16167d64460 | refs/heads/main | 2023-04-28T19:50:28.083607 | 2021-05-21T12:00:49 | 2021-05-21T12:00:49 | 338,573,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from adafruit_circuitplayground.express import cpx
while True:
    # Button A lights pixel 2 red, button B lights pixel 7 green;
    # with neither button pressed, all pixels are cleared.
    if cpx.button_a:
        cpx.pixels[2] = (255, 0, 0)
    elif cpx.button_b:
        cpx.pixels[7] = (0, 255, 0)
    else:
        cpx.pixels.fill((0, 0, 0))
| [
"[email protected]"
] | |
56de5b85ec3189cb5b85c474e96d22a39866e6e9 | 5496f2f8e1e92387fd113d255fc9f484765281b1 | /PK/kri6_semoga fix.py | efdfb331cdcf8799fb3c133aebbc6ee201960e7e | [] | no_license | TRUI-GLADIATOS/latihan | 01035f8dfc0bba660906693c98f6cf5b90069c7a | 6ec5b98f8b14882d4c15ce9973aa85d29a820483 | refs/heads/master | 2021-07-21T18:39:40.128194 | 2021-07-11T15:38:12 | 2021-07-11T15:38:12 | 157,535,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | import cv2
import numpy as np
import serial
from time import sleep
import time
#oii ser = serial.Serial("/dev/OpenCM9.04", 9600) # Open port with baud rate
# data = 0
def B():
    """Watch the camera feed and report whether the red line is visible.

    Leaves the loop (and releases the camera) as soon as the tracked colour
    disappears from the frame or ESC is pressed.
    """
    cap = cv2.VideoCapture(0)
    # Requested capture resolution (property 3 = width, 4 = height).
    cap.set(3, 480)
    cap.set(4, 320)

    _, frame = cap.read()
    rows, cols, _ = frame.shape
    x_medium = int(cols / 2)
    center = int(cols / 2)
    position = 90  # degrees

    while True:
        _, frame = cap.read()
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Lower == upper keeps only exact matches of this colour value.
        low_red = np.array([0, 0, 255])
        high_red = np.array([0, 0, 255])
        red_mask = cv2.inRange(hsv_frame, low_red, high_red)

        contours, _ = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=lambda c: cv2.contourArea(c), reverse=True)
        for cnt in contours:
            (x, y, w, h) = cv2.boundingRect(cnt)
            x_medium = int((x + x + w) / 2)
            break  # only the largest contour matters

        cv2.line(frame, (x_medium, 0), (x_medium, 480), (0, 255, 0), 2)

        # NOTE(review): the capture width is set to 480 but this check uses
        # 640 — confirm which resolution the camera actually delivers.
        if 0 < x_medium < 640:
            print("On Track!")
            data = 'W'
        else:
            print("I don't see the line")
            break

        cv2.imshow("Frame", frame)
        cv2.imshow("contours", red_mask)
        if cv2.waitKey(1) == 27:  # ESC
            break

    cap.release()
    cv2.destroyAllWindows()
def A():
    """Track the red line with the camera and compute a servo position.

    The servo angle is nudged toward the detected line centre and clamped to
    [10, 180]; the resulting command is currently only printed (the serial
    writes are commented out upstream).
    """
    cap = cv2.VideoCapture(0)
    # Requested capture resolution (property 3 = width, 4 = height).
    cap.set(3, 480)
    cap.set(4, 320)

    _, frame = cap.read()
    rows, cols, _ = frame.shape
    x_medium = int(cols / 2)
    center = int(cols / 2)
    position = 90  # degrees

    while True:
        _, frame = cap.read()
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        low_red = np.array([0, 0, 255])
        high_red = np.array([0, 0, 255])
        red_mask = cv2.inRange(hsv_frame, low_red, high_red)

        contours, _ = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=lambda c: cv2.contourArea(c), reverse=True)
        for cnt in contours:
            (x, y, w, h) = cv2.boundingRect(cnt)
            x_medium = int((x + x + w) / 2)
            break  # only the largest contour matters

        cv2.line(frame, (x_medium, 0), (x_medium, 480), (0, 255, 0), 2)

        # Nudge the servo toward the line centre (dead band of +/- 40 px).
        if x_medium < center - 40:
            position += 2
        if x_medium > center + 40:
            position -= 2
        # Clamp to the servo's safe range.
        if position > 180:
            position = 180
        if position < 10:
            position = 10

        cv2.line(frame, (x_medium, 0), (x_medium, 480), (0, 255, 0), 2)
        print(x_medium, position)

        # FIX: the original tested `position < 100` before `position < 10`,
        # making the "kanan" branch unreachable; the narrower test must come
        # first.  (With the clamp above, position < 10 still cannot occur —
        # NOTE(review): confirm the intended thresholds.)
        if position < 10:
            print("kanan")
        elif position < 100:
            print("kiri")
        else:
            print("cari")

        cv2.imshow("Frame", frame)
        cv2.imshow("contours", red_mask)
        if cv2.waitKey(1) == 27:  # ESC
            break

    cap.release()
    cv2.destroyAllWindows()
# Run the track/follow cycle until the deadline passes.
delay = 200  # seconds (the original comment said "10 seconds"; the value is 200)
sekarang = time.time()
close_time = time.time() + delay
while True:
    B()
    A()
    if time.time() > close_time:
        break
cv2.destroyAllWindows()
"[email protected]"
] | |
3cdb1a1be34179b6668f1e9da47a1e17c6f87961 | 5df755b8dc70c128138c1e6e304a15a8f4d392a1 | /ch2/06_pr_02_remainder_of_two.py | 60528a59cd2f2a529c75308f78df3116ccd978dd | [] | no_license | atharv4git/pyLearn | bff2c2212aefce8832e94bf7caf19bdd0fd4de8d | d768b23f3f81798892d5caa0ed40ef66565a7ecb | refs/heads/main | 2023-06-29T07:24:30.533661 | 2021-07-30T11:28:55 | 2021-07-30T11:28:55 | 391,033,742 | 0 | 0 | null | 2021-07-30T11:28:56 | 2021-07-30T11:03:07 | null | UTF-8 | Python | false | false | 104 | py | a = input("Enter a number:")
a = int(a)
print("The remainder when" , a , "is devided by 2 is" , a%2) | [
"[email protected]"
] | |
ee130f7ff914a89d881715663eba4f61cc90bcfb | ab4f74d127bfc89813ee359bb9c779eca5426ddc | /script/label_image.runfiles/org_tensorflow/tensorflow/core/framework/tensor_pb2.py | 9adcb52c8c3ea21bdcdcb2006d01b4c3f6a19493 | [
"MIT"
] | permissive | harshit-jain-git/ImageNET | cdfd5a340b62862ad8d1cc3b9a0f30cccc481744 | 1cd4c2b70917e4709ce75422c0205fe3735a1b01 | refs/heads/master | 2022-12-11T12:47:46.795376 | 2017-12-19T05:47:26 | 2017-12-19T05:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | /home/co/.cache/bazel/_bazel_co/2e35bede1f3fd334ff5ab28da2fc1540/execroot/org_tensorflow/bazel-out/k8-opt/genfiles/tensorflow/core/framework/tensor_pb2.py | [
"[email protected]"
] | |
c3f930a57f742244a509c73f88c22f23c4c54e86 | 53e1cfd01f4fb6ff6160b5292c471a3d77a48660 | /python/PyMOTW-2.0.1/PyMOTW/asyncore/asyncore_http_client.py | 1dc2f6ad29a731144b7d470a057f592bef0002d7 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | MarkTseng/mySampleCode | 446df156c14c04519fbfdad933ef3e04fb1d4e04 | e3c4c8589b634dc2b26681b5c2c704a39665a280 | refs/heads/master | 2022-06-04T08:28:49.317400 | 2020-06-12T07:23:32 | 2020-06-12T07:23:32 | 7,599,118 | 10 | 10 | null | 2022-05-14T00:31:27 | 2013-01-14T05:45:34 | C | UTF-8 | Python | false | false | 2,145 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2009 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import asyncore
import logging
import socket
from cStringIO import StringIO
import urlparse
class HttpClient(asyncore.dispatcher):
    """Asynchronous HTTP/1.0 GET client built on asyncore.dispatcher.

    The request is drained from write_buffer as the socket becomes writable;
    the response accumulates in read_buffer.
    """

    def __init__(self, url):
        self.url = url
        self.logger = logging.getLogger(self.url)
        self.parsed_url = urlparse.urlparse(url)
        asyncore.dispatcher.__init__(self)
        self.write_buffer = 'GET %s HTTP/1.0\r\n\r\n' % self.url
        self.read_buffer = StringIO()
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        address = (self.parsed_url.netloc, 80)
        self.logger.debug('connecting to %s', address)
        self.connect(address)

    def handle_connect(self):
        self.logger.debug('handle_connect()')

    def handle_close(self):
        self.logger.debug('handle_close()')
        self.close()

    def writable(self):
        """Interested in write events only while request bytes remain."""
        is_writable = len(self.write_buffer) > 0
        if is_writable:
            self.logger.debug('writable() -> %s', is_writable)
        return is_writable

    def readable(self):
        """Always ready to receive response data."""
        self.logger.debug('readable() -> True')
        return True

    def handle_write(self):
        sent = self.send(self.write_buffer)
        self.logger.debug('handle_write() -> "%s"', self.write_buffer[:sent])
        self.write_buffer = self.write_buffer[sent:]  # drop what was sent

    def handle_read(self):
        data = self.recv(8192)
        self.logger.debug('handle_read() -> %d bytes', len(data))
        self.read_buffer.write(data)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
clients = [
HttpClient('http://www.doughellmann.com/'),
HttpClient('http://www.doughellmann.com/PyMOTW/about/'),
]
logging.debug('LOOP STARTING')
asyncore.loop()
logging.debug('LOOP DONE')
for c in clients:
response_body = c.read_buffer.getvalue()
print c.url, 'got', len(response_body), 'bytes'
| [
"[email protected]"
] | |
a0a19a07d37174229a775fc5ab451b3a3396a995 | d9e0585e57b482d91e8af7514e683e2488e23381 | /padinfo/view/series_scroll.py | 8d6a99de4701c691fb50fc7dd9820cae2b135354 | [
"MIT"
] | permissive | TrendingTechnology/pad-cogs | d08abb8da8bf2763a4091a29139168d8c1d2333a | b913a4e16a6473b8b53fae4bda564bedcc82c876 | refs/heads/master | 2023-08-11T01:10:22.088761 | 2021-09-19T00:41:43 | 2021-09-19T00:41:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,190 | py | from typing import TYPE_CHECKING, List
from discordmenu.embed.base import Box
from discordmenu.embed.components import EmbedMain, EmbedField
from discordmenu.embed.text import BoldText
from discordmenu.embed.view import EmbedView
from tsutils.emoji import char_to_emoji
from tsutils.menu.footers import embed_footer_with_state
from tsutils.query_settings import QuerySettings
from padinfo.common.config import UserConfig
from padinfo.view.components.monster.header import MonsterHeader
from padinfo.view.components.view_state_base import ViewStateBase
if TYPE_CHECKING:
from dbcog.models.monster_model import MonsterModel
from dbcog.database_context import DbContext
class SeriesScrollViewState(ViewStateBase):
    """Menu state for scrolling through the base monsters of a series.

    Monsters are grouped by rarity; each rarity's monsters are split into
    pages of at most MAX_ITEMS_PER_PANE entries. The state tracks the
    current rarity, page, and optionally a highlighted row index, and can
    round-trip itself through the serialized ims dict (presumably
    "intra-message state" stored on the Discord message — TODO confirm).
    """

    # Upper bound on monsters displayed per page/pane.
    MAX_ITEMS_PER_PANE = 11

    def __init__(self, original_author_id, menu_type, raw_query, query, color, series_id,
                 paginated_monsters: List[List["MonsterModel"]], current_page, rarity: int,
                 query_settings: QuerySettings,
                 all_rarities: List[int],
                 title, message,
                 current_index: int = None,
                 max_len_so_far: int = None,
                 reaction_list=None, extra_state=None,
                 child_message_id=None):
        super().__init__(original_author_id, menu_type, raw_query,
                         extra_state=extra_state)
        # Highlighted row on the current page, or None when nothing is selected.
        self.current_index = current_index
        # Every rarity present in the series (used for rarity cycling).
        self.all_rarities = all_rarities
        # Monsters for the current rarity, pre-split into pages.
        self.paginated_monsters = paginated_monsters
        self.current_page = current_page or 0
        self.series_id = series_id
        self.rarity = rarity
        self.query_settings = query_settings
        self.idle_message = message
        self.child_message_id = child_message_id
        self.title = title
        self.reaction_list = reaction_list
        self.color = color
        self.query = query
        # Longest page length observed so far; never allowed to shrink.
        self._max_len_so_far = max(max_len_so_far or len(self.monster_list), len(self.monster_list))

    @property
    def monster_list(self) -> List["MonsterModel"]:
        """Monsters on the currently displayed page."""
        return self.paginated_monsters[self.current_page]

    @property
    def max_len_so_far(self) -> int:
        """Running maximum page length; refreshed against the current page."""
        self._max_len_so_far = max(len(self.monster_list), self._max_len_so_far)
        return self._max_len_so_far

    @property
    def current_monster_id(self) -> int:
        """Monster id of the highlighted row.

        NOTE(review): assumes current_index is not None — confirm callers
        only read this while a row is selected.
        """
        return self.monster_list[self.current_index].monster_id

    @property
    def pages_in_rarity(self) -> int:
        """Number of pages available for the current rarity."""
        return len(self.paginated_monsters)

    def serialize(self):
        """Serialize this state into the ims dict for storage on the message."""
        ret = super().serialize()
        ret.update({
            'pane_type': SeriesScrollView.VIEW_TYPE,
            'series_id': self.series_id,
            'query_settings': self.query_settings.serialize(),
            'current_page': self.current_page,
            'pages_in_rarity': self.pages_in_rarity,
            'title': self.title,
            'rarity': self.rarity,
            'all_rarities': self.all_rarities,
            'reaction_list': self.reaction_list,
            'child_message_id': self.child_message_id,
            'idle_message': self.idle_message,
            'max_len_so_far': self.max_len_so_far,
            'current_index': self.current_index,
        })
        return ret

    def get_serialized_child_extra_ims(self, emoji_names, menu_type):
        """Build the extra ims passed down to a spawned child menu."""
        extra_ims = {
            'is_child': True,
            'reaction_list': emoji_names,
            'menu_type': menu_type,
            'resolved_monster_id': self.current_monster_id,
            'query_settings': self.query_settings.serialize(),
            'idle_message': self.idle_message
        }
        return extra_ims

    @staticmethod
    async def deserialize(dbcog, user_config: UserConfig, ims: dict):
        """Rebuild a state from a serialized ims dict.

        Re-runs the monster query (results are not stored in ims) and
        clamps max_len_so_far against the freshly queried page. Returns
        None for transitions marked unsupported.
        """
        if ims.get('unsupported_transition'):
            return None
        series_id = ims['series_id']
        rarity = ims['rarity']
        all_rarities = ims['all_rarities']
        query_settings = QuerySettings.deserialize(ims.get('query_settings'))
        paginated_monsters = await SeriesScrollViewState.do_query(dbcog, series_id, rarity, query_settings.server)
        current_page = ims['current_page']
        title = ims['title']
        raw_query = ims['raw_query']
        query = ims.get('query') or raw_query
        original_author_id = ims['original_author_id']
        menu_type = ims['menu_type']
        reaction_list = ims.get('reaction_list')
        child_message_id = ims.get('child_message_id')
        current_index = ims.get('current_index')
        current_monster_list = paginated_monsters[current_page]
        max_len_so_far = max(ims['max_len_so_far'] or len(current_monster_list), len(current_monster_list))
        idle_message = ims.get('idle_message')
        return SeriesScrollViewState(original_author_id, menu_type, raw_query, query, user_config.color, series_id,
                                     paginated_monsters, current_page, rarity, query_settings,
                                     all_rarities,
                                     title, idle_message,
                                     current_index=current_index,
                                     max_len_so_far=max_len_so_far,
                                     reaction_list=reaction_list,
                                     extra_state=ims,
                                     child_message_id=child_message_id)

    @staticmethod
    async def do_query(dbcog, series_id, rarity, server):
        """Query the base monsters of a series at one rarity, split into pages."""
        db_context: "DbContext" = dbcog.database
        all_series_monsters = db_context.get_monsters_by_series(series_id, server=server)
        # Keep only evolution-tree bases that match the requested rarity.
        base_monsters_of_rarity = list(filter(
            lambda m: db_context.graph.monster_is_base(m) and m.rarity == rarity, all_series_monsters))
        # Chunk into fixed-size pages.
        paginated_monsters = [base_monsters_of_rarity[i:i + SeriesScrollViewState.MAX_ITEMS_PER_PANE]
                              for i in range(
                0, len(base_monsters_of_rarity), SeriesScrollViewState.MAX_ITEMS_PER_PANE)]
        return paginated_monsters

    @staticmethod
    def query_all_rarities(dbcog, series_id, server):
        """Sorted list of every rarity that has a base monster in the series."""
        db_context: "DbContext" = dbcog.database
        return sorted({m.rarity for m in db_context.get_all_monsters(server) if
                       m.series_id == series_id and db_context.graph.monster_is_base(m)})

    @staticmethod
    async def query_from_ims(dbcog, ims) -> List[List["MonsterModel"]]:
        """Run do_query using parameters taken from a serialized ims dict."""
        series_id = ims['series_id']
        rarity = ims['rarity']
        query_settings = QuerySettings.deserialize(ims['query_settings'])
        paginated_monsters = await SeriesScrollViewState.do_query(dbcog, series_id, rarity, query_settings.server)
        return paginated_monsters

    async def decrement_page(self, dbcog):
        """Go to the previous page, wrapping to the previous rarity's last page."""
        if self.current_page > 0:
            self.current_page = self.current_page - 1
            self.current_index = None
        else:
            # if there are multiple rarities, decrementing first page will change rarity
            if len(self.all_rarities) > 1:
                rarity_index = self.all_rarities.index(self.rarity)
                # Negative list indexing wraps from the first rarity to the last.
                self.rarity = self.all_rarities[rarity_index - 1]
                self.paginated_monsters = await SeriesScrollViewState.do_query(dbcog, self.series_id, self.rarity,
                                                                               self.query_settings.server)
                self.current_index = None
            self.current_page = len(self.paginated_monsters) - 1
            if len(self.paginated_monsters) > 1:
                self.current_index = None

    async def increment_page(self, dbcog):
        """Go to the next page, wrapping to the next rarity's first page."""
        if self.current_page < len(self.paginated_monsters) - 1:
            self.current_page = self.current_page + 1
            self.current_index = None
        else:
            # if there are multiple rarities, incrementing last page will change rarity
            if len(self.all_rarities) > 1:
                rarity_index = self.all_rarities.index(self.rarity)
                # Modulo wraps from the last rarity back to the first.
                self.rarity = self.all_rarities[(rarity_index + 1) % len(self.all_rarities)]
                self.paginated_monsters = await SeriesScrollViewState.do_query(dbcog, self.series_id, self.rarity,
                                                                               self.query_settings.server)
                self.current_index = None
            self.current_page = 0
            if len(self.paginated_monsters) > 1:
                self.current_index = None

    async def decrement_index(self, dbcog):
        """Move the highlight up one row, paging backwards off the top."""
        if self.current_index is None:
            self.current_index = len(self.monster_list) - 1
            return
        if self.current_index > 0:
            self.current_index = self.current_index - 1
            return
        await self.decrement_page(dbcog)
        self.current_index = len(self.monster_list) - 1

    async def increment_index(self, dbcog):
        """Move the highlight down one row, paging forwards off the bottom."""
        if self.current_index is None:
            self.current_index = 0
            return
        if self.current_index < len(self.monster_list) - 1:
            self.current_index = self.current_index + 1
            return
        await self.increment_page(dbcog)
        self.current_index = 0

    def set_index(self, new_index: int):
        """Set the highlighted row, ignoring out-of-range requests."""
        # don't want to go out of range, which will forget current index, break next, and break prev
        if new_index < len(self.monster_list):
            self.current_index = new_index
class SeriesScrollView:
    """Renders a SeriesScrollViewState as a Discord embed."""

    VIEW_TYPE = 'SeriesScroll'

    @staticmethod
    def embed(state: SeriesScrollViewState):
        """Build the embed: monster rows plus inline rarity and page fields."""
        monster_rows = Box(*SeriesScrollView._monster_list(state.monster_list,
                                                           state.current_index))
        rarity_field = EmbedField(BoldText('Rarities'),
                                  Box(SeriesScrollView._all_rarity_text(state)),
                                  inline=True)
        page_field = EmbedField(BoldText('Page'),
                                Box('{} of {}'.format(state.current_page + 1,
                                                      state.pages_in_rarity)),
                                inline=True)
        fields = [
            EmbedField(BoldText('Current rarity: {}'.format(state.rarity)), monster_rows),
            rarity_field,
            page_field,
        ]
        return EmbedView(
            EmbedMain(
                title=state.title,
                color=state.color,
            ),
            embed_footer=embed_footer_with_state(state),
            embed_fields=fields)

    @staticmethod
    def _all_rarity_text(state):
        """Comma-separated rarity list with the active rarity bolded."""
        pieces = []
        for value in state.all_rarities:
            if value == state.rarity:
                pieces.append('**{}**'.format(state.rarity))
            else:
                pieces.append(str(value))
        return ', '.join(pieces)

    @staticmethod
    def _monster_list(monsters, current_index):
        """One emoji-prefixed header line per monster; the selected row is unlinked."""
        if not monsters:
            return []
        rows = []
        for position, monster in enumerate(monsters):
            rows.append(MonsterHeader.short_with_emoji(
                monster,
                link=SeriesScrollView._is_linked(position, current_index),
                prefix=char_to_emoji(str(position))
            ))
        return rows

    @staticmethod
    def _is_linked(i, current_index):
        """Every row is linked except the currently selected index."""
        if current_index is None:
            return True
        return i != current_index
| [
"[email protected]"
] | |
7ae7198fa354aec4c1775a03d319da0d45d323ea | 0f07107b016d2aee64788966b9f0d322ac46b998 | /moya/testprojects/scratch/blog/py/test.py | 2b109b36a4486a66034ed4b3c5ad99d295a37b25 | [
"MIT"
] | permissive | fkztw/moya | 35f48cdc5d5723b04c671947099b0b1af1c7cc7a | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | refs/heads/master | 2023-08-09T09:20:21.968908 | 2019-02-03T18:18:54 | 2019-02-03T18:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from __future__ import print_function
import moya
@moya.expose.macro("test")
def test():
    """Macro exposed to Moya as "test"; prints a marker and returns 10."""
    outcome = 10
    print("Success! :-)")
    return outcome
| [
"[email protected]"
] | |
ab16cc2ec6de6847a7b06e4883b4b8156aea149b | 228b0a0073884ae5e8c1035998e6a8cb9e4a4917 | /1. Strings/StringsInPython.py | bd0051dffaf116a3bfe8c8bb5974e4bd8e8a4606 | [] | no_license | geekysid/Python-Basics | dd4a6a9a4aae97377182c93b445313af41393995 | 7cc0a7ad55fdd7633a2d5b806a916af097428f66 | refs/heads/master | 2022-04-03T13:03:34.958479 | 2020-01-31T15:32:56 | 2020-01-31T15:32:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,489 | py | # Author: Siddhant Shah
# Descp: Understanding and working with strings
# In Python we can use strings as below
myName = "Siddhant"
print(myName)
# We can place string literals in between single quotes (') or in between double quotes (").
# If we start a string using single quote then it must end with single quote only and if we
# start a quote with double quotes then it must end with double quotes only as shown below.
print("Stepping into world of Python with Double Quotes")
print('Stepping into world of Python with Single Quotes')
# Escape Characters in Strings
print('\nESCAPE CHARACTERS IN STRINGS')
# Like other languages, python also provides escape characters to add some type of formatting to the string
print('This \nString \nUses \nNew Line \nEscape \nCharacter') # This string uses an special escape character (\n) to
# add a new line
print('This \tString \tUses \tTab \tEscape \tCharacter') # This string uses an special escape character (\t) to add
# tab between characters.
print('This \\String \\Uses \\BackLash \\Escape \\Character') # This string uses an special escape character (\\) to
# add backlash between characters
print('This \'String \'Uses \'SingleQuote \'Escape \'Character') # This string uses an special escape character (\')
# to add single quotes between characters
print('This \"String \"Uses \"DoubleQuote \"Escape \"Character') # This string uses an special escape character (\")
# to add double quotes between characters
# If we want to print single quotes in string then instead of using escape characters, we can simply
# define the string using double quotes and vice vera
print("This 'String 'Displays 'SingleQuote 'Without 'Any 'Escape 'Character")
print('"This "String "Displays "DoubleQuote "Without "Any "Escape "Character')
# STRING with INDEX
print('\nSTRINGS WITH INDEX')
# In Python we can fetch different parts of strings using the index. For this we need to assign a variable to the
# string and then access different characters of strigs using index of that character (variableName[idexOfCharacter])
completeString = 'Python is a really useful coding language'
print(completeString)
partOfString = completeString[0] # fetching the 3rd character from sting stored in 'completeString' variable.
# It is important to note that indices for a string starts from 0 (left to right),
# i.e. index for the 1st character of the string is always 0, 2nd character is 1
# and so on
print(partOfString)
partOfString = completeString[1]
print(partOfString)
partOfString = completeString[2]
print(partOfString)
# Negative indices in String
print('\nNEGATIVE INDEXES IN STRINGS')
# We can even use negative numbers as indices to fetch part of string. In this case the last character of the
# string is represented as -1, second last character is -2 and so on
completeString = 'Python is a really useful coding language'
print(completeString)
partOfString = completeString[-1] #fetching the last character from sting stored in 'completeString' variable.
# It is important to note that negative indices for a string starts from -1
# (right to left), i.e. index for the last character of the string is always
# -1, 2nd last character is -2 and so on
print(partOfString)
partOfString = completeString[-2]
print(partOfString)
partOfString = completeString[-3]
print(partOfString)
partOfString = completeString[-4]
print(partOfString)
# Slicing of String
print('\nSLICING OF STRINGS')
# So far we have seen how to access each characters of strings. In python we can even access part of strings instead
# of a single character. We do it by what we call SLICING OF A STRING. [start:stop:step]. Using this we can access any
# part of string that we need.
completeString = 'Lets slice this string'
print(completeString)
print(completeString[1:4]) # slicing string form character with index 1 upto character with index 4. (Character
# with index 4 is not included)
print(completeString[5:]) # slicing string form character with index 5 upto end
print(completeString[:6]) # slicing string form start of string upto character with index 6. (Character with
# index 6 is not included)
print(completeString[2:15:3]) # slicing string form character with index 2 upto character with index 15. Here only
# the 3rd charcter will be part of sliced string and all other characers will be
# skipped. So here we will have characters with indices 2,5,8,11 and 14
print(completeString[2:15:2]) # slicing string form character with index 2 upto character with index 15. Here only
# the 2nd charcter will be part of sliced string and all other characers will be
# skipped. So here we will have characters with indices 2, 4, 6, 8, 10, 12 and 14
print(completeString[::-1])
print(completeString[-10:-1])
# Immutable String
print('\nSTRINGS are IMMUTABLES')
# Even though we can access different part/characters of strings using indexes and slicing, we cant change string.
# This makes strings Immutables.
string1 = "This is String 1"
string2 = string1[:-1] + "2" # here we slice the string and used concatenation to change the string.
print(string2)
days = "Mon, Tue, Wed, Thu, Fri, Sat, Sun"
print('Mon' in days)
# Exercise: String = "abcdefghijklmnopqrstuvwxyz"
# Create a slice that produce characters qpo
# Create a slice that produce characters edcba
# Create a slice that produce last 8 characters in reverse order
print("Exercise")
alphabets = "abcdefghijklmnopqrstuvwxyz"
print(alphabets[-12:-9][::-1])
string2 = alphabets[:5]
print(string2[::-1])
print(alphabets[26:17:-1])
# STRING REPLACEMENT
# In python every datatype can be converter into String. we can use function str() to convert anything into string.
print('STRING REPLACEMENT')
num1 = 12
num2 = 13
print(num1 + num2) # Hete '+' performs the task of simple addition
print(str(num1) + str(num2)) # after converting num1 and num2 to string we can see that the two string just
# concatenates instead of adding together as in previous command
# STRING REPLACEMENT
# Python provides a function 'format()' which we can use with string to provide value to a placeholder'{}' dynamically.
print('STRING REPLACEMENT')
name = "Sid"
age = 22
print('your name is {} and your age is {}'.format(name, age)) # Here we have two place holder '{}' and we have provided
# value to the two place holders by passing an arguments
# to the format function. It is important to note that
# the placeholders takes arguments in sequential way if
# not mentioned oher way. In next command we will provide
# indexes in placeholders. These indices willindicate the
# which argument will be placed in place holder
print('Your name is {0} and your age is {1}'.format(name, age)) # Here we can see that we have provided indices to
# placeholders to indicate which arguments passed in
# format function will be placed in which place holder
print('Your name is {1} and your age is {0}'.format(age, name)) # Here we can see that even though arguments in format
# funtion is not in proper sequesnce, the indices used in
# placeholders make sure that we get the proper value in
# each placeholder
print('Your name is {n} and your age is {a}'.format(a=age, n=name)) # Here we have given a proper name to to each argument
# in format function and used those names as indices in
# placeholders. This is really an easy way to make sure
# we have placed correct value in each placeholder
# Examples
print('We can {r} {r} {r} any that can be {fun}'.format(r='repeat', fun='fun', h='hahaha'))
print("Jan: {2} Days, Feb: {0} Days, Mar: {2} Days, Apr: {1} Days, May: {2} Days, Jun: {1} Days, Jul: {2} Days".format(28,30,31))
print("""Jan: {2} Days
Feb: {0} Days
Mar: {2} Days
Apr: {1} Days
May: {2} Days
Jun: {1} Days
Jul: {2} Days""".format(28,30,31))
# STRING FORMATTING We can use string formatting in order for proper alignment or decimal places or white spaces before
# or after any digit. We cn achieve this by using {index:width.FloatingNumber}. Index is the positing of arguments
# passed in format function, width is the the total place that the value will take (if width provided is more then the
# characters of value then we'll have white space (and if width is less then total characters in value then also we will
# have all characters displayed) and last but not the least, Floating Number is the total number of characters that
# will be displayed after decimal. It is important to note that floating number can oly be associated with integer or
# decimal. We get an error if floating number is provided for string. If decimal numbers are less then number of
# floating number then for ever extra number we will have 0 place at the end. If floating number is less then the
# digits after decimal then digits will be rounded off according to floating number.
# We can even specify left, right or center alignment of the value using '<', '>', or '^' respectively after colon(:)
# and before width. We will see example of all these below.
print('\nSTRING FORMATTING')
print("We are simply using for loop (will study about it later) to print number and its square and cube value.")
for i in range (2, 6):
print('No. {0} has square value of {1} and cube value of {2}'.format(i, i**2, i**3))
print()
print("Now we are providing width for every value. the 1st placeholder will have width of 1 digit, 2nd will have\n"
"width of 2 digits and 3rd will have width of 3 digits. Notice that output will be right aligned by default\n "
"even though we have not provided any '>' operator.")
for i in range(2,6):
print('No. {0:1} has square value of {1:2} and cube value of {2:3}'.format(i, i**2, i**3))
print()
print("""Here we, along with providing width of to the placeholder, we have also provided alignment operators.
The 1st placeholder has ^ operator which signifies center alignment, 2nd placeholder has < that signifies
left alignment and 3rd placeholder have > which signifies right alignment""")
for i in range(2,6):
print('No. {0:1} has square value of {1:<2} and cube value of {2:>3}'.format(i, i**2, i**3))
print()
print("""In this section we have increased the width of each place holder to 5 digits and used same alingmnet
operators as in last section to see the difference""")
for i in range(2,6):
print('No. {0:^5} has square value of {1:<5} and cube value of {2:>5}'.format(i, i**2, i**3))
print()
print("""In this section we will test the floating numbers along with width. We have given 2 floating number to 1st
placeholder, 3 floating number to 2nd place holder and 4 to 3rd place holder. As any of our value have no
decimal points, so each value is followed by n number of 0 after decimal point where n is value of floating number.""")
for i in range(2,6):
print('No. {0:1.2f} has square value of {1:<8.3F} and cube value of {2:>9.4f}'.format(i, i**2, i**3))
print()
print("""Here we will see another example of floating number.""")
print("""Value of PI: {0:5}
Value of PI: {0:5.3f}
Value of PI: {0:5.2f}
Value of PI: {0:5.0f}
Value of PI: {0:5.60f}""".format(22/7))
# f STRINGS
# f String, also known as Formatted Strings is another way or rather a faster and easier way to format string. Here
# instead of using format function we simply pass the value along with width and floating expression directly into
# placeholder. We do this by placing 'f' before the starting of string (before quote) as shown below. Also the
# formatting of the string is exactly same as before. Lets dive into examples....
print("\nf STRINGS")
for i in range(2,6):
print(f"No. {i} has square value of {i**2} and cube value of {i**3}")
print()
for i in range(2,6):
print(f"No. {i:2} has square value of {(i**2):3} and cube value of {i**3:4}")
print()
for i in range(2,6):
print(f"No. {i:^3} has square value of {(i**2):<3} and cube value of {i**3:>4}")
print()
for i in range(2,6):
print(f"No. {i:^5.2f} has square value of {(i**2):<6.3f} and cube value of {i**3:>8.4f}")
print()
pi=22/7
print(f"""Value of PI: {pi}
Value of PI: {pi:6}
Value of PI: {pi:6.0f}
Value of PI: {pi:6.2f}
Value of PI: {pi:6.60f}
""")
# STRING FUNCTIONS
print("STRING FUNCTIONS")
string1 = "This is a string"
print("Our string is " + string1 + "\n")
print("Using function upper() to convert string to upper case\nstring1.upper(): " + string1.upper())
print("\nUsing function lower() to convert string to lower case\nstring1.lower(): " + string1.lower())
print(f"\nUsing function isupper() returns true if string has only upper case letter\nstring1.isupper(): "
f"{string1.isupper()}")
print(f"\nUsing function capitalize() returns string with 1st character as capital\nstring1.capitalize(): "
f"{string1.capitalize()}")
print(f"\nUsing function count() returns number of times string literal passed as an argument appear in "
f"string\nstring1.count('s'): {string1.count('s')}")
print(f"\nUsing function endswith() returns true if string ends with string literal passed as an argument "
f"\nstring1.endswith('ng'): {string1.endswith('g')}")
print(f"\nUsing function isnumeric() returns true if string is numeric else returns false\nstring1.isnumeric(): "
f"{string1.isnumeric()}")
print(f"\nUsing function isalpha() returns true if string is numeric else returns false\nstring1.isalpha(): "
f"{string1.isalpha()}")
print(f"\nUsing replace() function, we can replace characters in given literal\nstring1.replace('string', 'literals'): "
f"{string1.replace('string', 'literals')}")
print(f"\nUsing split() function, we can split the string whenever a literal passed in the argument appears in \n"
f"the string. 2nd argument is optional and signifies number of times the string will be splitted.\nstring1.split("
f"'s'): {string1.split('s')}")
print(f"\nUsing strip() function, we can remove the string literals passed in the argument of the function. It is "
f"\nimportant to note that the literals passed as arguments must be the starting literals of the string else it "
f"\nwont work. if no argument is passed then the space is removed if it is at the beginning of the "
f"string.\nstring1.strip('This'): {string1.strip('This')}")
print(f"\nUsing index() function, we get index of1st occurence of the literal passed in the argument of the function. "
f"Itis important to note that the 2nd and 3rd arguments are option and signifies starting and ending point from"
f"\nwhere the search will start and end.\nstring1.index('s',2,10): {string1.index('s',2,10)}")
| [
"[email protected]"
] | |
c15d4b3566815e61f9fc93ba6b974c34f358c170 | e2ad93398194942c13c27b25aa868eda4ff4f46c | /sponsortracker/download.py | 987305d12d281d57363d875f7b84883ec45f917e | [] | no_license | Auzzy/bfig-sponsor-tracker | ab2fbcf6ba562f977263b5f91c3aca756e037302 | cff466de6797ea276130335bdc368c706eed583d | refs/heads/master | 2023-06-01T04:41:26.082120 | 2023-05-11T11:52:11 | 2023-05-11T11:52:11 | 25,780,177 | 0 | 0 | null | 2023-05-11T11:52:12 | 2014-10-26T16:14:46 | Python | UTF-8 | Python | false | false | 2,989 | py | import collections
import os
import shutil
import tempfile
from enum import Enum
from os.path import exists, expanduser, join, splitext
from sqlalchemy import or_
from sponsortracker import model, uploads
from sponsortracker.data import AssetType, Level
ZIPNAME = "sponsortracker-assets"
def all(level=None):
return download(level=level)
def website_updates(start, level=None):
asset_filter = lambda deal: [asset for asset in deal.assets_by_type[AssetType.LOGO] if asset.date >= start]
return download('updates', asset_filter=asset_filter, level=level)
def logo_cloud(level=None):
asset_filter = lambda deal: deal.assets_by_type[AssetType.LOGO]
return download('logocloud', by_sponsor=False, info=False, asset_filter=asset_filter, level=level)
def download(zipname=ZIPNAME, by_sponsor=True, info=True, asset_filter=lambda deal: deal.assets, level=None):
with tempfile.TemporaryDirectory() as tempdir:
zipdir = join(tempdir, zipname)
os.makedirs(zipdir)
for deal in model.Deal.query.filter(model.Deal.level_name != ""):
if deal.level in (Level.SERVICE, Level.BRONZE, Level.BRONZE_BENEFITS, Level.SILVER, Level.GOLD, Level.PLATINUM) or deal.contract.received != None or deal.invoice.received != None:
if not level or deal.level_name == level:
target = join(*[zipdir, deal.level.name.lower()] + ([deal.sponsor.name] if by_sponsor else []))
os.makedirs(target, exist_ok=True)
if info:
_info_to_file(target, deal.sponsor)
_copy_assets(target, asset_filter(deal))
return shutil.make_archive(expanduser(join("~", zipname)), "zip", root_dir=tempdir)
def _info_to_file(target, sponsor):
if sponsor.link or sponsor.description:
with open(join(target, "info.txt"), 'w') as info_file:
if sponsor.link:
info_file.write(sponsor.link + "\n\n")
if sponsor.description:
info_file.write(sponsor.description)
def _copy_assets(target, assets):
for asset in assets:
name = '-'.join([asset.deal.sponsor.name.lower(), asset.type.name.lower()])
ext = splitext(asset.filename)[-1].lstrip('.')
dest = os.path.join(target, "{name}.{ext}".format(name=name, ext=ext))
uploads.Asset.get(asset.deal, asset.filename, dest)
'''
path = asset_uploader.path(asset.filename)
ext = splitext(asset.filename)[-1].lstrip('.')
name = '-'.join([asset.sponsor.name.lower(), asset.type.name.lower()])
shutil.copy(path, _filepath(target, name, ext))
'''
'''
def _filepath(target, basename, ext):
num = 2
name = "{name}.{ext}".format(name=basename, ext=ext)
while exists(join(target, name)):
name = "{name}_{num}.{ext}".format(name=basename, num=num, ext=ext)
num += 1
return join(target, name)
''' | [
"[email protected]"
] | |
ecec82f7d6a458418140579021abfe8fd06af04d | d5934c0624095112533201ca748e035cf33e19c1 | /CodeWars.py | cf1e344b5cb120c23316d6af8e874e87e7799ead | [] | no_license | waithope/codewars | b5bbb81193cb1b98830024c16b2470c5b0d070c9 | 315d9dca4e0163b03409a2d806ce0f809353a991 | refs/heads/master | 2020-03-14T15:49:17.211859 | 2018-05-10T04:30:40 | 2018-05-10T04:30:40 | 131,684,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,745 | py |
# 0123456789
# 0##########
# 1## ##
# 2# # # #
# 3# # # #
# 4# ## #
# 5# ## #
# 6# # # #
# 7# # # #
# 8## ##
# 9##########
# rowCount = 10
# columnCount = 10
# for i in range(rowCount):
# for j in range(columnCount):
# if i == 0 or i == rowCount - 1 or j == 0 or \
# j == columnCount - 1 or i == j or j == columnCount - i - 1:
# print("#", end='')
# else:
# print(" ", end='')
# print()
def high_and_low(numbers):
    """Return "<max> <min>" for a space-separated string of integers.

    The earlier hand-rolled min/max scan was left behind as commented-out
    code inside the function; it is removed here in favor of the builtin
    max/min over a parsed list.
    """
    values = [int(token) for token in numbers.split(' ')]
    return str(max(values)) + ' ' + str(min(values))
# print(high_and_low("4 5 29 54 4 0 -214 542 -64 1 -3 6 -6"))
## Descending Order
# def Descending_Order(num):
# return int(''.join(sorted(str(num), reverse=True)))
# print(Descending_Order(10147237031))
# # initialize
# a = []
# # create the table (name, age, job)
# a.append(["Nick", 30, "Doctor"])
# a.append(["John", 8, "Student"])
# a.append(["Paul", 22, "Car Dealer"])
# a.append(["Mark", 66, "Retired"])
# # sort the table by age
# import operator
# a.sort(key=operator.itemgetter(0, 1), reverse=True)
# # print the table
# print(a)
def DNA_strand(dna):
    """Return the complementary DNA strand (A<->T, C<->G)."""
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement[base] for base in dna)
def DNA_strand_v2(dna):
    """Return the complementary DNA strand via a translation table."""
    table = str.maketrans('ATCG', 'TAGC')
    return dna.translate(table)
# assert(DNA_strand('ATTGC') == 'TAACC')
## Given a string, replace every letter with its position in the alphabet.
## a being 1, b being 2, etc.
def alphabet_position(text):
    """Map each letter of text to its 1-based alphabet position, space-separated.

    Non-letter characters are dropped.
    """
    positions = []
    for ch in text:
        if ch.isalpha():
            positions.append(str(ord(ch.lower()) - ord('a') + 1))
    return ' '.join(positions)
# print(alphabet_position('asdjfak'))
## Take a list of non-negative integers and strings
## Returns a new list with the strings filtered out.
def filter_list(l):
    """Return l with every string element removed.

    The original tested `item is not str(item)`, an identity check that only
    works because CPython's str() returns exact-str instances unchanged;
    isinstance states the intent portably and reliably.
    """
    return [item for item in l if not isinstance(item, str)]
def filter_list_v2(l):
    """Return l with every string element removed."""
    kept = []
    for element in l:
        if not isinstance(element, str):
            kept.append(element)
    return kept
# print(filter_list([1,2,'aasf','1','123',123]) == [1,2,123])
## Decode morse_code
# Morse-code symbol -> character lookup table (letters, digits, punctuation,
# and the special SOS sequence), used by the decoder functions below.
MORSE_CODE = {
    '.-': 'A', '-...': 'B', '-.-.': 'C', '-..': 'D', '.': 'E', '..-.': 'F',
    '--.': 'G', '....': 'H', '..': 'I', '.---': 'J', '-.-': 'K', '.-..': 'L',
    '--': 'M', '-.': 'N', '---': 'O', '.--.': 'P', '--.-': 'Q', '.-.': 'R',
    '...': 'S', '-': 'T', '..-': 'U', '...-': 'V', '.--': 'W', '-..-': 'X',
    '-.--': 'Y', '--..': 'Z',
    '-----': '0', '.----': '1', '..---': '2', '...--': '3', '....-': '4',
    '.....': '5', '-....': '6', '--...': '7', '---..': '8', '----.': '9',
    '.-.-.-': '.', '--..--': ',', '..--..': '?', '.----.': "'", '-.-.--': '!',
    '-..-.': '/', '-.--.': '(', '-.--.-': ')', '.-...': '&', '---...': ':',
    '-.-.-.': ';', '-...-': '=', '.-.-.': '+', '-....-': '-', '..--.-': '_',
    '.-..-.': '"', '...-..-': '$', '.--.-.': '@', '...---...': 'SOS'
}
def decodeMorse(morse_code):
    """Decode Morse code: letters separated by one space, words by three.

    Splitting on single spaces turns each three-space word gap into two
    consecutive empty tokens; emitting one output space on the second
    empty token reconstructs the word boundary.

    Fixes `if ele is '':` — an identity comparison with a string literal
    (a SyntaxWarning in modern Python and not guaranteed by the language)
    — by using equality instead.
    """
    tokens = morse_code.strip().split(' ')
    empty_run = 0
    output = ''
    for token in tokens:
        if token == '':
            empty_run += 1
            if empty_run == 2:
                empty_run = 0
                output += ' '
        else:
            output += MORSE_CODE[token]
    return output
def decodeMorse_v2(morse_code):
    """Decode Morse code by splitting words on triple spaces."""
    decoded_words = []
    for word in morse_code.strip().split('   '):
        letters = [MORSE_CODE[symbol] for symbol in word.split(' ')]
        decoded_words.append(''.join(letters))
    return ' '.join(decoded_words)
# print(decodeMorse_v2(".... . -.-- .--- ..- -.. ."))
## persistence(999) => 4 # Because 9*9*9 = 729, 7*2*9 = 126,
## # 1*2*6 = 12, and finally 1*2 = 2.
def persistence(n):
    """Multiplicative persistence: steps of digit-multiplication until one digit.

    Iterative form of the original recursive implementation.
    """
    steps = 0
    digits = list(str(n))
    while len(digits) > 1:
        product = int(digits[0])
        for digit in digits[1:]:
            product *= int(digit)
        digits = list(str(product))
        steps += 1
    return steps
from functools import reduce
## reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5)
def persistence_v2(n):
    """Multiplicative persistence computed with an explicit product loop."""
    digits = [int(ch) for ch in str(n)]
    steps = 0
    while len(digits) > 1:
        product = 1
        for digit in digits:
            product *= digit
        digits = [int(ch) for ch in str(product)]
        steps += 1
    return steps
# print(persistence_v2(999))
def get_sign(x):
    """Return 1 for positive x, -1 for negative x, 0 for zero."""
    if x > 0:
        return 1
    if x < 0:
        return -1
    return 0
# print(get_sign(-1))
## Write a function to calculate the absolute value of a 32-bit integer
def myabs(x):
    """Branch-free absolute value for a 32-bit integer.

    For negatives in 32-bit range, x >> 31 is -1 (all ones); for
    non-negatives it is 0. XOR with the mask then subtracting the mask
    negates x exactly when it is negative.
    """
    mask = x >> 31
    return (x ^ mask) - mask
# print(myabs(7))
# import random
# print(random.randrange(10))
## Dig Pow 89 = 8^1 + 9^2
def sum_dig_pow(a, b):
    """Numbers k in [a, b] equal to the sum of their digits raised to
    consecutive 1-based powers (e.g. 89 = 8**1 + 9**2)."""
    matches = []
    for candidate in range(a, b + 1):
        total = 0
        for position, digit in enumerate(str(candidate), 1):
            total += int(digit) ** position
        if total == candidate:
            matches.append(candidate)
    return matches
def dig_pow(n):
    """Sum of each digit of n raised to its 1-based position."""
    total = 0
    for position, digit in enumerate(str(n), 1):
        total += int(digit) ** position
    return total
def sum_dig_pow_v2(a, b):
    """Comprehension variant of sum_dig_pow with the digit-power sum inlined."""
    return [num for num in range(a, b + 1)
            if num == sum(int(ch) ** pos for pos, ch in enumerate(str(num), 1))]
# print(sum_dig_pow_v2(89,135))
def countBits(n):
    """Number of set bits in n (0 for non-positive input)."""
    total = 0
    while n > 0:
        total += n & 1
        n >>= 1
    return total
# unique_in_order('AAAABBBCCDAABBB') == ['A', 'B', 'C', 'D', 'A', 'B']
# unique_in_order('ABBCcAD') == ['A', 'B', 'C', 'c', 'A', 'D']
# unique_in_order([1,2,2,3,3]) == [1,2,3]
def unique_in_order(iterable):
    """Collapse runs of equal consecutive elements to a single element."""
    collapsed = []
    previous = None
    for element in iterable:
        if element == previous:
            continue
        collapsed.append(element)
        previous = element
    return collapsed
# print(unique_in_order([]))
def duplicate_count(text):
    """Count case-insensitive characters occurring more than once in text.

    Fixes the original `set(list(text.lower))` — the method was never
    called, so every invocation raised TypeError trying to iterate a
    bound method object. Also lowercases once instead of per character.
    """
    lowered = text.lower()
    duplicates = 0
    for char in set(lowered):
        # str.count gives the number of occurrences of this character.
        if lowered.count(char) > 1:
            duplicates += 1
    return duplicates
def duplicate_count_v2(text):
    """Count case-insensitive characters occurring more than once in text."""
    # Lowercase once up front; the original recomputed text.lower() both
    # for the set and for every count() call.
    lowered = text.lower()
    return len([c for c in set(lowered) if lowered.count(c) > 1])
# print(duplicate_count_v2("aaBbccddeeffgg"))
# add 2 integers using bitwise operations
# but need to deal with special case a < 0; b > 0 abs(a) < b
def add(a, b):
    """Add two integers using only bitwise operations, with 32-bit
    wrap-around semantics.

    The original unmasked carry loop never terminates for some mixed-sign
    inputs (e.g. add(-1, 1)): Python integers have no fixed width, so the
    carry keeps shifting left forever. Masking every step to 32 bits
    bounds carry propagation; the final step reinterprets the 32-bit
    pattern as a signed value.
    """
    MASK = 0xFFFFFFFF
    a &= MASK
    b &= MASK
    while b:
        # XOR is the sum without carries; AND<<1 is the carry.
        a, b = (a ^ b) & MASK, ((a & b) << 1) & MASK
    return a if a <= 0x7FFFFFFF else ~(a ^ MASK)
print(add(-1, -800))  # module-level demo call; runs whenever this file is imported
def reverseWords(str):
    """Return the words of *str* in reverse order (split on single spaces)."""
    pieces = str.split(' ')
    pieces.reverse()
    return ' '.join(pieces)
# print(reverseWords("hello world"))
## if a portion of str1 characters can be rearranged to match str2,
## otherwise returns false.
# Only lower case letters will be used (a-z).
# No punctuation or digits will be included.
# Performance needs to be considered.
# scramble('rkqodlw', 'world') ==> True
# scramble('katas', 'steak') ==> False
##cost time 4861ms
def scramble_v1(s1, s2):
    """True if some subset of s1's characters can be rearranged into s2."""
    return all(s1.count(ch) >= s2.count(ch) for ch in set(s2))
##cost time 5865ms
def scramble_v2(s1, s2):
    """True if some subset of s1's characters can be rearranged into s2.

    Replaces two hand-rolled if/else histogram loops plus a final
    comparison pass with a single tally of s1 and an early-exit consume
    pass over s2: going below zero means s1 lacks a needed character.
    """
    available = {}
    for char in s1:
        available[char] = available.get(char, 0) + 1
    for char in s2:
        remaining = available.get(char, 0) - 1
        if remaining < 0:
            return False
        available[char] = remaining
    return True
## cost time 6396ms
## cost time 6396ms
def scramble_v3(s1, s2):
    """Tally lowercase letter counts of s1 minus s2; True if none go negative."""
    tally = [0] * 26
    for ch in s1:
        tally[ord(ch) - 97] += 1
    for ch in s2:
        tally[ord(ch) - 97] -= 1
    return all(count >= 0 for count in tally)
## Divisors of 42 are : 1, 2, 3, 6, 7, 14, 21, 42.
## These divisors squared are: 1, 4, 9, 36, 49, 196, 441, 1764.
## The sum of the squared divisors is 2500 which is 50 * 50, a square!
## Given two integers m, n (1 <= m <= n) we want to find all integers between m and n
## whose sum of squared divisors is itself a square. 42 is such a number.
import math
def list_squared(m, n):
    """Return [num, s] for every num in [m, n] whose sum of squared
    divisors s is itself a perfect square (e.g. 42 -> 2500 = 50**2).

    Fixes: uses integer math (math.isqrt) instead of float sqrt, whose
    precision can mis-classify large perfect squares; also stops shadowing
    the builtin ``sum``.
    """
    res = []
    for num in range(m, n + 1):
        total = 0
        # divisors come in pairs (i, num // i), so scan up to sqrt(num)
        for i in range(1, math.isqrt(num) + 1):
            if num % i == 0:
                total += i * i
                partner = num // i
                if partner != i:
                    total += partner * partner
        root = math.isqrt(total)
        if root * root == total:
            res.append([num, total])
    return res
## If the input number is already a palindrome, the number of steps is 0.
## Input will always be a positive integer.
##For example, start with 87:
## 87 + 78 = 165; 165 + 561 = 726; 726 + 627 = 1353; 1353 + 3531 = 4884
##4884 is a palindrome and we needed 4 steps to obtain it, so palindrome_chain_length(87) == 4
def is_palindrome(n):
    """True if the decimal representation of *n* reads the same reversed."""
    text = str(n)
    return text == text[::-1]
def palindrome_chain_length(n):
    """Number of reverse-and-add steps until *n* becomes a palindrome."""
    steps = 0
    while str(n) != str(n)[::-1]:
        n += int(str(n)[::-1])
        steps += 1
    return steps
# print(palindrome_chain_length(87))
## Breadcrumb Generator
# Short stop words skipped when abbreviating long breadcrumb segments
# (see generate_bc: segments longer than 30 chars become acronyms).
ignore_words = ["the", "of", "in", "from", "by", "with", "and",
                "or", "for", "to", "at", "a"
                ]
def generate_bc(url, separator):
    """Build an HTML breadcrumb trail for *url*, with crumbs joined by *separator*."""
    # Drop the scheme ("http://", "https://") if present.
    if url.startswith("http"):
        url = url.split("//")[1]
    crumb = url.split('/')
    # Strip file extension, query string and fragment from the last segment.
    crumb[-1] = crumb[-1].split('.')[0].split('?')[0].split('#')[0]
    # An empty trailing segment or an index page gets no crumb of its own.
    if crumb[-1] in ('', 'index'):
        crumb.pop()
    n = len(crumb)
    processed_parts = []
    for i, level in enumerate(crumb):
        aux = level
        if i == 0:
            # Site root: active (non-link) when it is the only crumb.
            if n == 1:
                processed_parts.append('<span class="active">HOME</span>')
            else:
                processed_parts.append('<a href="/">HOME</a>')
        else:
            if len(level) > 30:
                # Long segments are abbreviated to the initials of their
                # hyphen-separated words, skipping stop words.
                aux = ''.join([entry[0] for entry in level.split('-')
                               if entry not in ignore_words
                               ])
            else:
                aux = ' '.join(aux.split('-'))
            # Intermediate crumbs link to the cumulative path so far.
            if i > 1 and i <= n - 2:
                level = "/".join(crumb[1:i+1])
            if i == n - 1:
                # Last crumb is the active (non-link) element.
                processed_parts.append('<span class="active">%s</span>' % aux.upper())
            else:
                processed_parts.append('<a href="/%s/">%s</a>' % (level, aux.upper()))
    return separator.join(processed_parts)
## hamming number
# Write a function that computes the nth smallest Hamming number.
# Specifically:
# The first smallest Hamming number is 1 = 2^0 * 3^0 * 5^0
# The second smallest Hamming number is 2 = 2^1 * 3^0 * 5^0
# The third smallest Hamming number is 3 = 203150
# The fourth smallest Hamming number is 4 = 223050
# The fifth smallest Hamming number is 5 = 203051
def hamming(n):
    """Return the n-th smallest Hamming number (2^i * 3^j * 5^k), 1-based."""
    seq = [0] * n
    seq[0] = 1
    i2 = i3 = i5 = 0  # indices of the next multiplicand for 2, 3, 5
    for k in range(1, n):
        nxt = min(seq[i2] * 2, seq[i3] * 3, seq[i5] * 5)
        seq[k] = nxt
        # advance every pointer that produced this value (dedup, e.g. 6)
        if nxt == seq[i2] * 2:
            i2 += 1
        if nxt == seq[i3] * 3:
            i3 += 1
        if nxt == seq[i5] * 5:
            i5 += 1
    return seq[-1]
## original version also bad code
# Memo of already-computed Hamming numbers, keyed by 1-based index.
hamset = {1: 1}
divisors = [2, 3, 5]
def hamming_v2(n):
    """Return the n-th Hamming number by brute-force search, memoized.

    Fix: uses integer division for the divisibility test; the original
    float division ``(rem / div).is_integer()`` loses precision for large
    values and can mis-classify candidates.
    """
    if n in hamset:
        return hamset[n]
    # Resume the search from the largest index computed so far.
    i = max(hamset) + 1
    while i <= n:
        candidate = hamset[i - 1]
        while True:
            candidate += 1
            rem = candidate
            # strip all factors of 2, 3 and 5
            for div in divisors:
                while rem % div == 0:
                    rem //= div
            if rem == 1:  # nothing but 2/3/5 factors -> Hamming number
                break
        hamset[i] = candidate
        i += 1
    return hamset[n]
# Strip Comments
# result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# result should == "apples, pears\ngrapes\nbananas"
def solution(string, markers):
    """Strip, on every line, everything from the first marker character on,
    plus any trailing whitespace left behind.

    Fix: removed a leftover debug ``print(parts)``.
    """
    parts = string.split('\n')
    for marker in markers:
        parts = [part.split(marker)[0].rstrip() for part in parts]
    return '\n'.join(parts)
# solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# Original Version
# Original Version
def solution_v2(string, markers):
    """Character-scan variant: blank out chars from a marker to end of line,
    plus the single space immediately before the marker.

    Fix: guard ``i > 0`` — for a marker at position 0 the original read
    ``s[-1]`` and wrongly blanked the LAST character of the whole string.
    """
    stripping = 0
    chars = list(string)
    for i in range(len(chars)):
        if chars[i] in markers:
            stripping = 1
            if i > 0 and chars[i - 1] == ' ':
                chars[i - 1] = ''
        if chars[i] == "\n":
            stripping = 0
        if stripping == 1:
            chars[i] = ''
    return ''.join(chars)
# How many numbers III?
# Generate all the numbers of three digits that:
# the value of adding their corresponding ones(digits) is equal to 10.
# their digits are in increasing order (the numbers may have two or more equal contiguous digits)
# The numbers that fulfill the two above constraints are: 118, 127, 136, 145, 226, 235, 244, 334
# recursion
def find_all(sum_dig, digs):
    """Return [count, smallest, largest] over *digs*-digit numbers with
    non-decreasing digits summing to *sum_dig*; [] if none exist."""
    matches = [''.join(str(d) for d in combo)
               for combo in gen(digs) if sum(combo) == sum_dig]
    if not matches:
        return []
    return [len(matches), int(matches[0]), int(matches[-1])]
def gen(d, start=1):
    """Yield all non-decreasing digit lists of length *d* with digits >= start."""
    if d == 1:
        for digit in range(start, 10):
            yield [digit]
        return
    for digit in range(start, 10):
        for tail in gen(d - 1, digit):
            yield [digit] + tail
# built-in
import itertools
def find_all_v2(sum_dig, digs):
    """Same as find_all, but built on itertools.combinations_with_replacement."""
    combos = itertools.combinations_with_replacement(range(1, 10), digs)
    matches = [''.join(map(str, combo)) for combo in combos if sum(combo) == sum_dig]
    if not matches:
        return []
    return [len(matches), int(matches[0]), int(matches[-1])]
| [
"[email protected]"
] | |
360da2be75d373e79cf00b85bd0f99c57c87e8a5 | 64dfe7e6a139044799a6a54dfc2ba04c09479c8d | /Machine Learning/linear regression/linear_regression.py | ab3a3db2beffa82628962ffa843e02c5eb9dd684 | [] | no_license | chierqj/ML-and-Image | 9a1c9956eb90e41b06b0114012c4813dfcf8c9e1 | 92adc3530167ac1478808770f19baf81a18a086a | refs/heads/master | 2021-10-24T05:38:48.266694 | 2019-03-22T11:38:31 | 2019-03-22T11:38:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | import numpy as np
import matplotlib.pyplot as plt
# load data
# load data
def loadDataSet(fileName):
    """Load a tab-separated file: all columns but the last are features,
    the last column is the label. Returns (features, labels) as numpy arrays.

    Fix: the original opened the file twice and never closed either handle;
    a single ``with`` block now manages the file.
    """
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        # count fields from the first line, then rewind and parse everything
        numFeat = len(fr.readline().split('\t')) - 1
        fr.seek(0)
        for line in fr:
            curLine = line.strip().split('\t')
            dataMat.append([float(curLine[i]) for i in range(numFeat)])
            labelMat.append(float(curLine[-1]))
    return np.array(dataMat), np.array(labelMat)
# the standard equation of the regression
# the standard equation of the regression
def standard_equation(dataMat, labelMat):
    """Fit OLS via the normal equation and return the fitted values."""
    gram = np.dot(dataMat.T, dataMat)
    theta = np.dot(np.dot(np.linalg.inv(gram), dataMat.T), labelMat)
    return np.dot(dataMat, theta)
# use gradient descent to solve the regression
# use gradient descent to solve the regression
def gradient_descent(dataMat, labelMat, a, step):
    """Run *step* iterations of batch gradient descent with learning rate *a*
    and return the fitted values for dataMat."""
    m = dataMat.shape[0]
    theta = np.zeros(dataMat.shape[1])
    for _ in range(step):
        residual = np.dot(dataMat, theta) - labelMat
        theta = theta - (a / m) * np.dot(dataMat.T, residual)
    return np.dot(dataMat, theta)
# Demo: load the sample dataset, fit by gradient descent and plot the fit.
# NOTE(review): hard-coded absolute Windows path to the "Machine Learning in
# Action" ch08 data — this only runs on that machine; confirm/parameterize.
dataMat, labelMat = loadDataSet('G:\PythonProject\machinelearninginaction\Ch08\ex1.txt')
plt.scatter(dataMat[:, 1], labelMat)
# learning rate 0.3, 100 iterations
y = gradient_descent(dataMat, labelMat, 0.3, 100)
plt.plot(dataMat[:, 1], y, 'r')
plt.show()
| [
"[email protected]"
] | |
0e9fc8acc9e665f358306e77d53a34064d1e86ec | 7d02f1f6b38284d647caa77d148505b8d8f89fb4 | /flask/loinc_service.py | f8c2a259211c9137439086a8778c032a49e279cf | [] | no_license | sfogo/rest-ways | 22e79ac621538028da8e0b81443d5bd3fb2459d3 | bfd7a6f0aa4d83a9077584ef74f29618c1b32045 | refs/heads/master | 2020-12-25T15:18:05.532394 | 2016-09-17T23:34:23 | 2016-09-17T23:34:23 | 66,242,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | #!/usr/bin/python3
import sql_service as service
# =====================
# LoincException
# =====================
class LoincException(Exception):
    """Service-level error carrying an application code and an HTTP status.

    Fix: the original ``__str__`` was missing ``self`` and referenced
    undefined bare names, so ``str(exc)`` raised a TypeError.
    """
    def __init__(self, code, status, message):
        Exception.__init__(self, message)
        self.code = code
        self.status = status
        self.message = message  # explicit copy used by __str__ / handlers
    def __str__(self):
        return '{} {} {}'.format(self.code, self.status, self.message)
# =====================
# Get Code
# =====================
def getCode(code):
    """Return the LOINC row whose loinc_num equals *code*.

    Raises LoincException(101, 404) when no row matches.
    """
    # NOTE(review): *code* is interpolated straight into the SQL string —
    # SQL injection risk if it comes from an untrusted client; switch to a
    # parameterized query if sql_service supports it.
    query = "select * from loinc where loinc_num = '{}'".format(code)
    item = service.selectOne(query)
    if (item==None):
        raise LoincException(101,404,'Cannot find LOINC code {}'.format(code))
    return item
# =====================
# Get Codes
# =====================
def getCodes(q,searchType=None):
    """Search LOINC rows by substring of the code (default) or the name.

    Raises LoincException(102, 400) when q is missing and
    LoincException(103, 400) for an unknown searchType.
    """
    # NOTE(review): *q* is interpolated straight into the SQL LIKE pattern —
    # SQL injection risk; parameterize if sql_service supports it. Also note
    # the name search lowercases the column but not *q* — verify intent.
    if (q==None):
        raise LoincException(102,400,'Missing query parameter q')
    elif (searchType is None or searchType=='code'):
        query = "select * from loinc where loinc_num like '%{}%'".format(q)
    elif (searchType=='name'):
        query = "select * from loinc where lower(long_common_name) like '%{}%'".format(q)
    else:
        raise LoincException(103,400,'Invalid search type {}'.format(searchType))
    # Return items
    return service.select(query)
| [
"[email protected]"
] | |
64ec01ba21f64787427bb5e89548b171ba75b077 | dd777c86d22e229841a324adae0c9064a4d59307 | /search_engine_project/settings.py | 6634c7e0725a316a2a41550f99b85303eb4ff3c3 | [] | no_license | JisunParkRea/search-engine | bcec97218f3fec00b6352e6415929464051f4aa3 | 34a1355ec445ce916b96058fd382478b897cb43e | refs/heads/master | 2022-11-09T21:44:55.089291 | 2020-06-13T14:41:33 | 2020-06-13T14:41:33 | 271,531,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,650 | py | """
Django settings for search_engine_project project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os, json
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
secret_file = os.path.join(BASE_DIR, 'secrets.json')  # path to secrets.json (kept out of VCS)
with open(secret_file) as f:
    secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
    """Fetch *setting* from the loaded secrets mapping, failing loudly."""
    try:
        return secrets[setting]
    except KeyError:
        raise ImproperlyConfigured(
            "Set the {} environment variable".format(setting)
        )
SECRET_KEY = get_secret("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
    '127.0.0.1',
    '192.168.99.100', # VM_IP_ADDRESS
]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party and project apps
    'rest_framework',
    'search_engine_app',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'search_engine_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'search_engine_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Development database: file-backed SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
c49f884ff1e502534a0cbabe7633c2134b201d34 | b391498124fdcaef989bf3ebafffb0df43e3e07f | /pygccxml-0.8.2/unittests/declarations_cache_tester.py | 10112018fdfee5e022b785ec191dc8df82e502cb | [
"BSL-1.0"
] | permissive | glehmann/WrapITK-unstable | 9a0dd9d387ecd59c9439465dcc32cca552e14576 | 402fc668f1f3c3dd57d0751a61efa3b1625d238b | refs/heads/master | 2021-01-10T22:02:04.715926 | 2008-05-25T16:53:07 | 2008-05-25T16:53:07 | 3,272,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,319 | py | # Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os, sys, unittest, os.path
import autoconfig
import pygccxml.parser
from pygccxml.parser.config import config_t
from pygccxml.parser.declarations_cache import *
class decl_cache_tester(unittest.TestCase):
    """Tests for pygccxml's file/configuration declaration cache."""
    def __init__(self, *args ):
        unittest.TestCase.__init__(self, *args)
        # Ensure the scratch directory used for cache files exists.
        if not os.path.exists( autoconfig.build_dir ):
            os.makedirs( autoconfig.build_dir )
    def test_file_signature(self):
        """Identical file contents produce equal signatures; different differ."""
        file1 = os.path.join(autoconfig.data_directory, 'decl_cache_file1.txt')
        file1_dup = os.path.join(autoconfig.data_directory, 'decl_cache_file1_duplicate.txt')
        file2 = os.path.join(autoconfig.data_directory, 'decl_cache_file2.txt')
        sig1 = file_signature(file1)
        sig1_dup = file_signature(file1_dup)
        sig2 = file_signature(file2)
        self.assert_(sig1 == sig1_dup)
        self.assert_(sig1 != sig2)
    def test_config_signature(self):
        """Signature must change with meaningful config edits and only those."""
        diff_cfg_list = self.build_differing_cfg_list()
        def_cfg = diff_cfg_list[0]
        def_sig = configuration_signature(def_cfg)
        # Test changes that should cause sig changes
        for cfg in diff_cfg_list[1:]:
            self.assert_(configuration_signature(cfg) != def_sig)
        # Test changes that should not cause sig changes
        no_changes = def_cfg.clone()
        self.assert_(configuration_signature(no_changes) == def_sig)
        #start_decls_changed = def_cfg.clone()
        #start_decls_changed.start_with_declarations = "test object"
        #self.assert_(configuration_signature(start_decls_changed) == def_sig)
        ignore_changed = def_cfg.clone()
        ignore_changed.ignore_gccxml_output = True
        self.assert_(configuration_signature(ignore_changed) == def_sig)
    def test_cache_interface(self):
        """Exercise update/lookup, reload from disk, no-op flush, and culling."""
        cache_file = os.path.join(autoconfig.build_dir, 'decl_cache_test.test_cache_read.cache')
        file1 = os.path.join(autoconfig.data_directory, 'decl_cache_file1.txt')
        file1_dup = os.path.join(autoconfig.data_directory, 'decl_cache_file1_duplicate.txt')
        file2 = os.path.join(autoconfig.data_directory, 'decl_cache_file2.txt')
        diff_cfg_list = self.build_differing_cfg_list()
        def_cfg = diff_cfg_list[0]
        if os.path.exists(cache_file):
            os.remove(cache_file)
        cache = file_cache_t(cache_file)
        self.assert_(len(cache._file_cache_t__cache) == 0)
        # test creating new entries for differing files
        cache.update(file1, def_cfg, 1,[])
        self.assert_(len(cache._file_cache_t__cache) == 1)
        cache.update(file1_dup, def_cfg, 2,[])
        self.assert_(len(cache._file_cache_t__cache) == 1)
        cache.update(file2, def_cfg, 3,[])
        self.assert_(len(cache._file_cache_t__cache) == 2)
        self.assert_(cache.cached_value(file1,def_cfg) == 2)
        self.assert_(cache.cached_value(file2,def_cfg) == 3)
        # Test reading again
        cache.flush()
        cache = file_cache_t(cache_file)
        self.assert_(len(cache._file_cache_t__cache) == 2)
        self.assert_(cache.cached_value(file1,def_cfg) == 2)
        self.assert_(cache.cached_value(file2,def_cfg) == 3)
        # Test flushing doesn't happen if we don't touch the cache
        cache = file_cache_t(cache_file)
        self.assert_(cache.cached_value(file1,def_cfg) == 2) # Read from cache
        cache.flush() # should not actually flush
        cache = file_cache_t(cache_file)
        self.assert_(len(cache._file_cache_t__cache) == 2)
        # Test flush culling
        cache = file_cache_t(cache_file)
        cache.update(file1_dup, def_cfg, 4,[]) # Modify cache
        cache.flush() # should cull off one entry
        cache = file_cache_t(cache_file)
        self.assert_(len(cache._file_cache_t__cache) == 1)
    def build_differing_cfg_list(self):
        """ Return a list of configurations that all differ. """
        cfg_list = []
        def_cfg = config_t("gccxml_path",'.',['tmp'],['sym'],['unsym'],
                           None,False,"")
        cfg_list.append(def_cfg)
        # Test changes that should cause sig changes
        gccxml_changed = def_cfg.clone()
        gccxml_changed.gccxml_path = "other_path"
        cfg_list.append(gccxml_changed)
        wd_changed = def_cfg.clone()
        wd_changed.working_directory = "other_dir"
        cfg_list.append(wd_changed)
        #inc_changed = def_cfg.clone()
        #inc_changed.include_paths = ["/var/tmp"]
        #self.assert_(configuration_signature(inc_changed) != def_sig)
        inc_changed = config_t("gccxml_path",'.',['/var/tmp'],['sym'],['unsym'],
                               None,False,"")
        cfg_list.append(inc_changed)
        #def_changed = def_cfg.clone()
        #def_changed.define_symbols = ["symbol"]
        #self.assert_(configuration_signature(def_changed) != def_sig)
        def_changed = config_t("gccxml_path",'.',['/var/tmp'],['new-sym'],['unsym'],
                               None,False,"")
        cfg_list.append(def_changed)
        #undef_changed = def_cfg.clone()
        #undef_changed.undefine_symbols = ["symbol"]
        #self.assert_(configuration_signature(undef_changed) != def_sig)
        undef_changed = config_t("gccxml_path",'.',['/var/tmp'],['sym'],['new-unsym'],
                                 None,False,"")
        cfg_list.append(undef_changed)
        cflags_changed = def_cfg.clone()
        cflags_changed.cflags = "new flags"
        cfg_list.append(cflags_changed)
        return cfg_list
def create_suite():
    """Assemble the test suite containing all decl_cache_tester cases."""
    suite = unittest.TestSuite()
    suite.addTest( unittest.makeSuite(decl_cache_tester))
    return suite
def run_suite():
    """Run the suite with verbose text output."""
    unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
    run_suite()
"[email protected]"
] | |
047c1ebd782e80a602689965078bb65de47b133a | c2d23f201924972e32695e2ae848710836cd3e4d | /0x04-python-more_data_structures/9-multiply_by_2.py | 11b45fde03bb8ed0ec68816cf13fb15725c6e8e2 | [] | no_license | oangel26/holbertonschool-higher_level_programming-1 | 8e19ccf01d3933af1248d830d3f02ff8dd7870f2 | a49808576ad0b422c10f204f316bd921f9bc1597 | refs/heads/main | 2023-06-29T13:15:47.430077 | 2021-08-04T00:06:34 | 2021-08-04T00:06:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | #!/usr/bin/python3
def multiply_by_2(a_dictionary):
    """Return a new dict with every value of a_dictionary doubled."""
    return {key: value * 2 for key, value in a_dictionary.items()}
| [
"[email protected]"
] | |
9c14abf2141c645825bb3572f94ff19a61281d45 | 698ad822ff616b86e88784ec4fce08b42c46e870 | /torch/_dynamo/allowed_functions.py | 67daafc5adac78651457c01de7096eb20617b562 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | puppup420247-org/pytorch | 6fa244b817ace397e97e85c553bd315093ef4533 | 090fce2547ac96730f99877085a7477097c4ad97 | refs/heads/master | 2023-07-21T03:54:26.234286 | 2022-11-12T05:59:55 | 2022-11-12T05:59:55 | 188,915,637 | 1 | 1 | NOASSERTION | 2023-07-20T11:42:38 | 2019-05-27T22:20:25 | C++ | UTF-8 | Python | false | false | 8,428 | py | import builtins
import collections
import copy
import functools
import inspect
import itertools
import math
import operator
import types
import warnings
from typing import Dict, Optional, Set
import numpy
import torch
from torch.fx._symbolic_trace import is_fx_tracing
from . import config
from .utils import is_safe_constant
"""
A note on allowed functions:
Dynamo consults this file to determine if a particular function/module
is allowed to appear as a node in its fx output.
If a function is disallowed, it may either be traced-through, or skipped.
Trace-through means dynamo will continue to trace the interior code for
the function/module rather than stopping at its boundary and recording it
as a node in the fx graph. Whether tracing through or allowing, the functionality
of the function/module is part of the dynamo graph. Caveat: if tracing through,
any interior operation could trigger its own graph-break.
Skips are determined by (torch/_dynamo/skipfiles.py) - see "a note on
skipfiles" there.
"""
def make_function_id_set(lazy_initializer):
    """
    Track a set of `id()`s of objects which are either allowed or not
    allowed to go into the generated FX graph.  Use to test for torch.*,
    numpy.*, builtins.*, etc.

    Support user modification to permit customization of what can be
    added to the graph and what will cause a graph break.
    """

    class FunctionIdSet:
        # Populated on first use by calling lazy_initializer().
        function_ids: Optional[Set[int]] = None
        function_names: Optional[Dict[int, str]] = None

        def __call__(self):
            # Lazy initialization: the initializer may return either a
            # dict (id -> name) or a plain set of ids.
            if self.function_ids is None:
                value = lazy_initializer()
                if isinstance(value, dict):
                    self.function_ids = set(value.keys())
                    self.function_names = value
                else:
                    assert isinstance(value, set)
                    self.function_ids = value
            return self.function_ids

        def get_name(self, idx: int, default: str):
            # NOTE(review): assumes the initializer returned a dict; with a
            # set-backed instance function_names stays None and this would
            # raise — only call get_name on name-mapped sets.
            self()  # lazy init
            return self.function_names.get(idx, default)

        def add(self, idx: int):
            self()  # lazy init
            self.function_ids.add(idx)

        def remove(self, idx: int):
            if idx in self():
                self.function_ids.remove(idx)

        def __contains__(self, idx: int):
            return idx in self()

    return FunctionIdSet()
@make_function_id_set
def _disallowed_function_ids():
    """ids of objects that must never appear as nodes in the FX graph."""
    remove = [
        True,
        False,
        None,
        collections.OrderedDict,
        copy.copy,
        copy.deepcopy,
        inspect.signature,
        math.__package__,
        torch.__builtins__,
        torch.autocast_decrement_nesting,
        torch.autocast_increment_nesting,
        torch.autograd.grad,
        torch.clear_autocast_cache,
        torch.cuda.current_device,
        torch.cuda.amp.autocast_mode.autocast,
        torch.distributions.constraints.is_dependent,
        torch.distributions.normal.Normal,
        torch.inference_mode,
        torch.set_anomaly_enabled,
        torch.set_autocast_cache_enabled,
        torch.set_autocast_cpu_dtype,
        torch.set_autocast_cpu_enabled,
        torch.set_autocast_enabled,
        torch.set_autocast_gpu_dtype,
        torch.autograd.profiler.profile,
        warnings.warn,
        torch._C._dynamo.eval_frame.unsupported,
    ]
    # extract all dtypes from torch
    dtypes = [
        obj for obj in torch.__dict__.values() if isinstance(obj, type(torch.float32))
    ]
    remove += dtypes
    # also disallow all torch Storage classes
    storage = [
        obj
        for obj in torch.__dict__.values()
        if isinstance(obj, type(torch.FloatStorage))
    ]
    remove += storage
    return {id(x) for x in remove}
@make_function_id_set
def _allowed_function_ids():
    """
    Walk torch.* and get the ids of all the stuff in it
    """
    warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
    torch_object_ids = dict()

    def _is_allowed_module_prefix(obj):
        # True if obj's defining module is torch/math or an allowed
        # torch.* submodule, excluding the disallowed prefixes below.
        allowed_modules = ("torch", "math")
        # torch.nn.modules.rnn is disallowed because these modules internally
        # flatten their parameters. This flattening process will call
        # Tensor.set_ with a Storage, and Storages cannot be traced with
        # AOTAutograd; so we need to graph-break. To ensure this, we inline
        # these functions, rather than keep them opaque-ly in the graph.
        disallowed_modules = (
            "torch.optim.",
            "torch.nn.modules.rnn.",
            "torch._dynamo.",
            "torch._C._dynamo.",
            "torch._inductor.",
            "torch._C.inductor.",
            "torch.fx.",
            "torch.distributed.fsdp.",
        )
        allowed_modules_dot = tuple([x + "." for x in allowed_modules])
        module = inspect.getmodule(obj)
        if module is None:
            return False
        mod_name = module.__name__
        if any(mod_name.startswith(m) for m in disallowed_modules):
            return False
        return mod_name in allowed_modules or mod_name.startswith(allowed_modules_dot)

    def _find_torch_objects(module):
        # Recursively record ids of the module, its allowed submodules and
        # their members, skipping modules on the config ignorelist.
        if any(
            module.__name__.startswith(mod_name)
            for mod_name in config.allowed_functions_module_string_ignorelist
        ):
            return
        torch_object_ids[id(module)] = module.__name__
        for name, obj in list(module.__dict__.items()):
            if id(obj) not in torch_object_ids:
                if isinstance(obj, types.ModuleType):
                    if obj.__name__.startswith("torch.") and _is_allowed_module_prefix(
                        obj
                    ):
                        torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
                        _find_torch_objects(obj)
                elif _is_allowed_module_prefix(obj):
                    torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
                elif inspect.getmodule(obj) is None and not is_safe_constant(obj):
                    torch_object_ids[id(obj)] = f"{module.__name__}.{name}"

    _find_torch_objects(torch)
    _find_torch_objects(math)
    # drop anything that was explicitly disallowed
    for idx in _disallowed_function_ids():
        if idx in torch_object_ids:
            del torch_object_ids[idx]
    for extra in (is_fx_tracing,):
        torch_object_ids[id(extra)] = f"{extra.__module__}.{extra.__name__}"
    return torch_object_ids
@make_function_id_set
def _builtin_function_ids():
    """Map id() of public callables from builtins/operator (plus a few
    itertools/functools helpers) to a dotted-name string."""
    rv = {}
    for name, value in builtins.__dict__.items():
        if not name.startswith("_") and callable(value):
            rv[id(value)] = f"builtins.{name}"
    for name, value in operator.__dict__.items():
        if not name.startswith("_") and callable(value):
            rv[id(value)] = f"operator.{name}"
    # NOTE(review): chain/islice live in itertools but are labelled
    # "functools." here; label kept byte-identical to preserve behavior —
    # verify against upstream intent.
    for value in (itertools.chain, itertools.islice):
        rv[id(value)] = f"functools.{value.__name__}"
    rv[id(functools.reduce)] = "functools.reduce"
    return rv
@make_function_id_set
def _numpy_function_ids():
    """Map id() of every callable defined in numpy / numpy.random to its name."""
    rv = {}
    for mod in (numpy, numpy.random):
        mod_name = mod.__name__
        for attr, value in mod.__dict__.items():
            if not callable(value):
                continue
            # keep only callables actually defined in this module
            if (getattr(value, "__module__", None) or mod_name) == mod_name:
                rv[id(value)] = f"{mod_name}.{attr}"
    return rv
@make_function_id_set
def _builtin_constant_ids():
    """
    Collects constant builtins by eliminating callable items.
    """
    return {
        id(value): f"builtins.{name}"
        for name, value in builtins.__dict__.items()
        if not name.startswith("_") and not callable(value)
    }
def is_allowed(obj):
    """Is this safe to trace like torch.add ?"""
    # torch.ops is populated lazily so we don't necessarily have them in
    # _allowed_function_ids. Figure it out by testing the type instead
    # in those cases
    # (membership goes through FunctionIdSet.__contains__, which lazily
    # builds the id set on first use)
    return id(obj) in _allowed_function_ids or isinstance(
        obj,
        (torch._ops.OpOverloadPacket, torch._ops.OpOverload, torch._ops._OpNamespace),
    )
def torch_get_name(obj, default):
    """Convert a torch.* function to its dotted-name string, or *default*."""
    return _allowed_function_ids.get_name(id(obj), default)
def is_builtin_callable(obj):
    # True for callables from builtins/operator plus a few itertools/functools helpers.
    return id(obj) in _builtin_function_ids
def is_builtin_constant(obj):
    # True for non-callable public members of the builtins module.
    return id(obj) in _builtin_constant_ids
def is_numpy(obj):
    # True for ndarray instances and for callables defined in numpy / numpy.random.
    return isinstance(obj, numpy.ndarray) or id(obj) in _numpy_function_ids
| [
"[email protected]"
] | |
41f0af239f27689911e24f4c57d69fae46c3a7c8 | 5dd841ec5b6e4b9a474b67d1f62843297d8b9073 | /frameworkutils/baseutils.py | 325cba9a25b866c19987568a879e9afa62afb9d5 | [] | no_license | Evading77/all_auto_testing | 9bf6036600d5cbc9285da182f1bb5cbb19473e50 | 627f5867d483dda2acc08192a6f140cdf27a5f36 | refs/heads/master | 2022-11-29T17:56:55.002675 | 2020-08-04T04:57:42 | 2020-08-04T04:57:42 | 284,881,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py |
# 数据驱动运行
import inspect
from frameworkutils import logger
def getfunc(obj, method):
    """
    Look up *method* on *obj* by reflection.

    :param obj: object to inspect
    :param method: attribute name of the method
    :return: (bound callable, number of parameters excluding self),
             or None if the attribute does not exist

    Fix: the original parsed the string form of getfullargspec() with
    find()/eval() — fragile — and called arg.remove('self') unconditionally,
    which raised ValueError for plain functions.  Use the .args attribute
    directly and only drop 'self' when present.
    """
    try:
        func = getattr(obj, method)
    except Exception:
        return None
    args = list(inspect.getfullargspec(func).args)
    if 'self' in args:
        args.remove('self')
    return func, len(args)
def runcase(obj, line):
    """Run one data-driven test row against *obj*.

    Row layout: [group1, group2, ?, keyword, arg1, arg2, ...].  Rows with a
    non-empty group cell are headers and are skipped; otherwise the keyword
    is resolved via getfunc() and called with as many argument cells as its
    signature expects.

    Generalized: the original hard-coded dispatch for 0–3 arguments and
    warned on more; now any argument count is supported via unpacking.
    """
    if len(line[0]) > 0 or len(line[1]) > 0:
        # 分组信息不执行
        return
    func = getfunc(obj, line[3])
    if func is None:
        logger.warn("关键字%s不存在" % line[3])
        return
    method, argcount = func
    # pass exactly the cells the keyword's signature requires
    method(*line[4:4 + argcount])
"[email protected]"
] | |
72c66855914d9adbb005f2d70feed5876d242f5d | a2a23feb46da7f30d9910878fcbd3146bfb51696 | /simutool/scVI/scvi_imputation.py | 85eb457ae42bffb194bd1bbdd290bc5455d72dbe | [
"MIT"
] | permissive | JunLiuLab/SIMPLEs2020 | 7e9a9b552b0501cc2fe5035584fdc85c10b1b1fd | 90e6aa1e254b81fd81f2211b3fdfb552270d76fe | refs/heads/master | 2022-11-21T20:38:50.044833 | 2020-07-24T18:06:59 | 2020-07-24T18:06:59 | 274,720,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | import argparse
import torch
import numpy as np
import pandas as pd
from pyprojroot import here
from pathlib import PosixPath
from scvi.dataset import CsvDataset
from scvi.models import VAE
from scvi import set_seed
from scvi.inference import UnsupervisedTrainer, load_posterior
# * commons
set_seed(0)  # fix the RNG so imputation runs are reproducible
# ** data — dimensions of the simulated SymSim matrix (genes x cells)
ncell = 300
ngene = 1000
# ** model — training hyper-parameters for the scVI VAE
n_epochs = 400
lr = 1e-3
use_cuda = True
# * impute
def scvi_impute(seed: int = 1, platform: str = "umi") -> None:
    """Train an scVI VAE on one simulated SymSim dataset and save imputed values.

    Reads sim_{ncell}_{ngene}_{seed}_{platform}_.csv (gene-by-cell) from
    scVI/data/symsim and writes the imputed matrix as CSV to
    simutool/jobs/scvi_result under the same file name.
    """
    fnm: str = f"sim_{ncell}_{ngene}_{seed}_{platform}_.csv"
    save_path: PosixPath = here('./scVI/data/symsim')
    # fullpath:PosixPath = here('./scVI/data/symsim').joinpath(fnm)
    symsim_dataset = CsvDataset(fnm, save_path=save_path, gene_by_cell=True)
    vae = VAE(symsim_dataset.nb_genes)
    # train_size=1.0: no held-out split, the whole dataset is used for training
    trainer = UnsupervisedTrainer(vae,
                                  symsim_dataset,
                                  train_size=1.0,
                                  use_cuda=use_cuda,
                                  frequency=5)
    trainer.train(n_epochs=n_epochs, lr=lr)
    # posterior over every cell, then sequential imputation of expression
    full = trainer.create_posterior(trainer.model,
                                    symsim_dataset,
                                    indices=np.arange(len(symsim_dataset)))
    impute_values = full.sequential().imputation()
    out_path = here("./simutool/jobs/scvi_result").joinpath(fnm)
    np.savetxt(out_path, impute_values, delimiter=",")
if __name__ == '__main__':
    # CLI: --platform {umi,nonumi} --seed {1..20}
    parser = argparse.ArgumentParser(description="scVI")
    parser.add_argument("--platform",
                        type=str,
                        default="umi",
                        help="umi or nonumi")
    parser.add_argument("--seed",
                        type=int,
                        default=1,
                        help="seed from 1 to 20")
    args = parser.parse_args()
    scvi_impute(args.seed, args.platform)
| [
"[email protected]"
] | |
ad668f9d86edc435d70bd9c77ada6322c24bcda5 | eef0fbd71a3f8f1a3e4bd8a6e9527f220bb52b36 | /RoundF/p2.py | 721fd50578fb07187af393686e724d4543a01e56 | [] | no_license | azanbinzahid/google-kickstart-2020 | c609eb9ae7737abc6686b069999a58d6c589d452 | ab8aa5e28a696372192bbb8557ba8dadd76f0f38 | refs/heads/master | 2022-12-28T16:29:12.158826 | 2020-10-18T15:21:03 | 2020-10-18T15:21:03 | 298,944,187 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | import math
# Greedy interval scheduler: for each test case, count how many extra
# "robots" are needed when each robot covers K units of time per interval.
T = int(input())  # number of test cases
for c in range(T):
    N, K = map(int, input().split())
    time = []
    for n in range(N):
        S, E = map(int, input().split())  # interval [S, E)
        time.append([S,E])
    # process intervals in order of start time
    time.sort(key=lambda x: x[0])
    robotCount = 0
    robotCurrent = time[0][0]  # time already covered up to this point
    for t in time:
        if (robotCurrent<t[0]):
            robotCurrent = t[0]
        # uncovered portion of this interval
        factor = t[1]-robotCurrent
        if factor>0:
            calc = math.ceil(factor/K)  # robots needed, each covering K units
            robotCount+= calc
            robotCurrent +=K*calc
    ans = robotCount
    print("Case #{}: {}".format(c+1, ans))
"[email protected]"
] | |
25be6302bd9150151560453a17906af226789f01 | 904b0d81152649ccd3349f94f88e7b89a7b5c76a | /scripts/main/xicombNS_DA02.py | e2f17c6818b725c4127a2e6be411fb79ee8c98bd | [
"BSD-3-Clause"
] | permissive | desihub/LSS | ec33538a0e7280ad1c6b257368cc009ed4b39cbb | 5645461929172d327ed30389d76e7e887043c9bf | refs/heads/main | 2023-08-18T23:17:13.123605 | 2023-08-18T20:08:22 | 2023-08-18T20:08:22 | 36,753,969 | 14 | 28 | BSD-3-Clause | 2023-09-13T18:37:35 | 2015-06-02T18:42:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,710 | py | #!/usr/bin/env python
# coding: utf-8
import os
import argparse
import logging
import numpy as np
from astropy.table import Table, vstack
from matplotlib import pyplot as plt
from pycorr import TwoPointCorrelationFunction, TwoPointEstimator, KMeansSubsampler, utils, setup_logging
njack = '60'  # number of jackknife regions used in the pair counts
# tracer samples to combine (including cross-correlations)
trs = ['ELG_LOPnotqso','QSO','LRG','BGS_BRIGHT','QSO_ELG_LOPnotqso','LRG_QSO','LRG_ELG_LOPnotqso']
bsl = [1,2,4,5,10]  # s-bin rebinning factors for the text outputs
dirxi = '/global/cfs/cdirs/desi/survey/catalogs/DA02/LSS/guadalupe/LSScats/test/xi/smu/'
xit = 'poles'
for tr in trs:
    # redshift windows depend on the tracer
    if tr == 'ELG_LOPnotqso':
        zws = ['0.8_1.6','0.8_1.1','1.1_1.6']
    if tr == 'QSO_ELG_LOPnotqso':
        zws = ['0.8_1.6','0.8_1.1','1.1_1.6']
    if tr == 'QSO':
        zws = ['0.8_1.1','0.8_2.1lowz','1.1_1.6','1.6_2.1','2.1_3.5','0.8_3.5']
    if tr == 'LRG':
        zws = ['0.4_0.6','0.6_0.8','0.8_1.1','0.4_1.1']
    if tr == 'BGS_BRIGHT':
        zws = ['0.1_0.3','0.3_0.5','0.1_0.5']
    if tr == 'LRG_QSO' or tr == 'LRG_ELG_LOPnotqso':
        zws = ['0.8_1.1']
    for zw in zws:
        # combine North and South galactic cap pair counts (normalized sum)
        result_N = TwoPointCorrelationFunction.load(dirxi+'allcounts_'+tr+'_N_'+zw+'_default_FKP_lin_njack'+njack+'.npy')
        result_S = TwoPointCorrelationFunction.load(dirxi+'allcounts_'+tr+'_S_'+zw+'_default_FKP_lin_njack'+njack+'.npy')
        result_NS = result_N.normalize() + result_S.normalize()
        fn = dirxi+'allcounts_'+tr+'_NScomb_'+zw+'_default_FKP_lin_njack'+njack+'.npy'
        result_NS.save(fn)
        # write rebinned multipoles (ell = 0, 2, 4) for each bin size
        for bs in bsl:
            rebinned = result_NS[:(result_NS.shape[0]//bs)*bs:bs]
            fn_txt = dirxi+'xi'+xit+'_'+tr+'_NScomb_'+zw+'_default_FKP_lin'+str(bs)+'_njack'+njack+'.txt'
            rebinned.save_txt(fn_txt, ells=(0, 2, 4))
| [
"[email protected]"
] | |
1268bbb90ae4972ef8895f051afd048acf92532e | 712729bfe3125fc149309b2436729f6cfd13b82b | /while/walking.py | fd4986df1f9348c1dd4354432253c3a4868429c7 | [] | no_license | borko81/basic_python_solvess | 61d29270fed7cccbac0e7654936d29811eb62a26 | 858ec7a520ee3df006e67c352dc198b783e8433e | refs/heads/master | 2022-12-29T04:18:26.441928 | 2020-07-17T08:04:47 | 2020-07-17T08:04:47 | 278,708,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | goal = 10000
total_steps = 0  # steps accumulated so far today
# Keep logging step entries until the goal is met or the walker heads home.
while total_steps < goal:
    entry = input()
    if entry == 'Going home':
        # One last step count is entered for the walk home, then logging stops.
        total_steps += int(input())
        break
    total_steps += int(entry)
if total_steps >= goal:
    print(f"Goal reached! Good job!")
    print(f"{total_steps - goal} steps over the goal!")
else:
    print(f'{goal - total_steps} more steps to reach goal.')
"[email protected]"
] | |
cc7c49501e786cd44d771aaabe8b71a5c0506d1f | 46a27b193308641d6029a11361a8d52389a2a405 | /upstream_extract.py | fdddc4e69972af6d37d468693b680a914b46760a | [] | no_license | ahtmatrix/bioinformatics-scripts | 702c28e6717f78f982b6572db248f2041118b988 | dd3762f7b5c8bbf9ec64a9faa861036342a54392 | refs/heads/master | 2022-11-29T06:52:29.556297 | 2020-08-15T23:20:41 | 2020-08-15T23:20:41 | 68,765,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,279 | py | import sys
import os
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio import pairwise2
import warnings
from Bio import BiopythonWarning
# Usage
# python SeqExtract.py [number of bases upstream to cut]
# to combine multiple .gbk files into 1 gbk
# navigate to directory containing gbk files
# cat *.gbk > filename.gbk
# python -m pdb -Werror myprogram.py to run and stop pdb at the warning
# python -Werror -m pdb SeqExtract.py 30
# turns warnings into errors so it can be caught
# grep '>' [filename] | wc -l
# Promote all warnings to exceptions so BiopythonWarning can be caught below.
warnings.filterwarnings('error')

#number of base pairs upstream to extract (first command-line argument)
num_bp_upstreamcds = int(sys.argv[1])
def validate_cds(record, feature):
    """Check that translating the CDS feature reproduces its annotated protein.

    Returns True when the translated CDS (with its first residue replaced by
    the annotated protein's first residue — presumably to tolerate alternative
    start codons; confirm) matches the 'translation' qualifier, False when it
    does not, and None when translation raises a BiopythonWarning (warnings
    are promoted to errors at module import time).
    """
    feature_validity = None
    try:
        protein_in_file = str(feature.qualifiers.get('translation', 'no_translation')).strip('\'[]')
        # diff extracted CDS compare with FASTA nucleotide on NCBI
        # is the problem with?
        cds_to_protein = str(feature.extract(record).seq.translate(to_stop=True))
        # print "protein_check_fail: |" + record.id +"|"+
        # str(feature.qualifiers.get('transl_except')).strip('\'[]') +"| "
        # +str(feature.qualifiers.get('note')).strip('\'[]') + "
        # |"+str(feature.qualifiers.get('protein_id')).strip('\'[]') + "| " +
        # protein_in_file + " |" + cds_to_protein
        # Replace only the first residue before comparing.
        temp_fix_protein = list(cds_to_protein)
        temp_fix_protein[0] = protein_in_file[0]
        fixed_cds_to_protein = "".join(temp_fix_protein)
        if fixed_cds_to_protein != protein_in_file:
            feature_validity = False
        else:
            feature_validity = True
    except BiopythonWarning:
        print "Biopythonwarning:" + record.id + " --> " + str(feature.qualifiers.get('protein_id')).strip('\'[]')
    return feature_validity
def extract_upstream_and_CDS(fullpath, filename):
    """Extract each validated CDS plus num_bp_upstreamcds upstream bases.

    Parses the GenBank file at *fullpath*, keeps only CDS features whose
    translation validates (see validate_cds), extracts the CDS together with
    the configured number of upstream bases (reverse-complementing features
    on the minus strand), and writes the results as FASTA records named
    '<filename><N>upstream_and_CDS.fasta'.
    """
    extracted_cds_list = []
    # reads in a gbk and creates a SeqRecord object
    for record in SeqIO.parse(fullpath, "genbank"):
        if record.features:
            for feature in record.features:
                if feature.type == "CDS":
                    if validate_cds(record, feature) == True:
                        # get the CDS nucleotide locations
                        cds_start_location = feature.location.start.position
                        cds_end_location = feature.location.end.position
                        # only used for length culling
                        #get the 5'UTR sequence coordinate and extract
                        FiveUTR_location = SeqFeature(FeatureLocation(cds_start_location - num_bp_upstreamcds, cds_start_location))
                        extracted_5UTR = FiveUTR_location.extract(record)
                        # Skip CDSs too close to the record start to have a full upstream region.
                        if len(extracted_5UTR.seq) == num_bp_upstreamcds:
                            #need to check if complement
                            #if it is complement, then reverse complement it
                            if "+" in str(feature.location):
                                #extract -num_bp_upstreamcds + the whole CDS #THIS LOCATION HAS TO BE DIFFERENT
                                extract_location = SeqFeature(FeatureLocation(cds_start_location - num_bp_upstreamcds, cds_end_location))
                                extracted_seq = extract_location.extract(record)
                                #print "reverse complement disengaged" + str(feature.location)
                            elif "-" in str(feature.location):
                                rc_extract_location = SeqFeature(FeatureLocation(cds_start_location, cds_end_location+ num_bp_upstreamcds))
                                extracted_seq = rc_extract_location.extract(record).reverse_complement()
                                #print "reverse complement engaged " + str(feature.location)
                            # NOTE(review): if the location string contains neither '+'
                            # nor '-', extracted_seq keeps its value from a previous
                            # iteration (or is undefined on the first) — confirm all
                            # locations are stranded.
                            cds_protein_id = str(feature.qualifiers.get('protein_id')).strip('\'[]')
                            annotated_record = SeqRecord(extracted_seq.seq, extracted_seq.name, description="|" + cds_protein_id + "|")
                            extracted_cds_list.append(annotated_record)
    # create a SeqFeature object containing the location of where to extract
    # need to test if its taking + or - 1 off the location
    # genbank starts with 1
    #upstream_cds_downstream_location = SeqFeature(FeatureLocation(cds_start_location - num_bp_upstreamcds, cds_end_location + num_bp_downstreamcds))
    # extraction is using the GENBANK protein for all
    SeqIO.write(extracted_cds_list, filename + str(num_bp_upstreamcds)+"upstream_and_CDS.fasta", "fasta")
    return
# creates a list of the files in this directory
raw_datadir_listing = os.listdir(os.getcwd())
# loops over the list of files, processing every GenBank (.gbk) file found
for files in raw_datadir_listing:
    if files.endswith('.gbk'):
        full_path = os.path.join(os.getcwd(), files)
        # Output FASTA is named after the input file without its extension.
        filename = os.path.splitext(files)[0]
        extract_upstream_and_CDS(full_path, filename)
"[email protected]"
] | |
32e57294391a73d29a78726cd72289198fc7ad3b | 1a25b59f0b459e90505cca5e7bf0764928c66aa7 | /src/simulator/dstructures/binning.py | 39597c70186f59cb9cd5c62fe3a0de77c88b071a | [
"MIT"
] | permissive | hd818/htc-cache-system-simulator | ca104c201ed76e964a44f8a3a3ebc4ab7a5b0b60 | ee502db3f1c2b99ffe05ee609a18069b583798da | refs/heads/master | 2023-03-23T03:23:41.352685 | 2020-12-10T18:58:45 | 2020-12-10T18:59:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,329 | py | import abc
import itertools
from typing import Callable, cast, Generic, Iterable, Iterator, List, Mapping, Set, Tuple, TypeVar
from .sorted import SortedDefaultDict
# TODO: Binner could also handle floats (numbers, limits)
class Binner(abc.ABC):
@property
@abc.abstractmethod
def bounded(self) -> bool:
raise NotImplementedError
@property
@abc.abstractmethod
def bins(self) -> int:
raise NotImplementedError
@abc.abstractmethod
def bin_edges(self) -> Iterator[int]:
raise NotImplementedError
@abc.abstractmethod
def bin_limits(self, bin: int) -> Tuple[int, int]:
raise NotImplementedError
@abc.abstractmethod
def __call__(self, num: int) -> int:
raise NotImplementedError
class LinearBinner(Binner):
def __init__(self, width: int=1) -> None:
self._width: int = width
@property
def bounded(self) -> bool:
return False
@property
def bins(self) -> int:
return -1
def bin_edges(self) -> Iterator[int]:
return itertools.count(step=self._width)
def bin_limits(self, bin: int) -> Tuple[int, int]:
start = bin * self._width
return start, start + self._width
def __call__(self, num: int) -> int:
return num // self._width
class LogBinner(Binner):
def __init__(self, first: int=0, last: int=-1, step: int=1) -> None:
self._first: int = first
self._last: int = last
self._step: int = step
self._bins = -1 if last == -1 else (last - first) // step + 1
self._bin: Callable[[int], int]
if last == -1:
self._bin = lambda num: (max(num.bit_length() - 1, first) - first) // step
else:
self._bin = lambda num: (min(max(num.bit_length() - 1, first), last) - first) // step
@property
def bounded(self) -> bool:
return self._last != -1
@property
def bins(self) -> int:
return self._bins
def bin_edges(self) -> Iterator[int]:
it: Iterable[int]
if self._bins == -1:
it = itertools.count(start=self._first+self._step, step=self._step)
else:
it = range(self._first+self._step, self._last+1, self._step)
return itertools.chain((0,), (2 ** i for i in it))
def bin_limits(self, bin: int) -> Tuple[int, int]:
real_first = (2 ** (self._first + bin * self._step))
first: int
last: int
if bin == 0:
first = 0
else:
first = real_first
if self._last != -1 and bin == self._bins - 1:
past = -1
else:
past = real_first * 2 ** self._step
return first, past
def __call__(self, num: int) -> int:
return self._bin(num)
_T_co = TypeVar('_T_co', covariant=True)
_T_co_inner = TypeVar('_T_co_inner', covariant=True)
class BinnedMapping(Generic[_T_co], Mapping[int, _T_co]):
class _ItemSet(Generic[_T_co_inner], Set[Tuple[int, _T_co_inner]]):
def __init__(self, mapping: 'BinnedMapping[_T_co_inner]') -> None:
self._mapping: BinnedMapping[_T_co_inner] = mapping
def __len__(self) -> int:
return len(self._mapping)
def __contains__(self, el: object) -> bool:
try:
el = cast('Tuple[int, _T_co_inner]', el)
if len(el) != 2:
return False
return self._mapping[el[0]] == el[1]
except (KeyError, TypeError):
return False
def __iter__(self) -> Iterator[Tuple[int, _T_co_inner]]:
container = self._mapping._container
cnst_get_el = self._mapping._construct_or_get_element
if self._mapping._binner.bins == -1:
return zip(
self._mapping._binner.bin_edges(),
itertools.chain(
container,
(cnst_get_el(i) for i in itertools.count(len(container))),
),
)
else:
return zip(self._mapping._binner.bin_edges(), container)
def __init__(
self,
binner: Binner,
default_factory: Callable[[], _T_co],
) -> None:
self._default_factory: Callable[[], _T_co] = default_factory
self._binner: Binner = binner
self._container: List[_T_co]
self._get_element: Callable[[int], _T_co]
if self._binner.bins != -1:
container = list(default_factory() for _ in range(self._binner.bins))
self._container = container
self._get_element = lambda bin: container[bin]
else:
self._container = []
self._get_element = self._construct_or_get_element
def _construct_or_get_element(self, bin: int) -> _T_co:
try:
return self._container[bin]
except IndexError:
default_factory = self._default_factory
it = (default_factory() for _ in range(bin - len(self._container) + 1))
self._container.extend(it)
return self._container[bin]
@property
def binner(self) -> Binner:
return self._binner
@property
def bounded(self) -> bool:
return self._binner.bounded
@property
def default_factory(self) -> Callable[[], _T_co]:
return self._default_factory
def __getitem__(self, num: int) -> _T_co:
return self._get_element(self._binner(num))
def __iter__(self) -> Iterator[int]:
return self._binner.bin_edges()
def __len__(self) -> int:
if self._binner.bounded:
return self._binner.bins
else:
raise TypeError('The BinnedMapping is unbounded, len(.) is undefined')
def items(self) -> 'BinnedMapping._ItemSet[_T_co]':
return BinnedMapping._ItemSet(self)
def values_until(self, num: int, half_open: bool=True) -> Iterator[_T_co]:
last_bin = self._binner(num)
if not half_open:
last_bin += 1
if self._binner.bins == -1:
_ = self._construct_or_get_element(last_bin)
return itertools.islice(self._container, last_bin)
def values_from(self, num: int, half_open: bool=False) -> Iterator[_T_co]:
first_bin = self._binner(num)
if half_open:
first_bin += 1
if self._binner.bins == -1:
for i in range(first_bin, len(self._container)):
yield self._container[i]
for i in itertools.count(len(self._container)):
yield self._construct_or_get_element(i)
else:
for i in range(first_bin, self._binner.bins):
yield self._container[i]
def bin_limits(self, bin: int) -> Tuple[int, int]:
return self._binner.bin_limits(bin)
def bin_limits_from_num(self, num: int) -> Tuple[int, int]:
return self._binner.bin_limits(self._binner(num))
class BinnedSparseMapping(Generic[_T_co], Mapping[int, _T_co]):
def __init__(
self,
binner: Binner,
default_factory: Callable[[], _T_co],
) -> None:
self._binner: Binner = binner
self._dict: SortedDefaultDict[int, _T_co] = SortedDefaultDict(default_factory)
@property
def binner(self) -> Binner:
return self._binner
@property
def bounded(self) -> bool:
return self._binner.bounded
@property
def default_factory(self) -> Callable[[], _T_co]:
return cast(Callable[[], _T_co], self._dict.default_factory)
def __getitem__(self, num: int) -> _T_co:
return self._dict[self._binner(num)]
def __iter__(self) -> Iterator[int]:
bin_limits = self._binner.bin_limits
return (bin_limits(bin)[0] for bin in self._dict.keys())
def __len__(self) -> int:
return len(self._dict)
def __delitem__(self, num: int) -> None:
del self._dict[self._binner(num)]
def items_until(self, num: int, half_open: bool=True) -> Iterator[Tuple[int, _T_co]]:
dct = self._dict
for key in dct.irange(maximum=self._binner(num), inclusive=(True, not half_open)):
yield key, dct[key]
def items_from(self, num: int, half_open: bool=False) -> Iterator[Tuple[int, _T_co]]:
dct = self._dict
for key in dct.irange(minimum=self._binner(num), inclusive=(not half_open, True)):
yield key, dct[key]
def bin_limits_from_num(self, num: int) -> Tuple[int, int]:
return self._binner.bin_limits(self._binner(num))
| [
"[email protected]"
] | |
89494d606152fc377c552c22f50b2423e447ab6e | c9b562e31618417aa727d8f3da16604cc79e7cb1 | /tajna.py | 4238899752735dd56165b4eb3c0abbbea63cc7ea | [] | no_license | AlexKohanim/ICPC | 6922a9929a665b9ee8442f1e2355b8a0546c77ad | f22eaa63a1ac6cae6857e442c776f1b59542ed92 | refs/heads/master | 2022-01-27T07:49:19.480130 | 2022-01-18T05:39:58 | 2022-01-18T05:39:58 | 159,398,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | #!/usr/bin/env python3
message = input()
val = len(message)
# All (divisor, val // divisor) pairs with divisor <= sqrt(val), ascending.
factors = [(i, val // i) for i in range(1, int(val**0.5)+1) if val % i == 0]
# i = largest divisor <= sqrt(val), j = val // i, so i <= j.
i,j = factors[-1]
#print(i,j)
k = 0
l = 0
# Emit all val characters, stepping through the message with stride i and,
# once the index passes the end, wrapping to the next starting offset via
# (k+1) % val — effectively reading an i-by-j grid layout of the message
# column by column (verify against the task statement).
while l < val:
    if k >= val:
        k = (k+1) % val
    print(message[k], end="")
    l += 1
    k += i
print()
"[email protected]"
] | |
98495503813fe7144d1b3df53d36140cbd69d039 | 55822b50e88d0627776864ed43325eec5d03d957 | /playGame.py | 8e94f04973064bf3fc3226180bcd315cc28c230e | [] | no_license | badman27/MTG-Game-Python-Final-Project | 83a2092f60a2ab1e6a129364bf89a3ea2bdf0eaa | 8590677c7a18da640b32abc91c6c743d1094045a | refs/heads/master | 2020-04-09T21:20:45.120286 | 2018-12-14T20:21:18 | 2018-12-14T20:21:18 | 160,599,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import gameBoard
import playerLib
import buildDecks
import cardEffects
from urllib import request
# One-shot demo round: set up the board, deal both players their opening
# hands, show life totals and the human player's hand, then wait for a
# mouse click before closing the window.
board = gameBoard.Board()
player1 = playerLib.Player("Player1")
computer = playerLib.Player("Computer")
player1.initialDraw()
computer.initialDraw()
board.displayLife(player1)
board.displayLife(computer)
board.drawImage(player1.showHand())
board.win.getMouse()  # block until the user clicks the window
board.exit()
"[email protected]"
] | |
5c08f2fcba52da2a82e6bd84da49a9d6dedfab31 | c32f2e1f3d9c1e556f58c7876abcf7943b7f8f9f | /envs/pybulletenvs.py | f47e70a83d9002859d335f141c98ee9f1dd42aba | [] | no_license | matthewlujp/drl_in_handful_of_trials | 00d2f265777e8cf1cb26e12cca25257c8c2a0d4a | aed77881eb76bafc2f880f7d90116cb0696254e2 | refs/heads/master | 2022-11-05T11:31:26.463080 | 2020-06-21T04:12:39 | 2020-06-21T04:12:39 | 273,715,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | import gym
import pybullet_envs
# Environments in pybullet. make() dispatches on membership in these lists
# because each family lives in a different pybullet_envs module and takes a
# different rendering keyword.
PYBULLET_ENVS = ["CartPoleBulletEnv", "CartPoleContinuousBulletEnv", "MinitaurBulletEnv", "MinitaurBulletDuckEnv", "RacecarGymEnv", "KukaGymEnv", "KukaCamGymEnv", "KukaDiverseObjectEnv"]
PYBULLET_ENVS_DEEPMIMIC = ["HumanoidDeepMimicBackflipBulletEnv", "HumanoidDeepMimicWalkBulletEnv"]
PYBULLET_ENVS_PENDULUM = ["InvertedPendulumBulletEnv", "InvertedDoublePendulumBulletEnv", "InvertedPendulumSwingupBulletEnv"]
PYBULLET_ENVS_MANIPULATOR = ["ReacherBulletEnv", "PusherBulletEnv", "ThrowerBulletEnv", "StrikerBulletEnv"]
PYBULLET_ENVS_LOCOMOTION = ["Walker2DBulletEnv", "HalfCheetahBulletEnv", "AntBulletEnv", "HopperBulletEnv", "HumanoidBulletEnv", "HumanoidFlagrunBulletEnv"]
def make(env_name, render=False):
    """Instantiate the pybullet (or plain gym) environment named *env_name*.

    Each pybullet family lives in a different module and takes a different
    rendering keyword, so we dispatch on the membership lists above and
    fall back to gym.make for everything else.
    """
    if env_name in PYBULLET_ENVS:
        from pybullet_envs import bullet
        return getattr(bullet, env_name)(renders=render)
    if env_name in PYBULLET_ENVS_DEEPMIMIC:
        from pybullet_envs.deep_mimic.gym_env import deep_mimic_env
        return getattr(deep_mimic_env, env_name)(renders=render)
    if env_name in PYBULLET_ENVS_PENDULUM:
        from pybullet_envs import gym_pendulum_envs
        pendulum = getattr(gym_pendulum_envs, env_name)()
        # Pendulum constructors take no render flag; request a window explicitly.
        if render:
            pendulum.render(mode='human')
        return pendulum
    if env_name in PYBULLET_ENVS_MANIPULATOR:
        from pybullet_envs import gym_manipulator_envs
        return getattr(gym_manipulator_envs, env_name)(render=render)
    if env_name in PYBULLET_ENVS_LOCOMOTION:
        from pybullet_envs import gym_locomotion_envs
        return getattr(gym_locomotion_envs, env_name)(render=render)
    # Anything else is assumed to be a regular registered gym id.
    return gym.make(env_name)
| [
"[email protected]"
] | |
4d64347ffa03e103de068e66bc946d0999ccfce2 | 436051d199fcc323a422b7fea377f43c01004366 | /helpers/labml_helpers/metrics/simple_state.py | 2a6575141ce8871da42ebc4cbb7b596e62453fdb | [
"MIT"
] | permissive | xet7/labml | 29d411b94f1d6b9ff03c6033f510cea443d38248 | 7f3918ca7de8cb21cf6dcc9d127a6ea64c0aebb9 | refs/heads/master | 2023-08-18T10:03:13.142430 | 2021-07-18T11:11:42 | 2021-07-18T11:11:42 | 387,184,226 | 0 | 0 | MIT | 2023-08-11T20:01:39 | 2021-07-18T13:36:49 | null | UTF-8 | Python | false | false | 813 | py | from typing import Generic, TypeVar, Optional
from . import StateModule
T = TypeVar('T')


class SimpleState(Generic[T]):
    """A tiny nullable holder for a single value of type T."""

    state: Optional[T]

    def __init__(self):
        # Start out empty; reset() establishes the "no value" state.
        self.reset()

    def get(self) -> T:
        """Return the currently held value (None when nothing is set)."""
        return self.state

    def set(self, data: T):
        """Replace the held value with *data*."""
        self.state = data

    def reset(self):
        """Clear the held value back to None."""
        self.state = None
class SimpleStateModule(StateModule, Generic[T]):
    """StateModule that tracks a single value, cleared at each epoch start."""
    # Holds the tracked value; assigned via set_state() with the container
    # built by create_state().
    data: SimpleState[T]
    def __init__(self):
        super().__init__()
    def set(self, data: T):
        """Store *data* in the underlying state container."""
        self.data.set(data)
    def get(self) -> T:
        """Return the stored value (None when unset or after a reset)."""
        return self.data.get()
    def create_state(self) -> 'SimpleState[T]':
        """Build a fresh, empty state container."""
        return SimpleState()
    def set_state(self, data: 'SimpleState[T]'):
        """Attach the state container (the one produced by create_state())."""
        self.data = data
    def on_epoch_start(self):
        """Clear the stored value at the beginning of each epoch."""
        self.data.reset()
    def on_epoch_end(self):
        # Nothing to finalize; the value is read via get() by the caller.
        pass
| [
"[email protected]"
] | |
11d173e2e009317f099e646a9d101c71ae82a9b9 | 519b4cf7623c40e0280c435246b6cde46853ecc1 | /project/holviapp/utils.py | d731848e8107fe128a56275bbd33d4b1a41ef18a | [
"MIT"
] | permissive | HelsinkiHacklab/asylum | a3fe492f76145c922125949c41acce6e8d4beec4 | 6fcf71fb5c7bb894322039144e814b9edc07d5bb | refs/heads/hhl_changes | 2023-02-16T18:54:17.277017 | 2021-08-13T17:59:46 | 2021-09-13T17:45:45 | 47,038,401 | 1 | 1 | MIT | 2023-02-03T13:22:48 | 2015-11-28T20:28:58 | Python | UTF-8 | Python | false | false | 2,574 | py | # -*- coding: utf-8 -*-
import holviapi
import holvirc
from django.conf import settings
# Module-level cached Holvi connection; created lazily by get_connection().
CONNECTION_SINGLETON = None
def apikey_configured():
    """True when both a Holvi pool and an API key are present in settings."""
    return bool(settings.HOLVI_POOL and settings.HOLVI_APIKEY)
def userauth_configured():
    """True when a Holvi pool plus username/password are present in settings."""
    return bool(settings.HOLVI_POOL and settings.HOLVI_USER and settings.HOLVI_PASSWORD)
def api_configured():
    """Check that we have some API config (either an API key or user/password)."""
    return apikey_configured() or userauth_configured()
def get_connection():
    """Shorthand connection singleton getter.

    Lazily creates the module-level connection on first use. User/password
    (holvirc) config is tried first, but an API key, when also present,
    takes precedence: its assignment overwrites the holvirc connection.

    Raises:
        RuntimeError: when neither auth method is configured.
    """
    global CONNECTION_SINGLETON
    if CONNECTION_SINGLETON is not None:
        return CONNECTION_SINGLETON
    if not api_configured():
        raise RuntimeError('Holvi API is not configured')
    if userauth_configured():
        CONNECTION_SINGLETON = holvirc.Connection.singleton(settings.HOLVI_POOL, settings.HOLVI_USER, settings.HOLVI_PASSWORD)
    if apikey_configured():
        CONNECTION_SINGLETON = holviapi.Connection.singleton(settings.HOLVI_POOL, settings.HOLVI_APIKEY)
    return CONNECTION_SINGLETON
def get_invoiceapi():
    """Shorthand API instance creator (invoice API on the shared connection)."""
    return holvirc.InvoiceAPI(get_connection())
def list_invoices(**kwargs):
    """Shorthand accessor for the API method; kwargs pass through as filters."""
    return get_invoiceapi().list_invoices(**kwargs)
def get_invoice(code):
    """Shorthand accessor for the API method; fetch one invoice by its code."""
    return get_invoiceapi().get_invoice(code)
def get_checkoutapi():
    """Shorthand API instance creator for the checkout API.

    Raises:
        RuntimeError: when the active connection is a new-style (holvirc)
            user/password connection — checkout only works with the old
            API-key based holviapi connection.
    """
    cnc = get_connection()
    if isinstance(cnc, (holvirc.Connection, holvirc.connection.Connection)):
        raise RuntimeError("This only works with the old style api keys")
    return holviapi.CheckoutAPI(cnc)
def list_orders(**kwargs):
    """Shorthand accessor for the API method.

    Checkout orders only exist for old-style API-key connections; on a
    user/password (holvirc) connection this degrades to an empty iterator
    instead of raising.
    """
    cnc = get_connection()
    if isinstance(cnc, (holvirc.Connection, holvirc.connection.Connection)):
        # TODO: Log the issue
        return iter([])
    return get_checkoutapi().list_orders(**kwargs)
def get_order(code):
    """Shorthand accessor for the API method; fetch one checkout order.

    Raises RuntimeError (via get_checkoutapi) on user/password connections.
    """
    return get_checkoutapi().get_order(code)
def get_categoriesapi():
    """Shorthand API instance creator for the categories API.

    Returns a holviapi CategoriesAPI for old-style API-key connections and
    a holvirc CategoriesAPI for user/password connections.
    """
    cnc = get_connection()
    if isinstance(cnc, (holviapi.Connection, holviapi.connection.Connection)):
        # Reuse the connection we already fetched instead of calling
        # get_connection() a second time (same singleton, clearer intent).
        return holviapi.CategoriesAPI(cnc)
    return holvirc.CategoriesAPI(cnc)
def get_category(code):
    """Shorthand accessor for the API method; fetch one category by its code."""
    return get_categoriesapi().get_category(code)
| [
"[email protected]"
] | |
8fd04813872bbf94e8bd5b6b62e9db65d869052c | 63d6ca49d6f919daa87d098e70fd0fccbb6d84cf | /environments/environments.py | 6756536a30f327c50113dff0f458ca5b41ecd5bd | [
"MIT"
] | permissive | vpj/reproduceRL | dac2bde9986647e1c4b0b09c7021ffda2b2e5998 | 2db00b4d1f6c9c88029a7f06458f4b93e1b72a8d | refs/heads/main | 2023-07-13T15:22:49.040609 | 2021-08-15T05:21:49 | 2021-08-15T05:21:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,091 | py | import numpy as np
import random
import os
import sys
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
try:
import roboschool
except ImportError:
print('Running without using roboschool')
from terminations import choose_model
#This file provide environments to interact with, consider actions as continuous, need to rewrite otherwise
class Environment(object):
    """Wrapper around a gym environment for the agents in this project.

    On top of the raw env it adds (driven by env_params): seeding, an
    episode-length cap, optional random-action injection (actNoise),
    Gaussian reward noise (rewardSigma), and optional scaling of
    observations into [0, 1] (stateBounded). Actions are treated as
    continuous unless the action space exposes a discrete `n`.
    """
    def __init__(self, env_params):
        """Create and seed the env, then cache state/action-space metadata.

        env_params keys read: 'name', 'seed', 'EpisodeSamples',
        'stateBounded'; optional: 'rewardSigma', 'actNoise'.
        """
        import gym
        try:
            import highway_env
            # parking-v0 is a continuous control task
        except:
            # NOTE(review): bare except hides every failure mode, not just a
            # missing package — consider narrowing to ImportError.
            print(' High way env is not available !!!!!!!!!!!!!!!! ')
        self.name = env_params['name']
        # Names prefixed with 'Disc' map to the same underlying gym id —
        # presumably a discretized variant handled elsewhere; confirm.
        makename = self.name if not self.name.startswith('Disc') else self.name.replace('Disc', '')
        self.instance = gym.make(makename)
        self.instance.seed(env_params['seed'])
        #self.instance.seed = env_params['seed']
        # Seed the global RNGs used below for action/reward noise.
        random.seed(env_params['seed'])
        np.random.seed(env_params['seed'])
        # maximum number of steps allowed for each episode
        #self.TOTAL_STEPS_LIMIT = env_params['TotalSamples']
        if hasattr(self.instance, '_max_episode_steps'):
            self.EPISODE_STEPS_LIMIT = max(env_params['EpisodeSamples'], self.instance._max_episode_steps)
            print('-------------the original longest length is :: -----------------------', self.instance._max_episode_steps)
        else:
            self.EPISODE_STEPS_LIMIT = self.instance._max_episode_steps = env_params['EpisodeSamples']
        self.modelinfo = choose_model(env_params)
        # Noise settings default to 0.0 (disabled) when absent.
        self.reward_noise_sigma = env_params['rewardSigma'] if 'rewardSigma' in env_params else 0.0
        self.actNoise = env_params['actNoise'] if 'actNoise' in env_params else 0.0
        #self.instance._max_episode_steps = env_params['EpisodeSamples']
        # state info
        self.stateDim = self.getStateDim()
        self.stateRange = self.getStateRange()
        self.stateMin = self.getStateMin()
        self.stateBounded = env_params['stateBounded']
        #self.stateBounded = False if np.any(np.isinf(self.instance.observation_space.high)) or np.any(np.isinf(self.instance.observation_space.low)) else True
        # action info
        self.actionDim = self.getControlDim()
        self.actionBound = self.getActBound()
        self.actMin = self.getActMin()
        #if self.name == 'Acrobot-v1':
        self.statehigh = self.instance.observation_space.high
        self.statelow = self.instance.observation_space.low
        # Hard-coded override of the stored bounds for MountainCar.
        # NOTE(review): stateMin/stateRange used for scaling below are NOT
        # updated here — confirm this is intentional.
        if self.name == 'MountainCar-v0' and self.stateBounded:
            self.statehigh = np.array([1., 1.])
            self.statelow = np.array([0., 0.])
        print('stateDim:',self.stateDim)
        print("stateBounded :: ", self.stateBounded)
        print("actionDim", self.actionDim)
        print("actionBound :: ", self.actionBound)
    # Reset the environment for a new episode. return the initial state
    def reset(self):
        """Reset the wrapped env; return the (possibly scaled) initial state."""
        state = self.instance.reset()
        if self.stateBounded:
            # scale into [0, 1] using the observation-space bounds
            scaled_state = (state - self.stateMin)/self.stateRange
            return scaled_state
        return state
    def step(self, action):
        """Advance one step; return (state, reward, done, info).

        With probability self.actNoise the given action is replaced by a
        uniformly random one; Gaussian noise (sigma=reward_noise_sigma) is
        added to the reward; the state is scaled when stateBounded.
        """
        if np.random.uniform(0., 1.) < self.actNoise:
            action = np.random.randint(self.actionDim) if self.actionBound is None \
                else np.random.uniform(-self.actionBound, self.actionBound, self.actionDim)
        state, reward, done, info = self.instance.step(action)
        reward = reward + np.random.normal(0.0, self.reward_noise_sigma)
        if self.stateBounded:
            scaled_state = (state - self.stateMin)/self.stateRange
            return (scaled_state, reward, done, info)
        return (state, reward, done, info)
    def getStateDim(self):
        """Return the observation size: an int for 1-D spaces, else the shape tuple."""
        dim = self.instance.observation_space.shape
        print(dim)
        if len(dim) < 2:
            return dim[0]
        return dim
    # this will be the output units in NN
    def getControlDim(self):
        """Number of discrete actions, or the length of a continuous action vector."""
        # if discrete action
        if hasattr(self.instance.action_space, 'n'):
            return int(self.instance.action_space.n)
        # if continuous action
        return int(self.instance.action_space.sample().shape[0])
    # Return action ranges
    def getActBound(self):
        """Upper action bound for continuous spaces, None for discrete ones."""
        #print self.instance.action_space.dtype
        if hasattr(self.instance.action_space, 'high'):
            #self.action_space = spaces.Box(low=self.instance.action_space.low, high=self.instance.action_space.high, shape=self.instance.action_space.low.shape, dtype = np.float64)
            # assumes every action dimension shares the same bound — TODO confirm
            return self.instance.action_space.high[0]
        return None
    # Return action min
    def getActMin(self):
        """Lower action bounds (full vector) for continuous spaces, None otherwise."""
        if hasattr(self.instance.action_space, 'low'):
            return self.instance.action_space.low
        return None
    # Return state range
    ''' the range is rarely used '''
    def getStateRange(self):
        """Per-dimension width (high - low) of the observation space."""
        return self.instance.observation_space.high - self.instance.observation_space.low
    # Return state min
    def getStateMin(self):
        """Per-dimension lower bound of the observation space."""
        return self.instance.observation_space.low
    # Close the environment and clear memory
    def close(self):
        """Close the wrapped gym environment and free its resources."""
        self.instance.close()
class HighwayEnvironment(Environment):
    """Environment variant that flattens observations into 1-D vectors.

    Note that step()/reset() here call the raw env directly, so the
    parent's action/reward noise and [0, 1] state scaling are bypassed.
    """
    def __init__(self, env_params):
        super(HighwayEnvironment, self).__init__(env_params)
        # Re-derive the state metadata from a flattened observation.
        self.stateDim = self.instance.reset().reshape(-1).shape[0]
        print(' the state dim is :: ========================= ', self.stateDim)
        self.stateRange = self.getStateRange().reshape(-1)
        self.stateMin = self.getStateMin().reshape(-1)
        # action info
        self.actionDim = self.getControlDim()
        # if self.name == 'Acrobot-v1':
        self.statehigh = self.instance.observation_space.high.reshape(-1)
        self.statelow = self.instance.observation_space.low.reshape(-1)
    def step(self, a):
        """Step the raw env and flatten the returned observation."""
        obs, reward, done, info = self.instance.step(a)
        return obs.reshape(-1), reward, done, info
    def reset(self):
        """Reset the raw env and flatten the returned observation."""
        obs = self.instance.reset()
        return obs.reshape(-1)
"[email protected]"
] | |
413a04b1b20c53420cc7512c3aa19b0036de4ad8 | 826c8c24b03a345c6bd293b08e44a5a435a40c6c | /7月租.py | d8f0c280ff1497a66f5ceb8730ef8aa2a332b2bd | [] | no_license | Asterisk0224/python20 | 4e47a05d7d299e2b5d7277ca465e2959ac6288ad | de16b385249c7dc690bb66265a0984f5d928e321 | refs/heads/main | 2023-04-16T13:45:01.087508 | 2021-05-03T08:56:54 | 2021-05-03T08:56:54 | 363,868,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | a=int(input("輸入月租類型"))
# b: total call duration in seconds for the billing period.
b = int(input("通話秒數"))

# Each plan (a) pairs a per-second rate with two discount tiers: a smaller
# discount while the bill stays within the monthly fee, a larger one beyond.
if a == 186:
    if b * 0.09 <= 186:
        print(round(b * 0.09 * 0.9))
    else:
        print(round(b * 0.09 * 0.8))
elif a == 386:
    if b * 0.08 <= 386:
        print(round(b * 0.08 * 0.8))
    else:
        print(round(b * 0.08 * 0.7))
elif a == 586:
    if b * 0.07 <= 586:
        print(round(b * 0.07 * 0.7))
    else:
        print(round(b * 0.07 * 0.6))
elif a == 986:
    if b * 0.06 <= 986:
        print(round(b * 0.06 * 0.6))
    else:
        # Bug fix: this branch used 0.07 (the 586 plan's rate); the 986
        # plan bills at 0.06 per second, discounted to 50% over quota.
        print(round(b * 0.06 * 0.5))
else:
    print("錯誤")
"[email protected]"
] | |
3d88c5133c882da1c4cccc6b1b0ca1a5eb0f8501 | fe00d59cb37c0e6f8d1886a510573c55dae5df3a | /Scripts/data_population_scripts/populating_items.py | eb2ee846c0e46be8bfa8b8519253076807bc21d9 | [
"MIT"
] | permissive | AbhinavS99/OnlineShop | 76980ee5bb346b3dc74ea58c1d99788e4f4e38c6 | 47ea8bbb03aade88bccc4218b3f583181f1118f0 | refs/heads/main | 2023-03-01T14:23:18.676272 | 2021-02-05T22:21:06 | 2021-02-05T22:21:06 | 336,397,631 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import mysql.connector as sql
# SECURITY NOTE: database credentials are hard-coded below; move them into
# environment variables or a config file before sharing or deploying.
db = sql.connect(
    host="localhost",
    user="root",
    passwd="himraj18",
    database="project67"
)

# populating items using this code
# Seed rows for the `item` table:
# (id, name, brand, category, price, stock, mfg_date, expiry_date)
attributes = [
    [1005, "Travel Bag", "American Tourister", "Bags", 1525.00, 15, '2020-03-01', None],
    [1006, "Coca Cola", "Coca Cola", "Drinks", 15.00, 250, '2020-04-03', '2022-04-03'],
    [1007, "Red Bull", "Red Bull", "Drinks", 105.00, 250, '2020-04-03', '2022-04-03'],
    [1008, "Pepsi", "Pepsi Co", "Drinks", 20.00, 100, '2020-04-03', '2022-04-03'],
    [1009, "Blue Pen", "Parker", "Stationary", 250.00, 20, '2020-04-03', '2022-04-03'],
    [1010, "Black Pen", "Cello", "Stationary", 10.00, 69, '2020-04-03', '2022-04-03'],
    [1011, "Pencil", "Apsara", "Stationary", 5.00, 44, '2020-04-03', '2022-04-03'],
    [1012, "Green Chilli Lays", "Lays", "Chips", 10.00, 100, '2020-04-03', '2022-04-03'],
    [1013, "OREO Biscuit", "OREO", "Biscuit", 25.00, 100, '2020-04-03', '2022-04-03'],
    [1014, "Cookies", "RAVA", "Grocery", 10.00, 1000, '2020-04-03', '2022-04-03'],
    [1015, "Volini", "Volini", "Medical", 55.00, 100, '2020-04-03', '2022-04-03'],
    [1016, "Bandaid", "Johnson & Johnson", "Medical", 5.00, 50, '2020-04-03', '2022-04-03'],
    [1017, "Adidas Fishcut Shoes", "Adidas", "Footwear", 2500.00, 20, '2020-04-03', None],
]

# Parameterized insert keeps values properly escaped/typed; executemany
# sends all rows in one batch instead of a Python-level index loop.
query = "Insert Into item Values (%s,%s,%s,%s,%s,%s,%s,%s);"
cursor = db.cursor()
cursor.executemany(query, attributes)
db.commit()
#l=fetchdetails(cursor)
"[email protected]"
] | |
fe8087783d56301fddb861866779ab604a5e83f6 | 4e5cdffa14c8404d836dc9f034cbbf34a86c7503 | /src/api/urls.py | 9113ef4c4d5780858faf92eee0a13749d97d0775 | [] | no_license | gtdata/publish_data_alpha | da1cf013a5b2c7290d0af7a48d43adc90e301f3f | f1a9753daac7fbe8cc5bed5f30b8601c781449ab | refs/heads/master | 2021-01-20T03:03:36.303264 | 2017-04-18T11:49:29 | 2017-04-18T11:49:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from django.conf.urls import url, include
from rest_framework import routers
import api.views as v
import api.api_def as a
urlpatterns = [
url(r'locations$', v.gazeteer_lookup, name='gazeteer_lookup'),
url(r'^datasets$', v.dataset_lookup, name='dataset_lookup'),
url(r'^status', v.StatusEndpoint.as_view()),
#url(r'^1/', include(router.urls)),
url(r'^1/datasets$', a.DatasetList.as_view()),
url(r'^1/datasets/(?P<name>[\w-]+)$', a.DatasetDetail.as_view(), name='dataset-detail'),
url(r'^1/organisations$', a.OrganisationList.as_view()),
url(r'^1/organisations/(?P<name>[\w-]+)$', a.OrganisationDetail.as_view(), name='organisation-detail'),
]
| [
"[email protected]"
] | |
fd4f34bd37a1850cebf63349ded69eb7dacdc2d9 | 12b54eef11d2e2e3147915c707c825c37e5ee544 | /env/bin/mako-render | 46ce75f9faf4e3e4fe9a61fd02e1e9d099f78ab3 | [] | no_license | EugeneKalentev/bfg-test | a23f39323ed34cbd5f1a2687b9811f7568211c1f | c7888402f1c580fd00a94c3d1c129b13200ee3c2 | refs/heads/master | 2020-04-23T11:25:55.221355 | 2019-02-23T11:04:08 | 2019-02-23T11:04:08 | 171,136,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/home/eugenekalentev/Documents/Code/bfg_test/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from mako.cmd import cmdline
if __name__ == '__main__':
    # Console entry point generated by pip/setuptools for `mako-render`.
    # Strip the "-script.py(w)"/".exe" wrapper suffix from argv[0] so help
    # and error output show the bare command name, then delegate to mako's
    # command-line driver and exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(cmdline())
| [
"[email protected]"
] | ||
34c86687e3422730fe1c5f01255a0144f790b97e | 95ad4e4c6c57ce42d0b17c42623d3a042e6f8269 | /doc/conf.py | 2334998b25b434a9008fbae2cfe867919707f5fc | [
"Apache-2.0"
] | permissive | zhaobin74/cosima-cookbook | e35c19726b35acc7644dffddd7185b1d1d3f2146 | ffd8a2cd0aa4131816119487137f5c8bd664a50e | refs/heads/master | 2021-01-02T09:06:51.915465 | 2017-08-10T15:51:36 | 2017-08-10T15:51:36 | 99,145,230 | 0 | 0 | null | 2017-08-02T17:46:06 | 2017-08-02T17:46:05 | null | UTF-8 | Python | false | false | 5,413 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cosima documentation build configuration file, created by
# sphinx-quickstart on Mon May 29 02:03:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# Third-party extension that renders example notebooks as a gallery; its
# settings live in ``example_gallery_config`` at the bottom of this file.
import sphinx_nbexamples

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode',
              'sphinx.ext.githubpages',
              'sphinx_nbexamples',
              ]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'COSIMA Cookbook'
copyright = '2017, James Munroe'
author = 'James Munroe'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'

# Use the Read the Docs theme; imported here so its install path can be
# passed to Sphinx via html_theme_path.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'cosimadoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'cosima.tex', 'cosima Documentation',
     'cosima', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cosima', 'cosima Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'cosima', 'cosima Documentation',
     author, 'cosima', 'One line description of project.',
     'Miscellaneous'),
]


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# sphinx_nbexamples settings: build notebook galleries from every *.ipynb
# found under these directories (notebooks are used as-is, not re-executed).
example_gallery_config = dict(
    dont_preprocess=True,
    insert_bokeh='0.12.5',
    examples_dirs=['../configurations', '../diagnostics', '../notebooks'],
    pattern='.+.ipynb',
)
| [
"[email protected]"
] | |
ed33a50324b7e3a5eecebca1a2b58fcd87538545 | 97fde6e1ee2c63d4359a005a8a17db87559dd3eb | /api/models.py | 44c72a280c9cf023ae7be9d686aecd138860d6d2 | [] | no_license | Bibin22/Book_project | 6f6d0cce452e0298d16676425eeb2f77e915c3e5 | 9884363927e6b3b559d43a6ead584f1741b54370 | refs/heads/master | 2023-03-31T21:36:02.594431 | 2021-03-24T07:15:46 | 2021-03-24T07:15:46 | 350,402,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from django.db import models
# Create your models here.
class Book(models.Model):
    """A book record: title, price, page count and author name."""

    # ``unique`` is a boolean field option; the original passed ``unique=100``,
    # which only behaved as intended because 100 is truthy.
    book_name = models.CharField(max_length=120, unique=True)
    price = models.IntegerField()
    pages = models.IntegerField()
    author = models.CharField(max_length=100)

    def __str__(self):
        # Human-readable representation (Django admin, shell, templates).
        return self.book_name
"[email protected]"
] | |
648c96dc4e0b5288371503c57fbba9f47aad8f9b | 10cf8b015b4635f42738a3ef72b7ded225bf3ca2 | /vortexasdk/version_utils.py | 599c9d0e1e2e0355b6a33f24a575847f371afdf1 | [
"Apache-2.0"
] | permissive | minjeonglim/python-sdk | ac93237d9088b121a2389ab31943f680990e0eaa | 097948194e91a760f4c788eb33838d9f1a4402e7 | refs/heads/master | 2023-02-11T22:00:05.598784 | 2021-01-07T16:32:54 | 2021-01-07T16:32:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | import json
from urllib.request import urlopen
from distutils.version import StrictVersion
from vortexasdk import __name__ as sdk_pkg_name
from vortexasdk.version import __version__
def get_latest_sdk_version() -> str:
    """Retrieves the latest SDK version from PyPI.

    Queries PyPI's JSON API for this package, discards pre-release
    versions, and returns the highest remaining release number as a string.

    :raises urllib.error.URLError: if PyPI cannot be reached.
    """
    url = f"https://pypi.python.org/pypi/{sdk_pkg_name}/json"
    with urlopen(url) as u:
        data = json.loads(u.read())
    # NOTE(review): StrictVersion (distutils) is deprecated and removed in
    # Python 3.12, and it raises ValueError on any release string that is not
    # strictly "X.Y[.Z][abN]" -- confirm all published releases parse.
    versions = [StrictVersion(release) for release in data["releases"].keys()]
    # StrictVersion exposes ``prerelease`` as None for final releases.
    filtered_out_prerelease_versions = [v for v in versions if v.prerelease is None]
    # Versions sort according to semantic ordering, so the last one is newest.
    latest_version = sorted(filtered_out_prerelease_versions)[-1]
    return str(latest_version)
def is_sdk_version_outdated():
    """Checks whether SDK version is outdated.

    Despite the boolean-sounding name, this returns a 2-tuple
    ``(latest_version, outdated)``: the newest release on PyPI (str) and
    True when the installed ``__version__`` is older than it.
    """
    latest_version = get_latest_sdk_version()
    # StrictVersion supports comparison against a plain version string.
    if StrictVersion(__version__) < latest_version:
        return latest_version, True
    else:
        return latest_version, False
| [
"kitburgess"
] | kitburgess |
31da41a52c69e784f51eed56ad158f668b43f91e | 14f027326e17da2aff631b932835184f4791bde7 | /apps/home/migrations/0003_user_is_staff.py | f8bf99be80c43d4eb5594e018ef9da590eb8090a | [] | no_license | mohbadar/mohe-eform | d7f377e26c58f744946f58f2d3029cb99becd992 | 7590116ac238e798ba20967eb55f18099bf04d85 | refs/heads/master | 2022-11-09T19:23:14.356586 | 2020-06-29T05:49:34 | 2020-06-29T05:49:34 | 274,160,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.0.7 on 2020-06-29 05:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``is_staff`` boolean flag (default False) to the ``user`` model."""

    # Must be applied after the previous migration of the 'home' app.
    dependencies = [
        ('home', '0002_auto_20200629_0944'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"[email protected]"
] | |
91a64805557e29b680b1300121cddd217db78eef | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/aam/aam_authentication_portal_logon_fail.py | 60ee400259987e360a04045658049f18c4e87e9f | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 8,059 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class FailMsgCfg(A10BaseClass):
    """Configuration of the logon-failure message on the default logon-fail page.

    Non-CRUD helper object; use it through its parent. Fields cover the
    message toggle and text (``fail_msg``, ``fail_text``) plus its font
    face, size and colour options (``fail_font*``, ``fail_face``,
    ``fail_size``, ``fail_color*``).
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "fail-msg-cfg"
        self.DeviceProxy = ""
        # Every schema-defined field starts out as an empty string, in the
        # same order the generated code assigned them.
        for field_name in (
            "fail_font_custom",
            "fail_color",
            "fail_size",
            "fail_msg",
            "fail_text",
            "fail_color_value",
            "fail_font",
            "fail_color_name",
            "fail_face",
        ):
            setattr(self, field_name, "")
        # Caller-supplied keyword arguments override the defaults above.
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
class TitleCfg(A10BaseClass):
    """Configuration of the title shown on the default logon-fail page.

    Non-CRUD helper object; use it through its parent. Fields cover the
    title toggle and text (``title``, ``title_text``) plus its font face,
    size and colour options (``title_font*``, ``title_face``,
    ``title_size``, ``title_color*``).
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "title-cfg"
        self.DeviceProxy = ""
        # Every schema-defined field starts out as an empty string, in the
        # same order the generated code assigned them.
        for field_name in (
            "title",
            "title_color",
            "title_color_name",
            "title_font_custom",
            "title_face",
            "title_color_value",
            "title_size",
            "title_text",
            "title_font",
        ):
            setattr(self, field_name, "")
        # Caller-supplied keyword arguments override the defaults above.
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
class Background(A10BaseClass):
    """Background configuration for the default logon-fail page.

    Non-CRUD helper object; use it through its parent. Either a background
    image (``bgfile`` with a ``bgstyle`` of tile/stretch/fit) or a solid
    colour (``bgcolor`` with ``bgcolor_name``/``bgcolor_value``) may be set.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "background"
        self.DeviceProxy = ""
        # Every schema-defined field starts out as an empty string, in the
        # same order the generated code assigned them.
        for field_name in (
            "bgfile",
            "bgstyle",
            "bgcolor_value",
            "bgcolor_name",
            "bgcolor",
        ):
            setattr(self, field_name, "")
        # Caller-supplied keyword arguments override the defaults above.
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
class LogonFail(A10BaseClass):
    """Logon fail page configuration.

    Supports CRUD operations and inherits from `common/A10BaseClass`; this
    is the parent class of this module.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/aam/authentication/portal/{name}/logon-fail`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "logon-fail"
        self.a10_url = "/axapi/v3/aam/authentication/portal/{name}/logon-fail"
        self.DeviceProxy = ""
        # Nested configuration sections default to (distinct) empty dicts.
        self.fail_msg_cfg = {}
        self.title_cfg = {}
        self.background = {}
        self.uuid = ""
        # Caller-supplied keyword arguments override the defaults above.
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
| [
"[email protected]"
] | |
c9564de1d32f486071f2b000dcece402f183afbd | d5a551f64cf8cc639bffb17c33601c60244ad9c8 | /cogs/filter.py | 58632997c02ef870432d87792f6af078af480daf | [] | no_license | IAmJSD/6thBot | 413dae1a306fd08978ebba0920c6b5dad258c96b | c566816d036a0e0b8bed11bcfba857e50b85c2b9 | refs/heads/master | 2023-06-11T11:04:22.401997 | 2020-04-29T19:27:39 | 2020-04-29T19:27:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,889 | py | from discord.ext import commands
from discord import Member, Guild, Message, NotFound, TextChannel, Embed, Role
from asyncio import sleep
from util.timeformatter import highest_denom
from typing import Union
class Filter(commands.Cog):
    """New-member filter cog.

    Gives each joining member a configurable "filter" (mute) role, sends the
    guild's welcome messages, and removes the role after a per-guild timer —
    or, in manual-verification mode, leaves it for staff to remove.
    All per-guild state lives in ``bot.guild_settings[str(guild.id)]``.
    """

    def __init__(self, bot):
        self.bot = bot

    def get_filter_time(self, guild: Guild) -> int:
        """
        :param guild: a discord.Guild object
        :return: the filter time, in seconds.
        """
        guild_settings = self.bot.guild_settings[str(guild.id)]
        # Stored value is in minutes; default is 15 minutes.
        return 60 * guild_settings.get("filter_time", 15)

    def get_filter_role(self, guild: Guild):
        """Return the guild's configured filter Role, or None if unset/deleted."""
        guild_settings = self.bot.guild_settings[str(guild.id)]
        filter_role_id = guild_settings.get("filter_role_id", None)
        if filter_role_id is None:
            return None
        return guild.get_role(filter_role_id)

    def get_manual_chl(self, guild: Guild):
        """Return the channel where manual-verification notices go, or None."""
        guild_settings = self.bot.guild_settings[str(guild.id)]
        manual_chl_id = guild_settings.get("manual_chl_id", None)
        if manual_chl_id is None:
            return None
        return guild.get_channel(manual_chl_id)

    def get_manual_content(self, guild: Guild):
        """Return the manual-verification notice text, or None if unset."""
        guild_settings = self.bot.guild_settings[str(guild.id)]
        return guild_settings.get("manual_content", None)

    def get_welcome_chl(self, guild: Guild):
        """Return the guild's welcome channel, or None if unset/deleted."""
        guild_settings = self.bot.guild_settings[str(guild.id)]
        welcome_chl_id = guild_settings.get("welcome_chl_id", None)
        if welcome_chl_id is None:
            return None
        return guild.get_channel(welcome_chl_id)

    def is_manual(self, guild: Guild) -> bool:
        """True when the guild is in manual-verification mode."""
        guild_settings = self.bot.guild_settings[str(guild.id)]
        return guild_settings.get("manual", False)

    # could replace presence check with something else?
    async def send_welcomes(self, member):
        """Send every configured welcome message for *member*'s guild.

        Supports the placeholders ``<user>``, ``<timer>`` and
        ``<verification>`` in each message's content.
        """
        guild_settings = self.bot.guild_settings[str(member.guild.id)]
        # Default to no messages instead of raising KeyError (which would
        # abort on_member_join and leave the member stuck with the filter
        # role); matches the `.get` usage in the `welcome` command.
        welcome_messages: dict = guild_settings.get('welcome_messages', {})
        filter_secs = self.get_filter_time(member.guild)

        # Manual verification text
        if self.is_manual(member.guild):
            text = "We're in manual verification, so you'll need to **__contact a member of staff__** to get verified."
        else:
            text = f"You'll have to wait **__{highest_denom(filter_secs)}__** as a spam prevention measure."

        for name, msg_dict in welcome_messages.items():
            # destination is either a Member or TextChannel
            chl_id = msg_dict['chl_id']
            if chl_id == "dm":
                destination = member
            else:
                destination = member.guild.get_channel(chl_id)
            # Skip messages whose channel has since been deleted.
            if destination is None:
                continue
            content: str = msg_dict['content']
            # Replace keywords
            content = content.replace("<user>", member.mention)
            content = content.replace("<timer>", highest_denom(filter_secs))
            content = content.replace("<verification>", text)
            await destination.send(content)

    @commands.Cog.listener()
    async def on_member_join(self, member: Member):
        """Apply the filter role, send welcomes, and schedule role removal."""
        print("Member joined")
        guild: Guild = member.guild
        # Adds the filter role, if one exists
        filter_role = self.get_filter_role(guild)
        if filter_role is None:
            return
        await member.add_roles(filter_role)
        await self.send_welcomes(member)
        if not self.is_manual(guild):
            # Schedule Role removal by sleeping inside this event handler;
            # in manual mode staff remove the role instead.
            filter_secs = self.get_filter_time(member.guild)
            await sleep(filter_secs)
            await member.remove_roles(filter_role)
            print(f"Removed {filter_role.name} role from {str(member)}")

    @commands.group(invoke_without_command=True)
    @commands.has_guild_permissions(manage_roles=True)
    async def manual(self, ctx):
        """Show the guild's manual-verification status and notify channel."""
        if self.is_manual(ctx.guild):
            toggle_text = "Enabled"
        else:
            toggle_text = "Disabled"
        man_chl = self.get_manual_chl(ctx.guild)
        if man_chl is None:
            chl_text = "None set."
        else:
            chl_text = man_chl.mention
        em = Embed(title="Manual Verification Settings", colour=0xFA8072)
        em.set_author(name=f"Requested by {str(ctx.author)}", icon_url=str(ctx.author.avatar_url))
        em.add_field(name="Status", value=toggle_text)
        em.add_field(name="Notify Channel", value=chl_text)
        em.set_footer(text="Sub-commands: on | off | set | message")
        em.set_thumbnail(url=str(ctx.guild.icon_url))
        await ctx.send(embed=em)

    @manual.command(name="on")
    @commands.has_guild_permissions(manage_roles=True)
    async def manual_on(self, ctx):
        """Enable manual verification and post the notice in the set channel."""
        guild_settings = self.bot.guild_settings[str(ctx.guild.id)]
        man_chl = self.get_manual_chl(ctx.guild)
        if man_chl is None:
            # Bug fix: ctx.send is a coroutine and was never awaited, so the
            # error message was silently dropped.
            await ctx.send("No manual channel has been set.")
            return
        content = self.get_manual_content(ctx.guild)
        if content is None:
            # Bug fix: missing await (see above).
            await ctx.send("No manual message has been set.")
            return
        guild_settings['manual'] = True
        sent = await man_chl.send(content)
        # Remember the posted notice so `manual off` can delete it later.
        guild_settings['man_msg_id'] = sent.id
        await ctx.send("Manual Verification Enabled.")

    @manual.command(name="off")
    @commands.has_guild_permissions(manage_roles=True)
    async def manual_off(self, ctx):
        """Disable manual verification and delete the posted notice, if any."""
        guild_settings = self.bot.guild_settings[str(ctx.guild.id)]
        man_chl = self.get_manual_chl(ctx.guild)
        guild_settings.pop('manual', None)
        man_msg_id = guild_settings.pop('man_msg_id', None)
        if man_msg_id is not None:
            # NOTE(review): if the manual channel was unset/deleted while a
            # notice id is still stored, man_chl is None and fetch_message
            # raises AttributeError here -- confirm whether that can happen.
            try:
                man_msg = await man_chl.fetch_message(man_msg_id)
                await man_msg.delete()
            except NotFound:
                print("Message not found")
        await ctx.send("Manual Verification Disabled.")

    @manual.command(name="set")
    @commands.has_guild_permissions(manage_roles=True)
    async def manual_set(self, ctx, message: Message, channel: TextChannel):
        """Store the manual-verification notice text and the channel to post it in."""
        print("Manual Set")
        guild_settings: dict = self.bot.guild_settings[str(ctx.guild.id)]
        guild_settings['manual_chl_id'] = channel.id
        guild_settings['manual_content'] = message.content
        await ctx.send(f"Set manual verification notice for {channel.mention}")

    @manual.command(name="message")
    @commands.has_guild_permissions(manage_roles=True)
    async def manual_message(self, ctx):
        """Echo the currently configured manual-verification notice."""
        content = self.get_manual_content(ctx.guild)
        if content is None:
            # Bug fix: ctx.send was not awaited, so this reply never sent.
            await ctx.send("No manual message has been set.")
            return
        await ctx.send(content)

    @commands.group(invoke_without_command=True)
    @commands.has_guild_permissions(manage_roles=True)
    async def welcome(self, ctx, name: str = None):
        """Show one welcome message by name, or summarise all welcome settings."""
        guild_settings: dict = self.bot.guild_settings[str(ctx.guild.id)]
        welcome_messages: dict = guild_settings.get("welcome_messages", {})

        # Name specified
        if name is not None:
            welcome_msg = welcome_messages.get(name, None)
            if welcome_msg is None:
                await ctx.send("Couldn't find a welcome message with this name...")
            else:
                await ctx.send(welcome_msg['content'])
            return

        # No name specified: build a "name in location" line per message.
        welcome_list = []
        for name, data in welcome_messages.items():
            chl_id = data['chl_id']
            if isinstance(chl_id, int):
                channel = ctx.guild.get_channel(chl_id)
                if channel is None:
                    location = None
                else:
                    location = channel.mention
            else:
                location = "DMs"
            welcome_list.append(f"`{name}` in {location}")
        print(welcome_list)
        em = Embed(title="Server Welcome Messages", description="\n".join(welcome_list) or "None set.", colour=0xFA8072)
        em.set_footer(text="Sub-commands: add | remove | role | [name]")
        em.set_author(name=f"Requested by {str(ctx.author)}", icon_url=str(ctx.author.avatar_url))
        em.set_thumbnail(url=str(ctx.guild.icon_url))
        filter_role = self.get_filter_role(ctx.guild)
        if filter_role is None:
            role_text = "None set."
        else:
            role_text = filter_role.mention
        em.add_field(name="Filter Role", value=role_text)
        filter_secs = self.get_filter_time(ctx.guild)
        em.add_field(name="Filter Timer", value=highest_denom(filter_secs))
        await ctx.send(embed=em)

    @welcome.command(name="add")
    @commands.has_guild_permissions(manage_roles=True)
    async def welcome_add(self, ctx, message: Message, destination: Union[TextChannel, str], name: str):
        """Register *message* as a welcome message sent to a channel or to DMs."""
        if isinstance(destination, str):
            if destination.lower() != "dm":
                await ctx.send("It looks like you haven't properly specified a channel, or the keyword 'DM'.")
                return
            chl_id = "dm"
        else:
            chl_id = destination.id
        # "welcome_messages": [{'chl_id': 83, 'content': "test"}, {'chl_id': "dm", 'content': "Hi <user>!"}]
        guild_settings: dict = self.bot.guild_settings[str(ctx.guild.id)]
        if 'welcome_messages' not in guild_settings:
            guild_settings['welcome_messages'] = {}
        guild_settings['welcome_messages'][name] = {'chl_id': chl_id, 'content': message.content}
        await ctx.send(f"The following welcome message was added:\n{message.jump_url}")

    @welcome.command(name="remove")
    @commands.has_guild_permissions(manage_roles=True)
    async def welcome_remove(self, ctx, name: str):
        """Delete the welcome message registered under *name*."""
        guild_settings: dict = self.bot.guild_settings[str(ctx.guild.id)]
        welcome_messages: dict = guild_settings['welcome_messages']
        welcome_msg = welcome_messages.get(name, None)
        if welcome_msg is None:
            await ctx.send("Couldn't find a welcome message with this name...")
            return
        welcome_messages.pop(name)
        # Drop the container entirely once the last message is removed.
        if len(welcome_messages) == 0:
            guild_settings.pop('welcome_messages')
        await ctx.send(f"Welcome message `{name}` removed.")

    @welcome.command(name="role")
    @commands.has_guild_permissions(manage_roles=True)
    async def welcome_role(self, ctx, role: Role, filter_minutes: int):
        """Set the filter (mute) role and how many minutes new members keep it."""
        if filter_minutes < 1:
            await ctx.send("Choose a number of minutes greater than 0.")
            return
        guild_settings: dict = self.bot.guild_settings[str(ctx.guild.id)]
        guild_settings['filter_role_id'] = role.id
        guild_settings['filter_time'] = filter_minutes
        await ctx.send(f"Welcome role set to muted {role.mention}, will be removed after {filter_minutes} minutes.")
def setup(bot):
    # discord.py extension entry point: called by bot.load_extension to
    # register this cog on the bot.
    bot.add_cog(Filter(bot))
| [
"[email protected]"
] | |
b830c5da546adf700dc6f5bf2e732a222d1e10e7 | 14f217c46834fc2fd40b516e189094aa0c0de2f6 | /Model Development/Linear Regression/predict.py | 1304da67b9422a1657ec36b2f2c889702e74d44b | [] | no_license | Sumyak-Jain/Data-Analysis | bb3cdb4f6bac37481abe26d79b1f9a30f70e711c | fb18f0608c121ab6f2d0853de9eae0d4e335876b | refs/heads/master | 2022-11-13T13:02:24.531152 | 2020-07-10T21:04:35 | 2020-07-10T21:04:35 | 193,383,707 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # We will predict car price using highway-mpg by making a linear regression model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Load the pre-cleaned automobile dataset used by the course.
url = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/automobileEDA.csv'
df = pd.read_csv(url)

lm= LinearRegression()
# NOTE(review): bare expressions such as `lm` here and `Yhat[0:5]` below only
# display output in a notebook; as a script they are no-ops.
lm

# Predictor must be 2-D for scikit-learn (hence the double brackets); the
# response is the car price.
X = df[['highway-mpg']]
Y = df['price']
lm.fit(X,Y)

Yhat=lm.predict(X)
Yhat[0:5]

# Fitted line: price = a + b * highway-mpg
a=lm.intercept_
b=lm.coef_
# Yhat=a+bX
# Predicted price for a car rated 30 mpg on the highway.
print(a+b*30)
| [
"[email protected]"
] | |
ee7a0f9a8194a37125dfa7ce2701c7f6b1da5b7c | b226ab6d84f76707b961b268e9b0fd9b6309c630 | /image/image-corr4.py | c66de7aabe66d983211e7a36b236e25495c33417 | [] | no_license | brycekelleher/pythondev | 5f68aa2ec49f30e1019994db708694702edd157e | a505298d517bb3fe37bb4cfe594662d1cc9f8f50 | refs/heads/master | 2020-09-12T21:32:08.449270 | 2016-09-09T01:48:38 | 2016-09-09T01:48:38 | 67,559,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | from scipy import signal
from scipy import misc
import numpy as np
import matplotlib.pyplot as plt
import Image
from skimage.feature import match_template
from scipy.ndimage import generic_filter
# Load the target image as 8-bit greyscale and unpack the flat pixel list
# into a 2-D float array; PIL reports size as (width, height), so reshape
# to (height, width).
im = Image.open("outfb31_render31-hw-broken.bmp").convert("L")
data = np.array(im.getdata(), dtype='float32')
data = data.reshape(im.size[1], im.size[0])

#data = misc.lena()
lena = data

#template = np.copy(lena[0:4, 34:40]) # right eye
#template = np.copy(lena[235:295, 310:370]) # right eye
# Cut the search template out of the image itself.
template = np.copy(lena[35:73, 191:240]) # right eye
#lena = lena + np.random.randn(*lena.shape) * 50 # add noise

#corr = match_template(lena, template)
# Zero-mean / std-scale both arrays before correlating -- presumably intended
# to approximate a normalised cross-correlation; verify the scaling factor.
lena = (lena - lena.mean()) / (lena.std() * len(lena))
template = (template - template.mean()) / (template.std())
corr = signal.correlate2d(lena, template, mode='same')

# Row/column of the correlation peak, i.e. the best template match.
y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
# NOTE(review): Python 2 print statement -- this script will not run under
# Python 3 as-is.
print "location", x, y

# Rescale the correlation surface and save it as an 8-bit image for inspection.
corrscaled = corr.flatten() * 256
im = Image.new("L", (corr.shape[1], corr.shape[0]))
im.putdata(corrscaled)
im.save("corr.bmp")

# Show original, template and correlation side by side, marking the match.
fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
ax_orig.imshow(lena, cmap='gray')
ax_orig.set_title('Original')
ax_orig.set_axis_off()
ax_template.imshow(template, cmap='gray')
ax_template.set_title('Template')
ax_template.set_axis_off()
ax_corr.imshow(corr, cmap='gray')
ax_corr.set_title('Cross-correlation')
ax_corr.set_axis_off()
ax_orig.plot(x, y, 'ro')
fig.show()
| [
"[email protected]"
] | |
3c47a1aef5e90399748239f82621812402b29976 | ff5bdeba3c451a8eeaf5caa6782ffa862c32986a | /measure-of-geometric-mean/index.py | 5a4d46f17d21331390b7876806ed6eddb147eca5 | [] | no_license | gregogalante/python-exercises | 65e02267f9409ba19d9484ce260c71b8136d04d0 | fd254ab5e7a0d049d20678a2a3f4759f01169970 | refs/heads/master | 2021-09-05T14:04:18.050713 | 2018-01-28T15:03:28 | 2018-01-28T15:03:28 | 115,612,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import numpy
# Sample observations; the geometric mean is the n-th root of their product.
results = [1, 1, 2, 4, 2, 3, 5, 5, 3, 1, 2, 4, 5, 1, 2, 2, 1, 5, 4, 4, 3, 3, 3, 5]

num_items = len(results)

# Multiply all observations together, then take the num_items-th root.
product = 1
for result in results:
    product *= result

geometric_mean = product ** (1.0 / num_items)

# Python 3 print function (the original used the Python 2 print statement,
# which is a syntax error under Python 3).
print('the geometric mean is:', geometric_mean)
"[email protected]"
] | |
7511e9b2c84e342591484e8f5d44c2fc8b232f67 | 183f17993cefb4aea696d949219d7a98e85eeb73 | /aes.py | 1879d435070d08ba012a3bb0a62f1b4a01305c6e | [] | no_license | putte/PIN-Memory | 7a561f4df098f0e448484edbf7a334a4a0a3449b | d541738b4cc4d62fa25d61e77b5912e61ecb178d | refs/heads/master | 2016-09-16T00:26:37.699931 | 2011-07-22T21:33:53 | 2011-07-22T21:33:53 | 2,090,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,066 | py | #!/usr/bin/python
#
# AES - Advanced Encryption Standard
#
# Copyright (c) 2007 Josh Davis ( http://www.josh-davis.org ),
# Laurent Haan ( http://www.progressive-coding.com )
#
# Licensed under the MIT License ( http://www.opensource.org/licenses/mit-license.php ):
#
import math
class AES:
    """Pure-Python AES (Rijndael) block cipher.

    Operates on single 16-byte blocks represented as lists of integers in
    the range 0-255, with 128/192/256-bit keys.  Derived from the slowaes
    project by Josh Davis / Laurent Haan (MIT licensed).

    Bug fixes relative to the previous revision:
      * getSBoxInvert: ``returnself. rsbox[num]`` raised NameError (and the
        method returned None) — now ``return self.rsbox[num]``.
      * aes_invMain: the round loop decremented by 0, looping forever.
      * decrypt: ``block`` was never initialized to 16 entries, so the
        state-mapping loop raised IndexError.
    """

    # Valid key sizes, in bytes.
    keySize = {
        "SIZE_128": 16,
        "SIZE_192": 24,
        "SIZE_256": 32}

    # Rijndael S-box (forward byte substitution table).
    sbox = [0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
            0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
            0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
            0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
            0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
            0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
            0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
            0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
            0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
            0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
            0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
            0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
            0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
            0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
            0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
            0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16]

    # Rijndael inverted S-box (decryption byte substitution table).
    rsbox = [0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
             0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
             0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
             0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
             0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
             0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
             0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
             0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
             0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
             0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
             0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
             0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
             0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d]

    # Rijndael key-schedule round constants.
    Rcon = [0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
            0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
            0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
            0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d,
            0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab,
            0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d,
            0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25,
            0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01,
            0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d,
            0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa,
            0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a,
            0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02,
            0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
            0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
            0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
            0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
            0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f,
            0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5,
            0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33,
            0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb]

    def getSBoxValue(self, num):
        """Return the S-box substitution for byte value `num`."""
        return self.sbox[num]

    def getSBoxInvert(self, num):
        """Return the inverse S-box substitution for byte value `num`."""
        # BUG FIX: was "returnself. rsbox[num]" (NameError; returned None).
        return self.rsbox[num]

    def rotate(self, word):
        """Rotate a 4-byte word one byte to the left, in place.

        rotate([0x1d, 0x2c, 0x3a, 0x4f]) -> [0x2c, 0x3a, 0x4f, 0x1d]
        """
        word.append(word.pop(0))
        return word

    def getRconValue(self, num):
        """Return the key-schedule round constant for iteration `num`."""
        return self.Rcon[num]

    def core(self, word, iteration):
        """Key-schedule core: rotate, substitute, and XOR in the round constant."""
        word = self.rotate(word)
        for i in range(4):
            word[i] = self.getSBoxValue(word[i])
        # The round constant only affects the first (leftmost) byte.
        word[0] ^= self.getRconValue(iteration)
        return word

    def expandKey(self, key, size, expandedKeySize):
        """Expand a 16/24/32-byte key into a 176/208/240-byte key schedule.

        `size` is the key length in bytes; `expandedKeySize` is
        16 * (rounds + 1).  Returns the expanded key as a list of ints.
        """
        expandedKey = [0] * expandedKeySize
        # The schedule starts with the raw key bytes.
        for j in range(size):
            expandedKey[j] = key[j]
        currentSize = size
        rconIteration = 1
        t = [0, 0, 0, 0]
        while currentSize < expandedKeySize:
            # Previous four schedule bytes feed the next word.
            for k in range(4):
                t[k] = expandedKey[(currentSize - 4) + k]
            # Every `size` bytes, run the core transform.
            if currentSize % size == 0:
                t = self.core(t, rconIteration)
                rconIteration += 1
            # 256-bit keys apply an extra S-box pass halfway through each
            # key-length stride.
            if size == self.keySize["SIZE_256"] and ((currentSize % size) == 16):
                for l in range(4):
                    t[l] = self.getSBoxValue(t[l])
            # XOR with the word one key-length back to form the next word.
            for m in range(4):
                expandedKey[currentSize] = expandedKey[currentSize - size] ^ t[m]
                currentSize += 1
        return expandedKey

    def addRoundKey(self, state, roundKey):
        """XOR the 16-byte round key into the state (in place)."""
        for i in range(16):
            state[i] ^= roundKey[i]
        return state

    def createRoundKey(self, expandedKey, roundKeyPointer):
        """Extract (and transpose) one 16-byte round key from the schedule."""
        roundKey = [0] * 16
        for i in range(4):
            for j in range(4):
                roundKey[j * 4 + i] = expandedKey[roundKeyPointer + i * 4 + j]
        return roundKey

    def galois_multiplication(self, a, b):
        """Multiply two bytes in the Rijndael finite field GF(2^8)."""
        p = 0
        for _ in range(8):
            if b & 1:
                p ^= a
            hi_bit_set = a & 0x80
            a = (a << 1) & 0xFF  # keep `a` within 8 bits
            if hi_bit_set:
                a ^= 0x1b  # reduce modulo the Rijndael polynomial x^8+x^4+x^3+x+1
            b >>= 1
        return p

    def subBytes(self, state, isInv):
        """Substitute every state byte through the (inverse) S-box."""
        for i in range(16):
            if isInv:
                state[i] = self.getSBoxInvert(state[i])
            else:
                state[i] = self.getSBoxValue(state[i])
        return state

    def shiftRows(self, state, isInv):
        """Apply the (inverse) row shift to each of the 4 state rows."""
        for i in range(4):
            # Row i is shifted by i positions.
            state = self.shiftRow(state, i * 4, i, isInv)
        return state

    def shiftRow(self, state, statePointer, nbr, isInv):
        """Rotate one 4-byte row of the state by `nbr` positions."""
        for _ in range(nbr):
            if isInv:
                # Rotate one position to the right.
                tmp = state[statePointer + 3]
                for j in range(3, 0, -1):
                    state[statePointer + j] = state[statePointer + j - 1]
                state[statePointer] = tmp
            else:
                # Rotate one position to the left.
                tmp = state[statePointer]
                for j in range(3):
                    state[statePointer + j] = state[statePointer + j + 1]
                state[statePointer + 3] = tmp
        return state

    def mixColumns(self, state, isInv):
        """Apply the (inverse) MixColumns transform to all 4 state columns."""
        column = [0, 0, 0, 0]
        for i in range(4):
            # Gather one column from the row-major state layout.
            for j in range(4):
                column[j] = state[(j * 4) + i]
            column = self.mixColumn(column, isInv)
            # Scatter the mixed column back.
            for k in range(4):
                state[(k * 4) + i] = column[k]
        return state

    def mixColumn(self, column, isInv):
        """Galois-field matrix multiplication of one 4-byte column."""
        if isInv:
            mult = [14, 9, 13, 11]
        else:
            mult = [2, 1, 1, 3]
        cpy = list(column)
        g = self.galois_multiplication
        column[0] = g(cpy[0], mult[0]) ^ g(cpy[3], mult[1]) ^ g(cpy[2], mult[2]) ^ g(cpy[1], mult[3])
        column[1] = g(cpy[1], mult[0]) ^ g(cpy[0], mult[1]) ^ g(cpy[3], mult[2]) ^ g(cpy[2], mult[3])
        column[2] = g(cpy[2], mult[0]) ^ g(cpy[1], mult[1]) ^ g(cpy[0], mult[2]) ^ g(cpy[3], mult[3])
        column[3] = g(cpy[3], mult[0]) ^ g(cpy[2], mult[1]) ^ g(cpy[1], mult[2]) ^ g(cpy[0], mult[3])
        return column

    def aes_round(self, state, roundKey):
        """One forward round: SubBytes, ShiftRows, MixColumns, AddRoundKey."""
        state = self.subBytes(state, False)
        state = self.shiftRows(state, False)
        state = self.mixColumns(state, False)
        state = self.addRoundKey(state, roundKey)
        return state

    def aes_invRound(self, state, roundKey):
        """One inverse round: InvShiftRows, InvSubBytes, AddRoundKey, InvMixColumns."""
        state = self.shiftRows(state, True)
        state = self.subBytes(state, True)
        state = self.addRoundKey(state, roundKey)
        state = self.mixColumns(state, True)
        return state

    def aes_main(self, state, expandedKey, nbrRounds):
        """Run the full forward cipher over one mapped state block."""
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 0))
        for i in range(1, nbrRounds):
            state = self.aes_round(state, self.createRoundKey(expandedKey, 16 * i))
        # The final round omits MixColumns.
        state = self.subBytes(state, False)
        state = self.shiftRows(state, False)
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 16 * nbrRounds))
        return state

    def aes_invMain(self, state, expandedKey, nbrRounds):
        """Run the full inverse cipher over one mapped state block."""
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 16 * nbrRounds))
        # BUG FIX: this loop previously decremented its counter by 0,
        # looping forever.
        for i in range(nbrRounds - 1, 0, -1):
            state = self.aes_invRound(state, self.createRoundKey(expandedKey, 16 * i))
        state = self.shiftRows(state, True)
        state = self.subBytes(state, True)
        state = self.addRoundKey(state, self.createRoundKey(expandedKey, 0))
        return state

    def _nbr_rounds(self, size):
        """Return the round count for a key size in bytes, or None if invalid."""
        if size == self.keySize["SIZE_128"]:
            return 10
        if size == self.keySize["SIZE_192"]:
            return 12
        if size == self.keySize["SIZE_256"]:
            return 14
        return None

    def encrypt(self, iput, key, size):
        """Encrypt one 16-byte block `iput` with `key` of `size` bytes.

        Both input and output are lists of 16 ints.  Returns None for an
        unsupported key size.
        """
        nbrRounds = self._nbr_rounds(size)
        if nbrRounds is None:
            return None
        expandedKeySize = 16 * (nbrRounds + 1)
        # Map the row-major input into the column-major AES state order:
        # a0,0 a1,0 a2,0 a3,0 a0,1 a1,1 ... a2,3 a3,3
        block = [0] * 16
        for i in range(4):
            for j in range(4):
                block[i + j * 4] = iput[i * 4 + j]
        expandedKey = self.expandKey(key, size, expandedKeySize)
        block = self.aes_main(block, expandedKey, nbrRounds)
        # Unmap the state back into row-major output order.
        output = [0] * 16
        for k in range(4):
            for l in range(4):
                output[k * 4 + l] = block[k + l * 4]
        return output

    def decrypt(self, iput, key, size):
        """Decrypt one 16-byte block `iput` with `key` of `size` bytes.

        Both input and output are lists of 16 ints.  Returns None for an
        unsupported key size.
        """
        nbrRounds = self._nbr_rounds(size)
        if nbrRounds is None:
            return None
        expandedKeySize = 16 * (nbrRounds + 1)
        # BUG FIX: `block` was previously left empty here, so the mapping
        # loop below raised IndexError on every decrypt call.
        block = [0] * 16
        for i in range(4):
            for j in range(4):
                block[i + j * 4] = iput[i * 4 + j]
        expandedKey = self.expandKey(key, size, expandedKeySize)
        block = self.aes_invMain(block, expandedKey, nbrRounds)
        output = [0] * 16
        for k in range(4):
            for l in range(4):
                output[k * 4 + l] = block[k + l * 4]
        return output
#
# END AES SECTION
#
class AESModeOfOperation:
    """Block cipher modes of operation (OFB, CFB, CBC) on top of AES.

    Bug fixes relative to the previous revision of ``encrypt`` (CBC mode):
      * the first block must be XORed with the IV and subsequent blocks
        with the previous ciphertext block — the two cases were swapped;
      * ``iput`` was an empty list, so indexed assignment raised IndexError.
    """

    # Shared single-block cipher instance.
    aes = AES()

    # Supported modes of operation.
    modeOfOperation = {
        "OFB": 0,
        "CFB": 1,
        "CBC": 2}

    def convertString(self, string, start, end, mode):
        """Convert string[start:end] (at most 16 chars) into byte values.

        CBC always returns a full 16-entry, zero-padded block; stream
        modes return exactly ``end - start`` entries.
        """
        if end - start > 16:
            end = start + 16
        if mode == self.modeOfOperation["CBC"]:
            ar = [0] * 16
        else:
            ar = [0] * (end - start)
        for j, i in enumerate(range(start, end)):
            ar[j] = ord(string[i])
        return ar

    def _xor_block(self, a, b):
        """XOR two byte lists over 16 positions, missing trailing bytes as 0."""
        out = [0] * 16
        for i in range(16):
            x = a[i] if i < len(a) else 0
            y = b[i] if i < len(b) else 0
            out[i] = x ^ y
        return out

    def encrypt(self, stringIn, mode, key, size, IV):
        """Encrypt ``stringIn`` under the given mode.

        stringIn -- input string
        mode     -- one of ``modeOfOperation``'s values
        key      -- key as a list of byte values, ``size`` bytes long
        size     -- key length in bytes (16/24/32)
        IV       -- 128-bit initialization vector as a list of byte values

        Returns ``(mode, len(stringIn), cipher_bytes)``; returns None for a
        malformed key or IV length.
        """
        if len(key) % size:
            return None
        if len(IV) % 16:
            return None
        iput = [0] * 16
        ciphertext = [0] * 16
        cipherOut = []
        firstRound = True
        if stringIn != None:
            for j in range(int(math.ceil(float(len(stringIn)) / 16))):
                start = j * 16
                end = min(start + 16, len(stringIn))
                plaintext = self.convertString(stringIn, start, end, mode)
                if mode == self.modeOfOperation["CFB"]:
                    # Encrypt IV / previous ciphertext, XOR with plaintext.
                    if firstRound:
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        output = self.aes.encrypt(iput, key, size)
                    ciphertext = self._xor_block(plaintext, output)
                    cipherOut.extend(ciphertext[:end - start])
                    iput = ciphertext
                elif mode == self.modeOfOperation["OFB"]:
                    # Keystream feeds back independently of the data.
                    if firstRound:
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        output = self.aes.encrypt(iput, key, size)
                    ciphertext = self._xor_block(plaintext, output)
                    cipherOut.extend(ciphertext[:end - start])
                    iput = output
                elif mode == self.modeOfOperation["CBC"]:
                    # BUG FIX: first block XORs the IV; later blocks chain
                    # the previous ciphertext block.
                    if firstRound:
                        iput = self._xor_block(plaintext, IV)
                        firstRound = False
                    else:
                        iput = self._xor_block(plaintext, ciphertext)
                    ciphertext = self.aes.encrypt(iput, key, size)
                    # Always 16 bytes because of CBC zero-padding.
                    cipherOut.extend(ciphertext[:16])
        # NOTE(review): a None stringIn still raises TypeError here, as in
        # the original interface.
        return mode, len(stringIn), cipherOut

    def decrypt(self, cipherIn, originalsize, mode, key, size, IV):
        """Decrypt ``cipherIn`` (a list of byte values) under the given mode.

        cipherIn     -- encrypted bytes as a list of ints
        originalsize -- unencrypted string length (required to trim CBC padding)
        mode         -- one of ``modeOfOperation``'s values
        key          -- key as a list of byte values, ``size`` bytes long
        size         -- key length in bytes (16/24/32)
        IV           -- 128-bit initialization vector as a list of byte values

        Returns the decrypted string; returns None for a malformed key or
        IV length.
        """
        if len(key) % size:
            return None
        if len(IV) % 16:
            return None
        iput = []
        stringOut = ''
        firstRound = True
        if cipherIn != None:
            for j in range(int(math.ceil(float(len(cipherIn)) / 16))):
                start = j * 16
                end = min(start + 16, len(cipherIn))
                ciphertext = cipherIn[start:end]
                if mode == self.modeOfOperation["CFB"]:
                    # Decryption uses the forward cipher in CFB/OFB modes.
                    if firstRound:
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        output = self.aes.encrypt(iput, key, size)
                    plaintext = self._xor_block(output, ciphertext)
                    for k in range(end - start):
                        stringOut += chr(plaintext[k])
                    iput = ciphertext
                elif mode == self.modeOfOperation["OFB"]:
                    if firstRound:
                        output = self.aes.encrypt(IV, key, size)
                        firstRound = False
                    else:
                        output = self.aes.encrypt(iput, key, size)
                    plaintext = self._xor_block(output, ciphertext)
                    for k in range(end - start):
                        stringOut += chr(plaintext[k])
                    iput = output
                elif mode == self.modeOfOperation["CBC"]:
                    output = self.aes.decrypt(ciphertext, key, size)
                    if firstRound:
                        plaintext = self._xor_block(IV, output)
                        firstRound = False
                    else:
                        plaintext = self._xor_block(iput, output)
                    # Trim zero padding off the final block using the
                    # caller-supplied original length.
                    if originalsize < end:
                        count = originalsize - start
                    else:
                        count = end - start
                    for k in range(count):
                        stringOut += chr(plaintext[k])
                    iput = ciphertext
        return stringOut
#
# END MODE OF OPERATION SECTION
#
if __name__ == "__main__":
    # Demo / smoke test: OFB-encrypt a short string with a fixed 128-bit key
    # and IV, round-trip the ciphertext through a unicode string, decrypt,
    # and print each intermediate stage.
    # NOTE(review): Python 2 only (`print` statements, `unichr`).
    moo = AESModeOfOperation()
    mode,orig_len,ciph = moo.encrypt(u"Test!",moo.modeOfOperation["OFB"],[143,194,34,208,145,203,230,143,177,246,97,206,145,92,255,84],moo.aes.keySize["SIZE_128"],[103,35,148,239,76,213,47,118,255,222,123,176,106,134,98,92])
    print ciph
    # Re-encode the cipher byte values as unicode code points and back,
    # demonstrating a lossless transport representation.
    uciph = u""
    for c in range(len(ciph)):
        uciph += unichr(ciph[c])
    print uciph
    ciph2 = []
    for c in uciph:
        ciph2.append(ord(c))
    print ciph2
    # Decrypt with the same key/IV; should print the original "Test!".
    decr = moo.decrypt(ciph2,orig_len,mode,[143,194,34,208,145,203,230,143,177,246,97,206,145,92,255,84],moo.aes.keySize["SIZE_128"],[103,35,148,239,76,213,47,118,255,222,123,176,106,134,98,92])
    print decr
| [
"[email protected]"
] | |
626b6c86aaba310b53452ab321b0ff06147b5b84 | fac7b312d36edaec6b08a8e2378c5826ef8e98ef | /python-scripts/geokit_py/cvat/main.py | 210e4148d9abadc9ae6943679e407c9e33719dc1 | [] | no_license | developmentseed/geokit | f8a23c5e7862ca86a497cf9f64ad5b71fc95a9a0 | 00169304afae3a10ad327f2b76ed43430f0b53a9 | refs/heads/develop | 2023-07-19T11:46:06.637119 | 2023-04-05T18:46:30 | 2023-04-05T18:46:30 | 118,031,551 | 32 | 2 | null | 2023-08-31T14:34:27 | 2018-01-18T20:00:37 | Python | UTF-8 | Python | false | false | 4,201 | py | """
Script for cvat module
Author: @developmentseed
"""
import click
# chain=True lets several subcommands be invoked in one CLI call.
@click.group(chain=True)
def cli():
    """Geokit CVAT utilities: inspect, convert and fix CVAT annotation files."""
    # click.echo(click.style("========= CVAT =============", fg="green"))
    pass
@cli.command("intersectionbox")
@click.option(
    "--in_file", required=True, type=str, help="Path to xml cvat file to be processed."
)
@click.option(
    "--tolerance",
    default=70.0,
    type=float,
    required=False,
    help="tolerance to filter box area, default 70 (70% area of image, max area is 100%).",
)
def run_intersectionbox(in_file, tolerance):
    """
    find the boxes that intersect and are greater than the tolerance,
    for default tolerance is 70 (70% of the area of the small intersection box)
    """
    # Import is deferred to invocation time — presumably so the CLI group
    # loads even if this subcommand's dependencies are missing; confirm.
    from .intersectionbox import intersectionbox

    intersectionbox(in_file, tolerance)
@cli.command("smallbox")
@click.option(
    "--in_file", required=True, type=str, help="Path to xml cvat file to be processed."
)
@click.option(
    "--tolerance",
    default=1.0,
    type=float,
    required=False,
    help="tolerance to filter box area, default 1 (1% image).",
)
def run_smallbox(in_file, tolerance):
    """
    find the boxes with an area smaller than the image, for default tolerance is 1 (1% of area image).
    """
    # Deferred import: only load this subcommand's implementation when run.
    from .smallbox import smallbox

    smallbox(in_file, tolerance)
@cli.command("count_tag")
@click.option(
    "--xml_file",
    required=True,
    multiple=True,
    type=str,
    help="Path to xml cvat file to be processed.",
)
def run_count_tag(xml_file):
    """
    Count xml-cvat tags; accepts multiple --xml_file options.
    """
    from .count_tags import count_xml_tags

    # `stats` is filled in-place by count_xml_tags; each call returns the
    # number of images found in that file.
    stats = {}
    num_images = 0
    # click's multiple=True already yields a tuple; no list() copy needed.
    for path in xml_file:
        num_images += count_xml_tags(path, stats)
    print(f"Total Images: {num_images}")
    for tag, count in stats.items():
        print(f"{tag},\t {count}")
@cli.command("xml2csv")
@click.option(
    "--xml_file", required=True, type=str, help="Path to xml cvat file to be processed."
)
@click.option("--csv_file", required=True, type=str, help="Path to csv file output.")
@click.option("--full", default=False, type=bool, help="full mode")
def run_xml2csv(xml_file, csv_file, full):
    """
    Convert xml to csv file
    """
    from .xml2csv import to_csv, to_csv_full

    # Select the converter variant up front, then run it once.
    converter = to_csv_full if full else to_csv
    converter(xml_file, csv_file)
@cli.command("npz2xml")
@click.option("--npz_file", required=True, type=str, help="labelMaker npz file")
@click.option("--img_path", required=True, type=str, help="path of the images in CVAT")
@click.option("--img_label", required=True, type=str, help="label image eg : tower.")
def run_npz2xml(npz_file, img_path, img_label):
    """
    NPZ file to XML cvat imput format
    """
    # Deferred import: only load this subcommand's implementation when run.
    from .npz2xml import npz2xml

    npz2xml(npz_file, img_path, img_label)
@cli.command("xml2npz")
@click.option("--xml_file", required=True, type=str, help="cvat xml dump file")
@click.option("--npz_file", required=True, type=str, help="npz file")
def run_xml2npz(xml_file, npz_file):
    """
    NPZ file to XML cvat imput format
    """
    import numpy as np

    from .xml2npz import getTiles

    # Expand the tile mapping into named arrays inside the .npz archive.
    np.savez(npz_file, **getTiles(xml_file))
@cli.command("downsized_imgs")
@click.option("--img_path", required=True, type=str, help="Image folder")
@click.option("--output_path", required=True, type=str, help="Image output folder")
def run_downsized_imgs(img_path, output_path):
    """
    Add all the images that you want to downsize in a folder, supports jpg files and the files will be resized to 512X512.
    """
    # Deferred import: only load this subcommand's implementation when run.
    from .downsized_imgs import downsized_imgs

    downsized_imgs(img_path, output_path)
@cli.command("fix_ordinal_suffixes")
@click.option(
    "--xml_input",
    required=True,
    type=str,
    help="Path to xml cvat file to be processed.",
)
@click.option("--xml_output", required=True, type=str, help="Path to xml cvat output.")
def run_fix_ordinal_suffixes(xml_input, xml_output):
    """Fix ordinal suffixes in a CVAT xml file and write the result to xml_output."""
    # Deferred import: only load this subcommand's implementation when run.
    from .fix_ordinal_suffixes import fix_ordinal_suffixes

    fix_ordinal_suffixes(xml_input, xml_output)
if __name__ == "__main__":
    # Dispatch to the click command group when run as a script.
    cli()
| [
"[email protected]"
] | |
de46df2be6af9a35d357df2cea39df16a0a578d4 | 6ee84906510c5b1664706a7f1e3c0725b721e18b | /src/ebml.py | 31d172cef5bf16b0f7889fd9597ad6862b4ce794 | [] | no_license | mt-hayashida/ebml | 6848ed9f4bf95461c044d87543606c2ea097e594 | 2bd15705d61fe2df2f13b7d1a234781572eb308e | refs/heads/master | 2020-12-15T07:44:04.773875 | 2015-02-14T17:15:53 | 2015-02-14T17:15:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64,310 | py | # Imports
import re, sys, math, struct, datetime;
version_info = ( 1 , 0 );
# Multi versioning
# Python 2/3 compatibility shims.  These helpers normalize the differences
# between py2 (`str` is bytes, separate `unicode`/`long`/`basestring` types)
# and py3 (`bytes` vs `str`, unified `int`).  The appropriate set is defined
# once at import time based on the running interpreter.
if (sys.version_info[0] == 3):
    # Version 3
    def py_2or3_byte_to_int(value):
        # Indexing a py3 bytes object already yields an int.
        return value;
    def py_2or3_int_to_byte(value):
        return bytes([ value ]);
    def py_2or3_intlist_to_bytes(value):
        return bytes(value);
    def py_2or3_byte_ord(char):
        # Single items of py3 bytes are already ints; no ord() needed.
        return char;
    def py_2or3_var_is_bytes(value):
        return isinstance(value, bytes);
    def py_2or3_var_is_unicode(value):
        return isinstance(value, str);
    def py_2or3_var_is_integer(v):
        return isinstance(v, int);
    def py_2or3_var_is_string(v):
        return isinstance(v, str);
    def py_2or3_unicode_upcast(value):
        return str(value);
    def py_2or3_unicode_obj(obj):
        return str(obj);
else:
    # Version 2
    def py_2or3_byte_to_int(value):
        return ord(value);
    def py_2or3_int_to_byte(value):
        return chr(value);
    def py_2or3_intlist_to_bytes(value):
        return b"".join([ chr(i) for i in value ]);
    def py_2or3_byte_ord(char):
        return ord(char);
    def py_2or3_var_is_bytes(value):
        # py2 `str` is the byte-string type.
        return isinstance(value, str);
    def py_2or3_var_is_unicode(value):
        return isinstance(value, unicode);
    def py_2or3_var_is_integer(v):
        return isinstance(v, ( int , long ));
    def py_2or3_var_is_string(v):
        return isinstance(v, basestring);
    def py_2or3_unicode_upcast(value):
        # Byte strings are decoded as latin-1 (1:1 byte-to-codepoint).
        if (isinstance(value, str)):
            return value.decode(u"latin");
        return unicode(value);
    def py_2or3_unicode_obj(obj):
        return unicode(obj);
# XML escaping
# XML escaping: the characters that must not appear literally in attribute
# values or text content, and their entity replacements.
__re_xml_escaper = re.compile(r"[\"<>&]");
__re_xml_escaper_map = {
    u"\"": u"&quot;",
    u"<": u"&lt;",
    u">": u"&gt;",
    u"&": u"&amp;",
};
def xml_escape(value):
    """Escape &, <, > and double quotes in `value` for embedding in XML."""
    def _replace(match):
        return __re_xml_escaper_map[match.group(0)]
    return __re_xml_escaper.sub(_replace, value)
# Type constants
# Element data-type constants, identifying how an element's payload is
# decoded (signed/unsigned integer, float, ascii string, utf-8, date,
# child-element container, or raw binary).
INT = 0;
UINT = 1;
FLOAT = 2;
STRING = 3;
UNICODE = 4;
DATE = 5;
CONTAINER = 6;
BINARY = 7;
# Error classes
class EBMLError(Exception):
    # Root of this module's exception hierarchy; all concrete EBML errors
    # subclass this so callers can catch them with one clause.
    pass;
class SchemaError(EBMLError):
    # Raised for invalid schema definitions.  The class attributes are the
    # canonical (user-visible) message strings passed when raising.
    NAME_ALREADY_USED = u"Tag name already being used in schema";
    ID_CLASS_INVALID = u"Id class is invalid";
    ID_LENGTH_INVALID = u"Id length is invalid";
    ID_LENGTH_INVALID_FOR_CLASS = u"Id length is invalid for its class";
    ID_RESERVED = u"Id is reserved";
class StreamError(EBMLError):
    """Raised when the underlying stream cannot be read, sought, or skipped.

    Carries the canonical message, optional extra info, and the stream
    position at which the failure occurred.
    """

    STREAM_NOT_READABLE = u"The stream is not readable";
    STREAM_NOT_SEEKABLE = u"An error occured trying to seek in the stream";
    STREAM_NOT_SKIPPABLE = u"An error occured trying to skip part of the stream";

    def __init__(self, message, info, pos):
        self.message = message
        self.info = info
        self.pos = pos

    def __str__(self):
        # Compose "message[; info][; (@pos)]".
        parts = [self.message]
        if self.info is not None:
            parts.append(u"; {0:s}".format(self.info))
        if self.pos is not None:
            parts.append(u"; (@{0:d})".format(self.pos))
        return u"".join(parts)
class DecodeError(EBMLError):
    """Raised (or recorded as a warning) when an EBML stream fails to decode.

    Carries the canonical message, optional extra info, and the stream
    position at which the problem was detected.
    """

    UNEXPECTED_EOS = u"Unexpected end of stream";
    ID_CLASS_INVALID = u"Id class is invalid";
    ID_LENGTH_INVALID = u"Id length is invalid";
    ID_RESERVED = u"Id is reserved";
    SIZE_CLASS_INVALID = u"Size class is invalid";
    SIZE_RESERVED = u"Size is reserved";
    DATA_LENGTH_INCORRECT = u"Data length incorrect";
    DATA_LENGTH_MISMATCH = u"Data length mismatch";
    DATA_LENGTH_OVERFLOW = u"Data length overflows container size";
    STRING_INVALID = u"Improperly formatted string";
    UTF8_INVALID = u"Improperly formatted unicode";
    FLOAT_LENGTH_INVALID = u"Invalid length for a floating point number";
    DATE_LENGTH_INVALID = u"Invalid length for a date";
    SCHEMA_ID_NOT_FOUND = u"Id not found in schema";
    SCHEMA_ID_NOT_FOUND_WITH_CORRECT_LEVEL = u"Id not found in schema with correct level";
    SCHEMA_VERSION_INVALID = u"Invalid schema version for element";
    SCHEMA_VALIDATION_FAILED = u"Schema validator function returned false";

    def __init__(self, message, info, pos):
        self.message = message
        self.info = info
        self.pos = pos

    def __str__(self):
        # Compose "message[; info][; (@pos)]".
        text = self.message
        if self.info is not None:
            text = text + u"; {0:s}".format(self.info)
        if self.pos is not None:
            text = text + u"; (@{0:d})".format(self.pos)
        return text
class EncodeError(EBMLError):
    # Raised when a value cannot be serialized to EBML.  The class
    # attributes are the canonical (user-visible) message strings.
    SIZE_NEGATIVE = u"Size cannot be negative";
    SIZE_CLASS_INVALID = u"Size class is invalid";
    SIZE_TOO_LARGE = u"Size too high to be encoded";
class ElementError(EBMLError):
    # Raised for invalid element-tree manipulation.  The class attributes
    # are the canonical (user-visible) message strings.
    IMPLEMENTATION_ERROR = u"Method not implemented";
    VALUE_ERROR = u"Error setting an element's value";
    SCHEMA_VALIDATION_FAILED = u"Schema validator function returned false";
    FLOAT_PRECISION_INVALID = u"Invalid floating point precision";
    DESCRIPTOR_NOT_FOUND = u"Descriptor could not be found in schema";
    INSERT_RELATIVE_INVALID = u"Relative for insertion is invalid";
    SIZE_TOO_LARGE = u"Size too high to be encoded";
    PARENT_INCORRECT = u"Element is not the parent element's child";
    LEVEL_ERROR = u"Child could not be added to parent becuase the level is not valid in the schema";
    RECURSIVE_CHILD = u"An element may not contain itself";
    CANNOT_TURN_OBJECT_INTO_POINTER = u"An element that was not read from a stream cannot be converted to a pointer";
class SelectorError(EBMLError):
    # Raised when a CSS-like element selector fails to parse.  The class
    # attributes are the canonical (user-visible) message strings.
    PSEUDO_SELECTOR_INVALID = u"Pseudo selector invalid";
    PSEUDO_SELECTOR_EXTRA_PAREN = u"Pseudo selector parenthesized when it shouldn't be";
    PSEUDO_SELECTOR_MISSING_PAREN = u"Pseudo selector missing parentheses";
    PSEUDO_SELECTOR_MISSING_PAREN_END = u"Pseudo selector missing closing parenthesis";
    NOT_SELECTOR_INVALID = u":not selector invalid";
    BRACKETED_MISSING_END = u"Bracketed expression missing ending bracket";
    NO_SELECTOR_BEFORE_RELATIONSHIP = u"No selector was found preceeding a relationship operator";
    NO_SELECTOR_AFTER_RELATIONSHIP = u"No selector was found following a relationship operator";
    END_OF_SELECTOR_NOT_REACHED = u"Characters found after parsing was completed";
    TAG_NAME_SELECTOR_NOT_FIRST = u"Selector name did not appear first";
    STRING_EXPRESSION_INVALID = u"String expression invalid";
    STRING_EXPRESSION_NOT_CLOSED = u"String expression not closed";
    NTH_EXPRESSION_INVALID = u"No valid nth expression was found";
# Context classes
class ReadContext(object):
def __init__(self, schema, stream):
    # Decoding state shared across one parse of `stream` against `schema`.
    self.schema = schema;
    self.stream = stream;
    # Accumulated non-fatal (message, info, position) tuples.
    self.warnings = [];
    try:
        # Current absolute stream position; kept in sync with every read/skip.
        self.pos = self.stream.tell();
    except ValueError as e:
        # Stream has no usable position; treat it as unreadable.
        raise StreamError(StreamError.STREAM_NOT_READABLE, py_2or3_unicode_obj(e), 0);
    # Absolute position past which reads are clamped; -1 means no limit.
    self.pos_limit = -1;
    self.pos_limit_stack = [ self.pos_limit ];
    self.decode_depth = -1;
def warn(self, level, message, info):
    """Handle a decode problem according to the schema strictness `level`.

    Schema.STRICT raises a DecodeError; Schema.WARN records the problem
    in self.warnings; any other level silently ignores it.
    """
    entry = (message, info, self.pos)
    if level == Schema.STRICT:
        raise DecodeError(*entry)
    if level == Schema.WARN:
        self.warnings.append(entry)
def warn_data_length(self, expected, found):
    # Convenience wrapper for the common "element payload length is not
    # what the schema expects" warning.
    self.warn(self.schema.strict_data_length, DecodeError.DATA_LENGTH_INCORRECT, u"Expected {0:d}; found {1:d}".format(expected, found));
def read(self, length):
    """Read up to `length` bytes, clamped to the active position limit.

    Advances self.pos by the number of bytes actually read; may return
    fewer bytes than requested at the limit or end of stream.
    """
    # Clamp the request so we never read past the current container limit.
    if self.pos_limit >= 0:
        remaining = self.pos_limit - self.pos
        if length > remaining:
            length = remaining
    try:
        data = self.stream.read(length)
    except ValueError as err:
        raise StreamError(StreamError.STREAM_NOT_READABLE, py_2or3_unicode_obj(err), 0)
    self.pos += len(data)
    return data
def skip(self, length):
    """Skip `length` bytes, clamped to the active position limit.

    Prefers a relative seek; if the stream is not seekable, falls back
    to reading and discarding.  Returns the number of bytes skipped.
    """
    # Clamp the request so we never skip past the current container limit.
    if self.pos_limit >= 0 and self.pos + length > self.pos_limit:
        length = self.pos_limit - self.pos
    try:
        self.stream.seek(length, 1)
    except ValueError:
        # Non-seekable stream: read and discard instead.
        try:
            d = self.stream.read(length)
            length = len(d)
        except ValueError as e:
            # BUG FIX: this previously raised with an undefined name `pos`
            # (NameError masked the real stream failure); use self.pos.
            raise StreamError(StreamError.STREAM_NOT_SKIPPABLE, py_2or3_unicode_obj(e), self.pos)
    self.pos += length
    return length
def seek(self, pos):
    """Seek the stream to absolute position `pos` and update self.pos.

    Raises StreamError if the stream does not support seeking.
    """
    try:
        self.stream.seek(pos, 0)
    except ValueError as err:
        raise StreamError(StreamError.STREAM_NOT_SEEKABLE, py_2or3_unicode_obj(err), pos)
    self.pos = pos
    return True
def read_id(self):
# Read byte
b = self.read(1);
if (len(b) < 1):
return None;
id = b;
b = py_2or3_byte_ord(id[0]);
# Class
found = False;
for count in range(4): # 4 bits
if ((b & (0x80 >> count)) != 0):
found = True;
break;
# Invalid
if (not found):
raise DecodeError(DecodeError.ID_CLASS_INVALID, u"[{0:s}]".format(ElementDescriptor.id_binary_to_str(id)), self.pos);
# Form id
all_bits_one = ((b | ((0xFF00 >> count) & 0xFF)) == 0xFF);
if (count > 0):
# Read
data = self.read(count);
if (len(data) < count):
raise DecodeError(DecodeError.UNEXPECTED_EOS, None, self.pos);
for i in range(count):
# Decode
b = py_2or3_byte_ord(data[i]);
if (all_bits_one and b != 0xFF):
all_bits_one = False;
id += data;
# Invalid
if (all_bits_one):
raise DecodeError(DecodeError.ID_RESERVED, None, self.pos);
# Done
return id;
def read_size(self):
# Read byte
b = self.read(1);
if (len(b) < 1):
raise DecodeError(DecodeError.UNEXPECTED_EOS, None, self.pos);
b = py_2or3_byte_ord(b[0]);
# Class
found = False;
for count in range(8): # 8 bits
if ((b & (0x80 >> count)) != 0):
found = True;
break;
# Valid
if (not found):
raise DecodeError(DecodeError.SIZE_CLASS_INVALID, None, self.pos);
# Form size
mask = (~(0xFF00 >> (count + 1)) & 0xFF);
size = b & mask;
all_bits_one = (size == mask);
if (count > 0):
# Read
data = self.read(count);
if (len(data) < count):
raise DecodeError(DecodeError.UNEXPECTED_EOS, None, self.pos);
for i in range(count):
# Decode
b = py_2or3_byte_ord(data[i]);
# Update
size <<= 8;
size += b;
# All ones
if (all_bits_one and b != 0xFF):
all_bits_one = False;
# Invalid
if (all_bits_one):
raise DecodeError(DecodeError.SIZE_RESERVED, None, self.pos);
# Done
return ( size , count );
def push_limit(self, size):
p_next = self.pos + size;
if (self.pos_limit >= 0 and p_next > self.pos_limit):
# Size greater than it should be
self.warn(
self.schema.strict_data_length,
DecodeError.DATA_LENGTH_OVERFLOW,
u"Size overflows container's size by {0:d} bytes".format(p_next - self.pos_limit)
);
p_next = self.pos_limit;
self.pos_limit_stack.append(p_next);
self.pos_limit = p_next;
def pop_limit(self):
limit_pre = self.pos_limit_stack.pop();
self.pos_limit = self.pos_limit_stack[-1];
if (self.pos != limit_pre):
# Size unexpected
self.warn(
self.schema.strict_data_length,
DecodeError.DATA_LENGTH_MISMATCH,
u"Expected to be at position {0:d}; actually at position {1:d}".format(limit_pre, self.pos)
);
class WriteContext(object):
    """Encoding state for serializing elements to a stream."""

    @classmethod
    def encode_size(cls, value, desired_class=0):
        """Encode *value* as a variable-length size of the given length class."""
        # Validate the requested class and value range.
        if (not (0 <= desired_class < 8)):
            raise EncodeError(EncodeError.SIZE_CLASS_INVALID)
        if (value < 0):
            raise EncodeError(EncodeError.SIZE_NEGATIVE)
        if (value > (2 ** (7 * (desired_class + 1))) - 2):
            raise EncodeError(EncodeError.SIZE_TOO_LARGE)
        # Build the octets little-endian, then reverse on output.
        byte_count = desired_class + 1
        octets = []
        remaining = value
        for _ in range(byte_count):
            octets.append(remaining & 0xFF)
            remaining >>= 8
        # Stamp the class marker bit onto the most significant byte.
        octets[-1] |= (0x100) >> byte_count
        return py_2or3_intlist_to_bytes(reversed(octets))

    def __init__(self, stream, pointers_temporary=True):
        self.stream = stream
        self.stream_seekable = True
        # When True, pointer elements decoded for encoding are reverted
        # back to pointers afterwards.
        self.pointers_temporary = pointers_temporary

    def write(self, data):
        """Write raw bytes to the underlying stream."""
        self.stream.write(data)
# Schema classes
class Schema(object):
    """Registry of element descriptors, keyed both by binary id and by name."""

    # Strictness levels used by the strict_* settings below.
    STRICT = 2
    WARN = 1
    IGNORE = 0

    def __init__(self):
        self.version = 0
        self.pointers_enabled = True
        self.tags = {}   # binary id -> list of ElementDescriptor
        self.names = {}  # element name -> ElementDescriptor
        # Per-condition strictness settings.
        self.strict_unicode = self.STRICT
        self.strict_string = self.STRICT
        self.strict_missing_id = self.WARN
        self.strict_version_check = self.WARN
        self.strict_data_length = self.STRICT
        self.strict_validator = self.STRICT

    def define(self, id, name, el_type, level=u"g", versions=0, validator=None, pointer=False):
        """Register and return a new element descriptor.

        *id* may be a hex string, a list of byte values, or raw bytes; *name*
        defaults to a "0x..." name derived from the id.
        """
        # Normalize the id to its binary form and validate it.
        if (isinstance(id, tuple) or isinstance(id, list)):
            id = ElementDescriptor.id_list_to_binary(id)
        elif (not py_2or3_var_is_bytes(id)):
            id = ElementDescriptor.id_str_to_binary(id)
        ElementDescriptor.id_binary_validate(id)
        if (name is None):
            name = ElementDescriptor.id_binary_to_name(id)
        if (name in self.names):
            raise SchemaError(SchemaError.NAME_ALREADY_USED)
        # Create and index the descriptor (one id may map to several
        # descriptors distinguished by level).
        d = ElementDescriptor(id, name, el_type, level, versions, validator, pointer, 0)
        self.names[name] = d
        if (d.id in self.tags):
            self.tags[d.id].append(d)
        else:
            self.tags[d.id] = [ d ]
        return d

    def element(self, id, value=None):
        """Create a new element from a descriptor name or a descriptor object.

        Raises ElementError(DESCRIPTOR_NOT_FOUND) when the name/descriptor is
        not registered with this schema.
        """
        if (py_2or3_var_is_string(id)):
            # String name lookup.
            # BUG FIX: a name missing from self.names previously fell through
            # to the descriptor path and raised AttributeError on id.name.
            if (id not in self.names):
                raise ElementError(ElementError.DESCRIPTOR_NOT_FOUND)
            descriptor = self.names[id]
        else:
            # id must be a descriptor registered with this schema.
            if (id.name in self.names and id is self.names[id.name]):
                descriptor = id
            else:
                raise ElementError(ElementError.DESCRIPTOR_NOT_FOUND)
        # Create the element of the descriptor's type.
        el = ElementClasses[descriptor.type](None, descriptor, None, 0, 0, 0, 0, True)
        if (value is not None):
            el.set(value)
        return el

    def root(self):
        """Create a level-less root container to hold top-level elements."""
        descriptor = ElementDescriptor(b"", u"", BINARY, -1, 0, None, False, ElementDescriptor.ROOT)
        return ElementContainer(None, descriptor, None, -1, 0, 0, 0, True)
class ElementDescriptor(object):
    """Schema entry describing one element: id, name, type, level, flags."""

    # Flag bits for self.flags.
    MISSING = 0x1
    ROOT = 0x2

    def __init__(self, id, name, el_type, level, versions, validator, pointer, flags):
        # Parse the level spec: an int (exact level), u"g" (any level,
        # "global"), or u"N+" (level N or deeper).
        if (py_2or3_var_is_integer(level)):
            self.level = level
            self.level_recursive = False
            self.level_global = False
        elif (level == u"g"):
            self.level = 0
            self.level_recursive = True
            self.level_global = True
        elif (level[-1] == u"+"):
            self.level = int(level[: -1], 10)
            self.level_recursive = True
            self.level_global = False
        else:
            self.level = int(level, 10)
            self.level_recursive = False
            self.level_global = False
        # Remaining attributes.
        self.id = id
        self.name = name
        self.type = el_type
        self.versions = versions
        self.validator = validator
        self.pointer = pointer
        self.flags = flags

    @classmethod
    def id_str_to_binary(cls, id):
        """Convert a hex string id (e.g. u"1A45DFA3") to bytes."""
        return py_2or3_intlist_to_bytes([ int(id[i : i + 2], 16) for i in range(0, len(id), 2) ])

    @classmethod
    def id_list_to_binary(cls, id):
        """Convert a list of byte values to bytes."""
        return py_2or3_intlist_to_bytes(id)

    @classmethod
    def id_binary_to_str(cls, id):
        """Hex-encode a binary id (uppercase, no prefix)."""
        return u"".join([ u"{0:02X}".format(py_2or3_byte_to_int(c)) for c in id])

    @classmethod
    def id_binary_to_name(cls, id):
        """Generate a default element name ("0x...") from a binary id."""
        return u"0x{0:s}".format(u"".join([ u"{0:0X}".format(py_2or3_byte_to_int(c)) for c in id ]))

    @classmethod
    def id_binary_validate(cls, id):
        """Validate a binary id's length, class bits and reserved pattern.

        Raises SchemaError on failure.
        """
        # Length check.
        # BUG FIX (throughout this method): previously raised the bare
        # constants (e.g. `raise SchemaError.ID_LENGTH_INVALID`), which are
        # not exceptions and so raise TypeError; construct SchemaError
        # instances as done elsewhere in the file.
        id_len = len(id)
        if (id_len == 0 or id_len > 4):
            raise SchemaError(SchemaError.ID_LENGTH_INVALID)
        # Leading zero-bit count of the first byte selects the id class.
        b = py_2or3_byte_to_int(id[0])
        found = False
        for count in range(4):  # 4 bits
            if ((b & (0x80 >> count)) != 0):
                found = True
                break
        if (not found):
            raise SchemaError(SchemaError.ID_CLASS_INVALID)
        # The class must agree with the actual byte length.
        if (id_len != count + 1):
            raise SchemaError(SchemaError.ID_LENGTH_INVALID_FOR_CLASS)
        # An id whose value bits are all ones is reserved.
        all_bits_one = ((b | ((0xFF00 >> count) & 0xFF)) == 0xFF)
        for i in range(1, id_len):
            if (all_bits_one and py_2or3_byte_to_int(id[i]) != 0xFF):
                all_bits_one = False
        if (all_bits_one):
            raise SchemaError(SchemaError.ID_RESERVED)
# Element classes
class Element(object):
    """Base class for all element types.

    An element either holds a decoded value ("loaded") or only remembers
    where its payload lives in the source stream ("pointer"); pointer
    elements are decoded lazily by get().
    """

    def __init__(self, context, descriptor, parent, level, pos, size, size_class):
        # Tree links.
        self.parent = parent
        self.next_sibling = None
        self.previous_sibling = None
        self.level = level
        self.child_id = 0
        self.child_of_type_id = 0
        # Schema info; "global" descriptors adopt the element's own level.
        self.descriptor = descriptor
        if (self.descriptor.level_global):
            self.descriptor_level = level
        else:
            self.descriptor_level = self.descriptor.level
        # Decode state: value stays None while this element is a pointer.
        self.context = context
        self.value = None
        self.size = size
        self.size_class = size_class
        # Original on-stream location/size, used to re-read pointers.
        self.stream_pos = pos
        self.stream_size = size
        self.stream_size_class = size_class

    def __repr__(self):
        return u"{0:s}()".format(self.__class__.__name__)

    def get(self, decode_depth=-1):
        """Return the value, lazily decoding it from the stream if needed."""
        if (self.value is None):
            self.context.seek(self.stream_pos)
            self._decode(decode_depth)
        return self.value

    def set(self, value):
        """Assign a new value, running the descriptor's validator if any."""
        self._set_value(value)
        validator = self.descriptor.validator
        if (validator is not None and not validator(value)):
            raise ElementError(ElementError.SCHEMA_VALIDATION_FAILED)

    def is_pointer(self):
        """True while the value has not yet been decoded from the stream."""
        return (self.value is None)

    def to_pointer(self):
        """Discard the decoded value and revert to the on-stream form."""
        if (self.context is None):
            # Elements built in memory have no stream to point back into.
            raise ElementError(ElementError.CANNOT_TURN_OBJECT_INTO_POINTER)
        self.value = None
        self._set_size_and_class(self.stream_size, self.stream_size_class)

    def clear(self):
        # Subclasses must implement.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def to_xml(self):
        """Render this element as an XML fragment string."""
        indent = 0
        indent_str = u"\t"
        if (self.is_pointer()):
            parts = self._to_xml_pointer(indent, indent_str)
        else:
            parts = self._to_xml_list(indent, indent_str)
        return u"".join(parts)

    def get_tag_name(self):
        return self.descriptor.name

    def value_string_matches(self, value_str):
        """Subclasses compare *value_str* to their value; default is False."""
        return False

    def insert(self, element, before=None, after=None, prepend=False):
        # Only containers support children.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def remove_child(self, element):
        # Only containers support children.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def remove(self):
        """Detach this element from its parent, if any."""
        if (self.parent is not None):
            self.parent.remove_child(self)

    def is_child(self, element):
        return False

    def get_full_size(self):
        """Total encoded size: id bytes + size field bytes + payload bytes."""
        return len(self.descriptor.id) + self.size_class + 1 + self.size

    def _to_xml_list(self, indent, indent_str):
        # Subclasses must implement.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def _to_xml_pointer(self, indent, indent_str):
        # Render an undecoded element as a self-closing pointer tag.
        return [ u"{0:s}<{1:s} type=\"{2:s}\" pointer=\"true\" pos=\"{3:s}\" size=\"{4:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.stream_pos)),
            xml_escape(py_2or3_unicode_upcast(self.stream_size))
        ) ]

    def _decode(self, decode_depth=-1):
        """Decode the payload at the current stream position into self.value."""
        self.context.push_limit(self.stream_size)
        # Temporarily override the context's decode depth if one was given.
        if (decode_depth >= 0):
            saved_depth = self.context.decode_depth
            self.context.decode_depth = decode_depth
        value = self._decode_value()
        # Run the schema validator (warn/raise per schema settings).
        if (self.descriptor.validator is not None and not self.descriptor.validator(value)):
            self.context.warn(
                self.context.schema.strict_validator,
                DecodeError.SCHEMA_VALIDATION_FAILED,
                u"{0:s} [{1:s}]".format(self.descriptor.name, ElementDescriptor.id_binary_to_name(self.descriptor.id))
            )
        if (decode_depth >= 0):
            self.context.decode_depth = saved_depth
        self.context.pop_limit()
        self.value = value

    def _decode_value(self):
        # Subclasses must implement.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def _encode(self, context):
        # A pointer must be decoded before it can be re-encoded; it may be
        # reverted to a pointer afterwards to save memory.
        revert_to_pointer = False
        if (self.value is None):
            if (context.pointers_temporary):
                revert_to_pointer = True
            self.context.seek(self.stream_pos)
            self._decode(0)
        # Emit id, size field, then payload.
        context.write(self.descriptor.id)
        context.write(WriteContext.encode_size(self.size, self.size_class))
        self._encode_value(context)
        if (revert_to_pointer):
            self.value = None

    def _encode_value(self, context):
        # Subclasses must implement.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def _set_value(self, value):
        # Subclasses must implement.
        raise ElementError(ElementError.IMPLEMENTATION_ERROR)

    def _set_size(self, size):
        """Set the payload size, propagating the byte delta to all ancestors."""
        if (self.size == size):
            return
        size_class = self.__get_size_class(size)
        delta = (size - self.size) + (size_class - self.size_class)
        self.size = size
        self.size_class = size_class
        ancestor = self.parent
        while (ancestor is not None):
            ancestor.size += delta
            ancestor = ancestor.parent

    def _set_size_and_class(self, size, size_class):
        """Set size and size class explicitly, propagating the delta upwards."""
        if (self.size == size and self.size_class == size_class):
            return
        delta = (size - self.size) + (size_class - self.size_class)
        self.size = size
        self.size_class = size_class
        ancestor = self.parent
        while (ancestor is not None):
            ancestor.size += delta
            ancestor = ancestor.parent

    def __get_size_class(self, value):
        """Smallest size class (0-7) able to encode *value*; raises if too large."""
        # Note: int(math.log(value + 2, 128)) is not numerically reliable.
        desired_class = 0
        while (value > (2 ** (7 * (desired_class + 1))) - 2):
            desired_class += 1
            if (desired_class >= 8):
                raise ElementError(ElementError.SIZE_TOO_LARGE)
        return desired_class
class ElementInt(Element):
    """Signed integer element (big-endian two's complement payload)."""

    TYPE = INT
    TYPE_STR = u"int"

    @classmethod
    def binary_to_value(cls, data):
        """Decode a big-endian two's complement integer; empty data is 0."""
        if (len(data) == 0):
            # No payload: treat as 0.
            return 0
        first = py_2or3_byte_ord(data[0])
        value = first
        for i in range(1, len(data)):
            value = (value << 8) | py_2or3_byte_ord(data[i])
        # Sign-extend when the top bit of the first byte is set.
        if ((first & 0x80) != 0):
            value -= (0x1 << (len(data) * 8))
        return value

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to 0, stored in a single byte.
            self.value = 0
            self.size = 1

    def __repr__(self):
        shown = u"pointer" if self.is_pointer() else py_2or3_unicode_obj(self.value)
        return u"{0:s}({1:s})".format(self.__class__.__name__, shown)

    def clear(self):
        """Reset to the default value 0."""
        self.set(0)

    def value_string_matches(self, value_str):
        """True if *value_str* parses as base-10 and equals this value."""
        try:
            parsed = int(value_str, 10)
        except ValueError:
            return False
        return parsed == self.value

    def _decode_value(self):
        # Read the payload, warning if it is shorter than declared.
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        return self.binary_to_value(data)

    def _encode_value(self, context):
        # Emit exactly self.size bytes, big-endian (two's complement).
        remaining = self.value
        octets = []
        for _ in range(self.size):
            octets.append(remaining & 0xFF)
            remaining >>= 8
        context.write(py_2or3_intlist_to_bytes(reversed(octets)))

    def _set_value(self, value):
        if (not py_2or3_var_is_integer(value)):
            raise ElementError(ElementError.VALUE_ERROR)
        size = self.__get_size_required(value)
        self.value = value
        self._set_size(size)

    def _to_xml_list(self, indent, indent_str):
        return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.value))
        ) ]

    def __get_size_required(self, value):
        """Smallest byte count whose two's complement range contains *value*."""
        magnitude = -value if (value < 0) else value + 1
        if (magnitude <= 0x80):
            return 1
        return 2 + int(math.log(int(magnitude / 0x80), 256))
class ElementUInt(Element):
    """Unsigned integer element (big-endian payload)."""

    TYPE = UINT
    TYPE_STR = u"uint"

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to 0, stored in a single byte.
            self.value = 0
            self.size = 1

    def __repr__(self):
        shown = u"pointer" if self.is_pointer() else py_2or3_unicode_obj(self.value)
        return u"{0:s}({1:s})".format(self.__class__.__name__, shown)

    def clear(self):
        """Reset to the default value 0."""
        self.set(0)

    def value_string_matches(self, value_str):
        """True if *value_str* parses as base-10 and equals this value."""
        try:
            parsed = int(value_str, 10)
        except ValueError:
            return False
        return parsed == self.value

    def _decode_value(self):
        # Read the payload, warning if it is shorter than declared.
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        # Accumulate the big-endian bytes.
        value = 0
        for c in data:
            value = (value << 8) | py_2or3_byte_ord(c)
        return value

    def _encode_value(self, context):
        # Emit exactly self.size bytes, big-endian.
        remaining = self.value
        octets = []
        for _ in range(self.size):
            octets.append(remaining & 0xFF)
            remaining >>= 8
        context.write(py_2or3_intlist_to_bytes(reversed(octets)))

    def _set_value(self, value):
        if (not py_2or3_var_is_integer(value) or value < 0):
            raise ElementError(ElementError.VALUE_ERROR)
        # Smallest byte count able to represent the value.
        if (value < 256):
            size = 1
        else:
            size = 1 + int(math.log(value, 256))
        self.value = value
        self._set_size(size)

    def _to_xml_list(self, indent, indent_str):
        return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.value))
        ) ]
class ElementFloat(Element):
    """IEEE-754 float element; payload is 4 (single) or 8 (double) bytes."""

    TYPE = FLOAT
    TYPE_STR = u"float"
    FLOAT_PRECISION = 4
    DOUBLE_PRECISION = 8

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to 0.0 at double precision.
            self.value = 0.0
            self.size = self.DOUBLE_PRECISION

    def __repr__(self):
        if (self.is_pointer()):
            v = u"pointer"
        else:
            v = py_2or3_unicode_obj(self.value)
        return u"{0:s}({1:s})".format(self.__class__.__name__, v)

    def clear(self):
        """Reset to the default value 0.0."""
        self.set(0.0)

    def value_string_matches(self, value_str):
        """True if *value_str* parses as a float and equals this value."""
        try:
            v = float(value_str)
        except ValueError:
            return False
        return v == self.value

    def set_precision(self, precision):
        """Switch between single (4-byte) and double (8-byte) encoding."""
        if (precision != self.FLOAT_PRECISION and precision != self.DOUBLE_PRECISION):
            raise ElementError(ElementError.FLOAT_PRECISION_INVALID)
        self._set_size(precision)

    def _decode_value(self):
        # Read the payload, warning if it is shorter than declared.
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        if (self.size == self.FLOAT_PRECISION):
            return struct.unpack(u">f", data)[0]
        elif (self.size == self.DOUBLE_PRECISION):
            return struct.unpack(u">d", data)[0]
        # BUG FIX: previously raised via the undefined name EMBL.Exception and
        # the nonexistent attribute self.pos (which produced NameError /
        # AttributeError); raise a DecodeError with the stream position.
        raise DecodeError(
            u"Float length invalid",
            u"Expected a float of size {0:d} or {1:d}, not {2:d}".format(self.FLOAT_PRECISION, self.DOUBLE_PRECISION, self.size),
            self.stream_pos
        )

    def _encode_value(self, context):
        if (self.size == self.FLOAT_PRECISION):
            context.write(struct.pack(u">f", self.value))
        elif (self.size == self.DOUBLE_PRECISION):
            context.write(struct.pack(u">d", self.value))
        else:
            # Unknown size: pad with zeros so the declared length stays valid.
            context.write(b"\x00" * self.size)

    def _set_value(self, value):
        # Size (and therefore precision) is controlled via set_precision().
        self.value = value

    def _to_xml_list(self, indent, indent_str):
        return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.value))
        ) ]
class ElementString(Element):
    """Printable-ASCII string element."""

    TYPE = STRING
    TYPE_STR = u"string"
    # Matches any run of characters outside the printable ASCII range.
    __re_string_ascii_replacer = re.compile(r"[^\x20-\x7F]+")

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to the empty string.
            self.value = u""
            self.size = 0

    def __repr__(self):
        if (self.is_pointer()):
            shown = u"pointer"
        else:
            text = self.value
            if (len(text) > 16):
                # Truncate long values in the repr.
                shown = repr(text[0 : 16]) + u"..."
            else:
                shown = repr(text)
        return u"{0:s}({1:s})".format(self.__class__.__name__, shown)

    def clear(self):
        """Reset to the empty string."""
        self.set(u"")

    def value_string_matches(self, value_str):
        return value_str == self.value

    def _decode_value(self):
        # Read the payload, warning if it is shorter than declared.
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        # Decode as latin-1 and strip anything outside printable ASCII,
        # warning when characters had to be removed.
        value = self.__re_string_ascii_replacer.sub(u"", data.decode(u"latin", u"ignore"))
        if (len(value) != len(data)):
            self.context.warn(
                self.context.schema.strict_string,
                DecodeError.STRING_INVALID,
                u"Raw length={0:d}, decoded length={1:d})".format(len(data), len(value)),
            )
        return value

    def _encode_value(self, context):
        context.write(self.value.encode(u"latin", u"ignore"))

    def _set_value(self, value):
        # Accept bytes or text; reject values containing characters outside
        # the printable ASCII range.
        if (py_2or3_var_is_bytes(value)):
            text = value.decode(u"latin", u"ignore")
        elif (py_2or3_var_is_unicode(value)):
            text = value
        else:
            raise ElementError(ElementError.VALUE_ERROR)
        cleaned = self.__re_string_ascii_replacer.sub(u"", text)
        if (len(cleaned) != len(value)):
            raise ElementError(ElementError.VALUE_ERROR)
        self.value = cleaned
        self._set_size(len(self.value))

    def _to_xml_list(self, indent, indent_str):
        return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.value))
        ) ]
class ElementUnicode(Element):
    """UTF-8 text element."""

    TYPE = UNICODE
    TYPE_STR = u"unicode"

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to the empty string.
            self.value = u""
            self.size = 0

    def __repr__(self):
        if (self.is_pointer()):
            v = u"pointer"
        else:
            v = self.value
            if (len(v) > 16):
                v = v[0 : 16]
                # BUG FIX: was a bare "..." literal; use a unicode literal for
                # consistency with the sibling classes (matters on Python 2,
                # where str + unicode mixing is avoided elsewhere).
                v = repr(v) + u"..."
            else:
                v = repr(v)
        return u"{0:s}({1:s})".format(self.__class__.__name__, v)

    def clear(self):
        """Reset to the empty string."""
        self.set(u"")

    def value_string_matches(self, value_str):
        return value_str == self.value

    def _decode_value(self):
        # Read the payload, warning if it is shorter than declared.
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        # Decode strictly first; on failure warn and re-decode leniently.
        try:
            value = data.decode(u"utf-8", u"strict")
        except UnicodeDecodeError:
            self.context.warn(
                self.context.schema.strict_unicode,
                DecodeError.UTF8_INVALID,
                None
            )
            value = data.decode(u"utf-8", u"ignore")
        return value

    def _encode_value(self, context):
        context.write(self.value.encode(u"utf-8", u"strict"))

    def _set_value(self, value):
        if (py_2or3_var_is_bytes(value)):
            # Bytes input: the stored size is the raw byte length.
            size = len(value)
            try:
                value = value.decode(u"latin", u"strict")
            except UnicodeDecodeError:
                raise ElementError(ElementError.VALUE_ERROR)
            self.value = value
            self._set_size(size)
        elif (py_2or3_var_is_unicode(value)):
            # Text input: the stored size is the UTF-8 encoded length.
            try:
                size = len(value.encode(u"utf-8", u"strict"))
            except UnicodeEncodeError:
                raise ElementError(ElementError.VALUE_ERROR)
            self.value = value
            self._set_size(size)
        else:
            # Invalid type
            raise ElementError(ElementError.VALUE_ERROR)

    def _to_xml_list(self, indent, indent_str):
        return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.value))
        ) ]
class ElementDate(Element):
    """Date element: signed nanoseconds since 2001-01-01T00:00:00 UTC."""

    TYPE = DATE
    TYPE_STR = u"date"

    class Date(object):
        """Broken-down UTC date/time with nanosecond precision."""

        class UTCTimezone(datetime.tzinfo):
            # Fixed zero-offset timezone (UTC).
            def __init__(self):
                self.offset = datetime.timedelta(0)
            def utcoffset(self, dt):
                return self.offset
            def tzname(self, dt):
                return u"UTC"
            def dst(self, dt):
                return self.offset

        __date_timezone = UTCTimezone()
        # Epoch used by the format: 2001-01-01 UTC, expressed as seconds
        # relative to the Unix epoch.
        __date_offset_base = datetime.datetime(2001, 1, 1, 0, 0, 0, 0, __date_timezone)
        __date_offset = __date_offset_base - datetime.datetime.fromtimestamp(0, __date_timezone)
        __date_offset = (__date_offset.seconds + __date_offset.days * 24 * 60 * 60)

        def __init__(self, year, month, day, hour=0, minute=0, second=0, nanoseconds=0):
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.nanoseconds = nanoseconds

        @classmethod
        def from_timestamp(cls, value):
            """Build a Date from nanoseconds since the 2001 epoch."""
            nanoseconds = value % (10 ** 9)
            seconds = int(value / (10 ** 9))
            d = datetime.datetime.fromtimestamp(cls.__date_offset + seconds, cls.__date_timezone)
            return cls(d.year, d.month, d.day, d.hour, d.minute, d.second, nanoseconds)

        def to_timestamp(self):
            """Return nanoseconds since the 2001 epoch."""
            diff = datetime.datetime(self.year, self.month, self.day, self.hour, self.minute, self.second, 0, self.__date_timezone) - self.__date_offset_base
            diff_seconds = (diff.seconds + diff.days * 24 * 60 * 60)
            return diff_seconds * (10 ** 9) + self.nanoseconds

        def to_string(self):
            """ISO-8601 style representation with nanosecond precision."""
            return u"{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}.{6:09d}Z".format(self.year, self.month, self.day, self.hour, self.minute, self.second, self.nanoseconds)

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to the epoch, 8-byte payload.
            self.value = self.Date.from_timestamp(0)
            self.size = 8

    def __repr__(self):
        if (self.is_pointer()):
            v = u"pointer"
        else:
            v = repr(self.value.to_string())
        return u"{0:s}({1:s})".format(self.__class__.__name__, v)

    def clear(self):
        """Reset to the epoch (timestamp 0)."""
        self.set(0)

    def value_string_matches(self, value_str):
        return value_str == self.value.to_string()

    def _decode_value(self):
        # A date payload is a signed big-endian integer of at least 8 bytes.
        if (self.size < 8):
            # BUG FIX: previously raised via the undefined name EMBL.Exception
            # and the nonexistent attribute self.pos; raise DecodeError with
            # the stream position instead.
            raise DecodeError(u"Date length invalid", u"Expected a date length of at least 8, not {0:d}".format(self.size), self.stream_pos)
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        value = ElementInt.binary_to_value(data)
        return self.Date.from_timestamp(value)

    def _encode_value(self, context):
        # Convert the date to its integer timestamp, then emit exactly
        # self.size bytes, big-endian (two's complement).
        v = self.value.to_timestamp()
        array = []
        for i in range(self.size):
            array.append(v & 0xFF)
            v >>= 8
        context.write(py_2or3_intlist_to_bytes(reversed(array)))

    def _set_value(self, value):
        if (isinstance(value, ( list , tuple ))):
            # Sequence of (year, month, day[, hour, minute, second, ns]).
            self.value = self.Date(*value)
        elif (isinstance(value, self.Date)):
            self.value = value
        else:
            # Assume an integer nanosecond timestamp.
            self.value = self.Date.from_timestamp(value)

    def _to_xml_list(self, indent, indent_str):
        return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
            indent_str * indent,
            xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
            xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
            xml_escape(py_2or3_unicode_upcast(self.value.to_string()))
        ) ]
class ElementBinary(Element):
    """Raw binary blob element."""

    TYPE = BINARY
    TYPE_STR = u"binary"

    def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
        Element.__init__(self, context, descriptor, parent, level, pos, size, size_class)
        if (construct == True):
            # Newly built elements default to an empty payload.
            self.value = b""
            self.size = 0

    def __repr__(self):
        if (self.is_pointer()):
            v = u"pointer"
        else:
            v = self.value
            ext = u""
            if (len(v) > 16):
                # Truncate long payloads in the repr.
                v = v[0:16]
                ext = u"..."
            v = repr(u"".join([ u"{0:02X}".format(py_2or3_byte_ord(c)) for c in v ])) + ext
        return u"{0:s}({1:s})".format(self.__class__.__name__, v)

    def clear(self):
        """Reset to an empty payload."""
        self.set(b"")

    def value_string_matches(self, value_str):
        """True if *value_str* is the hex encoding of this element's bytes."""
        if (len(value_str) != len(self.value) * 2):
            return False
        for i in range(0, len(value_str), 2):
            # BUG FIX: the payload byte index is i // 2, not i — value_str
            # holds two hex characters per payload byte, so indexing with i
            # compared the wrong byte from the second pair onward.
            if (int(value_str[i : i + 2], 16) != py_2or3_byte_ord(self.value[i // 2])):
                return False
        return True

    def _decode_value(self):
        # Read the payload, warning if it is shorter than declared.
        data = self.context.read(self.size)
        if (len(data) != self.size):
            self.context.warn_data_length(self.size, len(data))
        return data

    def _encode_value(self, context):
        context.write(self.value)

    def _set_value(self, value):
        # Accept bytes directly, or text (stored as its UTF-8 encoding).
        if (py_2or3_var_is_bytes(value)):
            self.value = value
            self._set_size(len(self.value))
        elif (py_2or3_var_is_unicode(value)):
            try:
                value = value.encode(u"utf-8", u"strict")
            except UnicodeEncodeError:
                raise ElementError(ElementError.VALUE_ERROR)
            self.value = value
            self._set_size(len(self.value))
        else:
            # Invalid type
            raise ElementError(ElementError.VALUE_ERROR)

    def _to_xml_list(self, indent, indent_str):
        # Long payloads render only their length; short ones render as hex.
        if (len(self.value) > 16):
            return [ u"{0:s}<{1:s} type=\"{2:s}\" value-length=\"{3:s}\" />\n".format(
                indent_str * indent,
                xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
                xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
                xml_escape(py_2or3_unicode_upcast(len(self.value)))
            ) ]
        else:
            return [ u"{0:s}<{1:s} type=\"{2:s}\" value=\"{3:s}\" />\n".format(
                indent_str * indent,
                xml_escape(py_2or3_unicode_upcast(self.descriptor.name)),
                xml_escape(py_2or3_unicode_upcast(self.TYPE_STR)),
                xml_escape(u"".join([ u"{0:02X}".format(py_2or3_byte_ord(c)) for c in self.value ]))
            ) ]
class ElementContainer(Element):
TYPE = CONTAINER;
TYPE_STR = u"container";
# Element container
class Container(object):
def __init__(self):
self.children = [];
self.children_of_type = {};
def insert_at_end(self, element):
# Add
element.child_id = len(self.children);
self.children.append(element);
# Type id
type_name = element.descriptor.name;
if (type_name in self.children_of_type):
# Add to end of array
array = self.children_of_type[type_name];
element.child_of_type_id = len(array);
array.append(element);
else:
# New array
element.child_of_type_id = 0;
self.children_of_type[type_name] = [ element ];
# Link
if (element.child_id > 0):
element.previous_sibling = self.children[element.child_id - 1];
def insert(self, element, position):
# Update children list
self.children.insert(position, element);
for i in range(position, len(self.children)):
self.children[i].child_id = i;
# Link
if (position > 0):
e = self.children[position - 1];
element.previous_sibling = e;
e.next_sibling = element;
if (position + 1 < len(self.children)):
e = self.children[position + 1];
element.next_sibling = e;
e.previous_sibling = element;
# Children of type
type_name = element.descriptor.name;
if (type_name in self.children_of_type):
# Find position
id = 0;
array = self.children_of_type[type_name];
for i in range(position):
if (self.children[i].descriptor.name == type_name):
id += 1;
# Update array
array.insert(id, element);
for i in range(id, len(array)):
array[i].child_id = i;
else:
# New array
element.child_of_type_id = 0;
self.children_of_type[type_name] = [ element ];
def remove(self, element):
# Remove from both lists
self.children.pop(element.child_id);
array = self.children_of_type[element.descriptor.name];
array.pop(element.child_of_type_id);
# Link
if (element.previous_sibling is not None):
element.previous_sibling.next_sibling = element.next_sibling;
if (element.next_sibling is not None):
element.next_sibling.previous_sibling = element.previous_sibling;
# Update ids
for i in range(element.child_id, len(self.children)):
self.children[i].child_id = i;
for i in range(element.child_of_type_id, len(array)):
array[i].child_of_type_id = i;
def __init__(self, context, descriptor, parent, level, pos, size, size_class, construct=False):
Element.__init__(self, context, descriptor, parent, level, pos, size, size_class);
if (construct == True):
self.value = self.Container();
self.size = 0;
def __repr__(self):
if (self.is_pointer()):
v = u"pointer";
else:
cc = len(self.value.children);
if (cc == 0):
v = u"[No children]";
elif (cc == 1):
v = u"[1 child]";
else:
v = u"[{0:d} children]".format(len(self.value.children));
return u"{0:s}({1:s})".format(self.__class__.__name__, v);
def clear(self):
# Pointer
if (self.value is None):
# Empty container
self.value = self.Container();
self._set_size(0);
return;
# De-parent children
for c in self.value.children:
self.__unparent(c);
# Empty
self.value.children = [];
self.value.children_of_type = {};
# Set size
self._set_size(0);
def insert(self, element, before=None, after=None, prepend=False):
# Validate recursion
if (element is self):
raise ElementError(ElementError.RECURSIVE_CHILD);
# Validate level
d = element.descriptor;
level_target = self.level + 1;
if (not (level_target == d.level or (d.level_recursive and level_target >= d.level))):
raise ElementError(ElementError.LEVEL_ERROR);
# Insertion mode and validation
insert_pos = None;
if (before is not None):
# Validate
if (not self.is_child(before)):
raise ElementError(ElementError.INSERT_RELATIVE_INVALID);
# Insert
insert_pos = before.child_id;
elif (after is not None):
# Validate
if (not self.is_child(after)):
raise ElementError(ElementError.INSERT_RELATIVE_INVALID);
# Insert
insert_pos = after.child_id + 1;
elif (prepend):
# Insert at beginning
insert_pos = 0;
# Pointer check
if (self.value is None):
self.value = self.Container();
# Remove
if (element.parent is not None):
element.parent.remove_child(element);
# Insert
if (insert_pos is None):
# Insert at end
self.value.insert_at_end(element);
else:
# Insert at position
self.value.insert(element, insert_pos);
# Update parent and level
element.parent = self;
element.level = level_target;
if (d.level_global):
element.descriptor_level = level_target;
else:
element.descriptor_level = d.level;
# Update size
self._set_size(self.size + element.get_full_size());
def remove_child(self, element):
# Validate
if (not self.is_child(element)):
raise ElementError(ElementError.PARENT_INCORRECT);
# Remove
self.value.remove(element);
# Remove
self.__unparent(element);
# Update size
self._set_size(self.size - element.get_full_size());
def is_child(self, element):
return (not self.is_pointer() and element.child_id < len(self.value.children) and self.value.children[element.child_id] is element);
def _decode_value(self):
# Create container
container = self.Container();
# Read id
while (True):
# Find
el = self.__read_element();
if (el is None): break;
# Add
container.insert_at_end(el);
# Done
return container;
def _encode(self, context):
if ((self.descriptor.flags & ElementDescriptor.ROOT) == 0):
return Element._encode(self, context);
# Don't include "tag" info if it's the root
self._encode_value(context);
def _encode_value(self, context):
for c in self.value.children:
c._encode(context);
def _set_value(self, value):
if (isinstance(value, ( list , tuple ))):
# Replace with children
self.clear();
for c in value:
self.insert(c);
else:
# Invalid type
raise ElementError(ElementError.VALUE_ERROR);
	def _to_xml_list(self, indent, indent_str):
		"""
		Return a list of unicode strings forming this container's XML
		representation at the given indent depth. The root element (ROOT flag)
		omits its own open/close tags and emits only its children.
		"""
		# Setup
		data = [];
		if ((self.descriptor.flags & ElementDescriptor.ROOT) == 0):
			data.append(u"{0:s}<{1:s}>\n".format(indent_str * indent, xml_escape(py_2or3_unicode_upcast(self.descriptor.name))));
			indent += 1;
		# Children: pointers get their own (undecoded) rendering.
		for c in self.value.children:
			if (c.is_pointer()):
				data.extend(c._to_xml_pointer(indent, indent_str));
			else:
				data.extend(c._to_xml_list(indent, indent_str));
		# Closing
		if ((self.descriptor.flags & ElementDescriptor.ROOT) == 0):
			indent -= 1;
			data.append(u"{0:s}</{1:s}>\n".format(indent_str * indent, xml_escape(py_2or3_unicode_upcast(self.descriptor.name))));
		# Done
		return data;
def __unparent(self, element):
element.parent = None;
element.next_sibling = None;
element.previous_sibling = None;
element.level = 0;
element.descriptor_level = self.descriptor.level;
element.child_id = 0;
element.child_of_type_id = 0;
	def __read_element(self):
		"""
		Read one child element from the read context.
		Returns the decoded (or skipped) element, or None at end of stream.
		Looks up the element's descriptor in the schema by id and level; when
		the id is unknown, warns and substitutes a BINARY placeholder
		descriptor flagged MISSING. The payload is only decoded when the
		descriptor is not a pointer (or pointers are disabled) and the
		context's decode-depth budget permits it; otherwise the payload bytes
		are skipped.
		"""
		# Read id
		context = self.context;
		id = context.read_id();
		if (id is None):
			return None;
		# Read size
		size,size_class = context.read_size();
		# Process
		schema = context.schema;
		d_level = self.descriptor_level + 1;
		decode_value = False;
		descriptor = None;
		if (id in schema.tags):
			# Get descriptor: first schema entry whose level fits this depth.
			for d in schema.tags[id]:
				# Level validation
				if (d_level == d.level or (d.level_recursive and d_level >= d.level)):
					descriptor = d;
					break;
		if (descriptor is not None):
			# Version validate
			if (schema.version != 0 and descriptor.versions != 0 and (descriptor.versions & schema.version) == 0):
				context.warn(
					schema.strict_version_check,
					DecodeError.SCHEMA_VERSION_INVALID,
					u"{0:X} does not match any of {1:X}".format(schema.version, descriptor.versions)
				);
			if (not descriptor.pointer or not schema.pointers_enabled):
				# Check if context decode depth allows decoding
				if (context.decode_depth < 0):
					# Decode depth chain isn't active
					decode_value = True;
				elif (context.decode_depth > 0):
					# Decode depth chain is active
					context.decode_depth -= 1;
					decode_value = True;
				# Else, decode_depth = 0: don't decode
		if (descriptor is None):
			# Unknown id: warn and fall back to a BINARY/MISSING descriptor.
			context.warn(
				schema.strict_missing_id,
				DecodeError.SCHEMA_ID_NOT_FOUND,
				u"[{0:s}]".format(ElementDescriptor.id_binary_to_str(id))
			);
			descriptor = ElementDescriptor(id, ElementDescriptor.id_binary_to_name(id), BINARY, d_level, 0, None, True, ElementDescriptor.MISSING);
		# Create element: concrete class chosen by the descriptor's type.
		element = ElementClasses[descriptor.type](context, descriptor, self, self.level + 1, context.pos, size, size_class);
		# Element value
		if (decode_value):
			# Decode
			element._decode();
		else:
			# Skip
			skip_size = context.skip(size);
			if (skip_size != size):
				# Invalid data length
				context.warn_data_length(size, skip_size);
		# Done
		return element;
# Concrete Element subclasses, indexed by ElementDescriptor.type
# (see __read_element: ElementClasses[descriptor.type] constructs elements).
# NOTE(review): order presumably mirrors the type constants
# (INT, UINT, FLOAT, STRING, UNICODE, DATE, CONTAINER, BINARY) -- confirm.
ElementClasses = (
	ElementInt,
	ElementUInt,
	ElementFloat,
	ElementString,
	ElementUnicode,
	ElementDate,
	ElementContainer,
	ElementBinary,
);
# Selector class
class Selector(object):
class __Context(object):
__re_whitespace = re.compile(r"\s*");
__re_whitespace_end = re.compile(r"\s*$");
__re_group = re.compile(r"(?::([\w_-]+)(\(\s*)?)|(\*)|([\w_-]+)|(\[\s*)", re.U);
__re_separator = re.compile(r"\s*([,>+~])\s*|\s+");
__re_paren_closer = re.compile(r"\s*\)");
__re_bracket_closer = re.compile(r"\s*\]");
__re_string_match = re.compile(r"\s*(?:([\"'])|([\w_-]+))", re.U);
__re_nth_expression = re.compile(r"\s*(?:(even|odd)|(?:([+-]?\d+)|([+-]))?n(?:\s*([+-])\s*(\d+))?|([+-]?\d+))", re.I);
__re_escape_codes = {
u"\"": u"\"",
u"\\": u"\\",
u"'": u"'",
};
__re_escape_char = u"\\";
def __init__(self, selector_str, pseudo_selectors):
self.selector_str = selector_str;
self.pos = self.__re_whitespace.match(selector_str).end();
self.endpos = self.__re_whitespace_end.search(selector_str, self.pos).start();
self.pseudo_selectors = pseudo_selectors;
def parse_single_selector(self, em_pre):
m = self.__re_group.match(self.selector_str, self.pos, self.endpos);
if (m is not None):
# Update position
self.pos = m.end();
# Check type
g = m.groups();
if (g[0] is not None):
# Pseudo selector
name = g[0];
if (name not in self.pseudo_selectors):
raise SelectorError(SelectorError.PSEUDO_SELECTOR_INVALID);
ps = self.pseudo_selectors[name];
paren = g[1] is not None;
if (paren):
# Validate
if (not ps[1]):
raise SelectorError(SelectorError.PSEUDO_SELECTOR_MISSING_PAREN);
# Create
em = ps[0](self);
# Find closing
m2 = self.__re_paren_closer.match(self.selector_str, self.pos);
if (m2 is None):
raise SelectorError(SelectorError.PSEUDO_SELECTOR_MISSING_PAREN_END);
self.pos = m2.end();
else:
# Validate
if (ps[1]):
raise SelectorError(SelectorError.PSEUDO_SELECTOR_EXTRA_PAREN);
# Create
em = ps[0]();
elif (g[2] is not None):
# * selector
if (em_pre is not None):
raise SelectorError(SelectorError.TAG_NAME_SELECTOR_NOT_FIRST);
em = Selector.ElementNameAny();
elif (g[3] is not None):
# Tag name selector
if (em_pre is not None):
raise SelectorError(SelectorError.TAG_NAME_SELECTOR_NOT_FIRST);
em = Selector.ElementName(g[3]);
else: # if (g[4] is not None):
# [bracketed] selector
name = g[4];
em = Selector.Bracketed(self);
# Find closing
m2 = self.__re_bracket_closer.match(self.selector_str, self.pos);
if (m2 is None):
raise SelectorError(SelectorError.BRACKETED_MISSING_END);
self.pos = m2.end();
# Return
return em;
# None found
return None;
def parse_selector(self, target):
# Create
s_chain = Selector.SiblingChain();
d_chain = Selector.DescendantChain(s_chain);
target.entries.append(d_chain);
em_pre = None;
# Select
while (True):
em = self.parse_single_selector(em_pre);
if (em is not None):
# Link
if (em_pre is None):
s_chain.entries.append(em);
else:
em_pre.next = em;
em_pre = em;
else:
# No element matcher was found
if (em_pre is None):
raise SelectorError(SelectorError.NO_SELECTOR_BEFORE_RELATIONSHIP);
em_pre = None;
m = self.__re_separator.match(self.selector_str, self.pos, self.endpos);
if (m is not None):
# Update position
self.pos = m.end();
# Check type
g = m.groups();
if (g[0] is not None):
# Operator separated
c = g[0][0];
if (c == u","):
# New group
s_chain = Selector.SiblingChain();
d_chain = Selector.DescendantChain(s_chain);
target.entries.append(d_chain);
elif (c == u">"):
# Child
s_chain = Selector.SiblingChain();
d_chain.entries.append(s_chain);
d_chain.relationships.append(Selector.DescendantChain.RELATIONSHIP_PARENT);
elif (c == u"+"):
# Following
s_chain.relationships.append(Selector.SiblingChain.RELATIONSHIP_FOLLOWING);
else: # if (c == u"~"):
# Preceeded by
s_chain.relationships.append(Selector.SiblingChain.RELATIONSHIP_PRECEEDED_BY);
else:
# Space separated
s_chain = Selector.SiblingChain();
d_chain.entries.append(s_chain);
d_chain.relationships.append(Selector.DescendantChain.RELATIONSHIP_DESCENDANT);
else:
# Done
break;
# Check chaining
if (len(s_chain.relationships) >= len(s_chain.entries)):
raise SelectorError(SelectorError.NO_SELECTOR_AFTER_RELATIONSHIP);
def parse_string(self):
m = self.__re_string_match.match(self.selector_str, self.pos, self.endpos);
if (m is None):
raise SelectorError(SelectorError.STRING_EXPRESSION_INVALID);
# Update position
self.pos = m.end();
# Process string
g = m.groups();
if (g[0] is None):
# Simple
return g[1];
else:
# Quoted
quote = g[0];
text = u"";
p0 = self.pos;
p1 = p0;
escaped = False;
while (p1 < self.endpos):
c = self.selector_str[p1];
# Escape
if (escaped):
escaped = False;
if (c in self.__re_escape_codes):
text += self.selector_str[p0 : p1 - 1];
text += self.__re_escape_codes[c];
p1 += 1;
p0 = p1;
continue;
if (c == self.__re_escape_char):
escaped = True;
elif (c == quote):
# Done
text += self.selector_str[p0 : p1];
self.pos = p1 + 1;
return text;
# Next
p1 += 1;
# Unterminated string
self.pos = p1;
raise SelectorError(SelectorError.STRING_EXPRESSION_NOT_CLOSED);
		def parse_nth_expression(self):
			"""
			Parse a CSS-style nth expression ("even", "odd", "an+b", or a bare
			integer) at the current position.
			Returns ( a , b ) where the stored b is 0-indexed, i.e. one less
			than the CSS 1-indexed value; see nth_expression_to_string for the
			inverse conversion.
			Raises SelectorError(NTH_EXPRESSION_INVALID) on a malformed
			expression.
			"""
			# Match
			m = self.__re_nth_expression.match(self.selector_str, self.pos, self.endpos);
			if (m is None):
				raise SelectorError(SelectorError.NTH_EXPRESSION_INVALID);
			# Update position
			self.pos = m.end();
			# Process
			g = m.groups();
			if (g[0] is not None):
				# even or odd
				if (len(g[0]) == 3): # odd
					return ( 2 , 0 );
				else: # even
					return ( 2 , 1 );
			elif (g[5] is not None):
				# Single digit
				return ( 0 , int(g[5], 10) - 1 );
			else:
				# an+b form
				n1 = 1;
				if (g[1] is not None):
					n1 = int(g[1], 10);
				elif (g[2] == u"-"):
					n1 = -1;
				n2 = 0;
				if (g[4] is not None):
					n2 = int(g[4], 10);
				if (g[3] == u"-"):
					n2 = -n2;
				return ( n1 , n2 - 1 );
	class DescendantChain(object):
		"""
		A chain of SiblingChain entries linked by descendant combinators:
		">" (direct parent) or " " (any ancestor).
		"""
		RELATIONSHIP_PARENT = 0;
		RELATIONSHIP_DESCENDANT = 1;
		RELATIONSHIP_OPERATORS = ( u">" , u" " );
		def __init__(self, s_chain):
			# entries[i] is a SiblingChain; relationships[i] links
			# entries[i] to entries[i+1].
			self.entries = [ s_chain ];
			self.relationships = [];
		def matches(self, element):
			# Walk the entries right-to-left, climbing element's parent chain.
			# RELATIONSHIP_DESCENDANT allows retrying further up the chain.
			mode = self.RELATIONSHIP_PARENT;
			i = len(self.entries) - 1;
			while (True):
				if (not self.entries[i].matches(element)):
					if (mode == self.RELATIONSHIP_PARENT):
						return False;
					else:
						# Continue up parent chain
						element = element.parent;
						if (element is None):
							return False;
						continue;
				# Next
				if (i <= 0):
					return True;
				i -= 1;
				element = element.parent;
				if (element is None):
					return False;
				mode = self.relationships[i];
		def __str__(self):
			# Interleave entry strings with their combinator operators.
			array = [ py_2or3_unicode_obj(self.entries[0]) ];
			i = 0;
			i_max = len(self.relationships);
			while (i < i_max):
				array.append(self.RELATIONSHIP_OPERATORS[self.relationships[i]]);
				i += 1;
				array.append(py_2or3_unicode_obj(self.entries[i]));
			return u"".join(array);
	class SiblingChain(object):
		"""
		A chain of ElementMatcher entries linked by sibling combinators:
		"+" (immediately following) or "~" (preceded by).
		"""
		RELATIONSHIP_FOLLOWING = 0;
		RELATIONSHIP_PRECEEDED_BY = 1;
		RELATIONSHIP_OPERATORS = ( u"+" , u"~" );
		def __init__(self):
			# entries[i] is an ElementMatcher chain head; relationships[i]
			# links entries[i] to entries[i+1].
			self.entries = [];
			self.relationships = [];
		def matches(self, element):
			# Walk the entries right-to-left along the previous-sibling chain.
			# RELATIONSHIP_PRECEEDED_BY allows retrying earlier siblings.
			mode = self.RELATIONSHIP_FOLLOWING;
			i = len(self.entries) - 1;
			while (True):
				if (not self.entries[i].matches_chained(element)):
					if (mode == self.RELATIONSHIP_FOLLOWING):
						return False;
					else:
						# Continue up sibling chain
						element = element.previous_sibling;
						if (element is None):
							return False;
						continue;
				# Next
				if (i <= 0):
					return True;
				i -= 1;
				element = element.previous_sibling;
				if (element is None):
					return False;
				mode = self.relationships[i];
		def __str__(self):
			# Interleave entry strings with their combinator operators.
			array = [ self.entries[0].to_string_chained() ];
			i = 0;
			i_max = len(self.relationships);
			while (i < i_max):
				array.append(self.RELATIONSHIP_OPERATORS[self.relationships[i]]);
				i += 1;
				array.append(self.entries[i].to_string_chained());
			return u"".join(array);
class ElementMatcher(object):
def __init__(self):
self.next = None;
def matches(self, element):
return False;
def matches_chained(self, element):
n = self;
while (True):
if (not n.matches(element)):
return False;
n = n.next;
if (n is None):
return True;
def to_string_chained(self):
array = [];
n = self;
while (True):
array.append(py_2or3_unicode_obj(n));
n = n.next;
if (n is None):
return u"".join(array);
class ElementName(ElementMatcher):
def __init__(self, name):
Selector.ElementMatcher.__init__(self);
self.name = name;
def matches(self, element):
return (element.get_tag_name() == self.name);
def __str__(self):
return py_2or3_unicode_upcast(self.name);
class ElementNameAny(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return True;
def __str__(self):
return u"*";
class Bracketed(ElementMatcher):
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.value = context.parse_string();
def matches(self, element):
return not element.is_pointer() and element.value_string_matches(self.value);
def __str__(self):
return u"[{0:s}]".format(Selector.escape_string(self.value));
class PseudoRoot(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return (element.descriptor.flags & ElementDescriptor.ROOT) != 0;
def __str__(self):
return u":root";
class PseudoEmpty(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return (not isinstance(element, ElementContainer) or element.is_pointer() or len(element.value.children) == 0);
def __str__(self):
return u":empty";
class PseudoFirstChild(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return (element.child_id == 0);
def __str__(self):
return u":first-child";
class PseudoLastChild(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return (element.parent is None or element.child_id == len(element.parent.value.children) - 1);
def __str__(self):
return u":last-child";
class PseudoFirstOfType(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return (element.child_of_type_id == 0);
def __str__(self):
return u":first-of-type";
class PseudoLastOfType(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return (element.parent is None or element.child_of_type_id == len(element.parent.value.children_of_type[element.get_tag_name()]) - 1);
def __str__(self):
return u":last-of-type";
class PseudoNot(ElementMatcher):
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.matcher = context.parse_single_selector(None);
if (self.matcher is None):
raise SelectorError(SelectorError.NOT_SELECTOR_INVALID);
def matches(self, element):
return not self.matcher.matches(element);
def __str__(self):
return u":not({0:s})".format(py_2or3_unicode_obj(self.matcher));
class PseudoNthChild(ElementMatcher):
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.counter = context.parse_nth_expression();
def matches(self, element):
# Setup
a = self.counter[0];
diff = element.child_id - self.counter[1];
if (a == 0):
# 0n+b == id
return (diff == 0);
else:
# an+b == id, n >= 0
return (diff % a) == 0 and (diff / a) >= 0;
def __str__(self):
return u":nth-child({0:s})".format(Selector.nth_expression_to_string(self.counter));
class PseudoNthLastChild(ElementMatcher):
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.counter = context.parse_nth_expression();
def matches(self, element):
# Setup
a = self.counter[0];
if (element.parent is None):
diff = 0;
else:
diff = (len(element.parent.value.children) - 1 - element.child_id);
diff -= self.counter[1];
if (a == 0):
# 0n+b == id
return (diff == 0);
else:
# an+b == id, n >= 0
return (diff % a) == 0 and (diff / a) >= 0;
def __str__(self):
return u":nth-last-child({0:s})".format(Selector.nth_expression_to_string(self.counter));
class PseudoNthOfType(ElementMatcher):
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.counter = context.parse_nth_expression();
def matches(self, element):
# Setup
a = self.counter[0];
diff = element.child_of_type_id - self.counter[1];
if (a == 0):
# 0n+b == id
return (diff == 0);
else:
# an+b == id, n >= 0
return (diff % a) == 0 and (diff / a) >= 0;
def __str__(self):
return u":nth-of-type({0:s})".format(Selector.nth_expression_to_string(self.counter));
class PseudoNthLastOfType(ElementMatcher):
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.counter = context.parse_nth_expression();
def matches(self, element):
# Setup
a = self.counter[0];
if (element.parent is None):
diff = 0;
else:
diff = (len(element.parent.value.children_of_type[element.get_tag_name()]) - 1 - element.child_of_type_id);
diff -= self.counter[1];
if (a == 0):
# 0n+b == id
return (diff == 0);
else:
# an+b == id, n >= 0
return (diff % a) == 0 and (diff / a) >= 0;
def __str__(self):
return u":nth-last-of-type({0:s})".format(Selector.nth_expression_to_string(self.counter));
class PseudoPointer(ElementMatcher):
def __init__(self):
Selector.ElementMatcher.__init__(self);
def matches(self, element):
return element.is_pointer();
def __str__(self):
return u":pointer";
class PseudoType(ElementMatcher):
__re_type_matcher = re.compile(r"\s*\w+\s*");
def __init__(self, context):
Selector.ElementMatcher.__init__(self);
self.type = context.parse_string().lower();
def matches(self, element):
return (self.type == element.TYPE_STR);
def __str__(self):
return u":type({0:s})".format(Selector.escape_string(self.type));
__pseudo_selectors = {
# No parens
"root": ( PseudoRoot , False ),
"empty": ( PseudoEmpty , False ),
"first-child": ( PseudoFirstChild , False ),
"first-of-type": ( PseudoFirstOfType , False ),
"last-child": ( PseudoLastChild , False ),
"last-of-type": ( PseudoLastOfType , False ),
# Parens
"not": ( PseudoNot , True ),
"nth-child": ( PseudoNthChild , True ),
"nth-last-child": ( PseudoNthLastChild , True ),
"nth-of-type": ( PseudoNthOfType , True ),
"nth-last-of-type": ( PseudoNthLastOfType , True ),
# Custom
"pointer": ( PseudoPointer , False ),
"type": ( PseudoType , True ),
};
__re_string_escaper = re.compile(r"[\"\'\\]");
__re_string_escaper_map = {
u"\"": u"\\\"",
u"\\": u"\\\\",
u"'": u"\\'",
};
__re_string_simple = re.compile(r"^([\w_-]+)$", re.U);
@classmethod
def escape_string(cls, text):
if (cls.__re_string_simple.match(text)):
return text;
return u"\"{0:s}\"".format(cls.__re_string_escaper.sub(lambda m: cls.__re_string_escaper_map[m.group(0)], text));
@classmethod
def nth_expression_to_string(self, expr):
a,b = expr;
s = [];
# First char
if (a != 0):
if (a == 1):
s.append(u"n");
elif (a == -1):
s.append(u"-n");
else:
if (a == 2):
if (b == 0):
return u"odd";
elif (b == 1):
return u"even";
s.append(u"{0:d}n".format(a));
# Second char
b += 1;
if (b > 0):
s.append(u"+{0:d}".format(b));
elif (b < 0):
s.append(u"{0:d}".format(b));
# Done
return u"".join(s);
	def __init__(self, selector):
		"""
		Parse *selector* (a CSS-like selector string) into a list of
		DescendantChain entries (one per comma-separated group).
		Raises SelectorError if the selector is malformed or the parser does
		not consume the entire string.
		"""
		# Entry chain
		self.entries = [];
		# Create context
		context = self.__Context(selector, self.__pseudo_selectors);
		# Parse
		context.parse_selector(self);
		# Validate: the whole selector string must have been consumed.
		if (context.pos != context.endpos):
			raise SelectorError(SelectorError.END_OF_SELECTOR_NOT_REACHED);
def matches(self, element):
for e in self.entries:
if (e.matches(element)):
return True;
return False;
def select(self, element):
# Check element
if (self.matches(element)):
return element;
# Check children
if (isinstance(element, ElementContainer) and not element.is_pointer()):
for child in element.value.children:
n = self.select(child);
if (n is not None):
return n;
# None found
return None;
def select_all(self, element):
elements = [];
# Check element
if (self.matches(element)):
elements.append(element);
# Check children
if (isinstance(element, ElementContainer) and not element.is_pointer()):
for child in element.value.children:
elements.extend(self.select_all(child));
# Found
return elements;
def __str__(self):
return u",".join([ py_2or3_unicode_obj(e) for e in self.entries ]);
def __repr__(self):
return u"Selector({0:s})".format(repr(py_2or3_unicode_obj(self)));
# DOM creation
def decode(schema, stream):
	"""
	Decode *stream* into a DOM rooted at a new root element of *schema*.
	Returns ( root , context ) so callers can inspect the ReadContext (e.g.
	for accumulated warnings) after decoding.
	"""
	# Create context
	context = ReadContext(schema, stream);
	# Root container: temporarily attach the context while decoding children.
	root = schema.root();
	root.context = context;
	root.value = root._decode_value();
	root.context = None;
	# Done
	return ( root , context );
def encode(element, stream, pointers_temporary=True):
	"""
	Encode *element* (and its children) into *stream* using a WriteContext.
	pointers_temporary is forwarded to the WriteContext.
	"""
	# Create context
	context = WriteContext(stream, pointers_temporary);
	# Write
	element._encode(context);
| [
"[email protected]"
] | |
16e4e3747b49d106c8dd5f34903412691fc14364 | e6db63065fa37d68245072034b484efe8224dc1d | /tests/test_furl.py | c2bf3b0f02f5cf3b1b3dc333e9766b245b1439ac | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | realitee/furl | 1096c2a5b9ec0de1831d0c590fa7b2da5a97cd84 | e774a48ec857fa9a24a92f28acea256aa429353c | refs/heads/master | 2020-12-25T13:23:57.126047 | 2012-10-13T02:46:23 | 2012-10-13T02:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,925 | py | #
# furl: URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# [email protected]
#
# License: Build Amazing Things (Unlicense)
import urllib
import unittest
import urlparse
import warnings
from itertools import izip
from abc import ABCMeta, abstractmethod
try:
from collections import OrderedDict as odict # Python 2.7+.
except ImportError:
from ordereddict import OrderedDict as odict # Python 2.4-2.6.
import furl
from furl.omdict1D import omdict1D
#
# TODO(grun): Add tests for furl objects with strict=True. Make sure
# UserWarnings are raised when improperly encoded path, query, and fragment
# strings are provided.
#
# Utility list subclasses to expose allitems() and iterallitems() methods on
# different kinds of item containers - lists, dictionaries, multivalue
# dictionaries, and query strings. This provides a common iteration interface
# for looping through their items (including items with repeated keys).
# original() is also provided to get access to a copy of the original container.
class itemcontainer(object):
  """
  Abstract interface implemented by the item container helpers below so
  tests can iterate items (including repeated keys) uniformly across lists,
  dicts, multivalue dicts, and query strings.
  """
  __metaclass__ = ABCMeta
  @abstractmethod
  def allitems(self):
    """Returns: A list of all (key, value) items, repeated keys included."""
    pass
  @abstractmethod
  def iterallitems(self):
    """Returns: An iterator over all (key, value) items, repeated keys included."""
    pass
  @abstractmethod
  def original(self):
    """
    Returns: A copy of the original data type. For example, an itemlist would
      return a list, itemdict a dict, etc.
    """
    pass
class itemlist(list, itemcontainer):
  """A list of (key, value) pairs exposing the itemcontainer interface."""
  def allitems(self):
    # Materialize the iterator so callers get a real list.
    items = self.iterallitems()
    return list(items)
  def iterallitems(self):
    # A list's items are simply its elements.
    return iter(self)
  def original(self):
    # Plain list copy of this container.
    return list(self)
class itemdict(odict, itemcontainer):
  """An ordered dict container; keys are unique, so items never repeat."""
  def allitems(self):
    return self.items()
  def iterallitems(self):
    # Python 2 odict iterator over (key, value) pairs.
    return self.iteritems()
  def original(self):
    return dict(self)
class itemomdict1D(omdict1D, itemcontainer):
  # allitems() and iterallitems() are inherited from omdict1D itself.
  def original(self):
    """Returns: An omdict1D copy of this container."""
    return omdict1D(self)
class itemstr(str, itemcontainer):
  """A query-string container; items are parsed (and unquoted) on access."""
  def allitems(self):
    # Keys and values get unquoted. i.e. 'a=a%20a' -> ['a', 'a a'].
    return urlparse.parse_qsl(self, keep_blank_values=True)
  def iterallitems(self):
    return iter(self.allitems())
  def original(self):
    return str(self)
class TestPath(unittest.TestCase):
def test_isdir_isfile(self):
for path in ['', '/']:
p = furl.Path(path)
assert p.isdir
assert not p.isfile
for path in ['dir1/', 'd1/d2/', 'd/d/d/d/d/', '/', '/dir1/', '/d1/d2/d3/']:
p = furl.Path(path)
assert p.isdir
assert not p.isfile
for path in ['dir1', 'd1/d2', 'd/d/d/d/d', '/dir1', '/d1/d2/d3']:
p = furl.Path(path)
assert p.isfile
assert not p.isdir
def test_leading_slash(self):
p = furl.Path('')
assert not p.isabsolute
assert not p.segments
assert p.isdir and p.isdir != p.isfile
assert str(p) == ''
p = furl.Path('/')
assert p.isabsolute
assert p.segments == ['']
assert p.isdir and p.isdir != p.isfile
assert str(p) == '/'
p = furl.Path('sup')
assert not p.isabsolute
assert p.segments == ['sup']
assert p.isfile and p.isdir != p.isfile
assert str(p) == 'sup'
p = furl.Path('/sup')
assert p.isabsolute
assert p.segments == ['sup']
assert p.isfile and p.isdir != p.isfile
assert str(p) == '/sup'
p = furl.Path('a/b/c')
assert not p.isabsolute
assert p.segments == ['a', 'b', 'c']
assert p.isfile and p.isdir != p.isfile
assert str(p) == 'a/b/c'
p = furl.Path('/a/b/c')
assert p.isabsolute
assert p.segments == ['a', 'b', 'c']
assert p.isfile and p.isdir != p.isfile
assert str(p) == '/a/b/c'
p = furl.Path('a/b/c/')
assert not p.isabsolute
assert p.segments == ['a', 'b', 'c', '']
assert p.isdir and p.isdir != p.isfile
assert str(p) == 'a/b/c/'
p.isabsolute = True
assert p.isabsolute
assert str(p) == '/a/b/c/'
def test_encoding(self):
encoded = ['a%20a', '/%7haypepps/', 'a/:@/a', 'a%2Fb']
unencoded = ['a+a', '/~haypepps/', 'a/:@/a', 'a/b']
for path in encoded:
assert str(furl.Path(path)) == path
for path in unencoded:
assert str(furl.Path(path)) == urllib.quote(path, "/:@-._~!$&'()*+,;=")
# Valid path segment characters should not be encoded.
for char in ":@-._~!$&'()*+,;=":
f = furl.furl().set(path=char)
assert str(f.path) == f.url == '/' + char
assert f.path.segments == [char]
# Invalid path segment characters should be encoded.
for char in ' ^`<>[]"?':
f = furl.furl().set(path=char)
assert str(f.path) == f.url == '/' + urllib.quote(char)
assert f.path.segments == [char]
# Encode '/' within a path segment.
segment = 'a/b' # One path segment that includes the '/' character.
f = furl.furl().set(path=[segment])
assert str(f.path) == '/a%2Fb'
assert f.path.segments == [segment]
assert f.url == '/a%2Fb'
  def test_load(self):
    # Path.load() and Path.set() behave identically; both are exercised
    # through the shared helper.
    self._test_set_load(furl.Path.load)
  def test_set(self):
    self._test_set_load(furl.Path.set)
def _test_set_load(self, path_set_or_load):
p = furl.Path('a/b/c/')
assert path_set_or_load(p, 'asdf/asdf/') == p
assert not p.isabsolute
assert str(p) == 'asdf/asdf/'
assert path_set_or_load(p, ['a', 'b', 'c', '']) == p
assert not p.isabsolute
assert str(p) == 'a/b/c/'
assert path_set_or_load(p, ['','a', 'b', 'c', '']) == p
assert p.isabsolute
assert str(p) == '/a/b/c/'
def test_add(self):
# absolute_if_not_empty is False.
p = furl.Path('a/b/c/', absolute_if_not_empty=False)
assert p.add('d') == p
assert not p.isabsolute
assert str(p) == 'a/b/c/d'
assert p.add('/') == p
assert not p.isabsolute
assert str(p) == 'a/b/c/d/'
assert p.add(['e', 'f', 'e e', '']) == p
assert not p.isabsolute
assert str(p) == 'a/b/c/d/e/f/e%20e/'
p = furl.Path(absolute_if_not_empty=False)
assert not p.isabsolute
assert p.add('/') == p
assert p.isabsolute
assert str(p) == '/'
assert p.add('pump') == p
assert p.isabsolute
assert str(p) == '/pump'
p = furl.Path(absolute_if_not_empty=False)
assert not p.isabsolute
assert p.add(['','']) == p
assert p.isabsolute
assert str(p) == '/'
assert p.add(['pump','dump','']) == p
assert p.isabsolute
assert str(p) == '/pump/dump/'
# absolute_if_not_empty is True.
p = furl.Path('a/b/c/', absolute_if_not_empty=True)
assert p.add('d') == p
assert p.isabsolute
assert str(p) == '/a/b/c/d'
assert p.add('/') == p
assert p.isabsolute
assert str(p) == '/a/b/c/d/'
assert p.add(['e', 'f', 'e e', '']) == p
assert p.isabsolute
assert str(p) == '/a/b/c/d/e/f/e%20e/'
p = furl.Path(absolute_if_not_empty=True)
assert not p.isabsolute
assert p.add('/') == p
assert p.isabsolute
assert str(p) == '/'
assert p.add('pump') == p
assert p.isabsolute
assert str(p) == '/pump'
p = furl.Path(absolute_if_not_empty=True)
assert not p.isabsolute
assert p.add(['','']) == p
assert p.isabsolute
assert str(p) == '/'
assert p.add(['pump','dump','']) == p
assert p.isabsolute
assert str(p) == '/pump/dump/'
def test_remove(self):
# Remove lists of path segments.
p = furl.Path('a/b/s%20s/')
assert p.remove(['b', 's s']) == p
assert str(p) == 'a/b/s%20s/'
assert p.remove(['b', 's s', '']) == p
assert str(p) == 'a/'
assert p.remove(['', 'a']) == p
assert str(p) == 'a/'
assert p.remove(['a']) == p
assert str(p) == 'a/'
assert p.remove(['a', '']) == p
assert str(p) == ''
p = furl.Path('a/b/s%20s/')
assert p.remove(['', 'b', 's s']) == p
assert str(p) == 'a/b/s%20s/'
assert p.remove(['', 'b', 's s', '']) == p
assert str(p) == 'a'
assert p.remove(['', 'a']) == p
assert str(p) == 'a'
assert p.remove(['a', '']) == p
assert str(p) == 'a'
assert p.remove(['a']) == p
assert str(p) == ''
p = furl.Path('a/b/s%20s/')
assert p.remove(['a', 'b', 's%20s', '']) == p
assert str(p) == 'a/b/s%20s/'
assert p.remove(['a', 'b', 's s', '']) == p
assert str(p) == ''
# Remove a path string.
p = furl.Path('a/b/s%20s/')
assert p.remove('b/s s/') == p # Encoding Warning.
assert str(p) == 'a/'
p = furl.Path('a/b/s%20s/')
assert p.remove('b/s%20s/') == p
assert str(p) == 'a/'
assert p.remove('a') == p
assert str(p) == 'a/'
assert p.remove('/a') == p
assert str(p) == 'a/'
assert p.remove('a/') == p
assert str(p) == ''
p = furl.Path('a/b/s%20s/')
assert p.remove('b/s s') == p # Encoding Warning.
assert str(p) == 'a/b/s%20s/'
p = furl.Path('a/b/s%20s/')
assert p.remove('b/s%20s') == p
assert str(p) == 'a/b/s%20s/'
assert p.remove('s%20s') == p
assert str(p) == 'a/b/s%20s/'
assert p.remove('s s') == p # Encoding Warning.
assert str(p) == 'a/b/s%20s/'
assert p.remove('b/s%20s/') == p
assert str(p) == 'a/'
assert p.remove('/a') == p
assert str(p) == 'a/'
assert p.remove('a') == p
assert str(p) == 'a/'
assert p.remove('a/') == p
assert str(p) == ''
p = furl.Path('a/b/s%20s/')
assert p.remove('a/b/s s/') == p # Encoding Warning.
assert str(p) == ''
# Remove True.
p = furl.Path('a/b/s%20s/')
assert p.remove(True) == p
assert str(p) == ''
def test_isabsolute(self):
paths = ['', '/', 'pump', 'pump/dump', '/pump/dump', '/pump/dump']
for path in paths:
p = furl.Path(absolute_if_not_empty=True)
p.set(path)
if path:
assert p.isabsolute
else:
assert not p.isabsolute
with self.assertRaises(AttributeError):
p.isabsolute = False
p = furl.Path(absolute_if_not_empty=False)
p.set(path)
if path and path[0] == '/':
assert p.isabsolute
else:
assert not p.isabsolute
  def test_nonzero(self):
    # An empty path is falsy.
    p = furl.Path()
    assert not p
    p = furl.Path('')
    assert not p
    # A path whose segments are explicitly set to [''] becomes truthy.
    p = furl.Path('')
    assert not p
    p.segments = ['']
    assert p
    # Any non-empty path, relative or absolute, is truthy.
    p = furl.Path('asdf')
    assert p
    p = furl.Path('/asdf')
    assert p
class TestPathCompositionInterface(unittest.TestCase):
  def test_interface(self):
    # Minimal host class mixing in PathCompositionInterface; path-related
    # attribute assignment is routed through the interface first.
    class tester(furl.PathCompositionInterface):
      def __init__(self):
        furl.PathCompositionInterface.__init__(self)
      def __setattr__(self, attr, value):
        # Fall back to normal attribute assignment when the interface
        # does not handle the attribute itself.
        if not furl.PathCompositionInterface.__setattr__(self, attr, value):
          object.__setattr__(self, attr, value)
    t = tester()
    assert isinstance(t.path, furl.Path)
    assert t.pathstr == ''
    # Assigning a string to .path is parsed into a furl.Path.
    t.path = 'pump/dump'
    assert isinstance(t.path, furl.Path)
    assert t.pathstr == 'pump/dump'
    assert t.path.segments == ['pump', 'dump']
    assert not t.path.isabsolute
class TestQuery(unittest.TestCase):
def setUp(self):
# All interaction with parameters is unquoted unless that interaction is
# through an already encoded query string. In the case of an already encoded
# query string like 'a=a%20a&b=b', its keys and values will be unquoted.
self.itemlists = map(itemlist, [
[], [(1,1)], [(1,1), (2,2)], [(1,1), (1,11), (2,2), (3,3)], [('','')],
[('a',1), ('b',2), ('a',3)], [('a',1), ('b','b'), ('a',0.23)],
[(0.1, -0.9), (-0.1231,12312.3123)], [(None,None), (None, 'pumps')],
[('',''),('','')], [('','a'),('','b'),('b',''),('b','b')], [('<','>')],
[('=','><^%'),('><^%','=')], [("/?:@-._~!$'()*+,","/?:@-._~!$'()*+,=")],
[('+','-')], [('a%20a','a%20a')], [('/^`<>[]"','/^`<>[]"=')],
[("/?:@-._~!$'()*+,","/?:@-._~!$'()*+,=")],
])
self.itemdicts = map(itemdict, [
{}, {1:1, 2:2}, {'1':'1', '2':'2', '3':'3'}, {None:None}, {5.4:4.5},
{'':''}, {'':'a','b':''}, {'pue':'pue', 'a':'a&a'}, {'=':'====='},
{'pue':'pue', 'a':'a%26a'}, {'%':'`','`':'%'}, {'+':'-'},
{"/?:@-._~!$'()*+,":"/?:@-._~!$'()*+,="}, {'%25':'%25','%60':'%60'},
])
self.itemomdicts = map(itemomdict1D, self.itemlists)
self.itemstrs = map(itemstr, [
# Basics.
'', 'a=a', 'a=a&b=b', 'q=asdf&check_keywords=yes&area=default', '=asdf',
# Various quoted and unquoted parameters and values that will be unquoted.
'space=a+a&=a%26a', 'a a=a a&no encoding=sup', 'a+a=a+a', 'a%20=a+a',
'a%20a=a%20a', 'a+a=a%20a', 'space=a a&=a^a', 'a=a&s=s#s', '+=+',
"/?:@-._~!$&'()*+,=/?:@-._~!$'()*+,=", 'a=a&c=c%5Ec',
'<=>&^="', '%3C=%3E&%5E=%22', '%=%;`=`', '%25=%25&%60=%60',
# Only keys, no values.
'asdfasdf', '/asdf/asdf/sdf', '*******', '!@#(*&@!#(*@!#', 'a&b&', 'a;b',
# Repeated parameters.
'a=a&a=a', 'space=a+a&space=b+b',
# Empty keys and/or values.
'=', 'a=', 'a=a&a=', '=a&=b',
# Semicolon delimeter, like 'a=a;b=b'.
'a=a;a=a', 'space=a+a;space=b+b',
])
self.items = (self.itemlists + self.itemdicts + self.itemomdicts +
self.itemstrs)
def test_various(self):
for items in self.items:
q = furl.Query(items.original())
assert q.params.allitems() == items.allitems()
pairs = map(lambda pair: '%s=%s' % (pair[0],pair[1]),
self._quote_items(items))
# encode() and __str__().
assert str(q) == q.encode() == q.encode('&') == '&'.join(pairs)
assert q.encode(';') == ';'.join(pairs)
# __nonzero__().
if items.allitems():
assert q
else:
assert not q
def test_load(self):
for items in self.items:
q = furl.Query(items.original())
for update in self.items:
assert q.load(update) == q
assert q.params.allitems() == update.allitems()
def test_add(self):
    """add() appends parameters, keeping everything added before."""
    for items in self.items:
        query = furl.Query(items.original())
        expected = list(items.allitems())
        for addition in self.items:
            assert query.add(addition.original()) == query
            # Additions accumulate; nothing previously present is dropped.
            expected.extend(addition.iterallitems())
            assert query.params.allitems() == expected
def test_set(self):
    """set() mirrors omdict1D.updateall(): existing keys are updated in
    place, new keys are appended."""
    for items in self.items:
        query = furl.Query(items.original())
        mirror = omdict1D(items.allitems())
        for update in self.items:
            query.set(update)
            mirror.updateall(update)
            assert query.params.allitems() == mirror.allitems()

    # The documented examples.
    query = furl.Query({1:1}).set([(1,None),(2,2)])
    assert query.params.allitems() == [(1,None), (2,2)]
    query = furl.Query({1:None,2:None}).set([(1,1),(2,2),(1,11)])
    assert query.params.allitems() == [(1,1),(2,2),(1,11)]
    query = furl.Query({1:None}).set([(1,[1,11,111])])
    assert query.params.allitems() == [(1,1),(1,11),(1,111)]

    # Additional hand-written cases.
    query = furl.Query([(2,None),(3,None),(1,None)])
    query.set([(1,[1,11]),(2,2),(3,[3,33])])
    assert query.params.allitems() == [(2,2),(3,3),(1,1),(1,11),(3,33)]
def test_remove(self):
    """remove() deletes one key, a list of keys, or (with True) everything."""
    for items in self.items:
        # One key at a time.
        query = furl.Query(items.original())
        for key in dict(items.iterallitems()):
            assert key in query.params
            assert query.remove(key) == query
            assert key not in query.params

        # Several keys at once (here, every key there is).
        query = furl.Query(items.original())
        if items.allitems():
            assert query.params
        every_key = [key for key, value in items.allitems()]
        assert query.remove(every_key) == query
        assert len(query.params) == 0

        # remove(True) wipes the entire query string.
        query = furl.Query(items.original())
        if items.allitems():
            assert query.params
        assert query.remove(True) == query
        assert len(query.params) == 0
def test_params(self):
    """Exercise Query.params: an ordered, one-dimensional multivalue dict
    whose contents round-trip to and from the encoded query string."""
    # Basics.
    q = furl.Query('a=a&b=b')
    assert q.params == {'a':'a', 'b':'b'}
    q.params['sup'] = 'sup'
    assert q.params == {'a':'a', 'b':'b', 'sup':'sup'}
    del q.params['a']
    assert q.params == {'b':'b', 'sup':'sup'}
    q.params['b'] = 'BLROP'
    assert q.params == {'b':'BLROP', 'sup':'sup'}
    # Blanks keys and values are kept.
    q = furl.Query('=')
    assert q.params == {'':''} and str(q) == '='
    q = furl.Query('=&=')
    assert q.params.allitems() == [('',''), ('','')] and str(q) == '=&='
    q = furl.Query('a=&=b')
    assert q.params == {'a':'','':'b'} and str(q) == 'a=&=b'
    # ';' is a valid query delimeter, but output always uses '&'.
    q = furl.Query('=;=')
    assert q.params.allitems() == [('',''), ('','')] and str(q) == '=&='
    q = furl.Query('a=a;b=b;c=')
    assert q.params == {'a':'a','b':'b','c':''} and str(q) == 'a=a&b=b&c='
    # Non-string parameters are coerced to strings in the final query string.
    q.params.clear()
    q.params[99] = 99
    q.params[None] = -1
    q.params['int'] = 1
    q.params['float'] = 0.39393
    assert str(q) == '99=99&None=-1&int=1&float=0.39393'
    # Spaces are encoded as '+'s. '+'s are encoded as '%2B'.
    q.params.clear()
    q.params['s s'] = 's s'
    q.params['p+p'] = 'p+p'
    assert str(q) == 's+s=s+s&p%2Bp=p%2Bp'
    # Params is an omdict (ordered multivalue dictionary).
    q.params.clear()
    q.params.add('1', '1').set('2', '4').add('1', '11').addlist(3, [3,3,'3'])
    assert q.params.getlist('1') == ['1', '11'] and q.params['1'] == '1'
    assert q.params.getlist(3) == [3,3,'3']
    # Assign various things to Query.params and make sure Query.params is
    # reinitialized, not replaced.
    for items in self.items:
        q.params = items.original()
        assert isinstance(q.params, omdict1D)
        # izip stops at the shorter stream; lengths were checked above.
        for item1, item2 in izip(q.params.iterallitems(), items.iterallitems()):
            assert item1 == item2
def _quote_items(self, items):
    """Return <items> as (key, value) pairs, quoted as they should appear
    in a properly encoded query string.

    Valid query key characters:   "/?:@-._~!$'()*,;"
    Valid query value characters: "/?:@-._~!$'()*,;="
    """
    return [(urllib.quote_plus(str(key), "/?:@-._~!$'()*,;"),
             urllib.quote_plus(str(value), "/?:@-._~!$'()*,;="))
            for key, value in items.iterallitems()]
class TestQueryCompositionInterface(unittest.TestCase):
    """QueryCompositionInterface should expose query, querystr, and args on
    any class that mixes it in."""

    def test_interface(self):
        class Harness(furl.QueryCompositionInterface):
            """Minimal class mixing in the query composition interface."""
            def __init__(self):
                furl.QueryCompositionInterface.__init__(self)

            def __setattr__(self, attr, value):
                # Fall back to plain attribute assignment for attributes the
                # composition interface doesn't claim.
                if not furl.QueryCompositionInterface.__setattr__(self, attr, value):
                    object.__setattr__(self, attr, value)

        obj = Harness()
        assert isinstance(obj.query, furl.Query)
        assert obj.querystr == ''

        # Assigning a string re-parses it into a Query object.
        obj.query = 'a=a&s=s s'
        assert isinstance(obj.query, furl.Query)
        assert obj.querystr == 'a=a&s=s+s'
        assert obj.args == obj.query.params == {'a':'a', 's':'s s'}
class TestFragment(unittest.TestCase):
    """Tests for furl.Fragment: a url fragment composed of an optional path,
    an optional '?' separator, and an optional query."""

    def test_basics(self):
        """Build a fragment up piece by piece and check its string form."""
        f = furl.Fragment()
        assert str(f.path) == '' and str(f.query) == '' and str(f) == ''
        f.args['sup'] = 'foo'
        assert str(f) == 'sup=foo'
        f.path = 'yasup'
        assert str(f) == 'yasup?sup=foo'
        f.path = '/yasup'
        assert str(f) == '/yasup?sup=foo'
        assert str(f.query) == f.querystr == 'sup=foo'
        f.query.params['sup'] = 'kwlpumps'
        assert str(f) == '/yasup?sup=kwlpumps'
        f.query = ''
        assert str(f) == '/yasup'
        f.path = ''
        assert str(f) == ''
        f.args['no'] = 'dads'
        f.query.params['hi'] = 'gr8job'
        assert str(f) == 'no=dads&hi=gr8job'

    def test_load(self):
        """load() splits a fragment string into a path and a query at the
        first '?' followed by a 'key=value'-looking query."""
        # Tuples of (fragment string, expected path string, expected params).
        comps = [('','',{}),
                 ('?','%3F',{}),
                 ('??a??','%3F%3Fa%3F%3F',{}),
                 ('??a??=','',{'?a??':''}),
                 ('schtoot','schtoot',{}),
                 ('sch/toot/YOEP','sch/toot/YOEP',{}),
                 ('/sch/toot/YOEP','/sch/toot/YOEP',{}),
                 ('schtoot?','schtoot%3F',{}),
                 ('schtoot?NOP','schtoot%3FNOP',{}),
                 ('schtoot?NOP=','schtoot',{'NOP':''}),
                 ('schtoot?=PARNT','schtoot',{'':'PARNT'}),
                 ('schtoot?NOP=PARNT','schtoot',{'NOP':'PARNT'}),
                 ('dog?machine?yes','dog%3Fmachine%3Fyes',{}),
                 ('dog?machine=?yes','dog',{'machine':'?yes'}),
                 ('schtoot?a=a&hok%20sprm','schtoot',{'a':'a','hok sprm':''}),
                 ('schtoot?a=a&hok sprm','schtoot',{'a':'a','hok sprm':''}),
                 ('sch/toot?a=a&hok sprm','sch/toot',{'a':'a','hok sprm':''}),
                 ('/sch/toot?a=a&hok sprm','/sch/toot',{'a':'a','hok sprm':''}),
                 ]
        for fragment, path, query in comps:
            f = furl.Fragment()
            f.load(fragment)
            assert str(f.path) == path
            assert f.query.params == query

    def test_add(self):
        """add() appends to both the fragment path and the fragment query."""
        f = furl.Fragment('')
        assert f is f.add(path='one two three', args={'a':'a', 's':'s s'})
        assert str(f) == 'one%20two%20three?a=a&s=s+s'
        f = furl.Fragment('break?legs=broken')
        assert f is f.add(path='horse bones', args={'a':'a', 's':'s s'})
        assert str(f) == 'break/horse%20bones?legs=broken&a=a&s=s+s'

    def test_set(self):
        """set() replaces the fragment path/args; separator=False drops the
        '?' between path and query."""
        f = furl.Fragment('asdf?lol=sup&foo=blorp')
        assert f is f.set(path='one two three', args={'a':'a', 's':'s s'})
        assert str(f) == 'one%20two%20three?a=a&s=s+s'
        assert f is f.set(path='!', separator=False)
        assert f.separator == False
        assert str(f) == '!a=a&s=s+s'

    def test_remove(self):
        """remove() deletes path segments, args, or the entire fragment."""
        f = furl.Fragment('a/path/great/job?lol=sup&foo=blorp')
        assert f is f.remove(path='job', args=['lol'])
        assert str(f) == 'a/path/great/?foo=blorp'
        assert f is f.remove(path=['path', 'great'], args=['foo'])
        assert str(f) == 'a/path/great/'
        assert f is f.remove(path=['path', 'great', ''])
        assert str(f) == 'a/'
        assert f is f.remove(fragment=True)
        assert str(f) == ''

    def test_encoding(self):
        """Fragment path encoding: '?' is only escaped as %3F when a query
        and separator are present; other unsafe chars are always escaped."""
        f = furl.Fragment()
        f.path = "/?:@-._~!$&'()*+,;="
        assert str(f) == "/?:@-._~!$&'()*+,;="
        f.query = {'a':'a','b b':'NOPE'}
        assert str(f) == "/%3F:@-._~!$&'()*+,;=?a=a&b+b=NOPE"
        f.separator = False
        assert str(f) == "/?:@-._~!$&'()*+,;=a=a&b+b=NOPE"
        f = furl.Fragment()
        f.path = "/?:@-._~!$&'()*+,;= ^`<>[]"
        assert str(f) == "/?:@-._~!$&'()*+,;=%20%5E%60%3C%3E%5B%5D"
        f.query = {'a':'a','b b':'NOPE'}
        assert str(f) == "/%3F:@-._~!$&'()*+,;=%20%5E%60%3C%3E%5B%5D?a=a&b+b=NOPE"
        f.separator = False
        assert str(f) == "/?:@-._~!$&'()*+,;=%20%5E%60%3C%3E%5B%5Da=a&b+b=NOPE"
        f = furl.furl()
        f.fragment = 'a?b?c?d?'
        assert f.url == '#a?b?c?d?'
        # TODO(grun): Once encoding has been fixed with URLPath and FragmentPath,
        # the below line should be:
        #
        #  assert str(f.fragment) == str(f.path) == 'a?b?c?d?'
        #
        assert str(f.fragment) == 'a?b?c?d?'

    def test_nonzero(self):
        """A Fragment is truthy iff it has a path or a query."""
        f = furl.Fragment()
        assert not f
        f = furl.Fragment('')
        assert not f
        f = furl.Fragment('asdf')
        assert f
        f = furl.Fragment()
        f.path = 'sup'
        assert f
        f = furl.Fragment()
        f.query = 'a=a'
        assert f
        f = furl.Fragment()
        f.path = 'sup'
        f.query = 'a=a'
        assert f
        f = furl.Fragment()
        f.path = 'sup'
        f.query = 'a=a'
        f.separator = False
        assert f
class TestFragmentCompositionInterface(unittest.TestCase):
    """FragmentCompositionInterface should expose fragment, fragmentstr, and
    args on any class that mixes it in."""

    def test_interface(self):
        class Harness(furl.FragmentCompositionInterface):
            """Minimal class mixing in the fragment composition interface."""
            def __init__(self):
                furl.FragmentCompositionInterface.__init__(self)

            def __setattr__(self, attr, value):
                # Fall back to plain attribute assignment for attributes the
                # composition interface doesn't claim.
                if not furl.FragmentCompositionInterface.__setattr__(self, attr, value):
                    object.__setattr__(self, attr, value)

        obj = Harness()
        assert isinstance(obj.fragment, furl.Fragment)
        assert isinstance(obj.fragment.path, furl.Path)
        assert isinstance(obj.fragment.query, furl.Query)
        assert obj.fragmentstr == ''
        assert obj.fragment.separator
        assert obj.fragment.pathstr == ''
        assert obj.fragment.querystr == ''

        # Assigning strings re-parses them into Fragment/Path/Query objects.
        obj.fragment = 'animal meats'
        assert isinstance(obj.fragment, furl.Fragment)
        obj.fragment.path = 'pump/dump'
        obj.fragment.query = 'a=a&s=s+s'
        assert isinstance(obj.fragment.path, furl.Path)
        assert isinstance(obj.fragment.query, furl.Query)
        assert obj.fragment.pathstr == 'pump/dump'
        assert obj.fragment.path.segments == ['pump', 'dump']
        assert not obj.fragment.path.isabsolute
        assert obj.fragment.querystr == 'a=a&s=s+s'
        assert obj.fragment.args == obj.fragment.query.params == {'a':'a', 's':'s s'}
class TestFurl(unittest.TestCase):
def setUp(self):
    """Surface every warning, including duplicates, so individual tests can
    count the warnings they trigger."""
    warnings.simplefilter("always")
def _param(self, url, key, val):
    """Return True if (<key>, <val>) is a query parameter of <url>.

    Note: urlparse.urlsplit() only separates the query from the path for
    schemes listed in urlparse.uses_query, so this helper is only valid for
    urls whose schemes appear in that list.
    """
    querystr = urlparse.urlsplit(url).query
    return (key, val) in urlparse.parse_qsl(querystr, True)
def test_username_and_password(self):
    """Username/password parsing, assignment via attribute, set(), remove(),
    and direct netloc manipulation."""
    # Empty usernames and passwords.
    for url in ['', 'http://www.pumps.com/']:
        f = furl.furl(url)
        assert not f.username and not f.password
    usernames = ['user', 'a-user_NAME$%^&09']
    passwords = ['pass', 'a-PASS_word$%^&09']
    baseurl = 'http://www.google.com/'
    # Username only.
    userurl = 'http://%[email protected]/'
    for username in usernames:
        f = furl.furl(userurl % username)
        assert f.username == username and not f.password
        f = furl.furl(baseurl)
        f.username = username
        assert f.username == username and not f.password
        assert f.url == userurl % username
        f = furl.furl(baseurl)
        f.set(username=username)
        assert f.username == username and not f.password
        assert f.url == userurl % username
        f.remove(username=True)
        assert not f.username and not f.password
        assert f.url == baseurl
    # Password only.
    passurl = 'http://:%[email protected]/'
    for password in passwords:
        f = furl.furl(passurl % password)
        assert f.password == password and f.username == ''
        f = furl.furl(baseurl)
        f.password = password
        assert f.password == password and f.username == ''
        assert f.url == passurl % password
        f = furl.furl(baseurl)
        f.set(password=password)
        assert f.password == password and f.username == ''
        assert f.url == passurl % password
        f.remove(password=True)
        assert not f.username and not f.password
        assert f.url == baseurl
    # Username and password.
    userpassurl = 'http://%s:%[email protected]/'
    for username in usernames:
        for password in passwords:
            f = furl.furl(userpassurl % (username, password))
            assert f.username == username and f.password == password
            f = furl.furl(baseurl)
            f.username = username
            f.password = password
            assert f.username == username and f.password == password
            assert f.url == userpassurl % (username, password)
            f = furl.furl(baseurl)
            f.set(username=username, password=password)
            assert f.username == username and f.password == password
            assert f.url == userpassurl % (username, password)
            f = furl.furl(baseurl)
            f.remove(username=True, password=True)
            assert not f.username and not f.password
            assert f.url == baseurl
    # Username and password in the network location string.
    f = furl.furl()
    f.netloc = '[email protected]'
    assert f.username == 'user' and not f.password
    assert f.netloc == '[email protected]'
    f = furl.furl()
    f.netloc = ':[email protected]'
    assert not f.username and f.password == 'pass'
    assert f.netloc == ':[email protected]'
    f = furl.furl()
    f.netloc = 'user:[email protected]'
    assert f.username == 'user' and f.password == 'pass'
    assert f.netloc == 'user:[email protected]'
def test_basics(self):
    """End-to-end parsing of representative urls into their components, plus
    round-tripping, copying, and path absoluteness defaults."""
    url = 'hTtP://www.pumps.com/'
    f = furl.furl(url)
    assert f.scheme == 'http'
    assert f.netloc == 'www.pumps.com'
    assert f.host == 'www.pumps.com'
    assert f.port == 80
    assert str(f.path) == f.pathstr == '/'
    assert str(f.query) == f.querystr == ''
    assert f.args == f.query.params == {}
    assert str(f.fragment) == f.fragmentstr == ''
    # The scheme and host are lowercased in the output url.
    assert f.url == str(f) == url.lower()
    assert f.url == furl.furl(f).url == furl.furl(f.url).url
    assert f is not f.copy() and f.url == f.copy().url
    url = 'HTTPS://wWw.YAHOO.cO.UK/one/two/three?a=a&b=b&m=m%26m#fragment'
    f = furl.furl(url)
    assert f.scheme == 'https'
    assert f.netloc == 'www.yahoo.co.uk'
    assert f.host == 'www.yahoo.co.uk'
    assert f.port == 443
    assert f.pathstr == str(f.path) == '/one/two/three'
    assert f.querystr == str(f.query) == 'a=a&b=b&m=m%26m'
    assert f.args == f.query.params == {'a':'a', 'b':'b', 'm':'m&m'}
    assert str(f.fragment) == f.fragmentstr == 'fragment'
    assert f.url == str(f) == url.lower()
    assert f.url == furl.furl(f).url == furl.furl(f.url).url
    assert f is not f.copy() and f.url == f.copy().url
    url = 'sup://192.168.1.102:8080///one//a%20b////?s=kwl%20string#frag'
    f = furl.furl(url)
    assert f.scheme == 'sup'
    assert f.netloc == '192.168.1.102:8080'
    assert f.host == '192.168.1.102'
    assert f.port == 8080
    assert f.pathstr == str(f.path) == '///one//a%20b////'
    assert f.querystr == str(f.query) == 's=kwl+string'
    assert f.args == f.query.params == {'s':'kwl string'}
    assert str(f.fragment) == f.fragmentstr == 'frag'
    # Query spaces are re-encoded as '+' in the output url.
    query_quoted = 'sup://192.168.1.102:8080///one//a%20b////?s=kwl+string#frag'
    assert f.url == str(f) == query_quoted
    assert f.url == furl.furl(f).url == furl.furl(f.url).url
    assert f is not f.copy() and f.url == f.copy().url
    # URL paths are always absolute if not empty.
    f = furl.furl()
    f.path.segments = ['pumps']
    assert str(f.path) == '/pumps'
    f.path = 'pumps'
    assert str(f.path) == '/pumps'
    # Fragment paths are optionally absolute, and not absolute by default.
    f = furl.furl()
    f.fragment.path.segments = ['pumps']
    assert str(f.fragment.path) == 'pumps'
    f.fragment.path = 'pumps'
    assert str(f.fragment.path) == 'pumps'
    # netloc URLs should be the netloc only.
    f = furl.furl()
    assert f.set(host='foo').url == 'foo'
    assert f.set(host='pumps.com').url == 'pumps.com'
    assert f.set(host='pumps.com', port=88).url == 'pumps.com:88'
    assert f.set(netloc='pumps.com:88').url == 'pumps.com:88'
def test_basic_manipulation(self):
    """Mutate one component at a time and check the resulting url string
    after each step (order matters: each line builds on the last)."""
    f = furl.furl('http://www.pumps.com/')
    f.args.setdefault('foo', 'blah')
    assert str(f) == 'http://www.pumps.com/?foo=blah'
    f.query.params['foo'] = 'eep'
    assert str(f) == 'http://www.pumps.com/?foo=eep'
    f.port = 99
    assert str(f) == 'http://www.pumps.com:99/?foo=eep'
    f.netloc = 'www.yahoo.com:220'
    assert str(f) == 'http://www.yahoo.com:220/?foo=eep'
    # Assigning a portless netloc resets the port to the scheme's default.
    f.netloc = 'www.yahoo.com'
    assert f.port == 80
    assert str(f) == 'http://www.yahoo.com/?foo=eep'
    # An unknown scheme keeps an explicit port in the url.
    f.scheme = 'sup'
    assert str(f) == 'sup://www.yahoo.com:80/?foo=eep'
    f.port = None
    assert str(f) == 'sup://www.yahoo.com/?foo=eep'
    f.fragment = 'sup'
    assert str(f) == 'sup://www.yahoo.com/?foo=eep#sup'
    f.path = 'hay supppp'
    assert str(f) == 'sup://www.yahoo.com/hay%20supppp?foo=eep#sup'
    f.args['space'] = '1 2'
    assert str(f) == 'sup://www.yahoo.com/hay%20supppp?foo=eep&space=1+2#sup'
    del f.args['foo']
    assert str(f) == 'sup://www.yahoo.com/hay%20supppp?space=1+2#sup'
    f.host = 'ohay.com'
    assert str(f) == 'sup://ohay.com/hay%20supppp?space=1+2#sup'
def test_odd_urls(self):
    """Degenerate and delimiter-heavy urls: the empty url, and a url packed
    with every allowed sub-delimiter character."""
    # Empty.
    f = furl.furl('')
    assert f.scheme == ''
    assert f.username == ''
    assert f.password == ''
    assert f.host == ''
    assert f.port == None
    assert f.netloc == ''
    assert f.pathstr == str(f.path) == ''
    assert f.querystr == str(f.query) == ''
    assert f.args == f.query.params == {}
    assert str(f.fragment) == f.fragmentstr == ''
    assert f.url == ''
    # Keep in mind that ';' is a query delimeter for both the URL query and the
    # fragment query, resulting in the pathstr, querystr, and fragmentstr values
    # below.
    url = ("pron://example.com/:@-._~!$&'()*+,=;:@-._~!$&'()*+,=:@-._~!$&'()*+,"
           "==?/?:@-._~!$'()*+,;=/?:@-._~!$'()*+,;==#/?:@-._~!$&'()*+,;=")
    pathstr = "/:@-._~!$&'()*+,=;:@-._~!$&'()*+,=:@-._~!$&'()*+,=="
    querystr = "/?:@-._~!$'()*+,=&=/?:@-._~!$'()*+,&=="
    fragmentstr = "/?:@-._~!$=&'()*+,=&="
    f = furl.furl(url)
    assert f.scheme == 'pron'
    assert f.host == 'example.com'
    assert f.port == None
    assert f.netloc == 'example.com'
    assert f.pathstr == str(f.path) == pathstr
    assert f.querystr == str(f.query) == querystr
    assert f.fragmentstr == str(f.fragment) == fragmentstr
    # TODO(grun): Test more odd urls.
def test_hosts(self):
    """Host parsing: empty hosts, IPv4/IPv6 literals, and malformed input."""
    # A url may legitimately have no host at all.
    hostless = 'http:///index.html'
    parsed = furl.furl(hostless)
    assert parsed.host == '' and furl.furl(hostless).url == hostless

    # Well-formed IPv4 and IPv6 addresses parse without complaint.
    parsed = furl.furl('http://192.168.1.101')
    parsed = furl.furl('http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]/')

    # Invalid IPv4 addresses don't raise, because urlparse.urlsplit() accepts
    # them silently.
    parsed = furl.furl('http://1.2.3.4.5.6/')

    # Likewise, invalid (but bracket-balanced) IPv6 addresses don't raise.
    furl.furl('http://[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]/')

    # Unbalanced IPv6 brackets, however, make urlparse.urlsplit() itself
    # raise, and the error propagates.
    self.assertRaises(ValueError, furl.furl, 'http://[0:0:0:0:0:0:0:1/')
    self.assertRaises(ValueError, furl.furl, 'http://0:0:0:0:0:0:0:1]/')
def test_netlocs(self):
    """Assigning netloc strings updates host and port together; malformed
    netlocs raise ValueError without modifying the furl object."""
    f = furl.furl('http://pumps.com/')
    netloc = '1.2.3.4.5.6:999'
    f.netloc = netloc
    assert f.netloc == netloc
    assert f.host == '1.2.3.4.5.6'
    assert f.port == 999
    netloc = '[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]:888'
    f.netloc = netloc
    assert f.netloc == netloc
    assert f.host == '[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]'
    assert f.port == 888
    # Malformed IPv6 should raise an exception because urlparse.urlsplit()
    # raises an exception.
    with self.assertRaises(ValueError):
        f.netloc = '[0:0:0:0:0:0:0:1'
    with self.assertRaises(ValueError):
        f.netloc = '0:0:0:0:0:0:0:1]'
    # Invalid ports.
    with self.assertRaises(ValueError):
        f.netloc = '[0:0:0:0:0:0:0:1]:alksdflasdfasdf'
    with self.assertRaises(ValueError):
        f.netloc = 'pump2pump.org:777777777777'
    # No side effects: the last successful assignment is still in place.
    assert f.host == '[0:0:0:0:0:0:0:1:1:1:1:1:1:1:1:9999999999999]'
    assert f.port == 888
def test_ports(self):
    """Port handling: scheme defaults, explicit overrides, resets to None,
    and rejection of invalid values without side effects."""
    # Default port values.
    assert furl.furl('http://www.pumps.com/').port == 80
    assert furl.furl('https://www.pumps.com/').port == 443
    assert furl.furl('undefined://www.pumps.com/').port == None
    # Override default port values.
    assert furl.furl('http://www.pumps.com:9000/').port == 9000
    assert furl.furl('https://www.pumps.com:9000/').port == 9000
    assert furl.furl('undefined://www.pumps.com:9000/').port == 9000
    # Reset the port: known schemes fall back to their default port; unknown
    # schemes fall back to no port at all.
    f = furl.furl('http://www.pumps.com:9000/')
    f.port = None
    assert f.url == 'http://www.pumps.com/'
    assert f.port == 80
    f = furl.furl('undefined://www.pumps.com:9000/')
    f.port = None
    assert f.url == 'undefined://www.pumps.com/'
    assert f.port == None
    # Invalid port raises ValueError with no side effects.
    with self.assertRaises(ValueError):
        furl.furl('http://www.pumps.com:invalid/')
    url = 'http://www.pumps.com:400/'
    f = furl.furl(url)
    assert f.port == 400
    with self.assertRaises(ValueError):
        f.port = 'asdf'
    assert f.url == url
    f.port = 9999
    with self.assertRaises(ValueError):
        f.port = []
    with self.assertRaises(ValueError):
        f.port = -1
    with self.assertRaises(ValueError):
        f.port = 77777777777
    assert f.port == 9999
    assert f.url == 'http://www.pumps.com:9999/'
    # Bug fix: the exception class argument was missing, so assertRaises()
    # treated f.set as the exception type, never called it, and the check
    # was a no-op.
    self.assertRaises(ValueError, f.set, port='asdf')
def test_add(self):
    """furl.add() appends query args, path segments, and fragment parts;
    supplying both <args> and <query_params> warns but adds both."""
    f = furl.furl('http://pumps.com/')
    assert f is f.add(args={'a':'a', 'm':'m&m'}, path='/kwl jump',
                      fragment_path='1', fragment_args={'f':'frp'})
    assert self._param(f.url, 'a', 'a')
    assert self._param(f.url, 'm', 'm&m')
    assert f.fragmentstr == str(f.fragment) == '1?f=frp'
    assert f.pathstr == urlparse.urlsplit(f.url).path == '/kwl%20jump'
    assert f is f.add(path='dir', fragment_path='23', args={'b':'b'},
                      fragment_args={'b':'bewp'})
    assert self._param(f.url, 'a', 'a')
    assert self._param(f.url, 'm', 'm&m')
    assert self._param(f.url, 'b', 'b')
    assert f.pathstr == str(f.path) == '/kwl%20jump/dir'
    assert str(f.fragment) == f.fragmentstr == '1/23?f=frp&b=bewp'
    # Supplying both <args> and <query_params> should raise a warning.
    with warnings.catch_warnings(True) as w1:
        f.add(args={'a':'1'}, query_params={'a':'2'})
        assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
        assert self._param(f.url, 'a', '1') and self._param(f.url, 'a', '2')
        # <args> values are added before <query_params> values.
        params = f.args.allitems()
        assert params.index(('a','1')) < params.index(('a','2'))
def test_set(self):
    """furl.set() replaces url components wholesale, warns on overlapping
    arguments, and leaves the object untouched when a value is invalid."""
    f = furl.furl('http://pumps.com/kwl%20jump/dir')
    assert f is f.set(args={'no':'nope'}, fragment='sup')
    assert 'a' not in f.args
    assert 'b' not in f.args
    assert f.url == 'http://pumps.com/kwl%20jump/dir?no=nope#sup'
    # No conflict warnings between <host>/<port> and <netloc>, or <query> and
    # <params>.
    assert f is f.set(args={'a':'a a'}, path='path path/dir', port='999',
                      fragment='moresup', scheme='sup', host='host')
    assert f.pathstr == '/path%20path/dir'
    assert f.url == 'sup://host:999/path%20path/dir?a=a+a#moresup'
    # Path as a list of path segments to join.
    assert f is f.set(path=['d1', 'd2'])
    assert f.url == 'sup://host:999/d1/d2?a=a+a#moresup'
    assert f is f.add(path=['/d3/', '/d4/'])
    assert f.url == 'sup://host:999/d1/d2/%2Fd3%2F/%2Fd4%2F?a=a+a#moresup'
    # Set a lot of stuff (but avoid conflicts, which are tested below).
    f.set(query_params={'k':'k'}, fragment_path='no scrubs', scheme='morp',
          host='myhouse', port=69, path='j$j*m#n', fragment_args={'f':'f'})
    assert f.url == 'morp://myhouse:69/j$j*m%23n?k=k#no%20scrubs?f=f'
    # No side effects.
    oldurl = f.url
    with self.assertRaises(ValueError):
        f.set(args={'a':'a a'}, path='path path/dir', port='INVALID_PORT',
              fragment='moresup', scheme='sup', host='host')
    assert f.url == oldurl
    with warnings.catch_warnings(True) as w1:
        self.assertRaises(ValueError, f.set, netloc='nope.com:99', port='NOPE')
        assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
    assert f.url == oldurl
    # Separator isn't reset with set().
    f = furl.Fragment()
    f.separator = False
    f.set(path='flush', args={'dad':'nope'})
    assert str(f) == 'flushdad=nope'
    # Test warnings for potentially overlapping parameters.
    f = furl.furl('http://pumps.com')
    warnings.simplefilter("always")
    # Host, port, and netloc overlap - host and port take precedence.
    with warnings.catch_warnings(True) as w1:
        f.set(netloc='dumps.com:99', host='ohay.com')
        assert len(w1) == 1 and issubclass(w1[0].category, UserWarning)
        # Bug fix: these comparisons were bare expressions that asserted
        # nothing; they are now real assertions.
        assert f.host == 'ohay.com'
        assert f.port == 99
    with warnings.catch_warnings(True) as w2:
        f.set(netloc='dumps.com:99', port=88)
        assert len(w2) == 1 and issubclass(w2[0].category, UserWarning)
        # Bug fix: was a bare comparison, now a real assertion.
        assert f.port == 88
    with warnings.catch_warnings(True) as w3:
        f.set(netloc='dumps.com:99', host='ohay.com', port=88)
        assert len(w3) == 1 and issubclass(w3[0].category, UserWarning)
    # Query, args, and query_params overlap - args and query_params take
    # precedence.
    with warnings.catch_warnings(True) as w4:
        f.set(query='yosup', args={'a':'a', 'b':'b'})
        assert len(w4) == 1 and issubclass(w4[0].category, UserWarning)
        assert self._param(f.url, 'a', 'a')
        assert self._param(f.url, 'b', 'b')
    with warnings.catch_warnings(True) as w5:
        f.set(query='yosup', query_params={'a':'a', 'b':'b'})
        assert len(w5) == 1 and issubclass(w5[0].category, UserWarning)
        assert self._param(f.url, 'a', 'a')
        assert self._param(f.url, 'b', 'b')
    with warnings.catch_warnings(True) as w6:
        f.set(args={'a':'a', 'b':'b'}, query_params={'c':'c', 'd':'d'})
        assert len(w6) == 1 and issubclass(w6[0].category, UserWarning)
        assert self._param(f.url, 'c', 'c')
        assert self._param(f.url, 'd', 'd')
    # Fragment, fragment_path, fragment_args, and fragment_separator overlap -
    # fragment_separator, fragment_path, and fragment_args take precedence.
    with warnings.catch_warnings(True) as w7:
        f.set(fragment='hi', fragment_path='!', fragment_args={'a':'a'},
              fragment_separator=False)
        assert len(w7) == 1 and issubclass(w7[0].category, UserWarning)
        assert str(f.fragment) == '!a=a'
    with warnings.catch_warnings(True) as w8:
        f.set(fragment='hi', fragment_path='bye')
        assert len(w8) == 1 and issubclass(w8[0].category, UserWarning)
        assert str(f.fragment) == 'bye'
    with warnings.catch_warnings(True) as w9:
        f.set(fragment='hi', fragment_args={'a':'a'})
        assert len(w9) == 1 and issubclass(w9[0].category, UserWarning)
        assert str(f.fragment) == 'hia=a'
    with warnings.catch_warnings(True) as w10:
        f.set(fragment='!?a=a', fragment_separator=False)
        assert len(w10) == 1 and issubclass(w10[0].category, UserWarning)
        assert str(f.fragment) == '!a=a'
def test_remove(self):
    """furl.remove() strips components by key, by path suffix, or (with
    True) entirely; missing components are ignored without error."""
    url = 'http://u:p@host:69/a/big/path/?a=a&b=b&s=s+s#a frag?with=args&a=a'
    f = furl.furl(url)
    # Remove without parameters removes nothing.
    assert f.url == f.remove().url
    # username, password, and port must be True.
    assert f == f.copy().remove(username='nope', password='nope', port='nope')
    # Basics.
    assert f is f.remove(fragment=True, args=['a', 'b'], path='path/',
                         username=True, password=True, port=True)
    assert f.url == 'http://host/a/big/?s=s+s'
    # No errors are thrown when removing url components that don't exist.
    f = furl.furl(url)
    assert f is f.remove(fragment_path=['asdf'], fragment_args=['asdf'],
                         args=['asdf'], path=['ppp', 'ump'])
    assert self._param(f.url, 'a', 'a')
    assert self._param(f.url, 'b', 'b')
    assert self._param(f.url, 's', 's s')
    assert f.pathstr == '/a/big/path/'
    assert f.fragment.pathstr == 'a%20frag'
    assert f.fragment.args == {'a':'a', 'with':'args'}
    # Path as a list of paths to join before removing.
    assert f is f.remove(fragment_path='a frag', fragment_args=['a'],
                         query_params=['a','b'], path=['big', 'path', ''],
                         port=True)
    assert f.url == 'http://u:p@host/a/?s=s+s#with=args'
    assert f is f.remove(path=True, query=True, fragment=True, username=True,
                         password=True)
    assert f.url == 'http://host'
def test_join(self):
    """join() follows RFC 3986 relative-reference resolution: full urls
    replace everything; absolute and relative paths resolve against the
    current url; each join mutates and returns the same furl object."""
    # Joining anything onto an empty furl yields that thing verbatim.
    empty_tests = ['', '/meat', '/meat/pump?a=a&b=b#fragsup',
                   'http://www.pumps.org/brg/pap/mrf?a=b&c=d#frag?sup',]
    # (string to join, expected url after the join) - applied in order to a
    # single furl object, so each expectation builds on the previous state.
    run_tests = [
        # Join full urls.
        ('unknown://www.yahoo.com', 'unknown://www.yahoo.com'),
        ('unknown://www.yahoo.com?one=two&three=four',
         'unknown://www.yahoo.com?one=two&three=four'),
        ('unknown://www.yahoo.com/new/url/?one=two#blrp',
         'unknown://www.yahoo.com/new/url/?one=two#blrp'),
        # Absolute paths ('/foo').
        ('/pump', 'unknown://www.yahoo.com/pump'),
        ('/pump/2/dump', 'unknown://www.yahoo.com/pump/2/dump'),
        ('/pump/2/dump/', 'unknown://www.yahoo.com/pump/2/dump/'),
        # Relative paths ('../foo').
        ('./crit/', 'unknown://www.yahoo.com/pump/2/dump/crit/'),
        ('.././../././././srp', 'unknown://www.yahoo.com/pump/2/srp'),
        ('../././../nop', 'unknown://www.yahoo.com/nop'),
        # Query included.
        ('/erp/?one=two', 'unknown://www.yahoo.com/erp/?one=two'),
        ('morp?three=four', 'unknown://www.yahoo.com/erp/morp?three=four'),
        ('/root/pumps?five=six', 'unknown://www.yahoo.com/root/pumps?five=six'),
        # Fragment included.
        ('#sup', 'unknown://www.yahoo.com/root/pumps?five=six#sup'),
        ('/reset?one=two#yepYEP', 'unknown://www.yahoo.com/reset?one=two#yepYEP'),
        ('./slurm#uwantpump?', 'unknown://www.yahoo.com/slurm#uwantpump?')
        ]
    for test in empty_tests:
        f = furl.furl().join(test)
        assert f.url == test
    f = furl.furl('')
    for join, result in run_tests:
        assert f is f.join(join) and f.url == result
def test_equality(self):
    """Equality compares url contents, not object identity."""
    # Two empty furls are distinct objects but compare equal.
    assert furl.furl() is not furl.furl() and furl.furl() == furl.furl()

    url = 'https://www.yahoo.co.uk/one/two/three?a=a&b=b&m=m%26m#fragment'
    assert furl.furl(url) == furl.furl(url)
    # Mutating one side breaks equality.
    assert furl.furl(url).remove(path=True) != furl.furl(url)
def test_urlsplit(self):
    """furl.urlsplit() matches urlparse.urlsplit() for known schemes, but
    additionally separates the query from the path for unknown schemes."""
    # Without any delimeters like '://' or '/', the input should be treated as a
    # path.
    urls = ['sup', '127.0.0.1', 'www.google.com', '192.168.1.1:8000']
    for url in urls:
        assert isinstance(furl.urlsplit(url), urlparse.SplitResult)
        assert furl.urlsplit(url) == urlparse.urlsplit(url)
    # No changes to existing urlsplit() behavior for known schemes.
    url = 'http://www.pumps.com/'
    assert isinstance(furl.urlsplit(url), urlparse.SplitResult)
    assert furl.urlsplit(url) == urlparse.urlsplit(url)
    url = 'https://www.yahoo.co.uk/one/two/three?a=a&b=b&m=m%26m#fragment'
    assert isinstance(furl.urlsplit(url), urlparse.SplitResult)
    assert furl.urlsplit(url) == urlparse.urlsplit(url)
    # Properly split the query from the path for unknown schemes.
    url = 'unknown://www.yahoo.com?one=two&three=four'
    correct = ('unknown', 'www.yahoo.com', '', 'one=two&three=four', '')
    assert isinstance(furl.urlsplit(url), urlparse.SplitResult)
    assert furl.urlsplit(url) == correct
    url = 'sup://192.168.1.102:8080///one//two////?s=kwl%20string#frag'
    correct = ('sup', '192.168.1.102:8080', '///one//two////',
               's=kwl%20string', 'frag')
    assert isinstance(furl.urlsplit(url), urlparse.SplitResult)
    assert furl.urlsplit(url) == correct
    url = 'crazyyyyyy://www.yahoo.co.uk/one/two/three?a=a&b=b&m=m%26m#fragment'
    correct = ('crazyyyyyy', 'www.yahoo.co.uk', '/one/two/three',
               'a=a&b=b&m=m%26m', 'fragment')
    assert isinstance(furl.urlsplit(url), urlparse.SplitResult)
    assert furl.urlsplit(url) == correct
def test_join_path_segments(self):
    """furl.join_path_segments() concatenates path-segment lists, merging a
    trailing slash of one block with a leading slash of the next so that
    only one slash separates them (a slash is an empty-string segment)."""
    jps = furl.join_path_segments
    # Empty.
    assert jps() == []
    assert jps([]) == []
    assert jps([],[],[],[]) == []
    # Null strings.
    #  [''] means nothing, or an empty string, in the final path segments.
    #  ['', ''] is preserved as a slash in the final path segments.
    assert jps(['']) == []
    assert jps([''],['']) == []
    assert jps([''],[''],['']) == []
    assert jps([''],['','']) == ['','']
    assert jps([''],[''],[''],['']) == []
    assert jps(['', ''],['', '']) == ['','','']
    assert jps(['', '', ''],['', '']) == ['','','','']
    assert jps(['', '', '', '', '', '']) == ['','','','','','']
    assert jps(['', '', '', ''],['', '']) == ['','','','','']
    assert jps(['', '', '', ''],['', ''],['']) == ['','','','','']
    assert jps(['', '', '', ''],['', '', '']) == ['','','','','','']
    # Basics.
    assert jps(['a']) == ['a']
    assert jps(['a','b']) == ['a','b']
    assert jps(['a'],['b']) == ['a','b']
    assert jps(['1','2','3'],['4','5']) == ['1','2','3','4','5']
    # A trailing slash is preserved if no new slash is being added.
    #  ex: ['a', ''] + ['b'] == ['a', 'b'], or 'a/' + 'b' == 'a/b'
    assert jps(['a',''],['b']) == ['a','b']
    assert jps(['a'],[''],['b']) == ['a','b']
    assert jps(['','a',''],['b']) == ['','a','b']
    assert jps(['','a',''],['b','']) == ['','a','b','']
    # A new slash is preserved if no trailing slash exists.
    #  ex: ['a'] + ['', 'b'] == ['a', 'b'], or 'a' + '/b' == 'a/b'
    assert jps(['a'],['','b']) == ['a','b']
    assert jps(['a'],[''],['b']) == ['a','b']
    assert jps(['','a'],['','b']) == ['','a','b']
    assert jps(['','a',''],['b','']) == ['','a','b','']
    assert jps(['','a',''],['b'],['']) == ['','a','b']
    assert jps(['','a',''],['b'],['','']) == ['','a','b','']
    # A trailing slash and a new slash means that an extra slash will exist
    # afterwords.
    #  ex: ['a', ''] + ['', 'b'] == ['a', '', 'b'], or 'a/' + '/b' == 'a//b'
    assert jps(['a', ''],['','b']) == ['a','','b']
    assert jps(['a'],[''],[''],['b']) == ['a','b']
    assert jps(['','a',''],['','b']) == ['','a','','b']
    assert jps(['','a'],[''],['b','']) == ['','a','b','']
    assert jps(['','a'],[''],[''],['b'],['']) == ['','a','b']
    assert jps(['','a'],[''],[''],['b'],['', '']) == ['','a','b','']
    assert jps(['','a'],['', ''],['b'],['', '']) == ['','a','b','']
    assert jps(['','a'],['','',''],['b']) == ['','a','','b']
    assert jps(['','a',''],['','',''],['','b']) == ['','a','','','','b']
    assert jps(['a','',''],['','',''],['','b']) == ['a','','','','','b']
    # Path segments blocks without slashes, are combined as expected.
    assert jps(['a','b'],['c','d']) == ['a','b','c','d']
    assert jps(['a'],['b'],['c'],['d']) == ['a','b','c','d']
    assert jps(['a','b','c','d'],['e']) == ['a','b','c','d','e']
    assert jps(['a','b','c'],['d'],['e','f']) == ['a','b','c','d','e','f']
    # Putting it all together.
    assert jps(['a','','b'],['','c','d']) == ['a','','b','c','d']
    assert jps(['a','','b',''],['c','d']) == ['a','','b','c','d']
    assert jps(['a','','b',''],['c','d'],['','e']) == ['a','','b','c','d','e']
    assert jps(['','a','','b',''],['','c']) == ['','a','','b','','c']
    assert jps(['','a',''],['','b',''],['','c']) == ['','a','','b','','c']
def test_remove_path_segments(self):
    """Exercise furl.remove_path_segments().

    Segment lists model URL paths with '' entries marking slashes, so
    [''] represents a lone slash (equivalent to ['', '']).  Removal only
    happens when the second list matches the *tail* of the first;
    otherwise the original list is returned unchanged.
    """
    rps = furl.remove_path_segments
    # [''] represents a slash, equivalent to ['',''].
    # Basics.
    assert rps([],[]) == []
    assert rps([''], ['']) == []
    assert rps(['a'], ['a']) == []
    assert rps(['a'], ['','a']) == ['a']
    assert rps(['a'], ['a','']) == ['a']
    assert rps(['a'], ['','a','']) == ['a']
    # Slash manipulation.
    assert rps([''], ['','']) == []
    assert rps(['',''], ['']) == []
    assert rps(['',''], ['','']) == []
    assert rps(['','a','b','c'], ['b','c']) == ['','a','']
    assert rps(['','a','b','c'], ['','b','c']) == ['','a']
    assert rps(['','a','',''], ['']) == ['','a','']
    assert rps(['','a','',''], ['','']) == ['','a','']
    assert rps(['','a','',''], ['','','']) == ['','a']
    # Remove a portion of the path from the tail of the original path.
    assert rps(['','a','b',''], ['','a','b','']) == []
    assert rps(['','a','b',''], ['a','b','']) == ['','']
    assert rps(['','a','b',''], ['b','']) == ['','a','']
    assert rps(['','a','b',''], ['','b','']) == ['','a']
    assert rps(['','a','b',''], ['','']) == ['','a','b']
    assert rps(['','a','b',''], ['']) == ['','a','b']
    assert rps(['','a','b',''], []) == ['','a','b','']
    assert rps(['','a','b','c'], ['','a','b','c']) == []
    assert rps(['','a','b','c'], ['a','b','c']) == ['','']
    assert rps(['','a','b','c'], ['b','c']) == ['','a','']
    assert rps(['','a','b','c'], ['','b','c']) == ['','a']
    assert rps(['','a','b','c'], ['c']) == ['','a','b','']
    assert rps(['','a','b','c'], ['','c']) == ['','a','b']
    assert rps(['','a','b','c'], []) == ['','a','b','c']
    assert rps(['','a','b','c'], ['']) == ['','a','b','c']
    # Attempt to remove valid subsections, but subsections not from the end of
    # the original path.  These are all no-ops: the original list comes back.
    assert rps(['','a','b','c'], ['','a','b','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','a','b']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['a','b']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['a','b','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','a','b']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','a','b','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['a']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','a']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['a','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','a','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','a','','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','','a','','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['','']) == ['','a','b','c']
    assert rps(['','a','b','c'], ['c','']) == ['','a','b','c']
    # Attempt to remove segments longer than the original.
    assert rps([], ['a']) == []
    assert rps([], ['a','b']) == []
    assert rps(['a'], ['a','b']) == ['a']
    assert rps(['a','a'], ['a','a','a']) == ['a','a']
def test_is_valid_port(self):
    """Ports 1-65535 validate; zero, negatives, overflow and non-ints do not."""
    valid_ports = [1, 2, 3, 65535, 119, 2930]
    # Note: (0) is just the int 0 (not a tuple), kept as in the fixture.
    bogus_ports = [-1, -9999, 0, 'a', [], (0), {1:1}, 65536, 99999, {}, None]
    assert all(furl.is_valid_port(candidate) for candidate in valid_ports)
    assert not any(furl.is_valid_port(candidate) for candidate in bogus_ports)
def test_is_valid_encoded_path_segment(self):
    """Valid path segments are unreserved/sub-delim characters plus
    percent-escapes; reserved characters like '/', '?' and '#' are not.

    BUG FIX: the first parameter was named ``segment`` instead of the
    conventional ``self`` (the test runner binds the instance to it
    regardless, but the name was misleading).
    """
    valids = [('abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               '0123456789' '-._~' ":@!$&'()*+,;="),
              '', 'a', 'asdf', 'a%20a', '%3F',]
    invalids = [' ^`<>[]"#/?', ' ', '%3Z', '/', '?']
    for valid in valids:
        assert furl.is_valid_encoded_path_segment(valid)
    for invalid in invalids:
        assert not furl.is_valid_encoded_path_segment(invalid)
def test_is_valid_encoded_query_key(self):
    """Query keys additionally allow '/' and '?', but never '#'.

    BUG FIX: the first parameter was named ``key`` instead of the
    conventional ``self`` (the runner binds the instance to it anyway,
    but the name was misleading).
    """
    valids = [('abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               '0123456789' '-._~' ":@!$&'()*+,;" '/?'),
              '', 'a', 'asdf', 'a%20a', '%3F', 'a+a', '/', '?',]
    invalids = [' ^`<>[]"#', ' ', '%3Z', '#']
    for valid in valids:
        assert furl.is_valid_encoded_query_key(valid)
    for invalid in invalids:
        assert not furl.is_valid_encoded_query_key(invalid)
def test_is_valid_encoded_query_value(self):
    """Query values additionally allow '/', '?' and '=', but never '#'.

    BUG FIX: the first parameter was named ``value`` instead of the
    conventional ``self`` (the runner binds the instance to it anyway,
    but the name was misleading).
    """
    valids = [('abcdefghijklmnopqrstuvwxyz'
               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               '0123456789' '-._~' ":@!$&'()*+,;" '/?='),
              '', 'a', 'asdf', 'a%20a', '%3F', 'a+a', '/', '?', '=']
    invalids = [' ^`<>[]"#', ' ', '%3Z', '#']
    for valid in valids:
        assert furl.is_valid_encoded_query_value(valid)
    for invalid in invalids:
        assert not furl.is_valid_encoded_query_value(invalid)
| [
"[email protected]"
] | |
8b6e1e356ae27b2dc78266d986e81f9a3b47123f | 98710e1b64d2eb527efbffa7f1b3d846cce6024c | /20180801-daguan-nlp-classification/model/lr_tfidf_traintest_feature.py | 89efb82eaf5ac37fbbc9e737c6c55157675b818c | [] | no_license | SheldonWong/competition | bf6f8904cfd89d4649249e453f46f45f0a988604 | 3894c7ec5f9fca35d37702b5402dac9b7b1e04c4 | refs/heads/master | 2020-03-30T07:13:32.702321 | 2018-09-30T09:22:25 | 2018-09-30T09:22:25 | 150,924,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py | import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn.calibration import CalibratedClassifierCV

# The block below is the original (now disabled) raw-text TF-IDF pipeline,
# kept for reference; the active path loads pre-computed pickled features.
'''
# ~/Downloads/train_set.csv
# ~/workspace/sublime/daguan/train_sample.csv
print('读取数据')
df_train = pd.read_csv('~/Downloads/train_set.csv')
df_test = pd.read_csv('~/Downloads/train_set.csv')
df_train.drop(columns=['word_seg','id'], inplace=True)
df_test.drop(columns=['word_seg'], inplace=True)
print('特征TF-IDF:')
#vectorizer = CountVectorizer(ngram_range=(1,2), min_df=3, max_df=0.9, max_features=100000)
vectorizer = TfidfVectorizer(ngram_range=(1,2), min_df=5, max_df=0.9,
    use_idf=True,smooth_idf=True, sublinear_tf=True,norm='l2')
vectorizer.fit(df_train['article'])
# 训练的时候只用到词
x_train = vectorizer.transform(df_train['article'])
y_train = df_train['class'] - 1
x_test = vectorizer.transform(df_test['article'])
'''
import pickle

# Load pre-computed TF-IDF feature matrices from disk.
print('载入特征:')
with open('./feature/tfidf/x_train2.pickle', 'rb') as f:
    x_train = pickle.load(f)
df_train = pd.read_csv('~/Downloads/train_set.csv')
df_test = pd.read_csv('~/Downloads/test_set.csv')
# Labels in the CSV are 1-based; shift to 0-based for sklearn.
y_train = df_train['class'] - 1
#y_train.to_csv('./feature/y_train.csv')
with open('./feature/tfidf/x_test2.pickle', 'rb') as f3:
    x_test = pickle.load(f3)

# Hold out 20% of the (sparse) training matrix for evaluation.
train_X, test_X, train_y, test_y = train_test_split(x_train,
                                                    y_train,
                                                    test_size=0.2,
                                                    random_state=0)

# test_X is a sparse matrix
print('开始用LR训练')
# Larger C means a smaller regularization penalty.
lg = LogisticRegression(C=5, dual=True, verbose=1)
lg.fit(train_X, train_y)
y_pred = lg.predict(test_X)
accuracy = accuracy_score(test_y, y_pred)
y_prob = lg.predict_proba(test_X)
print(accuracy)
print(classification_report(test_y, y_pred))

print('开始SVM训练')
svc = svm.LinearSVC(C=5, dual=True)
lin_svc = CalibratedClassifierCV(base_estimator=svc)
# BUG FIX: previously fitted on the full x_train, which contains test_X,
# so the reported SVM accuracy was inflated by train/test leakage.
# Fit on the same training split as the logistic regression.
lin_svc.fit(train_X, train_y)
y_pred2 = lin_svc.predict(test_X)
accuracy = accuracy_score(test_y, y_pred2)
y_prob2 = lin_svc.predict_proba(test_X)
print(accuracy)
print(classification_report(test_y, y_pred2))

# bad case collection for error analysis
# features: stringified sparse rows with tabs/newlines flattened
freature_l = []  # BUG FIX: this list was used without ever being initialised (NameError)
for row in test_X:
    freature_l.append(str(row).replace('\t', ' ').replace('\n', ' '))
case_df = pd.DataFrame(columns=['feature','class','pred','pred2','prob','prob2'])
case_df['feature'] = freature_l
case_df['class'] = test_y.tolist()
case_df['pred'] = y_pred
case_df['pred2'] = y_pred2
case_df['prob'] = y_prob.tolist()
case_df['prob2'] = y_prob2.tolist()
case_df.to_csv('./result/bad_case3.csv')

## indices of misclassified rows per model
b = case_df['class'] == case_df['pred']
b_l = list(b)
index_l = [i for i in range(len(b_l)) if b_l[i] == False]
c = case_df['class'] == case_df['pred2']
c_l = list(c)
index2_l = [i for i in range(len(c_l)) if c_l[i] == False]
# rows where the two models disagree with each other
d = case_df['pred'] == case_df['pred2']
d_l = list(d)
index3_l = [i for i in range(len(d_l)) if d_l[i] == False]
# BUG FIX: the three index lists generally have different lengths, and
# assigning unequal-length plain lists as columns raises ValueError.
# Wrapping them in Series lets pandas pad the shorter columns with NaN.
ana_df = pd.DataFrame({'t-l': pd.Series(index_l),
                       't-s': pd.Series(index2_l),
                       'l-s': pd.Series(index3_l)},
                      columns=['t-l', 't-s', 'l-s'])
ana_df.to_csv('./result/bad_case3_ana.csv')
'''
y_class = lg.predict(x_test)
y_prob = lg.predict_proba(x_test)
df_test['class'] = y_class.tolist()
df_test['class'] = df_test['class'] + 1
df_test['prob'] = y_prob.tolist()
df_result = df_test.loc[:, ['id','class','prob']]
df_result.to_csv('./result/result-tfidf-feature-prob.csv', index=False)
'''
"[email protected]"
] | |
bea22b6380e1f80d8d293727e249b8f9779d9fc8 | bf8ba111da414391c9bfc218579ec2d220865764 | /fiterp/fiterp/fiterp/doctype/leave_application_fiterp/test_leave_application_fiterp.py | 259ed56e02b1d6058f04b5c1f70f4da293992390 | [
"MIT"
] | permissive | AbrahamMan/fiterp | e0f76c59a3d31ebf38ce957612cec44d16fe6213 | a0ae1cc4f7b848dbbf51c870319e0aade4b92170 | refs/heads/master | 2021-04-27T19:39:08.494755 | 2018-01-28T11:10:13 | 2018-01-28T11:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, ITKMITL and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestLeaveApplicationfiterp(unittest.TestCase):
    # Placeholder test case for the "Leave Application fiterp" doctype;
    # no behaviour is exercised yet.
    pass
| [
"[email protected]"
] | |
e1de656ba854ec86201e55c1fead133e35c7c2ec | 38e5c33200e2fe87cc39a53a2fe19a807ebeca2c | /manage.py | 044b5ba9512a1c8ff405cf17fb472998e6494abf | [] | no_license | Hitmantejas/Todo-Django | 623157466b808881cc4cdba69ba73db899ca6550 | 8648be091abc0ddb9fd277e03a25550e360aaeea | refs/heads/master | 2023-03-03T19:41:57.187086 | 2021-02-07T07:49:16 | 2021-02-07T07:49:16 | 336,728,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at the project settings, then hand
    control to its command-line dispatcher."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'justdoit.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Only dispatch to Django when run as a script (not on import).
    main()
| [
"[email protected]"
] | |
2c12a85637d4448821f4e08fab01976870d8fdca | b3330bd3365767b89afb9c432f4deb722b39ac1c | /python/sort/selection_sort/selection_sort_10.py | d0142c054e2de0f2b0945ab15e296cef179f94f5 | [] | no_license | hguochen/algorithms | 944df332d5b39220bd59cbd62dc74b12e335fb9e | 703e71a5cd9e002d800340df879ed475a404d092 | refs/heads/master | 2022-02-27T12:11:10.607042 | 2022-02-18T21:04:00 | 2022-02-18T21:04:00 | 13,767,503 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # selection sort
def selection_sort(array):
    """
    Sort ``array`` in place with selection sort and return it.

    The list is split into a sorted left prefix and an unsorted right
    suffix; each pass finds the smallest element of the suffix and swaps
    it into place.  O(n^2) comparisons, O(1) extra space.  Empty and
    single-element lists are handled naturally.
    """
    # BUG FIX: use `range` instead of the Python-2-only `xrange`;
    # iteration behaviour is identical on Python 2 and 3.
    for i in range(len(array)):
        # index of the least element seen so far in the unsorted suffix
        min_index = i
        for j in range(i + 1, len(array)):
            if array[j] < array[min_index]:
                min_index = j
        # swap the least unsorted element into position i
        array[i], array[min_index] = array[min_index], array[i]
    return array
| [
"[email protected]"
] | |
5e882ab6a9df428646946d956286a2353bf597e6 | 2d382086ead32a2de90855fb02662771d7d92c7c | /all_files/migrations/0001_initial.py | 81f3677616ce3d3523a8363974caabad63a4e006 | [] | no_license | accprojects/project | b92f9ebb1ce09ee3d7dc3b77e5c1e7376d6055b2 | 56f51c9581da2c3e8805123c7cdc1652fbfac78b | refs/heads/master | 2021-05-12T19:23:15.617975 | 2018-02-13T11:38:52 | 2018-02-13T11:38:52 | 117,091,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-11 12:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # First migration for the app: creates the Post table.
    initial = True

    # Posts reference the project's (swappable) user model as author.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now"; published_date stays empty until set.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
6be743b4b02d6eb6d7f62aab46ff57260ffa042b | f92dfdebb4bf6bc108f51783333520c35afa66da | /api-web/src/www/application/management/commands/publish_rabbitmq_genome_gene.py | 23f7465ee4e41b1adf971b243ae030a6a568b6ea | [] | no_license | duytran92-cse/nas-genodata | 4d8659a135913d226842ff6a013324714ead0458 | 80c88f42145f729c5862a5293012e71548182e1d | refs/heads/master | 2022-11-13T17:24:03.769605 | 2020-06-14T18:59:36 | 2020-06-14T18:59:36 | 272,264,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,305 | py | import json, pika, os
from application.models import *
from urad_api import registry
from urad_api_standard.commands import Command as BaseCommand
from django.conf import settings
import json
from application.modules.gene import components as gene_components
from django.db import connection
class Command(BaseCommand):
    """Management command (Python 2) that pushes Gene records, enriched
    with disease/publication annotations, onto a RabbitMQ queue."""

    ## PUBLISH
    def publish_to_queue(self, iterator, genome_queue, rabbitmq_host, rabbitmq_port):
        # Opens a fresh blocking connection per call, declares the queue
        # (idempotent), publishes each item as JSON, then closes.
        credentials = pika.PlainCredentials('guest', 'guest')
        connection = pika.BlockingConnection(pika.ConnectionParameters(rabbitmq_host, rabbitmq_port, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=genome_queue)
        for x in iterator:
            channel.basic_publish(exchange='', routing_key=genome_queue, body=json.dumps(x))
        connection.close()

    # NOTE(review): mutable default argument; `params` is never read here,
    # but the shared-dict default is a known pitfall.
    def process(self, params = {}):
        # DECLARE VARIABLE — RabbitMQ endpoint comes from Django settings.
        GENOME_QUEUE = settings.GENOME_QUEUE
        RABBITMQ_HOST = settings.RABBITMQ_HOST
        RABBITMQ_PORT = int(settings.RABBITMQ_PORT)
        # Starting
        print "[x] Publish data to rabbitmq"
        ##########################
        ## Gene — processed and published in batches of 5000 rows.
        print "[***] Publish GENE data to rabbitmq"
        isDone = False
        start = 0
        gene_manager = gene_components.DataManager()
        while not isDone:
            end = start + 5000
            print 'start: %s, end: %s' % (start, end)
            gene = Gene.objects.all()[start:end]
            # NOTE(review): `end + 1` skips one row per batch (slice end is
            # exclusive) — confirm whether that off-by-one is intended.
            start = end + 1
            if gene.count() <= 0:
                isDone = True
            x = []  # messages for this batch: ['gene', code, values?]
            for var in gene:
                y = ['gene', var.code]
                try:
                    data = gene_manager.get(var.code)
                    values = {}        # fields worth publishing
                    arr_disease = []   # merged disease dicts from all sources
                    asso_disease = []  # flat list of disease names
                    asso_pub = []      # merged publication dicts
                    for field, value in data.items():
                        # simple scalar/list fields copied through as-is
                        if field in ['synonyms', 'effects','start', 'end','num_exon','chromosome','protein_product','description'] and value['value'] != None:
                            values[field] = value['value']
                        # disease field
                        if field == 'disgenet-diseases' and value['value'] != None:
                            arr_disease.extend(value['value'])
                            rs = [ item['disease'] for item in value['value'] ]
                            asso_disease.extend(rs)
                        if field == 'gwas-diseases' and value['value'] != None:
                            try:
                                # normalise GWAS entries into the common shape
                                for k in value['value']:
                                    arr_disease.append({
                                        'disease': k.get('disease',''),
                                        'pubmedid': k.get('pmid',''),
                                        'sentence': k.get('sentence', '')
                                    })
                            except Exception as e:
                                pass  # best-effort: malformed entries skipped
                            rs = [ item['disease'] for item in value['value'] ]
                            asso_disease.extend(rs)
                        if field == 'ctdbase-diseases' and value['value'] != None:
                            try:
                                # CTDbase uses 'evidence' where GWAS uses 'sentence'
                                for k in value['value']:
                                    arr_disease.append({
                                        'disease': k.get('disease',''),
                                        'pubmedid': k.get('pmid',''),
                                        'sentence': k.get('evidence', '')
                                    })
                            except Exception as e:
                                pass
                            rs = [ item['disease'] for item in value['value'] ]
                            asso_disease.extend(rs)
                        # Re-assigned each iteration as the accumulators grow;
                        # note the merged list overwrites the raw
                        # 'disgenet-diseases' value stored above.
                        if len(arr_disease) > 0:
                            values['disgenet-diseases'] = arr_disease
                        if len(asso_disease) > 0:
                            values['associated_diseases'] = asso_disease
                        # publications
                        if field == 'publications' and value['value'] != None:
                            values[field] = value['value']
                            try:
                                for k in value['value']:
                                    asso_pub.append({
                                        'pmid': k['pmid'],
                                        'title': k['title']
                                    })
                            except Exception as e:
                                pass
                        if field == 'gwas-publications' and value['value'] != None:
                            asso_pub.extend(value['value'])
                        if len(asso_pub) > 0:
                            values['associated_publications'] = asso_pub
                    # attach the collected payload only when non-empty
                    if values:
                        y.append(values)
                    x.append(y)
                except Exception as e:
                    pass  # NOTE(review): silently drops genes that fail enrichment
            # Publish rabbitMQ — one connection per batch
            self.publish_to_queue(x, GENOME_QUEUE, RABBITMQ_HOST, RABBITMQ_PORT)
        print "[***] DONE gene"
        print "[x] Sent data to RabbitMQ"
| [
"[email protected]"
] | |
4f7296681cdfba9a427661da990e966abc246734 | ce8d7f8171da70b75b805f2ba5b2dfaeed651c9b | /geopy.py | 31f7ec0757edea1530872f3c706b0334a5752853 | [] | no_license | DesPenny/Everythingbackpacker | 5798ebd827c095f0aea48b1a5efa9e1191f8d0a0 | 8c45242de8aa41f00d77d95057d9daa0a61c41f3 | refs/heads/master | 2016-09-06T21:53:25.982409 | 2014-05-01T12:30:37 | 2014-05-01T12:30:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | from geopy import geocoders
# Module-level geocoder instance (Google Maps v3 backend) shared by find().
g = geocoders.GoogleV3()

def find():
    """Prompt the user for a search string (Python 2 ``raw_input``),
    geocode it, and return the canonical place name."""
    print 'What would you like to search?'
    query = raw_input()
    # geocode() yields (place_name, (latitude, longitude)); the
    # coordinates are unpacked but unused here.
    place, (lat, lng) = g.geocode(query)
    return place
"[email protected]"
] | |
e6495c54678fa25589ed821affe20e0b079fbc0a | d5b3011dcc61ea661d395747a4ecac2fc850e667 | /checkout/migrations/0003_order_user_profile.py | f77fa66947ef7054025fbbe925f0b8c909849972 | [] | no_license | Code-Institute-Submissions/learning-management-system-ms4 | d4f544f357351d45fd9dd471774d160cfde7d9a9 | 172738a1a8e37113a40f93a8ed230dae83b1ca93 | refs/heads/master | 2022-11-09T08:56:05.253560 | 2020-06-22T01:08:32 | 2020-06-22T01:08:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # Generated by Django 3.0.6 on 2020-06-12 09:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds an optional link from each checkout Order to a UserProfile.

    dependencies = [
        ('profiles', '0001_initial'),
        ('checkout', '0002_auto_20200612_0951'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='user_profile',
            # Nullable FK: deleting a profile keeps its orders (SET_NULL);
            # reverse accessor is profile.orders.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='profiles.UserProfile'),
        ),
    ]
| [
"[email protected]"
] | |
05d19c9a1a2febd779681ba4ce0cd85888d449d4 | 783a97b257ec086d6c7e2109840f1ad91f8e52bf | /scraper/src/config/config_validator.py | f432d0d299c047068b9d3b7fd3fbad2aa75bbfab | [
"MIT"
] | permissive | thefrenchmatt/docs-scraper | 949af243655201c865bc7f64cb906c7b597953a6 | 70665daafaea6dfefc48de3ca107c2a97cc9fc0d | refs/heads/master | 2022-07-05T03:34:20.627595 | 2020-05-14T12:05:59 | 2020-05-14T12:05:59 | 267,131,268 | 1 | 0 | NOASSERTION | 2020-05-26T19:17:39 | 2020-05-26T19:17:38 | null | UTF-8 | Python | false | false | 2,824 | py |
class ConfigValidator:
    """Validates the scraper configuration object before a crawl starts."""
    config = None

    def __init__(self, config):
        self.config = config

    def validate(self):
        """Check for all needed parameters in config.

        Raises ValueError for missing mandatory values and Exception for
        wrongly-typed or inconsistent values.
        """
        if not self.config.index_uid:
            raise ValueError('index_uid is not defined')

        # Start_urls is mandatory (a sitemap can stand in for it)
        if not self.config.start_urls and not self.config.sitemap_urls:
            raise ValueError('start_urls is not defined, nor sitemap urls')

        # Start urls must be an array
        if self.config.start_urls and not isinstance(self.config.start_urls,
                                                     list):
            raise Exception('start_urls should be list')

        # Stop urls must be an array
        if self.config.stop_urls and not isinstance(self.config.stop_urls,
                                                    list):
            raise Exception('stop_urls should be list')

        if self.config.js_render and not isinstance(self.config.js_render,
                                                    bool):
            raise Exception('js_render should be boolean')

        # `js_wait` is set to 0s by default unless it is specified
        if self.config.js_wait and not isinstance(self.config.js_wait, int):
            raise Exception('js_wait should be integer')

        if self.config.use_anchors and not isinstance(self.config.use_anchors,
                                                      bool):
            raise Exception('use_anchors should be boolean')

        if self.config.sitemap_alternate_links and not isinstance(
                self.config.sitemap_alternate_links, bool):
            raise Exception('sitemap_alternate_links should be boolean')

        if self.config.sitemap_urls_regexs and not self.config.sitemap_urls:
            raise Exception(
                'You gave an regex to parse sitemap but you didn\'t provide a sitemap url')

        # BUG FIX: this guard previously repeated the condition above (which
        # had just raised, leaving the loop dead code) and iterated over a
        # misspelled attribute (`sitemap_urls_regex`).  Validate every
        # configured regex whenever any are present.
        if self.config.sitemap_urls_regexs:
            for regex in self.config.sitemap_urls_regexs:
                if not isinstance(regex, str):
                    # str() so non-string values still render in the message
                    # instead of raising TypeError during concatenation
                    raise Exception(
                        'You gave an bad regex: ' + str(regex) + ' must be a string')

        if self.config.force_sitemap_urls_crawling and not self.config.sitemap_urls:
            raise Exception(
                'You want to force the sitemap crawling but you didn\'t provide a sitemap url')

        # NOTE(review): raises only when BOTH the new and the deprecated
        # flag are falsy — confirm this matches the intended deprecation path.
        if not self.config.scrape_start_urls and not self.config.scrap_start_urls:
            raise Exception(
                'Please use only the new variable name: scrape_start_urls')

        if self.config.nb_hits_max and not isinstance(self.config.nb_hits_max,
                                                      int):
            raise Exception('nb_hits_max should be integer')
| [
"[email protected]"
] | |
a43e6873d5770d466c0143a8d8e3abdff3975ac4 | 4bc19f4dd098ebedcb6ee78af0ae12cb633671fe | /static/views.py | 608e8568b487fbee9eb1251fbf226fbe6d45ec5b | [] | no_license | StanislavKraev/rekvizitka | 958ab0e002335613a724fb14a8e4123f49954446 | ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f | refs/heads/master | 2021-01-01T05:44:56.372748 | 2016-04-27T19:20:26 | 2016-04-27T19:20:26 | 57,240,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from django.http import Http404
from django.shortcuts import render_to_response
from rek.static.models import StaticPage
from django.template.context import RequestContext
def render(request, page_alias=''):
    """Render the enabled static page registered under ``page_alias``.

    Raises Http404 when no enabled page with that alias exists.
    """
    # BUG FIX: Model.objects.get() raises DoesNotExist rather than
    # returning None, so the old `if not page: raise Http404()` check was
    # dead code and a missing alias produced a 500 instead of a 404.
    try:
        page = StaticPage.objects.get(alias=page_alias, enabled=True)
    except StaticPage.DoesNotExist:
        raise Http404()
    return render_to_response('static_page_with_sidebar.html',
                              {'page': page},
                              context_instance=RequestContext(request))
| [
"[email protected]"
] | |
0ade196c2880c7c5454d81108adc3086b4ced438 | c7bd791903d36d5ee5e828cd90939e3358b5845a | /contacts/migrations/0001_initial.py | a972d5ff6aacc21aa3e2cf7fb92ed4c8be41ba86 | [] | no_license | Saxena611/bp_real_estate | b638ac477fcf8e44dccfb5d58473c83efa94e5cb | e2ce50678894f6f542864c525b9d8fcdb91f8669 | refs/heads/master | 2023-06-12T05:03:37.845739 | 2021-07-11T06:03:50 | 2021-07-11T06:03:50 | 330,562,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # Generated by Django 3.0.3 on 2021-03-21 06:28
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration for the contacts app: creates the Contact table.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('request', models.CharField(max_length=100)),
                ('request_id', models.IntegerField()),
                ('name', models.CharField(max_length=200)),
                ('email', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('message', models.TextField(blank=True)),
                # Defaults to row-creation time (naive datetime.now).
                ('contact_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                # Plain integer, not a ForeignKey to the user model.
                ('user_id', models.IntegerField(blank=True)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
08a41f586570d5ba0baa10410a977b1169ac947f | 4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7 | /mapping/migrations/0033_auto_20170129_0939.py | 90fce4536a94b43eded5f95299f301669aa5c874 | [] | no_license | quentin-david/heimdall | f72a85606e7ab53683df2023ef5eaba762198211 | 84a429ee52e1891bc2ee4eb07a084dff209c789c | refs/heads/master | 2021-01-21T10:26:28.895663 | 2017-07-21T19:19:46 | 2017-07-21T19:19:46 | 83,432,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-29 09:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes ServiceReverseProxy.servername optional (blank/null allowed).

    dependencies = [
        ('mapping', '0032_servicewebserver_reverse_proxy'),
    ]

    operations = [
        migrations.AlterField(
            model_name='servicereverseproxy',
            name='servername',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
| [
"[email protected]"
] | |
12e0297f0e01b59d69ca42308b250e49c4b45112 | ec1b50134c1d8b7af7374dfdbec876a0c3e9d211 | /6.0001/string.py | 767f2d972b1344a10d00129904f916c95ced4f9a | [] | no_license | Leeboyd/learnPython | 1876d3a5126cb30938a044847a99093512dd88cc | 7fc8938a702ca32a6912490c5c8aa49561f8123e | refs/heads/master | 2023-02-20T03:12:35.529317 | 2022-06-16T23:41:35 | 2022-07-05T03:39:32 | 147,643,970 | 1 | 0 | null | 2023-02-10T22:46:18 | 2018-09-06T08:42:40 | Python | UTF-8 | Python | false | false | 196 | py | s = "abcdefgh"
# Index-based iteration: look up each character through its position.
# (`s` is "abcdefgh", defined above: it contains neither 'i' nor 'u',
# so this loop prints nothing.)
for index in range(len(s)):
    if s[index] == 'i' or s[index] == 'u':
        print("There is an i or u")

# Direct iteration over the characters — the more Pythonic form.
# 'h' occurs once in s, so this prints a single line.
for char in s:
    if char == 'i' or char == 'h':
        print("There is an i or h")
"[email protected]"
] | |
1a029ae138c31e24a7f6c8323cce780ea4cc4c45 | 209622dae7003dfec237123a8d4645f97e000df6 | /venv/bin/pyreverse | 22abef7cf37e49af31aee031b7d0ea9780fe04af | [] | no_license | vasughatole/promolta | f50d9ced2e07ff05beae1611cc6c52b49956ee17 | 947a91bc55d9dbd853fde011957df2fb9cc30705 | refs/heads/master | 2020-04-17T19:47:01.759322 | 2019-01-21T21:14:02 | 2019-01-21T21:14:02 | 166,878,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/vasu/python/promolta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
    # Setuptools-generated console script: strip a "-script.py(w)"/".exe"
    # suffix from argv[0] so pylint reports the plain executable name,
    # then exit with pyreverse's return status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())
| [
"[email protected]"
] | ||
07d1801bc21bc3ce1f36e57d68f11e9aea47ae54 | f85fbab0cffaa54a136e0938715414383a4eea1f | /Challenge_climate_analysis.py | 4dc0330bdef972cf47b3e48416d2d98207c99eea | [] | no_license | varshajha28/Surfs_Up | 54bd59b9ba867573e974ce74f1bfafa6632c0fd4 | 31ff636167031c20d6de69398d142f02c0a78c73 | refs/heads/master | 2021-03-03T07:22:43.393395 | 2020-03-14T22:02:21 | 2020-03-14T22:02:21 | 245,942,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | # Add dependancies
# NOTE(review): `%matplotlib inline` is an IPython/Jupyter magic, not valid
# plain Python — this file only runs inside a notebook/IPython session.
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#Create the connection between sqlite and pandas
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# Design a query to get June temperatures for all stations across all years.
# complete the query to extract all desired results and put them in a list.
juneallresults = []
juneallresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 6).all()
june_all_df = pd.DataFrame(juneallresults, columns=['date','temperatures'])
june_all_df.describe()
#june_all_df
# Design a query to get June temperatures for all stations, restricted to the
# last year of data.
# NOTE(review): comments/variable names say "two years", but the filter keeps
# only dates on/after 2016-08-23 (365 days before the last observation),
# i.e. a single June — confirm which window was intended.
# Calculate the date one year from the last date in data set.
prev_year = dt.date(2017,8,23)- dt.timedelta(days=365)
#complete the query to extract all desired results and put them in a list.
junetwoyearresults = []
junetwoyearresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 6).filter(Measurement.date >= prev_year).all()
june_twoyear_df = pd.DataFrame(junetwoyearresults, columns=['date','temperatures'])
june_twoyear_df.describe()
#june_twoyear_df
# Design a query to get December temperatures for all stations across all years.
#complete the query to extract all desired results and put them in a list.
decemberallresults=[]
decemberallresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 12).all()
dec_all_df = pd.DataFrame(decemberallresults, columns=['date','temperatures'])
dec_all_df.describe()
#dec_all_df
# Design a query to get December temperatures for all stations over the same
# most-recent-year window (see NOTE above — captures December 2016 only).
# Calculate the date one year from the last date in data set.
prev_year = dt.date(2017,8,23)- dt.timedelta(days=365)
#complete the query to extract all desired results and put them in a list.
dectwoyearresults=[]
#complete the query to extract all desired results and put them in a list.
dectwoyearresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 12).filter(Measurement.date >= prev_year).all()
dec_twoyr_df = pd.DataFrame(dectwoyearresults, columns=['date','temperatures'])
dec_twoyr_df.describe()
#dec_twoyr_df
"[email protected]"
] | |
2db11fc713334d1c4d17ecf444cf9726e26cc5dd | 055cf8aeec011f67580bf92a83d94ee6919648cd | /migrations/versions/ad28a44f93c4_initial_migration.py | 18999b6182f1570c2b30ca638cbdbed3b8a6a43e | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | RisperAkinyi/BlogPost | df82c8fec558425ca1bbce65aa90464176aefb87 | f8ee4c887fceae8e70410b66a12bc5680cf26044 | refs/heads/master | 2022-09-30T19:09:27.969983 | 2019-08-13T07:36:26 | 2019-08-13T07:36:26 | 201,879,164 | 0 | 0 | MIT | 2022-09-16T18:07:44 | 2019-08-12T07:22:39 | Python | UTF-8 | Python | false | false | 2,128 | py | """Initial Migration
Revision ID: ad28a44f93c4
Revises:
Create Date: 2019-08-09 11:05:50.912878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad28a44f93c4'   # this migration's id
down_revision = None        # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the users, comments and posts tables plus the user indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(), nullable=True),
    sa.Column('password_hash', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # emails must be unique; usernames may repeat
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comments', sa.String(), nullable=True),
    # NOTE(review): post_id has no ForeignKeyConstraint to posts — confirm intended.
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop everything created in upgrade(), in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('posts')
    op.drop_table('comments')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
| [
"[email protected]"
] | |
bb5ab0eba71a2dc209d64e6c93ae9e1d690a3bab | 7c1abd2ec952d022342098990d4ee2382bd18266 | /tests/test_01.py | 6a34cdac5b44ae9e2caa6699102c5839ed98f80d | [
"MIT"
] | permissive | evestidor/svc-stock-price-simulator | 7679177bc06a6e230478cf383b718afb108120cc | 2ddfb504933959c19f8bd2b7d295b117c10fe27a | refs/heads/master | 2022-05-07T19:48:36.449467 | 2019-07-03T15:27:11 | 2019-07-03T15:27:11 | 192,607,658 | 0 | 0 | MIT | 2022-04-22T21:46:32 | 2019-06-18T20:24:01 | Python | UTF-8 | Python | false | false | 71 | py |
class TestDummy:
    """Smoke-test class: proves the test wiring runs at all."""

    def test_assert_true(self):
        assert True
| [
"[email protected]"
] | |
9a98b7f2a056ef505360b11f8b6ffb23274a9882 | d6d2773e7466b31da5ed9e7103d5eb9909db1223 | /web/sales_app/apps/stations/migrations/0002_auto_20170717_2015.py | 3bc58a2cad8a9bae6483af3e326e06df869aafeb | [
"MIT"
] | permissive | iabok/sales-tracker | bee3f0a18ca565a8da2ce356c6842f6af486367c | 7ef2e68f0b0393b983375d092b8469ca88f6b5ce | refs/heads/master | 2021-06-22T00:09:16.723616 | 2017-08-21T23:30:57 | 2017-08-21T23:30:57 | 92,953,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-17 20:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('stations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='station',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 20, 15, 30, 246266, tzinfo=utc)),
),
migrations.AlterField(
model_name='station',
name='modified_date',
field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 20, 15, 30, 246303, tzinfo=utc)),
),
]
| [
"[email protected]"
] | |
e2a4d4248d4f5b48e5c69c52e0dad41e541340ba | 33cfcb4561e7320ae0e893fbe774c7eb0a2effe8 | /eg15.01.py | c94d345080db1688fdbb1a237e7fd737f5e8db93 | [] | no_license | Jueee/aByteOfPython | 9c8bc01f0707daef29e52467db0c3f5a94747119 | ae1a4a4b181612463ccdcd0d89c961f22f7ece20 | refs/heads/master | 2021-05-31T14:26:00.790823 | 2016-02-17T05:41:20 | 2016-02-17T05:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | #!/usr/bin/python
# Filename: list_comprehension.py
# 通过列表综合,可以从一个已有的列表导出一个新的列表。
listone = [2, 3, 4]
listtwo = [2*i for i in listone if i > 2]
print(listtwo)
# 在函数中接收元组和列表
# 当要使函数接收元组或字典形式的参数的时候,有一种特殊的方法,它分别使用*和**前缀。
# 这种方法在函数需要获取可变数量的参数的时候特别有用。
# 由于在args变量前有*前缀,所有多余的函数参数都会作为一个元组存储在args中。
# 如果使用的是**前缀,多余的参数则会被认为是一个字典的键/值对。
def powersum(power, *args):
'''Return the sum of each argument raised to specified power.'''
total = 0
for i in args:
total += pow(i, power)
return total
print(powersum(2,3,4,5))
print(powersum(2,10,100,1000))
| [
"hellojue @foxmail.com"
] | hellojue @foxmail.com |
3d0e56a951a0a89f5feb4223a746a737089a3ea2 | 6fc2feac7ec07870afc927983cb5b048af1f6566 | /src/monju_no_chie/admin.py | 2fa6e5f53d12c868dd20b772492651f03aa9d62e | [] | no_license | SpaceMagical/SpaceMagical | fd2f114e4df89eeb71c6fb5900010eca8746ebfc | e3b4e5eb1f91fd8145d397b9bd4018d5f99ca8df | refs/heads/master | 2021-06-11T02:42:43.704128 | 2016-12-03T16:13:33 | 2016-12-03T16:13:33 | 74,037,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from django.contrib import admin
from .models import MonjuNoChie
class MonjuNoChieAdmin(admin.ModelAdmin):
class Meta:
model = MonjuNoChie
admin.site.register(MonjuNoChie, MonjuNoChieAdmin)
| [
"[email protected]"
] | |
71bac15afe4dcfaa41d8dd9a2894a3b9ff0f5e83 | 5503712ed14239e48b5dc2fb66e38250f1c14320 | /accounts/migrations/0002_auto_20200506_2119.py | 20372336adb7672ab88f189debfe85514eb19dfe | [] | no_license | Aexki/sajas-hotel_management | ee8238e8f3a13a0e061a9951df7244014d948902 | f0ac71378153ce97c1588ecef3857fcde3cd4035 | refs/heads/master | 2022-07-03T03:36:34.818041 | 2020-05-09T15:53:43 | 2020-05-09T15:53:43 | 262,008,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # Generated by Django 3.0.5 on 2020-05-06 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='cabservice',
name='completed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='complaintservice',
name='completed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='roomservice',
name='completed',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.