{
  "source": "0-jam/mmt_api",
  "score": 2
}
#### File: 0-jam/mmt_api/markovify_sentence.py
```python
import graphene
from fastapi import FastAPI
from starlette.graphql import GraphQLApp
from modules.mcmodel import MCModel
import config
def get_settings():
return config.Settings()
settings = get_settings()
mc_model = MCModel()
class Query(graphene.ObjectType):
markovify = graphene.String(index=graphene.Int(default_value=0))
def resolve_markovify(self, info, index):
mc_model.load_model(settings.get_mc_model(index))
return mc_model.generate_sentence()
app = FastAPI()
app.add_route('/', GraphQLApp(schema=graphene.Schema(query=Query)))
```
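For reference, the app exposes a single `markovify` field; a minimal sketch of querying it over HTTP, assuming the server runs locally on port 8000 and that `requests` is installed:
```python
import requests

# GraphQL over HTTP: POST a JSON body with a "query" key; the optional
# index argument selects which model path config.Settings() resolves.
resp = requests.post(
    'http://localhost:8000/',
    json={'query': '{ markovify(index: 0) }'},
)
print(resp.json()['data']['markovify'])
```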
{
  "source": "0-jam/soundtest",
  "score": 3
}
#### File: 0-jam/soundtest/nbsoundtest.py
```python
from modules.alarm import NBAlarm
import time
def main():
nba = NBAlarm()
for i in range(3):
print('attempt:', i)
nba.play()
start_time = time.time()
while nba.is_streaming_active():
print('elapsed time: {:0.3f} sec'.format(time.time() - start_time), end='\r', flush=True)
print()
nba.stop()
if __name__ == "__main__":
main()
```
#### File: 0-jam/soundtest/soundtest.py
```python
from pathlib import Path
from modules.alarm import Alarm
def main():
alarm = Alarm(sound_file_path=Path('data/alarm-clock-elapsed.wav').resolve())
try:
alarm.play()
except KeyboardInterrupt:
print('Interrupted')
finally:
alarm.close()
try:
alarm.change_sound_file('data/phone-incoming-call.wav')
alarm.play()
except KeyboardInterrupt:
print('Interrupted')
finally:
alarm.close()
if __name__ == '__main__':
main()
```
{
  "source": "0-jam/utanet_scraper",
  "score": 3
}
#### File: 0-jam/utanet_scraper/sqlite_converter.py
```python
import argparse
import json
import sqlite3
from pathlib import Path
def main():
    parser = argparse.ArgumentParser(description='Convert JSON files extracted by utanet_scraper.py into a SQLite DB')
    parser.add_argument('json_dir', type=str, help='directory containing the JSON files')
    parser.add_argument('sqlite_file', type=str, help='path of the SQLite file to write')
args = parser.parse_args()
sqlite_file = Path(args.sqlite_file)
sqlite_connection = sqlite3.connect(sqlite_file)
sqlite_cursor = sqlite_connection.cursor()
sqlite_cursor.execute('''
create table if not exists utanet_songs(
song_id int primary key,
title text,
lyric text,
artist text,
lyricist text,
composer text
)
''')
query_string = '''
insert into utanet_songs(song_id, title, lyric, artist, lyricist, composer)
values (?, ?, ?, ?, ?, ?)
'''
for json_path in Path(args.json_dir).iterdir():
with json_path.open() as json_file:
song_dict = json.load(json_file)
        print('Processing:', json_path.name)
song_id = int(json_path.stem)
song_data = tuple(song_dict.values())[0]
query_values = (
song_id,
song_data['title'],
song_data['lyric'],
song_data['artist'],
song_data['lyricist'],
song_data['composer'],
)
sqlite_cursor.execute(query_string, query_values)
sqlite_connection.commit()
sqlite_connection.close()
    print('Done')
if __name__ == "__main__":
main()
```
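A quick way to sanity-check the converted database, sketched with a hypothetical file name:
```python
import sqlite3

# Print a few converted rows ('songs.db' stands in for the real path).
conn = sqlite3.connect('songs.db')
for song_id, title, artist in conn.execute(
        'SELECT song_id, title, artist FROM utanet_songs LIMIT 5'):
    print(song_id, title, artist)
conn.close()
```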
{
  "source": "0Jihad/django_local_librarys",
  "score": 2
}
#### File: django_local_librarys/blogs/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Post, Profile
from .forms import PostForm, UserForm, ProfileForm
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.contrib import messages
#from django.views.generic import TemplateView
########################################POST###################################
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blogs/post_list.html', {'posts': posts})
#path('post/<int:pk>/', views.post_detail, name='post_detail')
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blogs/post_detail.html', {'post': post})
#@login_required
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
#post.published_date = timezone.now()#review post uncomment for no review
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blogs/post_edit.html', {'form': form})
@login_required
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
#post.published_date = timezone.now()#review post uncomment for no review
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blogs/post_edit.html', {'form': form})
@login_required
def post_draft_list(request):  # post approvals (uncomment published_date in post_new to skip review)
posts = Post.objects.filter(published_date__isnull=True).order_by('created_date')
return render(request, 'blogs/post_draft_list.html', {'posts': posts})
@login_required
def post_publish(request, pk):  # publish button for the draft-review workflow
post = get_object_or_404(Post, pk=pk)
post.publish()
return redirect('post_detail', pk=pk)
@login_required
def post_remove(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('post_list')
############################################################
#####################PROFILE###################################################
###############################################################################
@login_required
@transaction.atomic
def update_profile(request):
if request.method == 'POST':
user_form = UserForm(request.POST, instance=request.user)
profileForm = ProfileForm(request.POST, instance=request.user.profile)
if user_form.is_valid() and profileForm.is_valid():
            user_form.save()
            profile = profileForm.save(commit=False)
            profile.published_date = timezone.now()
            profile.save()
messages.success(request, 'Your profile was successfully updated!')
return redirect('home')
else:
messages.error(request, 'Please correct the error below.')
else:
user_form = UserForm(instance=request.user)
profileForm = ProfileForm(instance=request.user.profile)
return render(request, 'blogs/profile.html', {
'user_form': user_form,
'profileForm': profileForm
})
###############################################################################
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import render, redirect
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.template.loader import render_to_string
from .forms import SignUpForm
from .tokens import account_activation_token
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
user.refresh_from_db()
            user.is_active = False  # inactive until email confirmation
user.save()
current_site = get_current_site(request)
subject = 'Activate Your: MySite Account'
message = render_to_string('blogs/account_activation_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
user.email_user(subject, message)
return redirect('account_activation_sent')#########################
#login(request, user, backend='django.contrib.auth.backends.ModelBackend')
#return redirect('home')
else:
form = SignUpForm()
return render(request, 'registration/signup.html', {'form': form})
def account_activation_sent(request):
return render(request, 'blogs/account_activation_sent.html')
from django.contrib.auth import login
from django.contrib.auth.models import User
#from django.shortcuts import render, redirect
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
#from .tokens import account_activation_token
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.profile.email_confirmed = True
user.save()
login(request, user)
return redirect('home')
else:
return render(request, 'blogs/account_activation_invalid.html')#
from django.core.mail import send_mail
# Example only -- calling send_mail at module level would fire on every import:
# send_mail('noreply', 'body of the message', '<EMAIL>', ['<EMAIL>', '<EMAIL>'])
###############################################################################
from django.contrib.admin import helpers
from django.contrib import auth
#...
def logins(request):
if request.user.is_authenticated:
return redirect('admin_page')
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(username=username, password=password)
if user is not None:
# correct username and password login the user
auth.login(request, user)
return redirect('admin_page')
else:
return redirect('home')
return render(request, 'registration/logins.html')
def logout(request):
auth.logout(request)
return render(request,'registration/logout.html')
def admin_page(request):
if not request.user.is_authenticated:
return redirect('blog_login')
return render(request, 'registration/admin.html')
def home(request):
"""
Home page
"""
# If a user is authenticated then redirect them to the user page
if request.user.is_authenticated:#user page
posts = Post.objects.filter(author=User.objects.get(username=request.user.username)).order_by('published_date')
return render(request, 'blogs/post_list.html', {'posts': posts})
else:#general page
return render(request, 'blogs/account_update.html')
##############################################################################
#SEARCHING
from django.contrib.auth.models import User
from django.shortcuts import render
from .forms import UserFilter
def search(request):
user_list = User.objects.all()
user_filter = UserFilter(request.GET, queryset=user_list)
return render(request, 'registration/user_list.html', {'filter': user_filter})
from .models import Students
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate
from django.conf import settings as django_settings  # aliased: a view named `settings` is defined below
def index(request):
next = request.GET.get('next', '/admin_page')
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
data = Students.objects.all()
stu = {"student_number": data}
return render(request, 'blogs/profiles.html', {'data': stu})
else:
HttpResponse("Inactive User.")
else:
print("User Not Found!", next)
            return HttpResponseRedirect(django_settings.LOGIN_URL)
return render(request, 'blogs/profiles.html', {'redirect_to':next})
###############################################################################
################################################################################
#ACCOUNT MANAGMENT
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from social_django.models import UserSocialAuth
@login_required
def settings(request):
user = request.user
try:
github_login = user.social_auth.get(provider='github')
except UserSocialAuth.DoesNotExist:
github_login = None
try:
twitter_login = user.social_auth.get(provider='twitter')
except UserSocialAuth.DoesNotExist:
twitter_login = None
try:
facebook_login = user.social_auth.get(provider='facebook')
except UserSocialAuth.DoesNotExist:
facebook_login = None
can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())
return render(request, 'registration/settings.html', {
'github_login': github_login,
'twitter_login': twitter_login,
'facebook_login': facebook_login,
'can_disconnect': can_disconnect
})
@login_required
def password(request):
if request.user.has_usable_password():
PasswordForm = PasswordChangeForm
else:
PasswordForm = AdminPasswordChangeForm
if request.method == 'POST':
form = PasswordForm(request.user, request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
messages.success(request, 'Your password was successfully updated!')
return redirect('post_list')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordForm(request.user)
return render(request, 'registration/password.html', {'form': form})
###############################################################################
from .forms import DocumentForm
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
else:
form = DocumentForm()
return render(request, 'blogs/model_form_upload.html', {
'form': form
})
from .forms import StudentForm
def StudentReg(request):
if request.method == 'POST':
form = StudentForm(request.POST)
if form.is_valid():
form.save()
return redirect('home')
else:
form = StudentForm()
return render(request, 'blogs/student_reg.html', {'form': form})
```
{
  "source": "0just0/ibench",
  "score": 2
}
#### File: ibench/benchmarks/cholesky.py
```python
import numpy as np
import scipy.linalg
from .bench import Bench
class Cholesky(Bench):
sizes = {'large': 40000, 'small': 10000, 'tiny': 2000, 'test': 2}
def _ops(self, n):
return n*n*n/3.0*1e-9
def _make_args(self, n):
self._A = np.asarray(np.random.rand(n,n), dtype=self._dtype)
self._A = np.asfortranarray(self._A*self._A.transpose() + n*np.eye(n))
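        # The elementwise product A*A.T is symmetric, and adding n*np.eye(n)
        # makes it diagonally dominant (entries of A lie in [0, 1]), hence
        # positive definite -- a valid input for the Cholesky factorization.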
def _compute(self):
scipy.linalg.cholesky(self._A, lower=False, overwrite_a=True, check_finite=False)
```
#### File: ibench/benchmarks/dot.py
```python
import numpy as np
from .bench import Bench
class Dot(Bench):
sizes = {'large': 10000, 'small': 5000, 'tiny': 1000, 'test': 2}
def _ops(self, n):
return 2E-9 * n*n*n
def _make_args(self, n):
self._A = np.asarray(np.random.rand(n, n), dtype=self._dtype)
self._B = np.asarray(np.random.rand(n, n), dtype=self._dtype)
self._C = np.asarray(np.random.rand(n, n), dtype=self._dtype)
def _compute(self):
self._A.dot(self._B, out=self._C)
```
#### File: ibench/benchmarks/fft.py
```python
import math
import numpy as np
import scipy.fftpack
from .bench import Bench
class Fft(Bench):
sizes = {'large': 1000000, 'small': 1000000, 'tiny': 52000, 'test': 100}
# If you change the value of runs, change native.cpp as well
_runs = 1000
def _ops(self, n):
# This is not an actual flop count; it is simply a convenient scaling,
# based on the fact that the radix-2 Cooley-Tukey algorithm asymptotically
# requires 5 N log2(N) floating-point operations.
# http://www.fftw.org/speed/method.html
return self._runs*5*n*math.log(n,2)*1e-9
def _make_args(self, n):
self._A = np.asarray(np.random.rand(n), dtype=np.complex128)
def _compute(self):
for i in range(self._runs):
            scipy.fftpack.fft(self._A, overwrite_x=True)
```
#### File: ibench/benchmarks/lregressionfit.py
```python
import sklearn
import numpy as np
import multiprocessing
from numpy.random import rand
from sklearn import linear_model
from .bench import Bench
if sklearn.__version__ == '0.18.2':
sklearn.utils.validation._assert_all_finite = lambda X: None
class Lregressionfit(Bench):
"""
Benchmark for Linear Regression Prediction from Scikit-learn
Attempts to utilize parallelism for larger datasets
"""
sizes = {'large': 1000000, 'small': 800000, 'tiny': 100000, 'test': 1000}
def _ops(self, n):
return 2E-9 * n*n*n
def _make_args(self, n):
p = int(np.log(n)+100)
self._X = rand(n,p)
self._y = rand(n)
if n < 8000:
self._regr = linear_model.LinearRegression()
else:
self._regr = linear_model.LinearRegression(n_jobs=-1)
self._regr.fit(self._X,self._y)
def _compute(self):
self._trained_model = self._regr.predict(self._X)
```
#### File: ibench/benchmarks/svm.py
```python
import numpy as np
import sklearn, sklearn.utils
import sklearn.svm as svm
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score
from .bench import Bench
sklearn._ASSUME_FINITE = True
if sklearn.__version__ == '0.18.2':
sklearn.utils.validation._assert_all_finite = lambda X: None
features = [10, 50, 100, 200, 400, 800, 1000, 2000]
vectors = [1000, 2000, 4000, 10000, 20000]
class Svm(Bench):
"""
    Benchmark for SVM classification (linear-kernel SVC fit) from Scikit-learn
    Fits the classifier on generated datasets of increasing size
"""
sizes = {'large': 5, 'small': 3, 'tiny': 2, 'test': 1}
def _gen_datasets(self, features, vectors, classes, dest='data'):
"""Generate classification datasets in binary .npy files
features: a list of feature lengths to test
vectors: a list of sample lengths to test
classes: number of classes (2 for binary classification dataset)
"""
self._X, self._y = make_classification(n_samples=vectors, n_features=features, n_informative=features, n_redundant=0, n_classes=classes, random_state=0)
return self._X, self._y
def _ops(self, n):
return 2E-9 * n
def _make_args(self, n):
        self._X, self._y = self._gen_datasets(features[n - 1], vectors[n - 1], 2)
self._clf = svm.SVC(C=0.01, kernel='linear', max_iter=10000, tol=1e-16, shrinking=True)
def _compute(self):
self._clf.fit(self._X, self._y)
```
#### File: ibench/ibench/__main__.py
```python
import argparse
import os
import sys
from .cmds import run
from .cmds import configs
cmds = [run, configs]
def parse_args():
"""
Does the initial argument scope and parsing
by bringing in run+configs and matching them
with the typed arguments.
"""
parser = argparse.ArgumentParser("ibench")
parser.set_defaults(func=None)
subparsers = parser.add_subparsers()
for cmd in cmds:
cmd.add_parser(subparsers)
return parser.parse_args()
# Load plugins first so it can modify everything that follows
if 'IBENCH_PLUGINS' in os.environ:
for plugin in os.environ['IBENCH_PLUGINS'].split(' '):
__import__(plugin)
# Parse arguments and intiate run
args = parse_args()
if args.func:
args.func(args)
```
#### File: ibench/tests/test_run.py
```python
import subprocess
def test_run_plugin():
subprocess.check_call('IBENCH_PLUGINS="os sys" python -m ibench run -b cholesky --size test --file foo', shell=True)
def test_run():
subprocess.check_call('python -m ibench run', shell=True)
def test_run_simple():
subprocess.check_call('python -m ibench run -b cholesky --size test --file foo', shell=True)
def test_run_sizes():
subprocess.check_call('python -m ibench run -b fft --size test --file foo', shell=True)
subprocess.check_call('python -m ibench run -b fft --size small --file foo', shell=True)
subprocess.check_call('python -m ibench run -b fft --size large --file foo', shell=True)
def test_run_groups():
subprocess.check_call('python -m ibench run -b linalg --size test --file foo', shell=True)
```
{
  "source": "0K4T3/jsonpkt",
  "score": 3
}
#### File: jsonpkt/commands/sendrecv.py
```python
import json
from scapy import all as scapy
from jsonpkt.commands import BaseCommand
from jsonpkt.utils import parse_packet_json, to_packet_json
class SendRecvCommand(BaseCommand):
def process(self, file: str, layer: int = 3):
with open(file) as fp:
packet_json = json.load(fp)
packet = parse_packet_json(packet_json)
sender = scapy.srp1 if layer < 3 else scapy.sr1
res = sender(packet, verbose=False)
packet_json = to_packet_json(res)
print(json.dumps(packet_json, indent=2))
```
#### File: jsonpkt/jsonpkt/utils.py
```python
import os
import typing
from functools import reduce
from operator import truediv
from scapy import all as scapy
def replace_with_environment_var(value: typing.Union[int, str]) -> typing.Union[int, str]:
if isinstance(value, str) and value.startswith('${') and value.endswith('}'):
value = value[len('${'):-len('}')]
value = os.getenv(value, '')
return value
def resolve_environment_vars(packet_json: dict) -> dict:
for key, value in packet_json.items():
if key == 'payload':
continue
packet_json[key] = replace_with_environment_var(value)
return packet_json
def parse_packet_json(packet_json: dict) -> scapy.Packet:
packets = []
while packet_type := next(iter(packet_json), None):
packet = packet_json.get(packet_type, {})
packet = resolve_environment_vars(packet)
payload = packet.pop('payload', {})
scapy_packet = getattr(scapy, packet_type, None)
if scapy_packet:
packets.append(scapy_packet(**packet))
packet_json = payload
return reduce(truediv, packets)
def to_packet_json(packet: scapy.Packet) -> dict:
packet_jsons = []
while not isinstance(packet, scapy.packet.NoPayload):
packet_type = type(packet).__name__
packet_fields = {
key: value if isinstance(value, int) else str(value)
for key, value in packet.fields.items()
if key != 'options'
}
packet_jsons.append({ packet_type: packet_fields })
packet = packet.payload
for i in range(len(packet_jsons) - 1):
if not packet_jsons[i]:
continue
packet_type, packet_fields = list(packet_jsons[i].items())[0]
packet_fields['payload'] = packet_jsons[i + 1]
return packet_jsons[0]
```
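To illustrate the JSON shape `parse_packet_json` expects: each key names a scapy layer class, `payload` nests the next layer, and the collected layers are chained with scapy's `/` operator via `reduce(truediv, packets)`. A small sketch with a hypothetical packet description:
```python
from jsonpkt.utils import parse_packet_json

# An IP layer carrying an ICMP payload; '${VAR}' string values would be
# substituted from the environment by resolve_environment_vars().
packet_json = {
    'IP': {
        'dst': '192.0.2.1',
        'payload': {'ICMP': {}},
    }
}
packet = parse_packet_json(packet_json)
# Equivalent to scapy.IP(dst='192.0.2.1') / scapy.ICMP()
print(packet.summary())
```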
{
  "source": "0KABE/Subscription-Parser",
  "score": 3
}
#### File: Subscription-Parser/sub_parser/utils.py
```python
import base64
from typing import AnyStr, Dict
import aiohttp
async def download(url: str, result: Dict[str, str]):
""" async download method """
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
result[url] = await response.text()
def base64_decode(data: AnyStr) -> str:
    """Return a base64-decoded string, restoring any missing '=' padding."""
    if isinstance(data, str):
        data = data.encode()
    data += b'=' * (-len(data) % 4)
    return base64.b64decode(data).decode()
def base64_encode(data: AnyStr) -> str:
    """Return a base64-encoded string."""
    if isinstance(data, str):
        data = data.encode()
    return base64.b64encode(data).decode()
def urlsafe_base64_decode(data: AnyStr) -> str:
    """Return a urlsafe-base64-decoded string, restoring any missing '=' padding."""
    if isinstance(data, str):
        data = data.encode()
    data += b'=' * (-len(data) % 4)
    return base64.urlsafe_b64decode(data).decode()
def urlsafe_base64_encode(data: AnyStr) -> str:
    """Return a urlsafe-base64-encoded string with '=' padding stripped."""
    if isinstance(data, str):
        data = data.encode()
    return base64.urlsafe_b64encode(data).decode().rstrip("=")
```
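A short demonstration of the padding repair these helpers perform (`-len(data) % 4` computes how many '=' characters a well-formed base64 string lacks):
```python
# 'hello' encodes to 'aGVsbG8=' (one padding char); many subscription
# feeds strip the '=', which plain b64decode would reject.
assert base64_encode('hello') == 'aGVsbG8='
assert base64_decode('aGVsbG8') == 'hello'          # padding restored
assert urlsafe_base64_encode('hello') == 'aGVsbG8'  # padding stripped
```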
{
  "source": "0kalekale/anitracc",
  "score": 2
}
#### File: anitracc/anitracc/server.py
```python
import anilistpy
from flask import Flask, session, redirect, url_for, request, render_template
#LOCAL IMPORTS
from search import animeS,mangaS
from page import animeP,mangaP
from user import display_userpage
from mutation import update_progress, update_status_
from config import writetoken, readtoken
app = Flask(__name__, template_folder='templates')
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template("index.html")
@app.route('/<medium>/<id>', methods=['GET', 'POST'])
def page(medium, id):
if medium == "anime":
return animeP(id)
elif medium == "manga":
return mangaP(id)
@app.route('/search/<medium>/<sQ>')
def search(medium, sQ):
if medium == "anime":
rt = animeS(sQ)
elif medium == "manga":
rt = mangaS(sQ)
return rt
@app.route('/search', methods=['GET', 'POST'])
def searchPAGE():
return render_template("search.html")
@app.route('/result', methods=['GET', 'POST'])
def result():
_opt = request.form.get("mat")
_sq = request.form.get("sq")
rd = r'/search/' + _opt + r'/' + _sq
return redirect(rd)
@app.route('/user/<username>/')
def userpage(username):
# rt = 'user: ' + username
return display_userpage(username)
@app.route('/mutation/progress', methods=['GET', 'POST'])
def mutation():
    media = request.args.get('media', default='', type=str)
    id = request.args.get('id', default='', type=int)
progress = request.form.get("progress")
token = readtoken()
return update_progress(id, token, progress, media)
@app.route('/mutation/status', methods=['GET', 'POST'])
def mutationstatus():
media = request.args.get('media', default = '', type = str)
id = request.args.get('id', default = '', type = int)
status = request.form.get("status")
token = readtoken()
return update_status_(id, token, status, media)
@app.route('/login')
def login():
return render_template("login.html")
@app.route('/login/write', methods=['GET', 'POST'])
def write():
print("write")
token = request.form.get("password")
return writetoken(token)
if __name__ == "__main__":
app.run()
```
{
  "source": "0k/anodb",
  "score": 3
}
#### File: src/anodb/anodb_clear.py
```python
from . import common
from kids.cmd import cmd
class Command(common.DbCommand):
"""Clear objects (tables, fields)
Will remove data from target database
Usage:
%(std_usage)s
"""
@cmd
def table(self, table, quiet=False, dry_run=False):
"""Clear Target Table in Target Database
Remove all records from specified table without deleting it.
Usage:
%(std_usage)s
%(surcmd)s TABLE [-q|--quiet] [--dry-run]
Options:
%(std_options)s
--dry-run Do nothing and print the SQL that would be executed.
-q, --quiet Suppress outputs.
"""
query = "DELETE FROM %s" % table
if dry_run:
            print(self.db.mogrify(query))
else:
self.db.execute(query)
self._connection.commit()
```
#### File: src/anodb/anodb_script.py
```python
import yaml
from . import common
from . import anodb_replace
from . import anodb_clear
from kids.cmd import msg
class Command(common.DbCommand):
"""Script
Load YML script and load it
Usage:
%(std_usage)s
%(surcmd)s SCRIPT [--dry-run] [-q|--quiet]
Options:
%(std_options)s
SCRIPT YAML script name to load and execute
--dry-run Do nothing and print the SQL that would be executed.
-q, --quiet Suppress outputs.
"""
def __call__(self, script, quiet=False, dry_run=False):
        with open(script) as f:
            s = yaml.safe_load(f)
##
## Environment
##
env_code = s.get('env', '')
env = {}
if env_code:
try:
code = compile(env_code, '<env>', 'exec')
exec(code, env)
except SyntaxError as e:
raise SyntaxError(
'Syntax error in provided env ('
'Line %i offset %i)' % (e.lineno, e.offset))
env = dict((k, v) for k, v in env.items()
if k != "__builtins__")
##
## Actions
##
actions = s.get('actions', '')
if actions:
for nb_action, action in enumerate(actions):
for name, sub_actions in action.items():
if name == 'replace':
cmd = anodb_replace.Command(env=env)
for sub_action in sub_actions:
for subcmd, sub_actions2 in sub_action.items():
if subcmd == 'field':
subcmd = getattr(cmd, subcmd)
for params in sub_actions2:
kwargs = {}
for tablename, arguments in params.items():
for label, value in arguments.items():
if label.startswith('--'):
label = label[2:]
kwargs[label] = value
continue
subcmd(tablename, label, value, dry_run=dry_run, quiet=quiet, **kwargs)
else:
msg.die("Invalid subcommand %r for action %r." % (subcmd, name))
elif name == 'clear':
cmd = anodb_clear.Command()
for sub_action in sub_actions:
for subcmd, sub_actions2 in sub_action.items():
if subcmd == 'table':
subcmd = getattr(cmd, subcmd)
for tablename in sub_actions2:
kwargs = {}
subcmd(tablename, dry_run=dry_run, quiet=quiet, **kwargs)
else:
msg.die("Invalid subcommand %r for action %r." % (subcmd, name))
else:
msg.die("Invalid action %r (action number %d)" % (name, nb_action + 1))
```
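The parsing loop above implies a script layout with a top-level `env` string of Python source and an `actions` list whose items are keyed by `replace` or `clear`. Shown as the parsed structure rather than YAML, a hypothetical script might look like:
```python
# What yaml.safe_load() would hand to __call__ for a minimal script:
script = {
    'env': "prefix = 'demo_'",   # compiled and exec'd into the env dict
    'actions': [
        {'clear': [              # action name
            {'table': [          # subcommand; one table() call per name
                'sales_order',
                'sales_order_line',
            ]},
        ]},
    ],
}
```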
#### File: src/anodb/common.py
```python
import psycopg2
import psycopg2.extras
from kids.cache import cache
from kids.cmd import BaseCommand
class DbCommand(BaseCommand):
@cache
@property
def _connection(self):
try:
return psycopg2.connect(dsn="")
except (TypeError, psycopg2.OperationalError):
raise SystemError(
"Please set correct PG connection information in current shell "
"environment. (ie: PGUSER, PGDATABASE, PGPASSWORD, PGHOST...)")
@cache
@property
def db(self):
return self._connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
```
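Since `psycopg2.connect(dsn="")` defers entirely to libpq defaults, the connection is configured through the environment, as the error message says. A hypothetical setup:
```python
import os

# Equivalent to exporting these in the shell before running the command.
os.environ.update({
    'PGHOST': 'localhost',
    'PGDATABASE': 'erp',
    'PGUSER': 'anodb',
    'PGPASSWORD': 'secret',
})
```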
{
  "source": "0Karim/Concept-Task-with-python",
  "score": 3
}
#### File: 0Karim/Concept-Task-with-python/hello.py
```python
import ply.lex as lex
import ply.yacc as yacc
tokens = [
'NUMBER',
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'LPAREN',
'RPAREN',
'FOR',
'ID',
'EQUALE' ,
'GREATER' ,
'SMALLER' ,
'TRUE',
'SEMICOLOMN',
'plus_plus',
'minus_minus'
]
# Reserved words; matched by t_ID below and remapped to their token type
reserved = {
    'for': 'FOR',
    'true': 'TRUE',
}
# Regular expression rules for simple tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_EQUALE = r'='
t_GREATER = r'>'
t_SMALLER = r'<'
t_SEMICOLOMN = r';'
t_plus_plus = r'\+\+'
t_minus_minus = r'--'
# Identifiers: longest match wins, then reserved words are re-tagged
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    t.type = reserved.get(t.value, 'ID')
    return t
# A regular expression rule with some action code
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lexer = lex.lex()
# Test it out
# Give the lexer some input
data = input()
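# Example input accepted by the for-loop grammar below: for(i=0;i<9;i++)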
lexer.input(data)
# Tokenize
while True:
tok = lexer.token()
if not tok:
break # No more input
print(tok)
def p_forstmt(p):
''' for_stmt : FOR LPAREN INIT SEMICOLOMN LOGICEXP SEMICOLOMN ASSIMENT RPAREN '''
def p_assimenr(p) :
''' ASSIMENT : ID EQUALE TERM PLUS EXPRESSION
| ID EQUALE TERM MINUS EXPRESSION
| ID EQUALE EXPRESSION
| ID plus_plus
| ID minus_minus
'''
def p_init (p) :
''' INIT : ID EQUALE TERM PLUS EXPRESSION
| ID EQUALE TERM MINUS EXPRESSION
| ID EQUALE EXPRESSION
| ID
'''
def p_logic_expression(p) :
'''LOGICEXP : ID GREATER EXPRESSION
| ID SMALLER EXPRESSION
| ID GREATER EQUALE EXPRESSION
| ID SMALLER EQUALE EXPRESSION
| ID EQUALE EQUALE EXPRESSION
| TRUE
'''
def p_expression(p):
''' EXPRESSION : EXPRESSION PLUS TERM
| EXPRESSION MINUS TERM
| TERM
'''
def p_TERM(p):
'''TERM : ID TIMES FACTOR
| ID MINUS FACTOR
| FACTOR
| ID
'''
def p_FACTOR(p) :
''' FACTOR : NUMBER TIMES NUMBER
| NUMBER DIVIDE NUMBER
| NUMBER
'''
def p_error(p):
print("Syntax error !")
# Build the parser
parser = yacc.yacc()
result = parser.parse(data)
print(result)
```
{
  "source": "0--key/lib",
  "score": 2
}
#### File: apps/0--key/gafilman.py
```python
import os
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import blobstore
#import models
from google.appengine.ext import db
from models import Stuff
def doRender(handler, tname='index.htm', values={}):
temp = os.path.join(os.path.dirname(__file__), 'templates/' + tname)
if not os.path.isfile(temp):
return False
# Make a copy of the dictionary and add the path
newval = dict(values)
newval['path'] = handler.request.path
outstr = template.render(temp, newval)
handler.response.out.write(outstr)
return True
class FileManager(webapp.RequestHandler):
def get(self):
if self.request.get("myfile"):
way = 'file is saved'
else: way = 'initial'
default = str(users.get_current_user())
doRender(self,'files.htm', {'msg1' : default,
'msg2' : way})
class SaveBlob(webapp.RequestHandler):
def post(self):
# default=users.get_current_user()
if self.request.get('myfile'):
# myfile = self.request.get('myfile')
new_file = Stuff()
# self.new_file.owner = self.users.get_current_user()
new_file.pulp = self.request.get('myfile')
new_file.put()
            way = 'File was put to the datastore'
        else: way = 'File does not exist'
self.redirect("/files/")
class UploadFile(webapp.RequestHandler):
def get(self):
# action = self.request.get('action')
doRender(self,'files_upload.htm', {'msg' : 'This is UploadFile handler :-)',
# 'action' : action
})
def main():
application = webapp.WSGIApplication([
('/files/UploadFile', UploadFile),
('/files/SaveBlob', SaveBlob),
('/.*', FileManager)
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
```
#### File: edu/ae-08-login/index.py
```python
import os
import logging
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
def doRender(handler, tname='index.htm', values={}):
temp = os.path.join(os.path.dirname(__file__), 'templates/' + tname)
if not os.path.isfile(temp):
return False
# Make a copy of the dictionary and add the path
newval = dict(values)
newval['path'] = handler.request.path
outstr = template.render(temp, newval)
handler.response.out.write(outstr)
return True
class LoginHandler(webapp.RequestHandler):
def get(self):
doRender(self, 'loginscreen.htm')
def post(self):
acct = self.request.get('account')
pw = self.request.get('password')
if pw == '' or acct == '':
doRender(self, 'loginscreen.htm', {'error' : 'Please specify Acct and PW'} )
elif pw == 'secret':
doRender(self,'loggedin.htm',{ } )
else:
doRender(self, 'loginscreen.htm', {'error' : 'Incorrect password'} )
class MainHandler(webapp.RequestHandler):
def get(self):
path = self.request.path
if doRender(self,path) :
return
doRender(self,'index.htm')
def main():
application = webapp.WSGIApplication([('/login', LoginHandler),
('/.*', MainHandler)], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
```
#### File: apps/Placebo/medhelper.py
```python
import os
import logging
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
from models import Placebo
def next_id(ID):
ID_chunks = ID.split('.')
count_chunks = len(ID_chunks)
last_chunk = ID_chunks[count_chunks - 1]
ID_chunks[count_chunks - 1] = str(int(last_chunk)+1)
ID = '.'.join(ID_chunks)
return ID
def doRender(handler, tname='helper.htm', values={}):
temp = os.path.join(os.path.dirname(__file__), 'templates/' + tname)
if not os.path.isfile(temp):
return False
# Make a copy of the dictionary and add the path
newval = dict(values)
newval['path'] = handler.request.path
outstr = template.render(temp, newval)
handler.response.out.write(outstr)
return True
class MainHandler(webapp.RequestHandler):
def get(self):
developer = str(self.request.get('developer'))
concept = str(self.request.get('concept')).lower()
category = str(self.request.get('category')).lower()
taxonomy = str(self.request.get('taxonomy')).lower()
result = []
warnings = ''
if concept == '' and category == '' and taxonomy == '':
warnings = 'You need to specify the searching words'
category = taxonomy = concept = num_entities = ''
else:
if concept != '' and category != '' and taxonomy != '':
query_str = "SELECT * FROM Placebo WHERE concept_words = '%s' AND category_words = '%s' AND taxonomy_words = '%s'" % (concept, category, taxonomy)
# for pairs
if concept == '' and category == '' and taxonomy != '':
query_str = "SELECT * FROM Placebo WHERE taxonomy_words = '%s' AND developer = '%s'" % (taxonomy, developer)
if concept == '' and category != '' and taxonomy == '':
query_str = "SELECT * FROM Placebo WHERE category_words = '%s' AND developer = '%s'" % (category, developer)
if concept != '' and category == '' and taxonomy == '':
query_str = "SELECT * FROM Placebo WHERE concept_words = '%s' AND developer = '%s'" % (concept, developer)
# and single absence too:
if concept == '' and category != '' and taxonomy != '':
query_str = "SELECT * FROM Placebo WHERE category_words = '%s' AND taxonomy_words = '%s' AND developer = '%s'" % (category, taxonomy, developer)
if category == '' and concept != '' and taxonomy != '':
query_str = "SELECT * FROM Placebo WHERE concept_words = '%s' AND taxonomy_words = '%s' AND developer = '%s'" % (concept, taxonomy, developer)
if taxonomy == '' and category != '' and concept != '':
query_str = "SELECT * FROM Placebo WHERE concept_words = '%s' AND category_words = '%s' AND developer = '%s'" % (concept, category, developer)
query_str = query_str + " ORDER BY OID ASC"
            placebo = db.GqlQuery(query_str)
num_entities = int(placebo.count())
result.append(placebo)
template_values = {'placebo':result,'concept':concept,'warnings':warnings,
'category':category,'taxonomy':taxonomy,'num_entities':num_entities}
doRender(self,'helper.htm',template_values)
def main():
application = webapp.WSGIApplication([
('/.*', MainHandler),
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
```
#### File: portfolio/2013_OrSys/detector.py
```python
import MySQLdb
import logging
import datetime
from settings import db_password, db_name, MdbUser, MdbPassword, Mdb, MdbIP
from functions import getPhoneID, getFaxID, getAddressID
logging.basicConfig(filename='logs/detector.log', level=logging.DEBUG)
# logger = logging.getLogger(__name__)
def check_new_orders(lastOid):
"""New recent orders check up """
checkUp = False
db = MySQLdb.connect(host=MdbIP, user=MdbUser, passwd=<PASSWORD>, db=Mdb)
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT entity_id FROM sales_flat_order ORDER BY created_at DESC
LIMIT 1""")
lastMid = int(MySQL_c.fetchone()[0])
db.commit()
db.close()
if lastMid > lastOid:
checkUp = True
return checkUp
def getLastOid():
"""Retrieve last detected order id out from OrSys"""
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT magento_id FROM orders ORDER BY magento_time DESC LIMIT 1""")
lastOid = int(MySQL_c.fetchone()[0])
db.commit()
db.close()
return lastOid
def getFreshOrderDataSet(o_id):
"""Retrieve data about order and products inside it"""
logging.basicConfig(filename='logs/detector.log', level=logging.DEBUG)
#
db = MySQLdb.connect(host=MdbIP, user=MdbUser, passwd=<PASSWORD>, db=Mdb)
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT customer_firstname, customer_lastname, customer_email,
shipping_address_id, billing_address_id, created_at
FROM sales_flat_order WHERE entity_id=%s""", (o_id,))
(customer_firstname, customer_lastname, customer_email,
shipping_address_id, billing_address_id, mag_time) = MySQL_c.fetchone()
#
sh_address = getAddress(shipping_address_id)
b_address = getAddress(billing_address_id)
try:
cName = customer_firstname + ' ' + customer_lastname
except TypeError:
cName = 'corrupted'
        logging.debug('This is a corrupted customer name in order %s', o_id)
o_Data = {
'cName': cName,
'cEmail': customer_email, 'shAddress': sh_address,
'bAddress': b_address, 'magTime': mag_time, 'mID': o_id
}
MySQL_c.execute(
"""SELECT sku, qty_ordered, price, name FROM sales_flat_order_item
WHERE order_id=%s""", (o_id))
productsDataSet = MySQL_c.fetchall()
db.commit()
db.close()
return o_Data, productsDataSet
def getAddress(aID):
"""Retrieve address attributes from remote Magento DB"""
db = MySQLdb.connect(host=MdbIP, user=MdbUser, passwd=MdbPassword, db=Mdb)
MySQL_c = db.cursor()
aA = {}
MySQL_c.execute(
"""SELECT region, postcode, firstname, lastname, street, city, email,
telephone, fax FROM sales_flat_order_address WHERE entity_id=%s""",
(aID,))
(region, postcode, firstname, lastname, street, city, email, telephone,
fax) = MySQL_c.fetchone()
db.commit()
db.close()
(aA['region'], aA['postcode'], aA['firstname'], aA['lastname'],
aA['street'], aA['city'], aA['email'], aA['telephone'], aA['fax']) = (
region, postcode, firstname, lastname, street, city, email,
telephone, fax)
return aA
def insertOrder(o_Data):
"""Accordingly with the definition :-)"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
rTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
shippingAddress = o_Data['shAddress']
(province, ZIP,
firstname, lastname,
street, city,
email, telephone,
fax) = (
shippingAddress['region'], shippingAddress['postcode'],
shippingAddress['firstname'], shippingAddress['lastname'],
shippingAddress['street'], shippingAddress['city'],
shippingAddress['email'], shippingAddress['telephone'],
shippingAddress['fax']
)
    country = 'Australia'  # stub: hard-coded for now
shAid = getAddressID(street, city, province, ZIP, country, 1)
shPhid = getPhoneID(telephone, 1)
c_id = getCustomerID(firstname, lastname, email, shPhid, shAid)
prepData = (
o_Data['mID'], c_id, shAid, shPhid,
o_Data['magTime'], rTime, 'revocated')
MySQL_c.execute(
"""INSERT INTO orders (magento_id, customer_id, shipping_address_id,
shipping_phone_id, magento_time, orsys_reg_time, status) VALUES(%s, %s,
%s, %s, %s, %s, %s)""", prepData)
o_id = MySQL_c.lastrowid
db.commit()
db.close()
return o_id
def getCustomerID(firstName, lastName, email, shPhid, shAid):
"""Fetches customer id in table customers"""
logging.basicConfig(filename='logs/detector.log', level=logging.DEBUG)
# for simplification purposes let
# shipping address == billing address
try:
cName = firstName + ' ' + lastName
except TypeError:
cName = 'corrupted'
        logging.debug('This is a corrupted getCustomerID name for %s', email)
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id from customers WHERE customer_name=%s AND email=%s""",
(cName, email))
try:
cID = MySQL_c.fetchone()[0]
    except TypeError:  # fetchone() returned None -- no such customer yet
MySQL_c.execute(
"""INSERT INTO customers (customer_name, email, phone_num_id,
billing_address_id, shipping_address_id)
VALUES(%s, %s, %s, %s, %s)""",
(cName, email, shPhid, shAid, shAid))
cID = MySQL_c.lastrowid
db.commit()
db.close()
return cID
def processOrder(o_id, p_Data):
"""Order products data insertion into OrSys DB"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
for i in p_Data:
sku, qty_ordered, price, name = i
pID = getProductID(sku, name)
MySQL_c.execute(
"""INSERT INTO order_composition (order_id, product_id, price,
qty) VALUES(%s, %s, %s, %s)""", (o_id, pID, price,
qty_ordered))
db.commit()
db.close()
return True
def getProductID(sku, name):
"""Retrieves product ID by SKU"""
logging.basicConfig(filename='logs/detector.log', level=logging.DEBUG)
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id FROM products WHERE sku=%s""", (sku,))
try:
pID = MySQL_c.fetchone()[0]
# synonym detection:
MySQL_c.execute(
"""SELECT item_name FROM products WHERE id=%s""", (pID,))
itemName = MySQL_c.fetchone()[0]
if name != itemName:
msg = "Synonym detecded. Product with id %s have an old \
name: %s and a new one: %s" % (pID, itemName, name)
logging.debug(msg)
    except TypeError:  # fetchone() returned None -- unknown SKU, insert it
MySQL_c.execute(
"""INSERT INTO products (item_name, sku) VALUES(%s, %s)""",
(name, sku))
pID = MySQL_c.lastrowid
db.commit()
db.close()
return pID
# Detector itself:
lastOid = int(getLastOid()) + 1
while check_new_orders(lastOid): # there is (are) new orders there
logging.basicConfig(filename='logs/detector.log', level=logging.DEBUG)
oData, pDataSet = getFreshOrderDataSet(lastOid)
newOID = insertOrder(oData)
logging.debug("New order No %s was detected." % (newOID,))
processOrder(newOID, pDataSet)
lastOid = lastOid + 1
```
#### File: portfolio/2013_OrSys/dispatcher.py
```python
from flask import Flask, render_template, session, redirect, url_for, request
from flask import logging, g
from settings import users
from functions import fetch_pending_orders_data, fetch_products_data,\
fetch_suppliers_data, get_user_id, suppDataCheck, suppDataInsert, \
suppDataUpdate, setActiveTab, appendProduct, checkProduct, throw_product,\
fetch_invoices_data, removeProduct, getSupplierData, revocateOrder,\
sendPurchaseOrder, fetch_held_products, checkOTLock, grasp_product,\
eliminate_product
app = Flask(__name__)
@app.route('/')
def index():
"""Composes operator dashboard"""
if 'username' in session:
user = session['username']
logo = users.get(user).get('img')
else:
return redirect(url_for('login'))
if 'active_tab' not in session: # active tab defining
session['active_tab'] = 'orders' # <-- initial value
o_dataset, pii_data = fetch_pending_orders_data() # compose
otl = checkOTLock()
agg_products = fetch_products_data(pii_data) # tabs
supp_tab_data = fetch_suppliers_data()
p_invoices_tab_data = fetch_invoices_data('pending')
sent_PO_tab_data = fetch_invoices_data('sent')
heldP_tab_data = fetch_held_products()
a_tab = setActiveTab(session['active_tab'])
return render_template(
'index.htm', user=user, logo=logo, orders=o_dataset, o_t_lock=otl,
orders_agg=agg_products, agg_products_qty=len(agg_products),
active=a_tab, supp_data=supp_tab_data, pItab=p_invoices_tab_data,
        sItab=sent_PO_tab_data, hTd=heldP_tab_data
)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""A primitive authentication feature"""
if request.method == 'POST':
input_username = request.form['username']
input_password = request.form['password']
if (input_username in users and
users.get(input_username).get('password') == input_password):
session['username'] = input_username
session['userID'] = get_user_id(input_username)
return redirect(url_for('index'))
return render_template('login.htm')
@app.route('/logout')
def logout():
"""LogOut implementation"""
session.pop('username', None)
return redirect(url_for('login'))
@app.route('/addNewSupplier')
def addS_modal_form():
"""Modal for upload data about a new supplier"""
app.logger.debug('This is SupplierForm modal')
sup_data = {'city': 'Sydney', 'province': 'New South Wales'}
return render_template('addNewSupplierForm.htm', sup_data=sup_data)
@app.route('/editSupplier', methods=['GET'])
def editS_modal_form():
"""Modal for upload data about a new supplier"""
app.logger.debug('This is editSupplierForm')
sup_data = getSupplierData(request.args.get('s_id'))
return render_template('editSupplierForm.htm', sup_data=sup_data)
@app.route('/SupplierDataFiller', methods=['GET', 'POST'])
def supplierDataFill():
"""Manipulation with the input data and redirect"""
app.logger.debug('This is supplier data filler')
if request.method == 'POST':
(pure_data, check_up) = suppDataCheck(request.form)
if check_up == 'new':
suppDataInsert(pure_data, session['userID'])
session['active_tab'] = 'supplier'
elif check_up == 'known':
suppDataUpdate(pure_data, session['userID'])
session['active_tab'] = 'supplier'
elif check_up == 'update':
suppDataUpdate(pure_data, session['userID'])
session['active_tab'] = 'supplier'
return redirect(url_for('index'))
@app.route('/appendItem', methods=['GET', 'POST'])
def appendItem():
"""Includes product into invoice and redirect"""
app.logger.debug('This is appendItem to PO process')
if request.method == 'POST':
(prod_properties, check_up) = checkProduct(request.form)
if check_up:
appendProduct(prod_properties, session['userID'])
session['active_tab'] = 'p_agg'
return redirect(url_for('index'))
@app.route('/removeItem', methods=['GET', 'POST'])
def freeItem():
"""Removes product out from invoice and redirect"""
app.logger.debug('This is freeItem out from PO process')
if request.method == 'POST':
removeProduct(session['userID'], request.form['piID'])
session['active_tab'] = 'invoices'
return redirect(url_for('index'))
@app.route('/toggleOrder', methods=['GET'])
def toggleOrder():
"""Exclude or include order and its products out from
processing and redirect to index page"""
o_id = request.args.get('o_id')
app.logger.debug('This is revOrder id=%s' % (o_id,))
revocateOrder(o_id, session['username'])
session['active_tab'] = 'orders'
return redirect(url_for('index'))
@app.route('/sendPO', methods=['GET'])
def sendPurOrder():
"""Organize application output"""
i_id = request.args.get('i_id')
app.logger.debug('This is send purchase order with id=%s' % (i_id,))
sendPurchaseOrder(i_id, session['username'])
session['active_tab'] = 'invoices'
return redirect(url_for('index'))
@app.route('/graspProduct', methods=['GET'])
def graspProduct():
"""Move product to the pail"""
sku = request.args.get('p_id')
app.logger.debug('This is grasp product with sku=%s and userID=%s' %
(sku, session['userID']))
result = grasp_product(sku, session['userID'])
session['active_tab'] = 'p_agg'
return redirect(url_for('index'))
@app.route('/throwProduct', methods=['GET'])
def throwProduct():
"""Move product to the agg product tab"""
pipID = request.args.get('p_id')
app.logger.debug('This is throw product with ID=%s out from product pail \
and userID=%s' % (pipID, session['userID']))
result = throw_product(pipID, session['userID'])
session['active_tab'] = 'p_agg'
return redirect(url_for('index'))
@app.route('/eliminateProduct', methods=['GET'])
def eliminateProduct():
"""Move product to the trash"""
pipID = request.args.get('p_id')
app.logger.debug('This is eliminate product with ID=%s out from product\
pail and userID=%s' % (pipID, session['userID']))
result = eliminate_product(pipID, session['userID'])
session['active_tab'] = 'p_agg'
return redirect(url_for('index'))
app.secret_key = '<KEY>'
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
```
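`index()` and `login()` above imply that `settings.users` maps usernames to dicts carrying at least `password` and `img` keys; something along these lines (values hypothetical):
```python
# settings.py (sketch)
users = {
    'alice': {'password': '<PASSWORD>', 'img': 'alice.png'},
}
```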
#### File: portfolio/2013_OrSys/functions.py
```python
import MySQLdb
import logging
import datetime
import sendgrid
from flask import render_template, session
from twilio.rest import TwilioRestClient
from settings import users, db_password, db_name
from settings import TEST_EMAIL_FROM as faxMailFrom
from settings import ACCOUNT_SID as account
from settings import AUTH_TOKEN as token
from settings import TEST_PhN as PhN
from pdfGen import composePDF_PO
logging.basicConfig(filename='logs/func.log', level=logging.DEBUG)
def get_user_id(nick):
"""Initial auth function"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT id FROM authors WHERE nickname=%s""",
(nick, ))
UserID = MySQL_c.fetchone()[0]
db.commit()
db.close()
return UserID
def setActiveTab(session_a_tab):
"""Determines what tab should be open initially"""
a_tab = {'orders': True, 'p_agg': False, 'suppliers': False,
'invoices': False, 'hold': False} # <-- default value
if session_a_tab == 'supplier':
a_tab.update({
'orders': False, 'p_agg': False, 'suppliers': True,
'invoices': False, 'hold': False})
elif session_a_tab == 'p_agg':
a_tab.update({
'orders': False, 'p_agg': True, 'suppliers': False,
'invoices': False, 'hold': False})
elif session_a_tab == 'invoices':
a_tab.update({
'orders': False, 'p_agg': False, 'suppliers': False,
'invoices': True, 'hold': False})
elif session_a_tab == 'hold':
a_tab.update({
'orders': False, 'p_agg': False, 'suppliers': False,
'invoices': False, 'hold': True})
return a_tab
def fetch_pending_orders_data():
"""Extracts data about orders and their products compositon"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT * FROM orders WHERE magento_time >= (CURDATE() -
INTERVAL 3 DAY) ORDER BY status ASC, magento_time DESC""")
o_raw_dataset = MySQL_c.fetchall() # it's list of tuples
o_dataset = [] # orders empty list
pii_data = [] # products in order empty list
for o_raw_data in o_raw_dataset:
(o_id, mag_id, c_id, sh_id, sh_ph_id,
mag_time, tS, status) = o_raw_data
#
MySQL_c.execute(
"""SELECT customer_name FROM customers WHERE id=%s""", (c_id,))
c_name = MySQL_c.fetchone()[0]
# lets calculate the total sum at each order:
MySQL_c.execute("""SELECT SUM(price * qty) FROM order_composition \
WHERE order_id=%s""", (o_id,))
total_s = MySQL_c.fetchone()[0]
o_composition = []
MySQL_c.execute("""SELECT product_id, price, qty FROM \
order_composition WHERE order_id=%s ORDER BY qty DESC""", (o_id,))
product_in_order_raw_data = MySQL_c.fetchall()
for (product_id, p_price, qty) in product_in_order_raw_data:
MySQL_c.execute("""SELECT item_name, sku FROM products WHERE \
id=%s""", (product_id,))
(p_name, sku) = MySQL_c.fetchone()
p_details = {'sku': sku, 'p_name': p_name[:35],
'price': p_price, 'qty': qty}
o_composition.append(p_details)
if status == 'pending': # <--- exclude revocated orders and its
# products out from further processing
pii_data_row = (sku, p_name[:35], o_id, mag_id, p_price, qty)
pii_data.append(pii_data_row)
#
if status == 'pending':
pending = True
else:
pending = False
o_data = {
'id': o_id, 'm_id': mag_id, 'customer': c_name, 'address': sh_id,
'phone': sh_ph_id, 'time': mag_time, 'stamp': tS,
'o_c': o_composition, 't_sum': total_s, 'pending': pending}
o_dataset.append((o_data))
db.commit()
db.close()
return o_dataset, pii_data
def fetch_products_data(product_list):
"""Converts a raw product array into aggregated by similar SKU
list of dictionaries which might be convenient to pass directly
to the Jinja2"""
aggregator = {} # it should be a dictionary with SKUs as keys
for (sku, p_name, o_id, mag_id, p_price, qty) in product_list:
if sku in aggregator: # this product was aggregated already
(orders_list, p_name, old_sigma) = aggregator.get(sku)
else: # this is a new product
orders_list = []
old_sigma = 0
orders_list.append((mag_id, p_price, qty))
aggregator.update({sku: (orders_list, p_name, old_sigma + qty)})
# all rows are iterated already and products was aggregated by their SKU
# lets convert aggregator from dictionary into list of dictionaries:
output_list = []
    for j, k in aggregator.items():
# j - key (sku), k - a tuple of associated values
# add suppliers into output dictionary
supp_dataset = getSuppliers(j)
pInActPail = getPrInOpenPail()
incProducts = getIncludedProducts() # <-- already included into
if j not in set(incProducts + pInActPail): # pending invoice
output_list.append({ # + actual pail
'sku': j, 'order_orig': k[0], 'product': k[1], 'sigma': k[2],
'suppliers': supp_dataset})
newlist = sorted(output_list, key=lambda k: k['sigma'], reverse=True)
return newlist
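# Example of the aggregation performed above (hypothetical values):
#   pii rows in:  ('SKU1', 'Widget', 7, 'M1007', 9.99, 2), ('SKU1', 'Widget', 9, 'M1009', 9.99, 1)
#   aggregator:   {'SKU1': ([('M1007', 9.99, 2), ('M1009', 9.99, 1)], 'Widget', 3)}
#   output row:   {'sku': 'SKU1', 'order_orig': [...], 'product': 'Widget', 'sigma': 3, 'suppliers': [...]}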
def fetch_invoices_data(iStatus):
"""Yields dataset about purchase orders accordingly its status"""
out_list = []
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
MySQL_c = db.cursor()
if iStatus == 'pending':
MySQL_c.execute(
"""SELECT id, author_id, supplier_id, initilized FROM invoices
WHERE initilized IS NOT NULL AND sent IS NULL ORDER BY initilized
DESC""")
if iStatus == 'sent':
MySQL_c.execute(
"""SELECT id, author_id, supplier_id, initilized, sent,
s_author_id FROM invoices WHERE initilized IS NOT NULL AND
sent IS NOT NULL ORDER BY sent DESC""")
open_invoices_raw_dataset = MySQL_c.fetchall() # it's list of tuples
if open_invoices_raw_dataset:
for i in open_invoices_raw_dataset:
if iStatus == 'pending':
(invoice_id, invoice_author_id, supplier_id, i_date) = i
#
if iStatus == 'sent':
(invoice_id, invoice_author_id, supplier_id, i_date, s_date,
s_author_id) = i
sent_author = getAuthor(s_author_id)
invoice_author = getAuthor(invoice_author_id)
supplierData = getSupplierData(supplier_id)
MySQL_c.execute(
"""SELECT id, product_id, price, original_order_price, qty,
author_id, appended FROM invoice_composition WHERE
invoice_id=%s AND removed IS NULL ORDER BY appended DESC""",
(invoice_id,))
productsInInvoice = MySQL_c.fetchall()
productIIdata = []
i_sigma = 0
i_err = False
for j in productsInInvoice:
(piID, product_id, price, original_order_price,
qty, pIauthor_id, a_date) = j
pIauthor = getAuthor(pIauthor_id)
(sku, p_name) = getProduct(product_id)
k = "{0:.2f}".format((1 - price / original_order_price)
* 100) # <- correct
if k[0] == '-': # benefit check up
p_err = True
i_err = True
else:
p_err = False
i_sigma = i_sigma + price * qty
sigma = price * qty
PIIDataSet = ({'author': pIauthor, 'product': p_name,
'sku': sku, 'price': price, 'pErr': p_err,
'o_oprice': original_order_price, 'k': k,
'qty': qty, 'a_date': a_date, 'sigma': sigma,
'piID': piID})
productIIdata.append(PIIDataSet)
if i_sigma > 0: # <-- exclude out all blank POs
if iStatus == 'pending':
out_list.append({
'i_author': invoice_author, 's_d': supplierData,
'p_data': productIIdata, 'i_id': invoice_id,
'i_date': i_date, 'sigma': i_sigma, 'iErr': i_err})
if iStatus == 'sent':
out_list.append({
'i_author': invoice_author, 's_d': supplierData,
'p_data': productIIdata, 'i_id': invoice_id,
'i_date': i_date, 'sigma': i_sigma, 'iErr': i_err,
's_date': s_date, 's_author': sent_author})
else: # there are no any purchase order case
out_list = None
db.commit()
db.close()
p_invoices_tab_data = out_list
return p_invoices_tab_data
def fetch_suppliers_data():
"""Generates suppliers data array"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id, supplier_name, address_id, contact_id, author_id,
inserted FROM suppliers ORDER BY inserted DESC""")
suppliers_dataset = MySQL_c.fetchall() # it's list of tuples
supp_tab_data = []
for (s_id, s_name, address_id, contact_id,
author_id, inserted) in suppliers_dataset:
MySQL_c.execute(
"""SELECT line1, city, province, zip, country FROM addresses
WHERE id=%s""", (address_id,))
(address_line, city, province, ZIP, country) = MySQL_c.fetchone()
MySQL_c.execute(
"""SELECT first_name, last_name, email, preference, phone_id,
fax_id, author_id from clerks WHERE id=%s""", (contact_id,))
(first_name, last_name, email, preference, phone_id, fax_id,
author_id) = MySQL_c.fetchone()
supp_tab_data.append(
{'s_id': s_id, 's_name': s_name, 'address_line': address_line,
'city': city, 'province': province, 'zip': ZIP,
'country': country, 'first_name': first_name,
'last_name': last_name, 'email': email,
'preference': preference, 'phone': getPhoneNum(phone_id),
'fax': getFaxNum(fax_id),
'author': getAuthor(author_id), 'inserted': inserted})
db.commit()
db.close()
return supp_tab_data
def suppDataCheck(form_data):
"""Pick up and check up input data"""
raw_data = {} # a single dictionary
iDkeys = [
'supplier', 'line1', 'city', 'province', 'zip',
'firstname', 'lastname', 'email', 'phone', 'fax',
'preference']
for i in iDkeys:
raw_data.update({i: form_data[i]})
pure_data = sanitize(raw_data)
    pure_data['country'] = 'Australia'  # hard-coded stub for now
supplier = pure_data['supplier'].title()
check_up = False
if 's_id' in form_data: # this is update case
pure_data.update({'s_id': form_data['s_id']})
logging.debug('This is update case!!!')
check_up = 'update'
else:
if supplier:
db = MySQLdb.connect(
passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id FROM suppliers WHERE supplier_name=%s""",
(supplier,))
            if MySQL_c.fetchone():  # this supplier already exists in the DB
check_up = 'known'
else:
check_up = 'new'
db.commit()
db.close()
return (pure_data, check_up)
def suppDataInsert(pure_data, userID):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
# input data array
supplier = pure_data['supplier'].title()
line = pure_data['line1'].title()
city = pure_data['city'].title()
province = pure_data['province'].title()
ZIP = pure_data['zip']
country = pure_data['country'].title()
first_name = pure_data['firstname'].title()
last_name = pure_data['lastname'].title()
email = pure_data['email']
phone = pure_data['phone']
fax = pure_data['fax']
preference = pure_data['preference']
    # derived ids -->
address_id = getAddressID(line, city, province, ZIP, country, userID)
phone_id = getPhoneID(phone, userID)
fax_id = getFaxID(fax, userID)
clerk_id = getClerkID(
first_name, last_name, email, preference, phone_id, fax_id, userID)
if 's_id' in pure_data: # update case
MySQL_c.execute(
"""UPDATE suppliers SET supplier_name=%s,
address_id=%s, contact_id=%s, author_id=%s
WHERE id=%s""", (supplier, address_id, clerk_id, userID,
pure_data['s_id']))
else: # insertion itself -->
MySQL_c.execute(
"""INSERT INTO suppliers (supplier_name, address_id,
contact_id, author_id) VALUES (%s, %s, %s, %s)""",
(supplier, address_id, clerk_id, userID))
db.commit()
db.close()
return True
def suppDataUpdate(pure_data, userID):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
#
supplier = pure_data['supplier'].title()
line = pure_data['line1'].title()
city = pure_data['city'].title()
province = pure_data['province'].title()
ZIP = pure_data['zip']
country = pure_data['country'].title()
first_name = pure_data['firstname'].title()
last_name = pure_data['lastname'].title()
email = pure_data['email']
phone = pure_data['phone']
fax = pure_data['fax']
preference = pure_data['preference']
    # derived ids -->
address_id = getAddressID(line, city, province, ZIP, country, userID)
phone_id = getPhoneID(phone, userID)
fax_id = getFaxID(fax, userID)
clerk_id = getClerkID(
first_name, last_name, email, preference, phone_id, fax_id, userID)
MySQL_c.execute(
"""UPDATE suppliers SET supplier_name=%s,
address_id=%s, contact_id=%s, author_id=%s
WHERE id=%s""", (supplier, address_id, clerk_id, userID,
pure_data['s_id']))
logging.debug('This is suppDataUpdate case @@')
logging.debug('This is suppDataUpdate case %s, %s, %s, %s, %s, %s, %s' % (
first_name, last_name, email, preference, phone_id, fax_id, userID))
db.commit()
db.close()
return True
def getAddressID(line, city, province, ZIP, country, userID):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id FROM addresses WHERE line1=%s AND city=%s AND
province=%s AND zip=%s AND country=%s""",
(line, city, province, ZIP, country))
addressCheck = MySQL_c.fetchone()
if addressCheck:
addressID = addressCheck[0]
else:
#logging.debug("%s, %s, %s, %s, %s" %
#(line, city, province, ZIP, country, userID))
MySQL_c.execute(
"""INSERT INTO addresses (line1, city, province, zip,
country) VALUES (%s, %s, %s, %s, %s)""",
(line, city, province, ZIP, country))
addressID = MySQL_c.lastrowid
db.commit()
db.close()
return addressID
def getClerkID(firstname, lastname, email, preference, phone_id,
fax_id, userID):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id, preference FROM clerks WHERE first_name=%s AND
last_name=%s AND email=%s AND phone_id=%s AND fax_id=%s""",
(firstname, lastname, email, phone_id, fax_id))
clerkCheck = MySQL_c.fetchone()
if clerkCheck:
clerkID = clerkCheck[0]
old_pref = clerkCheck[1]
if old_pref != preference:
logging.debug('This is preference update case @@')
MySQL_c.execute("""UPDATE clerks SET preference=%s WHERE id=%s""",
(preference, clerkID))
else:
MySQL_c.execute(
"""INSERT INTO clerks (first_name, last_name, email, preference,
phone_id, fax_id, author_id) VALUES
(%s, %s, %s, %s, %s, %s, %s)""",
(firstname, lastname, email, preference, phone_id, fax_id, userID))
clerkID = MySQL_c.lastrowid
db.commit()
db.close()
return clerkID
def getPhoneID(phoneNum, userID):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
    pN = phoneNum.replace(' ', '').replace('-', '')  # <-- primitive normalization
MySQL_c.execute(
"""SELECT id FROM phones WHERE phone_num=%s""", (pN,))
phoneCheck = MySQL_c.fetchone()
if phoneCheck: # existing already
phID = phoneCheck[0]
else: # the new one
MySQL_c.execute(
"""INSERT INTO phones (phone_num, author_id) VALUES (%s, %s)""",
(pN, userID))
phID = MySQL_c.lastrowid
db.commit()
db.close()
return phID
def getFaxID(fax, userID):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
    faxN = fax.replace(' ', '').replace('-', '')  # <-- primitive normalization
MySQL_c.execute(
"""SELECT id FROM faxes WHERE fax_num=%s""", (faxN,))
faxCheck = MySQL_c.fetchone()
if faxCheck: # existing already
faxID = faxCheck[0]
else: # the new one
MySQL_c.execute(
"""INSERT INTO faxes (fax_num, author_id) VALUES (%s, %s)""",
(faxN, userID))
faxID = MySQL_c.lastrowid
db.commit()
db.close()
return faxID
def sanitize(dictionary):
"""Iterate and sanitaze eachone item value in the dict"""
pure_data_dict = {}
for i in dictionary:
        if dictionary[i]:  # it's not empty
            # the sanitization itself: strip single quotes
pure_data_dict.update({i: dictionary[i].replace("'", '')})
else:
pure_data_dict.update({i: ''})
return pure_data_dict
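# Illustrative call (not part of the original module): sanitize() only
# strips single quotes, leaving everything else untouched, e.g.
#   sanitize({'supplier': "O'Brien & Co", 'fax': ''})
#   -> {'supplier': 'OBrien & Co', 'fax': ''}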
def checkProduct(form_data):
"""Sanitize and check up user input"""
pure_data = sanitize(form_data)
supplier_id = pure_data['supplier']
sku = pure_data['sku']
price = pure_data['price']
o_oprice = pure_data['o_oprice']
sigma = pure_data['sigma']
if supplier_id and sku and price and sigma:
check_up = True
else:
check_up = False
try: # price input validation
float(price)
except ValueError:
check_up = False
prod_properties = {
'sku': sku, 'price': price, 'qty': sigma,
'supplier_id': supplier_id, 'o_oprice': o_oprice}
return (prod_properties, check_up)
def getInvoiceID(supplierID, authorID):
"""Check up 'not sent' invoice for particular supplier, if it is
not exists yet - create one"""
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id FROM invoices WHERE initilized IS NOT NULL AND
sent IS NULL AND supplier_id=%s""", (supplierID,))
invoiceCheck = MySQL_c.fetchone()
    if invoiceCheck:  # does it exist already
invoiceID = invoiceCheck[0]
else:
initTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
MySQL_c.execute(
"""INSERT INTO invoices (author_id, supplier_id, initilized)
VALUES (%s, %s, %s)""", (authorID, supplierID, initTime))
invoiceID = MySQL_c.lastrowid
db.commit()
db.close()
return invoiceID
def appendProduct(prod_properties, userID):
"""Move products from aggregated tab onto pending invoices one"""
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
logging.debug("This is output from appendProduct")
logging.debug(prod_properties)
(sku, price, qty, supplier_id, o_oprice) = (
prod_properties['sku'], prod_properties['price'],
prod_properties['qty'], prod_properties['supplier_id'],
prod_properties['o_oprice'])
invoiceID = getInvoiceID(supplier_id, userID)
productID = getProductID(sku)
#
MySQL_c = db.cursor()
MySQL_c.execute(
"""INSERT INTO invoice_composition (invoice_id, product_id,
price, original_order_price, qty, author_id) VALUES
(%s, %s, %s, %s, %s, %s)""",
(invoiceID, productID, price, o_oprice, qty, userID))
db.commit()
db.close()
    if len(getIncludedProducts()) == 1:  # it's the first product in the batch
newBatchID = genNewBatch(userID)
return True
def removeProduct(r_id, piI_id):
"""Move aggProduct out from purchase order"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
removeTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
MySQL_c.execute(
"""UPDATE invoice_composition SET removed=%s, remover_id=%s
WHERE id=%s""", (removeTime, r_id, piI_id))
db.commit()
db.close()
    if len(getIncludedProducts()) == 0:  # it's the last product in the batch
closeBatch(removeTime)
return True
def getSuppliers(sku):
"""Temporary solving sku--supplier association issue
which should be corrected when DB will filled by real data"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT id, supplier_name FROM suppliers""")
    supp_dataset = MySQL_c.fetchall()  # a list of tuples
db.commit()
db.close()
return supp_dataset
def getProductSKU(product_id):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT sku FROM products WHERE id=%s""", (product_id,))
sku = MySQL_c.fetchone()[0]
db.commit()
db.close()
return sku
def getProductID(sku):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT id FROM products WHERE sku=%s""", (sku,))
productID = MySQL_c.fetchone()[0]
db.commit()
db.close()
return productID
def getIncludedProducts():
"""List of SKU products included already into pending invoices"""
incProductsList = []
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT id FROM invoices WHERE initilized IS NOT NULL AND
sent IS NULL""")
    open_invoices = MySQL_c.fetchall()  # a list of tuples
if open_invoices:
for i in open_invoices:
MySQL_c.execute(
"""SELECT product_id FROM invoice_composition WHERE
invoice_id=%s AND removed IS NULL""", (i[0],))
products_ids = MySQL_c.fetchall()
if products_ids:
for j in products_ids:
incProductsList.append(getProductSKU(j[0]))
db.commit()
db.close()
return incProductsList
def getSupplierData(s_id):
"""Get all data about supplier by their id"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT supplier_name, address_id, contact_id FROM
suppliers WHERE id=%s""", (s_id,))
(supplier_name, address_id, contact_id) = MySQL_c.fetchone()
db.commit()
db.close()
s_data = {
'name': supplier_name, 'address': getAddress(address_id),
'contact': getClerk(contact_id), 'id': s_id}
return s_data
def getClerk(c_id):
"""Returns all data about contact person"""
c_data = {}
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT first_name, last_name, email, preference, phone_id,
fax_id FROM clerks WHERE id=%s""", (c_id,))
(c_data['first_name'], c_data['last_name'], c_data['email'],
c_data['preference'], phone_id, fax_id) = MySQL_c.fetchone()
c_data['phone'] = getPhoneNum(phone_id)
c_data['fax'] = getFaxNum(fax_id)
db.commit()
db.close()
return c_data
def getFaxNum(fax_id):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT fax_num FROM faxes WHERE id=%s""", (fax_id,))
faxN = MySQL_c.fetchone()[0]
db.commit()
db.close()
return faxN
def getPhoneNum(phone_id):
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT phone_num FROM phones WHERE id=%s""", (phone_id,))
phoneN = MySQL_c.fetchone()[0]
db.commit()
db.close()
return phoneN
def getAddress(a_id):
"""Fetches address properties"""
a_data = {}
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT line1, city, province, zip, country FROM addresses
WHERE id=%s""", (a_id,))
(a_data['line1'], a_data['city'], a_data['province'], a_data['zip'],
a_data['country']) = MySQL_c.fetchone()
db.commit()
db.close()
return a_data
def getAuthor(a_id):
"""Get author nickname by his id"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT nickname FROM authors WHERE id=%s""",
(a_id,))
author_nick = MySQL_c.fetchone()[0]
db.commit()
db.close()
return author_nick
def getProduct(p_id):
""" Get product's sku and name by its id """
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT sku, item_name FROM products WHERE id=%s""",
(p_id,))
p_data = MySQL_c.fetchone()
db.commit()
db.close()
return p_data
def revocateOrder(o_id, username):
"""Toggle pending and revocated order"""
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT status FROM orders WHERE id=%s""",
(o_id,))
o_status = MySQL_c.fetchone()[0]
    if o_status == 'pending':
        new_status = 'revocated'
        # conditionally exclude products of this order from the PP
    elif o_status == 'revocated':
        new_status = 'pending'
    # conditionally include products from this order into the PP
    incProdInPP(o_id, username)
toggleTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
logging.debug('%s toggle %s order status from %s to %s at %s' %
(username, o_id, o_status, new_status, toggleTime))
MySQL_c.execute("""UPDATE orders SET status=%s WHERE id=%s""",
(new_status, o_id))
db.commit()
db.close()
return True
def sendPurchaseOrder(iID, uName):
"""send PO to supplier accordiongly with preference method"""
state = False
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT supplier_id FROM invoices WHERE id=%s""",
(iID,))
sID = MySQL_c.fetchone()[0]
MySQL_c.execute("""SELECT contact_id FROM suppliers WHERE id=%s""",
(sID,))
cID = MySQL_c.fetchone()[0]
clerk = getClerk(cID)
poData = getPO_Data(iID)
prSet = getPrSet(iID)
poData['total'] = getTotalPOsum(prSet)
    # Plain email case
if clerk['preference'] == 'email':
sg = sendgrid.SendGridClient('jaketay', 'lemonlime')
pdf = composePDF_PO(poData, prSet)
lSub = ("New Purchase Order #%s" % poData['No'])
letter = render_template(
'leemunroe.htm', poData=poData, prSet=prSet)
pdfFilename = "Purchase_order_%s.pdf" % poData['No']
#
message = sendgrid.Mail()
message.add_to(clerk['email'])
message.set_subject(lSub)
message.set_html(letter)
message.set_from('<EMAIL>')
message.add_attachment_stream(pdfFilename, pdf)
status, msg = sg.send(message)
#
if status == 200:
state = True
logging.debug('Email with PO No%s was sent by %s to %s %s' %
(iID, uName, clerk['first_name'],
clerk['last_name']))
else:
logging.debug('Email with PO No%s NOT sent to %s' %
(iID, uName))
# Fax emailing case
if clerk['preference'] == 'fax':
sg = sendgrid.SendGridClient('jaketay', 'lemonlime')
lSub = ("New Purchase Order #%s" % poData['No'])
#tXt = "<h1>%s</h1>" % lSub
htmlFilename = "Purchase_order_%s.html" % poData['No']
faxMail = clerk['fax'] + '@fax.utbox.net'
letter = render_template(
'faxmail.htm', poData=poData, prSet=prSet)
#
#
message = sendgrid.Mail()
message.add_to(faxMail)
message.set_subject(lSub)
#message.set_html(tXt)
message.set_from(faxMailFrom)
message.add_attachment_stream(htmlFilename, letter)
status, msg = sg.send(message)
#
if status == 200:
state = True
logging.debug('FaxMail with PO No%s was sent by %s to %s %s' %
(iID, uName, clerk['first_name'],
clerk['last_name']))
else:
logging.debug('FaxMail with PO No%s NOT sent to %s' %
(iID, uName))
# SMS emailing case
if clerk['preference'] == 'sms':
client = TwilioRestClient(account, token)
sms = genSMS(poData, prSet)
nOrPhN = checkNo(clerk['phone'])
if nOrPhN:
message = client.messages.create(
to=nOrPhN, # "+61417617909",
from_=PhN,
body=sms)
state = True
logging.debug('SMS %s with PO No%s was sent to %s' %
(message.sid, iID, uName))
    if state:  # PO was successfully sent
markAsSent(iID, uName)
db.commit()
db.close()
return True
def checkNo(rawPhNum):
"""Convert Australian phone numbers into international view"""
if len(rawPhNum) == 10:
phNo = "+61" + rawPhNum[1:]
else:
logging.debug('%s NOT VALID for Twilio SMS purposes' % rawPhNum)
phNo = False
return phNo
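# Illustrative calls (not part of the original module):
#   checkNo('0417617909')  -> '+61417617909'
#   checkNo('12345')       -> False (and the number is logged as invalid)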
def getPrSet(iID):
"""Returns products in purchase order dictionary list"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT product_id, price, qty FROM invoice_composition WHERE
invoice_id=%s AND removed IS NULL ORDER BY qty DESC""", (iID,))
poPrDataSet = MySQL_c.fetchall()
prSet = []
for k, i in enumerate(poPrDataSet, start=1):
pr = {}
product_id, price, qty = i
pr['No'] = k
pr['price'] = price
pr['qty'] = qty
pr['sku'], pr['name'] = getProduct(product_id)
pr['subtotal'] = float(price) * int(qty)
prSet.append(pr)
db.commit()
db.close()
return prSet
def getPO_Data(iID):
"""Retrieve purchase order data accordingly its ID"""
poData = {}
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT author_id, supplier_id, sent, s_author_id
FROM invoices WHERE id=%s""", (iID,))
author_id, supplier_id, sent, s_author_id = MySQL_c.fetchone()
db.commit()
db.close()
poData['buyer'] = session['username']
poData['email'] = users.get(session['username']).get('email')
poData['phoneNo'] = users.get(session['username']).get('phoneNo')
    poData['No'] = int(iID) * 3 + 140121  # PO No obfuscator: invoice 5 -> PO 140136
return poData
def markAsSent(iID, uName):
"""Mark this PO as already sent to supplier"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
sTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
uID = get_user_id(uName)
MySQL_c.execute("""UPDATE invoices SET sent=%s, s_author_id=%s
WHERE id=%s""", (sTime, uID, iID))
db.commit()
db.close()
return True
def genSMS(poData, prSet):
"""Generates SMS text body"""
header = "\nPurchase Order\n#%s\nBuyer: %s\nPhone: %s\n\n" % (
poData['No'],
poData['buyer'],
poData['phoneNo']
)
body = ''
for i, j in enumerate(prSet, start=1):
prRow = "%s | %s | %s | %s\n%s\n\n" % (
i, j['sku'], j['qty'], j['price'], j['name'][:15])
body = body + prRow
footer = "\n\nTHANK YOU!"
sms = header + body + footer
return sms
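# Illustrative SMS output (hypothetical values; note the 15-char name cut):
#
#   Purchase Order
#   #140136
#   Buyer: jake
#   Phone: 0298765432
#
#   1 | AB-123 | 2 | 19.95
#   Widget, stainle
#
#   THANK YOU!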
def getTotalPOsum(prSet):
"""Calculates PO total cost"""
Ts = 0
for i in prSet:
Ts = Ts + i['subtotal']
return Ts
def getMagID(oID):
"""Retrieve out Magento ID"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT magento_id FROM orders WHERE id=%s""", (oID,))
mID = MySQL_c.fetchone()[0]
db.commit()
db.close()
return mID
# Pail related functions --->
def get_product_Set(order_batch_id, product_id):
"""Render data about product origin in the pail"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
prSet = []
#
MySQL_c.execute(
"""SELECT order_id FROM orders_in_batch WHERE batch_id=%s""",
(order_batch_id,))
orders_In_batch = MySQL_c.fetchall()
for i in orders_In_batch:
MySQL_c.execute("""SELECT price, qty FROM order_composition
WHERE order_id=%s AND product_id=%s""", (i[0], product_id))
try:
price, qty = MySQL_c.fetchone()
mID = getMagID(i[0])
prRow = {'order_id': mID, 'price': price, 'qty': qty}
prSet.append(prRow)
        except TypeError:  # fetchone() returned None: order lacks this product
            pass
db.commit()
db.close()
return prSet
def fetch_held_products():
"""Retrieve data about products in the pail"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT id, order_batch_id, product_id, author_id,
i_date, resolver_id, r_time FROM products_in_pail ORDER BY id DESC""")
pail_products_dataset = MySQL_c.fetchall()
#
HpDataSet = []
for i in pail_products_dataset:
(pip_id, order_batch_id, product_id, author_id, i_date, resolver_id,
r_time) = i
prSet = get_product_Set(order_batch_id, product_id)
sku, p_name = getProduct(product_id)
auth_nick = getAuthor(author_id)
resolver = ''
if resolver_id:
resolver = getAuthor(resolver_id)
p_row = {'id': pip_id, 'i_date': i_date, 'author': auth_nick, 'sku':
sku, 'name': p_name, 'pData': prSet, 'resolver': resolver,
'r_date': r_time}
HpDataSet.append(p_row)
db.commit()
db.close()
return HpDataSet
def grasp_product(p_sku, userID):
"""Absorbs product into the pail"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
pID = getProductID(p_sku)
iDate = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Retrieve the currently open batch:
MySQL_c.execute("""SELECT id FROM order_batches WHERE r_time IS NULL ORDER
BY id DESC LIMIT 1""")
try:
bID = MySQL_c.fetchone()[0]
except TypeError:
bID = genNewBatch(userID)
MySQL_c.execute(
"""INSERT INTO products_in_pail (order_batch_id, product_id, author_id,
i_date) VALUES (%s, %s, %s, %s)""", (bID, pID, userID, iDate))
db.commit()
db.close()
return True
def eliminate_product(p_id, uID):
"""Push product out from the pail"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
#MySQL_c.execute("""UPDATE ppail SET r_time=%s WHERE p_id=%s AND""")
db.commit()
db.close()
return True
def throw_product(p_id, uID):
"""Incorporate product back into aggregated products tab"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
r_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
MySQL_c.execute("""UPDATE products_in_pail SET resolver_id=%s, r_time=%s
WHERE id=%s""", (uID, r_time, p_id))
    # -1 because the row resolved above is not yet committed and still counts
    numO_PrInLastBatch = len(getPrInOpenPail()) - 1
    #logging.debug('There are %s in the PAIL' % numO_PrInLastBatch)
    if numO_PrInLastBatch == 0:  # it's the last product in the batch
closeBatch(r_time)
db.commit()
db.close()
return True
def move_to_pail(p_id):
"""Move product into pail"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
#MySQL_c.execute("""SELECT ppail WHERE p_id=%s AND""")
db.commit()
db.close()
return False
def getPrInOpenPail():
"""List of SKU products included already into last open pail"""
skuList = []
if check_last_batch():
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute("""SELECT id FROM order_batches WHERE r_time IS NULL\
ORDER BY id DESC LIMIT 1""")
lastBatchID = MySQL_c.fetchone()[0]
MySQL_c.execute("""SELECT product_id FROM products_in_pail WHERE
order_batch_id=%s AND r_time IS NULL""", (lastBatchID,))
productsInLastBatchIDs = MySQL_c.fetchall()
for i in productsInLastBatchIDs:
sku, p_name = getProduct(i[0])
skuList.append(sku)
db.commit()
db.close()
return skuList
def check_last_batch():
"""Is the last batch open?"""
checkUp = False
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""SELECT r_time FROM order_batches ORDER BY id DESC LIMIT 1"""
)
try:
resolved = MySQL_c.fetchone()[0]
except TypeError:
resolved = True
if not resolved:
checkUp = True
db.commit()
db.close()
return checkUp
def checkOTLock():
"""Does orders tab was locked?"""
checkUp = False
PO_sku_list = getIncludedProducts()
    if PO_sku_list:  # a product sits in an open PO...
        checkUp = True
    elif check_last_batch():  # ...or the current batch is still open
        checkUp = True
return checkUp
def genNewBatch(aID):
"""Creates a new pile of orders"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
iTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
MySQL_c.execute(
"""INSERT INTO order_batches (author_id, i_time) VALUES (%s, %s)""",
(aID, iTime))
newBatchID = MySQL_c.lastrowid
MySQL_c.execute("""SELECT id FROM orders WHERE magento_time >= (CURDATE()
- INTERVAL 3 DAY) AND status = 'pending'""")
o_id_dataset = MySQL_c.fetchall()
    # let's fill the orders_in_batch table:
for i in o_id_dataset:
MySQL_c.execute("""INSERT INTO orders_in_batch (batch_id, order_id)
VALUES (%s, %s)""", (newBatchID, i[0]))
db.commit()
db.close()
return newBatchID
def closeBatch(rTime):
"""Close up a last pile of orders"""
# only open right now thus it will be reconfigured later
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""UPDATE order_batches SET r_time=%s WHERE r_time IS NULL ORDER BY
id DESC LIMIT 1""", (rTime,)
)
db.commit()
db.close()
return True
def incProdInPP(o_id, username):
"""Include product into product pail"""
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
MySQL_c.execute(
"""UPDATE order_batches SET r_time=%s WHERE r_time IS NULL ORDER BY
id DESC LIMIT 1""", (rTime,)
)
db.commit()
db.close()
return True
```
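Nearly every helper above repeats the same connect/cursor/commit/close boilerplate. A small context manager would factor that out — a minimal sketch, assuming the same MySQLdb driver and the module-level `db_password`/`db_name` already used above:

```python
from contextlib import contextmanager

import MySQLdb


@contextmanager
def orsys_db():
    """Yield a cursor, then commit and close the connection."""
    db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
    try:
        yield db.cursor()
        db.commit()
    finally:
        db.close()


# e.g. getFaxNum() then collapses to:
def getFaxNum(fax_id):
    with orsys_db() as c:
        c.execute("""SELECT fax_num FROM faxes WHERE id=%s""", (fax_id,))
        return c.fetchone()[0]
```

The commit only happens when the block exits cleanly; the connection is closed either way.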
#### File: 2013_OrSys/stuff/scratches.py
```python
def insertNewSupplier(form_data, userID): # @@
"""All necessary convertations, validations and insertions there"""
raw_data = {} # a single dictionary
# lets prepare data to processing
iDkeys = [
'supplier', 'line1', 'city', 'province', 'zip',
'firstname', 'lastname', 'email', 'phone', 'fax'
] # later add preference & etc.
for i in iDkeys:
raw_data.update({i: form_data[i]})
# now raw_data filled
# input data validation
(data_valid, ins_data, msg) = inputDataValidator(raw_data)
msg.update({'ins': False})
if data_valid: # --<insertion case
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
supplier = ins_data['supplier']
address_id = ins_data['address_id']
contact_id = ins_data['contact_id']
try:
MySQL_c.execute(
"""INSERT INTO suppliers (supplier_name, address_id,
contact_id) VALUES (%s, %s, %s)""",
(supplier, address_id, contact_id))
msg.update({'ins': True})
        except Exception:
logging.info('Insertions failed, supplier=%s,\
address_id=%s, contact_id=%s' % (supplier, address_id, contact_id))
#
db.commit()
db.close()
else:
logging.info('Data not valid for insertion: %s' % (msg,))
return msg
def inputDataValidator(raw_data_dict): # @@
msg = {}
data_for_insertion = {}
val_result = ()
db = MySQLdb.connect(passwd=<PASSWORD>, db=db_name, user='orsys')
MySQL_c = db.cursor()
data_dict = sanitizer(raw_data_dict) # sanitize it
# check up supplier
supplier = data_dict['supplier']
if supplier:
MySQL_c.execute(
"""SELECT id FROM suppliers WHERE supplier_name=%s""",
(supplier,))
if MySQL_c.fetchone(): # this supplier is already exists in DB
msg.update({'s_name': 'already exists'}) # <-- update case
data_for_insertion.update({'supplier': supplier})
val_result = False
else: # <-- insert case
data_for_insertion.update({'supplier': supplier})
val_result = True
else: # <-- empty field case:
msg.update({'s_name': 'empty'})
val_result = False
data_for_insertion.update({'address_id': 1}) # address_id})
data_for_insertion.update({'contact_id': 1}) # clerk_id})
result = (val_result, data_for_insertion, msg)
db.commit()
db.close()
return result
# order_composition filler:
SQLite3_c.execute(
'SELECT Order_Number, Item_SKU, Item_Price, Item_Qty_Ordered \
FROM orders')
raw_item_data = SQLite3_c.fetchall()
prep_data = []
for i in raw_item_data:
(o_number, sku, price, qty) = i
MySQL_c.execute("""SELECT id FROM orders WHERE magento_id=%s""",
(o_number,))
o_id = int(MySQL_c.fetchone()[0])
MySQL_c.execute("""SELECT id FROM products WHERE sku=%s""",
(sku,))
p_id = int(MySQL_c.fetchone()[0])
prep_data.append((o_id, p_id, price.split('$')[-1], qty))
print prep_data
MySQL_c.executemany(
""" INSERT INTO order_composition (order_id, product_id,
price, qty) VALUES (%s, %s, %s, %s)""", prep_data)
# this is orders table filler
SQLite3_c.execute(
'select Order_Number,Order_Date, Customer_Name, \
Shipping_Phone_Number, Shipping_Street from orders'
)
raw_orders = set(SQLite3_c.fetchall())
orders = list(raw_orders)
prepared_data = []
for i in orders:
(m_num, m_date, c_name, p_num, street) = i
# lets convert date into MySQL format:
raw_date, raw_time = m_date.split()
time = raw_time + ':00'
date = '-'.join(raw_date.split('/')[::-1])
m_date = date + ' ' + time
    # let's find foreign keys:
MySQL_c.execute("""SELECT id FROM customers WHERE customer_name=%s""",
(c_name,))
customer_id = int(MySQL_c.fetchone()[0])
MySQL_c.execute("""SELECT id FROM phones WHERE phone_num=%s""",
(p_num,))
phone_id = int(MySQL_c.fetchone()[0])
MySQL_c.execute("""SELECT id FROM addresses WHERE line1=%s""",
(street,))
address_id = int(MySQL_c.fetchone()[0])
print (
m_num, m_date, c_name, customer_id, p_num, phone_id,
street, address_id
)
prepared_data.append(
(int(m_num), customer_id, address_id, phone_id, m_date))
MySQL_c.executemany(
"""INSERT INTO orders (magento_id, customer_id, shipping_address_id,
shipping_phone_id, magento_time) VALUES (%s, %s, %s, %s, %s)""",
prepared_data)
#?
def phoneFiller(self, raw_phone):
    # extract significant parts:
    if len(raw_phone) == 8:  # it's a bare local phone number
        pass
# Filling addresses table:
SQLite3_c.execute(
"""SELECT Shipping_Street, Shipping_Zip, Shipping_City, Shipping_State_Name, \
Shipping_Country_Name FROM orders"""
)
address_data = set(SQLite3_c.fetchall())
MySQL_c.executemany(
"""INSERT INTO addresses (line1, zip, city, province, country)
VALUES (%s, %s, %s,%s, %s)""", address_data
)
# - #
# typical MySQL interaction: filling products table
SQLite3_c.execute('SELECT Item_Name, Item_SKU from orders')
product_data = SQLite3_c.fetchall()
inserted_sku = []
prepared_data = []
for i in product_data:
if i[1] not in inserted_sku:
prepared_data.append((None, i[0], i[1]))
inserted_sku.append(i[1])
print prepared_data
MySQL_c.executemany(
"""INSERT INTO products (id, item_name, sku) VALUES (%s, %s, %s)""",
prepared_data)
# - #
# this snippet fills data from csv into SQLite3
csv_file = open('orders.csv', 'rU')
o = csv.reader(csv_file)
for i in o:
c.execute('INSERT INTO orders VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,\
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,\
?)', tuple(i))
# - #
# check up address
line1 = data_dict['line1'] # -- four variables there
city = data_dict['city']
province = data_dict['province']
postal_zip = data_dict['zip']
#
if line1:
MySQL_c.execute(
"""SELECT id FROM addresses WHERE line1=%s""",
(line1,))
if MySQL_c.fetchone(): # this address is well known
address_id = MySQL_c.fetchone()[0]
else: # the new one
msg.update({'line1': 'new insertion'})
MySQL_c.execute(
"""INSERT INTO addresses (line1, city, province, zip)
VALUES (%s, %s, %s, %s)""",
(line1, city, province, postal_zip))
address_id = MySQL_c.lastrowid
else: # empty line1 case
msg.update({'line1': 'empty'})
MySQL_c.execute(
"""INSERT INTO addresses (line1, city, province, zip)
VALUES (%s, %s, %s, %s)""",
(line1, city, province, postal_zip))
address_id = MySQL_c.lastrowid
# check up clerk
c_first_name = data_dict['firstname']
c_last_name = data_dict['lastname']
email = data_dict['email']
phone = data_dict['phone']
fax = data_dict['fax']
# the main condition:
    if email or phone:  # (email or phone) or (email and phone) reduces to this
# check it up
MySQL_c.execute(
"""SELECT id FROM clerks WHERE email=%s""",
(email,))
clerk_id = MySQL_c.fetchone()
        if clerk_id:  # this email is well known already
            pass
        else:  # it's a new email
            pass
    else:  # neither contact method was given
msg.update({'contact': 'unknown communication method'})
# - #
```
#### File: portfolio/2015_cna/cnaa.py
```python
import sys
import requests
from lxml import html
def main(argv):
# compose url:
query = argv[0]
prefix = "http://www.bing.com/news/search?q=entertainment+news+"
suffix = "&qs=n&form=NWRFSH&pq=entertainment+news+ozz&sc=0-20&sp=-1&sk="
print query
uri = prefix + query + suffix
print uri
page = requests.get(uri)
tree = html.fromstring(page.text)
# extract data
try:
news_nodes = tree.xpath('//div[@class="sn_r"]')
urls = tree.xpath('//div[@class="newstitle"]/a/@href')
for i in range(len(news_nodes)):
print i
hl_xpath = '//div[@class="sn_r"][' + \
str(i + 1) + ']/div[@class="newstitle"]/a//text()'
headline = tree.xpath(hl_xpath)
print headline, ' ## ', urls[i]
    except Exception:
print 'No relevant data'
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: portfolio/2015_cna/models.py
```python
class NewsArticle():
"""Article about some new event
"""
def __init__(self):
self.id = 0
self.c_time = '' # when the article been published
self.headline = ''
self.summary = ''
self.img_url = ''
self.url = ''
self.origin = '' # the news' source
self.keywords = [] # list of keywords
def insert(self, cursor):
"""Inserts data into DB.news
and returns inserted id
"""
        return True  # stub: real insertion not implemented yet
class DataSource():
""" Source of data
"""
def __init__(self, **kwargs):
self.name = str(kwargs['name'])
self.url = kwargs['url']
def smart_insert(self, conn):
"""Inserts or pick up the id in/from to DB.sources
"""
cursor = conn.cursor()
cursor.execute("""SELECT id FROM sources WHERE s_name=%s;""",
(self.name, ))
s_id = cursor.fetchone()
if s_id: # this name exists in DB
return int(s_id[0])
else:
cursor.execute("""INSERT INTO sources (s_name, s_url)
VALUES (%s, %s);""", (self.name, self.url))
cursor.execute("""SELECT LASTVAL();""")
s_id = int(cursor.fetchone()[0])
conn.commit()
return s_id
```
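The select-then-insert in `smart_insert` can race when two workers discover the same new source at once. Since the `%s` placeholders and `SELECT LASTVAL()` point at psycopg2/PostgreSQL, the same get-or-create can be done atomically on PostgreSQL 9.5+ — a sketch, assuming a UNIQUE constraint on `sources.s_name`:

```python
def smart_insert_atomic(self, conn):
    """Get-or-create via upsert; assumes UNIQUE(s_name) on sources."""
    cursor = conn.cursor()
    # DO UPDATE rather than DO NOTHING so RETURNING always yields the id
    cursor.execute("""INSERT INTO sources (s_name, s_url)
                      VALUES (%s, %s)
                      ON CONFLICT (s_name) DO UPDATE SET s_url = EXCLUDED.s_url
                      RETURNING id;""", (self.name, self.url))
    s_id = int(cursor.fetchone()[0])
    conn.commit()
    return s_id
```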
#### File: GAE/server_test/main.py
```python
import json
import logging
from decimal import Decimal
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext.webapp.util import run_wsgi_app
from m.fn import get_quremo
# Web application with primitive arithmetical calculations
# and API for external usage
class MainHandler(webapp.RequestHandler):
def get(self):
self.response.write('Hello world!<p>\
This is the index page.</p>')
class DivideHandler(webapp.RequestHandler):
    def get(self, divisor, raw_dividend):
        # raw_dividend captured from the path is ignored; the dividend
        # comes from the query string instead
        dividend = self.request.query_string.split('=')[1]
(q, r, m) = get_quremo(divisor, dividend)
self.response.headers['Content-Type'] = 'application/json'
obj = {'quotient': q, 'result': r, 'modulus': m}
self.response.out.write(json.dumps(obj))
application = webapp.WSGIApplication([
('/', MainHandler),
(r'/divide/(.*)/(.*)', DivideHandler)
], debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
```
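`DivideHandler` reads the dividend from whatever follows `=` in the query string and ignores the second path capture, so the round trip looks like this:

```python
# GET /divide/4/x?dividend=22          (hypothetical local request)
# Content-Type: application/json
# -> {"quotient": "5.5", "result": "5", "modulus": "2"}
```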
#### File: server_test/m/fn.py
```python
import logging
from decimal import Decimal
from google.appengine.api import memcache
from google.appengine.ext import ndb
from models import Result
# MemCache and arithmetics inside
def quremo(a, b):
"""Primitive ariphmetic calculations"""
qu = Decimal(b) / Decimal(a)
re = int(qu)
mo = Decimal(b) % Decimal(a)
return str(qu), str(re), str(mo)
def get_quremo(a, b):
"""Memcache or DataStore interaction implementation
with aim to avoid server calculations overload"""
    a_key = a + '&' + b  # a unique key for each pair
# looking for MemCache value firstly:
cached_result = memcache.get(key=a_key)
if cached_result is None:
# looking for persistent cached value:
q = Result.query(Result.a_key == a_key)
if q.get(): # the values are there
calc_val = tuple(q.fetch(1)[-1].a_value)
memcache.add(key=a_key, value=calc_val, time=60)
logging.info("Data was restored out from ndb")
else: # values are completely new
calc_val = quremo(a, b)
memcache.add(key=a_key, value=calc_val, time=60)
R = Result()
R.a_key, R.a_value = a_key, calc_val
R.put()
logging.info("Data is new and was cached successfully")
else:
calc_val = cached_result
logging.info("Data was retrieved out from MemCache")
return calc_val
```
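The lookup order in `get_quremo` is memcache first, then the `Result` datastore model, then a fresh computation. Illustrative calls (hypothetical values; log lines match the module above):

```python
get_quremo('4', '22')  # -> ('5.5', '5', '2'); logs "Data is new and was cached successfully"
get_quremo('4', '22')  # within 60 s: logs "Data was retrieved out from MemCache"
# after the 60 s memcache TTL expires the tuple is restored from ndb instead
```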
#### File: scrapy/acurtis/bestmaterials.py
```python
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
try:
import json
except ImportError:
import simplejson as json
import logging
js_api_request_args = {
'__EVENTTARGET': '',
'__EVENTARGUMENT': '',
'__VIEWSTATE': '',
'myhiddenfield': 'null',
'variation_id': '',
'w': 'getProductAttributeDetails'
}
class BestmaterialsSpider(BaseSpider):
name = "bestmaterials"
allowed_domains = ["bestmaterials.com"]
start_urls = (
'http://www.bestmaterials.com/',
)
manufacturers_urls = (
'http://www.bestmaterials.com/SearchResult.aspx?Manufacturer=14',
'http://www.bestmaterials.com/SearchResult.aspx?Manufacturer=54',
'http://www.bestmaterials.com/SearchResult.aspx?Manufacturer=115',
'http://www.bestmaterials.com/SearchResult.aspx?Manufacturer=104',
)
cat_1_url = 'http://www.bestmaterials.com/retrofit_pipe_flashing_boots.aspx'
cat_2_url = 'http://www.bestmaterials.com/masterflash_sizes_and_materials.aspx'
products_urls = (
'http://www.bestmaterials.com/detail.aspx?ID=17345',
'http://www.bestmaterials.com/detail.aspx?ID=17347',
'http://www.bestmaterials.com/detail.aspx?ID=17348',
'http://www.bestmaterials.com/detail.aspx?ID=17349',
'http://www.bestmaterials.com/detail.aspx?ID=17271',
'http://www.bestmaterials.com/detail.aspx?ID=17272',
'http://www.bestmaterials.com/detail.aspx?ID=17273',
'http://www.bestmaterials.com/detail.aspx?ID=17274',
'http://www.bestmaterials.com/detail.aspx?ID=17275',
)
def parse(self, response):
yield Request(self.cat_1_url, callback=self.parse_1_cat)
yield Request(self.cat_2_url, callback=self.parse_2_cat)
for url in self.manufacturers_urls:
yield Request(url, callback=self.parse_manufacturer)
for url in self.products_urls:
yield Request(url, callback=self.parse_item)
def parse_1_cat(self, response):
hxs = HtmlXPathSelector(response)
content = hxs.select("//td[@id='ContentCell']/table/tr/td")
items = content.select("div[@align='center']/center/table[@id='AutoNumber1']/*/tr/td[1]")
items = items.select(".//a/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
items = content.select("div[@align='center']/center/table[@id='AutoNumber2']/tr/td[1]")
items = items.select(".//a/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
def parse_2_cat(self, response):
hxs = HtmlXPathSelector(response)
content = hxs.select("//td[@id='ContentCell']/table/tr/td")
items = content.select("div[@align='center']/center/table[@id='AutoNumber1']/tr/td[1]")
items = items.select(".//a/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
items = content.select("div[@align='center']/center/table[@id='AutoNumber2']/tr/td[1]")
items = items.select(".//a/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
def parse_manufacturer_pages(self, response):
hxs = HtmlXPathSelector(response)
content = hxs.select("//td[@id='ContentCell']/table/tr/td[@class='Content']")
items = content.select("div[@align='right']/div/table[2]")
pages = items.select("tr[@class='Content']/td/a/@href").extract()
for page in pages:
"javascript:__doPostBack('SearchTemplate13$DataGrid1$_ctl1$_ctl1','')"
m = re.search("doPostBack\('(.*?)','(.*?)'\)", page)
if m:
target = m.group(1)
target = target.replace('$', ':')
argument = m.group(2)
item_options = js_api_request_args.copy()
item_options['__VIEWSTATE'] = hxs.select("//form[@name='Form2']/input[@name='__VIEWSTATE']/@value").extract()
item_options['__EVENTTARGET'] = target
item_options['__EVENTARGUMENT'] = argument
request = FormRequest(
url=response.url,
formdata=item_options,
callback=self.parse_manufacturer
)
yield request
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
content = hxs.select("//td[@id='ContentCell']/table/tr/td[@class='Content']")
items = content.select("div[@align='right']/div/table[2]")
items = items.select("tr/td/table//td[@class='Content']/table/tr[2]/td/a/@href").extract()
for item in items:
yield Request(urljoin_rfc(base_url, item), callback=self.parse_item)
def parse_manufacturer(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
content = hxs.select("//td[@id='ContentCell']/table/tr/td[@class='Content']")
items = content.select("div[@align='right']/div/table[2]")
items = items.select("tr/td/table//td[@class='Content']/table/tr[2]/td/a/@href").extract()
for item in items:
yield Request(urljoin_rfc(base_url, item), callback=self.parse_item)
def parse_item(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
name = hxs.select("//tr[@id='ProductDetail11_trProductName']/td/text()").extract()
if name:
name = name[0].strip()
url = response.url
price = hxs.select("//tr[@id='ProductDetail11_trCustomPrice']/td/font/b/text()").extract()
if not price:
price = hxs.select("//tr[@id='ProductDetail11_trPrice']/td/text()").extract()
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
else:
# may be several products
products = hxs.select("//table[@id='SearchTemplate13_DataGrid1']// \
table[@id='SearchTemplate13_DataGrid1__ctl3_ProductInfoTable']")
for product in products:
url = product.select("//tr[@id='SearchTemplate13_DataGrid1__ctl3_ProductNameRow']/td/a/@href").extract()
if url:
yield Request(urljoin_rfc(base_url, url[0]), callback=self.parse_item)
```
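The ASP.NET pagination links embed their postback target and argument inline; the regex in `parse_manufacturer_pages` extracts both and rewrites `$` to `:` for the form post. A quick illustration:

```python
import re

page = "javascript:__doPostBack('SearchTemplate13$DataGrid1$_ctl1$_ctl1','')"
m = re.search("doPostBack\('(.*?)','(.*?)'\)", page)
print m.group(1).replace('$', ':')  # SearchTemplate13:DataGrid1:_ctl1:_ctl1
print repr(m.group(2))              # ''
```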
#### File: scrapy/americanrv/pplmotorhomes_americanrv.py
```python
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy import log
import csv, codecs, cStringIO
from product_spiders.items import Product, ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class PplmotorhomesSpider(BaseSpider):
USER_AGENT = "Googlebot/2.1 ( http://www.google.com/bot.html)"
name = 'pplmotorhomes_americanrv.com'
allowed_domains = ['www.pplmotorhomes.com','pplmotorhomes.com','google.com','www.google.com']
start_urls = ('http://www.google.com',)
def __init__(self, *args, **kwargs):
super(PplmotorhomesSpider, self).__init__(*args, **kwargs)
# parse the csv file to get the product ids
csv_file = csv.reader(open(os.path.join(HERE, 'americanrv_products.csv')))
csv_file.next()
self.product_ids = {}
for row in csv_file:
ids = row[3].split(' ')
if ids[0] == '':
ids = set()
else:
ids = set(ids)
ids.add(row[0])
ids.add(row[2])
self.product_ids[row[0]] = {'ids': frozenset(ids), 'mfrgid': row[2]}
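        # Illustrative result (column meanings assumed: sku, name, mfrgid,
        # space-separated alternate ids):
        #   {'AB123': {'ids': frozenset(['AB123', 'MF-9', 'X1', 'X2']),
        #              'mfrgid': 'MF-9'}}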
def start_requests(self):
for sku, data in self.product_ids.items():
for id in data['ids']:
url = 'http://www.google.com/cse?cx=008536649155685395941%3Aiahjfr-bdbs&ie=UTF-8&q='+re.sub(' ','+', id)+'&sa=Search&siteurl=www.pplmotorhomes.com&ref=www.pplmotorhomes.com&nojs=1'
req = Request(url, callback=self.parse)
req.meta['search_q'] = id
req.meta['sku'] = sku
req.meta['mfrgid'] = data['mfrgid']
yield req
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
product_urls = hxs.select('//a[@class="l"]/@href').extract()
#product_urls = hxs.select('//a[@class="gs-title"]/@href').extract()
#log.msg("CRAWLING::::::::::: %s" % hxs.select('/html').extract())
if product_urls:
request = Request(product_urls[0], callback=self.parse_product, dont_filter=True)
request.meta['sku'] = response.meta['sku']
request.meta['search_q'] = response.meta['search_q']
request.meta['mfrgid'] = response.meta['mfrgid']
yield request
else:
return
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
products = hxs.select('//table[@width="86%"]/tr')
for product in products:
sku_ = product.select('./form/td[1]/b/text()').extract()
if sku_:
site_mfrgid = product.select('./form/td[2]/font[contains(text(),"Manufacturer")]/b/text()').extract()
if site_mfrgid:
site_mfrgid = site_mfrgid[0].lower() == response.meta['mfrgid'].lower()
else:
site_mfrgid = False
if sku_[0] == response.meta['search_q'] or site_mfrgid:
price = "".join(product.select("./form/td[3]/font/b/text()").re(r'([0-9\,\. ]+)')).strip()
if price:
name = product.select('./form/td[2]/text()').extract()[0]
product_loader = ProductLoader(item=Product(), response=response)
if '...Regularly' in name:
name = re.sub('\.{3}Regularly.*?\$.*$', '', name)
product_loader.add_value('price', price)
product_loader.add_value('url', response.url)
product_loader.add_value('sku', response.meta['sku'])
product_loader.add_value('identifier', response.meta['sku'].lower())
product_loader.add_value('name', response.meta['sku'] + ' ' + name)
yield product_loader.load_item()
name = hxs.select(u'//h1[@class="big product_title"]/text()').extract()
if not products and name:
product_loader = ProductLoader(item=Product(), response=response)
name = name[0]
if '...Regularly' in name:
name = re.sub('\.{3}Regularly.*?\$.*$', '', name)
product_loader.add_value('name', name)
product_loader.add_xpath('price', u'//dt[@id="prod_price"]//span[@class="small"]/strong[@class="big"]/text()',
re='\$(.*)')
product_loader.add_value('sku', response.meta['sku'])
product_loader.add_value('identifier', response.meta['sku'].lower())
product_loader.add_value('url', response.url)
site_mfrgid = hxs.select(u'//span[@class="small" and contains(text(),"Manufacturer")]/following-sibling::strong[1]/text()').extract()
if site_mfrgid:
site_mfrgid = site_mfrgid[0].lower().strip()
if site_mfrgid == response.meta['mfrgid'].strip().lower():
yield product_loader.load_item()
```
#### File: scrapy/applejack/klwines.py
```python
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class KLWinesSpider(BaseSpider):
name = 'klwines.com'
allowed_domains = ['www.klwines.com', 'klwines.com']
start_urls = ('http://www.klwines.com/content.asp?N=0&display=500&Nr=OR%28OutofStock%3AN%2CInventory+Location%3ASpecial+Order%29&Ns=p_lotGeneratedFromPOYN|0||p_price', )
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
# categories = hxs.select(u'').extract()
# for url in categories:
# url = urljoin_rfc(get_base_url(response), url)
# yield Request(url)
# pagination
next_page = hxs.select(u'//a[@title="Next Page"]/@href').extract()
if next_page:
next_page = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(next_page)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
products = hxs.select(u'//div[@class="result clearfix"]')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
url = product.select(u'.//div[@class="result-desc"]/a/@href').extract()
name = product.select(u'.//div[@class="result-desc"]/a/text()').extract()
if not url:
url = product.select(u'.//div[@class="auctionResult-desc"]/p/a/@href').extract()
name = product.select(u'.//div[@class="auctionResult-desc"]/p/a/text()').extract()
url = urljoin_rfc(get_base_url(response), url[0])
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_xpath('price', u'.//span[@class="price"]/span[@class="global-serif global-pop-color"]/strong/text()')
if loader.get_output_value('price'):
yield loader.load_item()
```
#### File: scrapy/axemusic/lamusic_ca.py
```python
import re
import logging
import urllib
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class LaMusicCaSpider(BaseSpider):
name = 'lamusic.ca'
allowed_domains = ['lamusic.ca']
start_urls = ('http://www.lamusic.ca',)
def parse(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//div[@id="display_menu_s"]/ul/li/a/@href').extract():
yield Request(url + '?searching=Y&show=4000&page=1', callback=self.parse_product_list)
def parse_product_list(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[contains(@class,"subcategory_link")]/@href').extract():
yield Request(url + '?searching=Y&show=4000&page=1', callback=self.parse_product_list)
for url in hxs.select(u'//a[contains(@class,"productnamecolor")]/@href').extract():
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('url', response.url)
product_loader.add_xpath('name', u'//span[@itemprop="name"]/text()')
product_loader.add_xpath('price', u'//form[@id="vCSS_mainform"]//span[@itemprop="price"]/text()')
product_loader.add_xpath('sku', u'//span[@class="product_code"]/text()')
product_loader.add_xpath('category', u'//td[@class="vCSS_breadcrumb_td"]//a[position()=2]/@title')
product_loader.add_xpath('image_url', u'concat("http:",//img[@id="product_photo"]/@src)')
product_loader.add_xpath('brand', u'//meta[@itemprop="manufacturer"]/@content')
if hxs.select(u'//img[@class="vCSS_img_icon_free_shipping"]'):
product_loader.add_value('shipping_cost', '0')
product = product_loader.load_item()
if hxs.select(u'//tr[@class="Multi-Child_Background"]'):
for opt in hxs.select(u'//tr[@class="Multi-Child_Background"]'):
p = Product(product)
p['sku'] = opt.select(u'./td[1]/text()').extract()[0].strip()
p['name'] = opt.select(u'./td[2]/text()').extract()[0].strip()
p['price'] = opt.select(u'./td[4]//span[@itemprop="price"]/text()').extract()[0].strip().replace('$', '').replace(',', '')
yield p
else:
yield product
```
#### File: scrapy/axemusic/tomleemusic_ca.py
```python
import re
import logging
import urllib
import csv
import os
import shutil
from datetime import datetime
import StringIO
from scrapy.spider import BaseSpider
from scrapy import signals
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import CloseSpider
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class TomLeeMusicCaSpider(BaseSpider):
name = 'tomleemusic.ca'
allowed_domains = ['tomleemusic.ca', 'competitormonitor.com']
def __init__(self, *args, **kwargs):
super(TomLeeMusicCaSpider, self).__init__(*args, **kwargs)
dispatcher.connect(self.spider_closed, signals.spider_closed)
def start_requests(self):
if self.full_run_required():
start_req = self._start_requests_full()
log.msg('Full run')
else:
start_req = self._start_requests_simple()
log.msg('Simple run')
for req in start_req:
yield req
def spider_closed(self, spider):
if spider.name == self.name:
shutil.copy('data/%s_products.csv' % spider.crawl_id, os.path.join(HERE, 'tomleemusic_products.csv'))
def _start_requests_full(self):
yield Request('http://www.tomleemusic.ca/main/products.cfm', callback=self.parse_full)
def _start_requests_simple(self):
yield Request('http://competitormonitor.com/login.html?action=get_products_api&website_id=470333&matched=1',
callback=self.parse_simple)
def full_run_required(self):
if not os.path.exists(os.path.join(HERE, 'tomleemusic_products.csv')):
return True
        # full run once a week; note weekday() == 1 is Tuesday, not Monday
        return datetime.now().weekday() == 1
def parse_full(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
yield Request(url, callback=self.parse_product_list)
def parse_product_list(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
yield Request(url, callback=self.parse_product_list)
for url in hxs.select(u'//a[@class="productListLink"]/@href').extract():
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
next_page = hxs.select(u'//a[@class="smallPrint" and contains(text(),"Next")]/@href').extract()
if next_page:
url = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(url, callback=self.parse_product_list)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('url', response.url)
product_loader.add_xpath('name', u'//h1[@class="productDetailHeader"]/text()')
if hxs.select(u'//span[@class="productDetailSelling"]/text()'):
product_loader.add_xpath('price', u'//span[@class="productDetailSelling"]/text()')
else:
product_loader.add_value('price', '')
product_loader.add_xpath('sku', u'//input[@type="hidden" and (@name="hidProductId" or @name="inv")]/@value')
product_loader.add_xpath('category', u'//td[@class="smallPrint"]/a[position()=2 and contains(text(),"Products")]/../a[3]/text()')
img = hxs.select(u'//a[@class="smallPrint" and @rel="lightbox"]/@href').extract()
if img:
img = urljoin_rfc(get_base_url(response), img[0])
product_loader.add_value('image_url', img)
if hxs.select(u'//a[contains(@href,"BrandName")]/@href'):
product_loader.add_xpath('brand', u'substring-after(//a[contains(@href,"BrandName")]/@href,"=")')
else:
brands = hxs.select(u'//strong[@class="sideBarText"]/text()').extract()
brands = [b.strip() for b in brands]
for brand in brands:
if product_loader.get_output_value('name').startswith(brand):
product_loader.add_value('brand', brand)
break
else:
product_loader.add_xpath('brand', u'normalize-space(substring-before(substring-after(//title/text(), " - "), " - "))')
# product_loader.add_xpath('shipping_cost', u'//div[@class="DetailRow"]/div[contains(text(),"Shipping")]/../div[2]/text()')
yield product_loader.load_item()
def parse_simple(self, response):
f = StringIO.StringIO(response.body)
hxs = HtmlXPathSelector()
reader = csv.DictReader(f)
self.matched = set()
for row in reader:
self.matched.add(row['url'])
for url in self.matched:
yield Request(url, self.parse_product)
with open(os.path.join(HERE, 'tomleemusic_products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
if row['url'] not in self.matched:
loader = ProductLoader(selector=hxs, item=Product())
loader.add_value('url', row['url'])
loader.add_value('sku', row['sku'])
loader.add_value('identifier', row['identifier'])
loader.add_value('name', row['name'])
loader.add_value('price', row['price'])
loader.add_value('category', row['category'])
loader.add_value('brand', row['brand'])
loader.add_value('image_url', row['image_url'])
loader.add_value('shipping_cost', row['shipping_cost'])
yield loader.load_item()
```
#### File: scrapy/bosch_russian/vseinstrumenti.py
```python
import csv
import os
import json
from string import join
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class VseInstrumenti(BaseSpider):
base_url = "http://www.vseinstrumenti.ru/"
name = 'vseinstrumenti.ru'
allowed_domains = ['vseinstrumenti.ru']
start_urls = [base_url]
def start_requests(self):
with open(os.path.join(HERE, 'bosh_products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku'].strip()
name = row['name'].strip()
url = 'http://www.vseinstrumenti.ru/pre_search.php?make=0&term=%s'
yield Request(url % sku, meta={'sku': sku, 'name': name, 'search_by':'sku'})
def parse(self, response):
if response.body.strip() == "":
if response.meta['search_by']=='sku':
#search by name
url = 'http://www.vseinstrumenti.ru/pre_search.php?make=0&term=%s'
return Request(url % response.meta['name'].replace(" ","+"), callback=self.parse, meta={'sku': response.meta['sku'], 'name': response.meta['name'], 'search_by':'name'})
else:
jdata = json.loads(response.body)
for jd in jdata:
if response.meta['sku'].decode('utf-8') in jd['label'].replace(".","") or response.meta['name'].decode('utf-8') in jd['label']:
try:
log.msg("LINK:"+jd['link'])
return Request(jd['link'], callback=self.parse_product, meta={'sku': response.meta['sku'], 'name':jdata[0]['label']} )
except ValueError:
return Request((self.base_url+jdata[0]['link']).replace('.ru//', '.ru/'), callback=self.parse_product, meta={'sku': response.meta['sku'], 'name':jdata[0]['label']} )
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
price = join(hxs.select(u'//div[contains(@class, "goods_price")]/text()').extract())
price = price.strip().replace(" ","")
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('name', response.meta["name"])
product_loader.add_value('url', response.url)
product_loader.add_value('price', price)
product_loader.add_value('sku', response.meta["sku"])
if product_loader.get_output_value('price'):
return product_loader.load_item()
```
#### File: scrapy/bosch_uk_diy/amazon_co_uk.py
```python
import re
import os
import csv
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
from urlparse import urlparse
import time
HERE = os.path.abspath(os.path.dirname(__file__))
class AmazonCoUkSpider(BaseSpider):
name = 'amazon.co.uk-bosch-diy'
allowed_domains = ['amazon.co.uk', 'www.amazon.co.uk']
start_urls = ['http://www.amazon.co.uk/']
def parse(self, response):
with open(os.path.join(HERE, 'amazon_co_uk.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
if not len(row['url'].strip()):
continue
url = re.sub(r'#.+$', '', row['url'])
log.msg('URL: %s' % url)
if urlparse(url).scheme != 'http':
continue
request = Request(url, callback=self.parse_product)
request.meta['sku'] = row['sku']
yield request
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_xpath('name', '//span[@id="btAsinTitle"]/text()')
price = hxs.select('//span[@id="actualPriceValue"]//b/text()')
if not price:
price = hxs.select('//div[@id="secondaryUsedAndNew"]//span[@class="price"]//text()')
if price:
product_loader.add_value('price', price.extract()[0].replace(u'\xa3', ''))
else:
product_loader.add_value('price', 0)
product_loader.add_value('sku', response.meta['sku'])
product_loader.add_value('url', response.url)
yield product_loader.load_item()
```
#### File: scrapy/camerahouse/jbhifi_spider.py
```python
import csv
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class JbhifiSpider(BaseSpider):
name = 'jbhifi.com.au'
allowed_domains = ['jbhifi.com.au', 'jbhifionline.com.au']
start_urls = ['http://www.jbhifionline.com.au']
def parse(self, response):
hxs = HtmlXPathSelector(response)
relative_urls = hxs.select('//*[@id="outernavigation"]/table/tr/td/a/@href').extract()
for relative_url in relative_urls:
url = urljoin_rfc(get_base_url(response), relative_url)
yield Request(url, callback=self.parse_subcategories)
def parse_subcategories(self, response):
hxs = HtmlXPathSelector(response)
relative_urls = hxs.select('//*[@id="leftNav"]/div[@class="sidenav"]/ul/li/a/@href').extract()
for relative_url in relative_urls:
url = urljoin_rfc(get_base_url(response), relative_url)
yield Request(url, callback=self.parse_products)
def parse_products(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@class="result_container"]')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'div/div/div/div/h1/a/text()')
url = urljoin_rfc(get_base_url(response), product.select('div/div/div/div/h1/a/@href').extract()[0])
loader.add_value('url', url)
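            # The price is rendered as an image; the numeric value lives in its alt attribute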
loader.add_xpath('price', 'div//div[@class="price-image-layer"]/img/@alt')
yield loader.load_item()
next = hxs.select('//div[@class="CatNavigation"]/a[text()="»"]/@href'.decode('utf')).extract()
if next:
url = urljoin_rfc(get_base_url(response), next[0])
yield Request(url, callback=self.parse_products)
```
#### File: scrapy/cocopanda/beautycos_spider.py
```python
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class BeautycosSpider(BaseSpider):
name = 'cocopanda-beautycos.dk'
allowed_domains = ['beautycos.dk']
start_urls = ['http://www.beautycos.dk']
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select('//form/select/option/@value').extract()
for category in categories:
yield Request('http://www.beautycos.dk/group.asp?group='+category, callback=self.parse_products)
def parse_products(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//table[@class="group-list"]/tr/td/table/tr/td[@id="group"]')
if products:
for product in products:
url = urljoin_rfc(get_base_url(response), ''.join(product.select('font/a[1]/@href').extract()))
price = ''.join(product.select('font/b/text()').extract()).replace('.','').replace(',','.')
                if 'Pris' not in price:  # was: price == 'Midlertidig udsolgt'
yield Request(url, callback=self.parse_product)
else:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'font/a/text()')
loader.add_value('url', url)
loader.add_value('price', price)
yield loader.load_item()
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_xpath('name', '//*[@id="header"]/text()')
loader.add_value('url', response.url)
price = ''.join(hxs.select('//*[@id="productdesc"]/font/font/text()').extract()).replace('.','').replace(',','.')
if price:
price = price.split(':')[-1]
loader.add_value('price', price)
yield loader.load_item()
```
#### File: scrapy/cocopanda/cocopanda_spider.py
```python
import re
import os
import json
from scrapy.spider import BaseSpider
from scrapy.contrib.spiders import XMLFeedSpider
from scrapy.selector import XmlXPathSelector
from scrapy.http import Request, XmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import hashlib
from decimal import Decimal
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class CocopandaSpider(XMLFeedSpider):
name = 'cocopanda.dk'
allowed_domains = ['cocopanda.dk']
start_urls = ('http://www.cocopanda.dk/kelkoo.xml',)
itertag = 'product'
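    # One item per <product> node of the Kelkoo feed; the SKU is the penultimate URL path segment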
def parse_node(self, response, node):
if not isinstance(response, XmlResponse):
return
loader = ProductLoader(item=Product(), selector=node)
url = node.select(u'./product-url/text()').extract()[0]
loader.add_value('sku', url.split('/')[-2])
loader.add_value('url', url)
loader.add_xpath('name', u'./title/text()')
price = node.select(u'./price/text()').extract()[0].replace(',', '.')
loader.add_value('price', price)
if loader.get_output_value('price'):
return loader.load_item()
else:
return Product()
```
#### File: scrapy/cocopanda/nicehair_spider.py
```python
import csv
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class NiceHairSpider(BaseSpider):
name = 'cocopanda-nicehair.dk'
allowed_domains = ['nicehair.dk']
start_urls = ['http://nicehair.dk']
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select('//*[@id="narrow-by-list"]/dt/a/@href').extract()
for category in categories:
yield Request(category.replace('#nav','?limit=all'), callback=self.parse_products)
def parse_products(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@class="grid-second"]/ul/li')
if products:
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'h5/a/text()')
loader.add_xpath('url', 'h5/a/@href')
price = ''.join(product.select('div[@class="price"]/b/text()').extract()).replace('.','').replace(',','.')
loader.add_value('price', price)
yield loader.load_item()
else:
sub_categories = hxs.select('//div[@class="cats"]/center/a/@href').extract()
for sub_category in sub_categories:
url = urljoin_rfc(get_base_url(response), sub_category)
yield Request(url+'?limit=all', callback=self.parse_products)
```
#### File: scrapy/cpr/cprfranchisecom.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import ProductLoader, Product
import logging
login = 'cmonitor'
password = '<PASSWORD>'
class CprFranchiseComSpider(BaseSpider):
name = 'cpr-franchise.com'
allowed_domains = ['cpr-franchise.com']
start_urls = (
'http://cpr-franchise.com/online-store/error_message.php?need_login',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
login_form = hxs.select("//form[@name='errorform']")
login_post_url = login_form.select("@action").extract()[0]
login_form_data = {}
for input_el in login_form.select("input"):
input_name = input_el.select("@name").extract()[0]
input_value = input_el.select("@value").extract()[0]
login_form_data[input_name] = input_value
login_form_data['username'] = login
login_form_data['password'] = password
request = FormRequest(login_post_url, formdata=login_form_data, callback=self.parse_main)
yield request
def parse_main(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
categories = hxs.select("//div[@id='catrootmenu']//a/@href").extract()
for category in categories:
url = urljoin_rfc(base_url, category)
yield Request(url, callback=self.parse_main)
items_table = hxs.select("//table[contains(@class, 'products-table')]")
rows = items_table.select("tr")
i = 0
count = 0
rows_count = len(rows)
logging.error("Found rows: %d" % rows_count)
while i < rows_count:
image_row = rows.pop(0)
name_row = rows.pop(0)
sku_row = rows.pop(0)
price_row = rows.pop(0)
for name_cell, price_cell in zip(name_row.select('td'), price_row.select('td')):
name = name_cell.select("a/text()").extract()
if not name:
continue
name = name[0]
url = name_cell.select("a/@href").extract()
if not url:
continue
url = url[0]
url = urljoin_rfc(base_url, url)
price = price_cell.select(".//span[@class='product-price-value']/span/text()").extract()
if not price:
logging.error("%s - ERROR! NO PRICE!" % response.url)
continue
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
count +=1
i += 4
logging.error("Used rows: %d" % i)
```
#### File: scrapy/cpr/wirelesspartsinccom.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import ProductLoader, Product
import logging
class WirelesspartsincComSpider(BaseSpider):
name = 'wirelesspartsinc.com'
allowed_domains = ['wirelesspartsinc.com']
start_urls = (
'https://www.wirelesspartsinc.com/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
categories = hxs.select("//table[@class='module category-module']//a/@href").extract()
for category in categories:
url = urljoin_rfc(base_url, category)
yield Request(url, callback=self.parse)
sub_categories = hxs.select("//table[@class='category-list']//a/@href").extract()
for sub_category in sub_categories:
url = urljoin_rfc(base_url, sub_category)
yield Request(url, callback=self.parse)
pages = hxs.select("//table[@class='product-pager']//a/@href").extract()
for page in pages:
url = urljoin_rfc(base_url, page)
yield Request(url, callback=self.parse)
items_table = hxs.select("//table[@class='product-list']/tr/td/div[@class='product-list-item']")
for item in items_table:
name = item.select("div[@class='product-list-options']/h5/a/text()").extract()
if not name:
logging.error("%s - ERROR! NO NAME!" % response.url)
continue
name = name[0]
url = item.select("div[@class='product-list-options']/h5/a/@href").extract()
if not url:
logging.error("%s - ERROR! NO URL!" % response.url)
continue
url = url[0]
url = urljoin_rfc(base_url, url)
price = item.select("div[@class='product-list-options']/div[@class='product-list-price']/\
div[@class='product-list-cost']/span[@class='product-list-cost-value']/text()").extract()
if not price:
logging.error("%s - %s - ERROR! NO PRICE!" % (response.url,
name))
continue
price = price[-1]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
```
#### File: scrapy/dyersonline/googe_shopping_api.py
```python
import csv
import codecs
import cStringIO
import os
import copy
import json
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from product_spiders.items import Product, ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
KEYS = ['AIzaSyDmC2E8OgTrtikhGt5OlVaY8GqqSu696KE', '<KEY>',
'<KEY>',]
class GoogleSpider(BaseSpider):
name = 'google.com_DO'
allowed_domains = ['googleapis.com']
def start_requests(self):
csv_file = UnicodeReader(open(os.path.join(HERE, 'skus.csv')))
for i, row in enumerate(csv_file):
sku = row[0]
query = (row[4]).replace(' ', '+')
url = 'https://www.googleapis.com/shopping/search/v1/public/products' + \
'?key=%s&country=US&' + \
'q=%s&rankBy=price%%3Aascending'
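            # Rotate across the available API keys to spread request quota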
yield Request(url % (KEYS[i % len(KEYS)], query), meta={'sku': sku})
def parse(self, response):
data = json.loads(response.body)
if not data['totalItems']:
return
item = data['items'][0]
pr = Product()
pr['name'] = (item['product']['title'] + ' ' + item.get('product', {}).get('author', {}).get('name', '')).strip()
pr['url'] = item['product']['link']
pr['price'] = Decimal(str(data['items'][0]['product']['inventories'][0]['price']))
pr['sku'] = response.meta['sku']
yield pr
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
```
#### File: scrapy/eservicegroup/portusdigitalpxrtrk_couk.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.response import get_base_url
from scrapy.http import Request
from product_spiders.items import ProductLoader, Product
import urlparse
__author__ = '<NAME>. <<EMAIL>>'
class PortusDigitalSpider(BaseSpider):
name = "portusdigital-px.rtrk.co.uk"
allowed_domains = ["portusdigital-px.rtrk.co.uk"]
start_urls = ["http://portusdigital-px.rtrk.co.uk"]
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
for href in hxs.select('//ul[@id="nav"]/li/a/@href').extract():
yield Request(urlparse.urljoin(base_url, href) + "?limit=25", callback=self.load_products)
def load_products(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
next = hxs.select('//div[@class="pager"]//a[@class="next i-next"]')
if next:
href = next.select("./@href").extract()[0]
yield Request(urlparse.urljoin(base_url, href), callback=self.load_products)
for product_box in hxs.select('//ol[@id="products-list"]/li'):
product_loader = ProductLoader(item=Product(), selector=product_box)
product_loader.add_xpath('name', './/h2[@class="product-name"]/a/text()')
product_loader.add_xpath('url', './/h2[@class="product-name"]/a/@href')
if product_box.select('.//p[@class="special-price"]'):
product_loader.add_xpath('price', './/div[@class="price-box"]/p[@class="special-price"]/span[@class="price"]/text()')
else:
product_loader.add_xpath('price', './/div[@class="price-box"]//span[@class="regular-price"]/span[@class="price"]/text()')
yield product_loader.load_item()
```
#### File: scrapy/firedupgroup/tescocom.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
import logging
class TescoComSpider(BaseSpider):
name = 'tesco.com'
allowed_domains = ['tesco.com']
start_urls = (
'http://www.tesco.com/direct/home-electrical/fires/cat3376574.cat',
)
def parse(self, response):
URL_BASE = get_base_url(response)
hxs = HtmlXPathSelector(response)
pages_urls = hxs.select("//div[contains(@class, 'pagination')]/a/@href").extract()
for url in pages_urls:
url = urljoin_rfc(URL_BASE, url)
yield Request(url)
products_els = hxs.select("//li[contains(@class, 'product')]/div[@class='product-details']")
for product_el in products_els:
name = product_el.select("div[contains(@class, 'product-name')]/h3/a/text()").extract()
if not name:
logging.error('ERROR!! NO NAME!! %s' % response.url)
continue
name = " ".join(name)
url = product_el.select("div[contains(@class, 'product-name')]/h3/a/@href").extract()
if not url:
logging.error('ERROR!! NO URL!! %s %s' % (response.url, name))
continue
url = url[0]
url = urljoin_rfc(URL_BASE, url)
price = product_el.select("div[contains(@class, 'price-spacing')]/p[@class='current-price']/span[@class='pounds']/text()").extract()
price2 = product_el.select("div[contains(@class, 'price-spacing')]/p[@class='current-price']/span[@class='pence']/text()").extract()
if not price:
logging.error('ERROR!! NO PRICE!! %s %s' % (response.url, name))
continue
price = price[0]
if price2:
price += "." + price2[0]
product = Product()
loader = ProductLoader(item=product, response=response)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
loader.add_value('sku', '')
yield loader.load_item()
```
#### File: scrapy/gitarhuset/evenstadmusikk.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.response import get_base_url
from scrapy.http import Request
import urlparse
from product_spiders.items import ProductLoader, Product
__author__ = '<NAME>. <<EMAIL>>'
class EventstadMusikkSpider(BaseSpider):
name = "evenstadmusikk.no"
allowed_domains = ["evenstadmusikk.no"]
start_urls = ("http://evenstadmusikk.no/index.php", )
def parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
self.navig_url_set = set()
cat_urls = hxs.select('//div[@id="navColumnOne"]//a[@class="category-top"]/@href').extract()
for cat_url in cat_urls:
subcat_url = urlparse.urljoin(base_url, cat_url)
self.navig_url_set.add(subcat_url)
yield Request(subcat_url, callback=self.browse_and_parse)
def browse_and_parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
for subcat_href in hxs.select('//div[@id="navColumnOne"]//a/@href').extract():
subsubcat_url = urlparse.urljoin(base_url, subcat_href)
if subsubcat_url not in self.navig_url_set:
self.navig_url_set.add(subsubcat_url)
yield Request(subsubcat_url, callback=self.browse_and_parse)
next_page = hxs.select("//div[@id='productListing']//div[@id='productsListingListingTopLinks']//a[contains(., 'Neste')]/@href")
if next_page:
yield Request(next_page[0].extract(), callback=self.browse_and_parse)
        # parse product listing on this page, if any
for tr in hxs.select('//div[@id="productListing"]//tr[@class="productListing-even" or @class="productListing-odd"]'):
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_value('url', tr.select(".//td[2]//a/@href").extract()[0])
product_loader.add_value('name', tr.select(".//td[2]//a/text()").extract()[0])
product_loader.add_value('price', tr.select(".//td[3]/text()").extract()[0].split("-")[0].split(" ")[1].replace('.', '').replace(',', '.'))
yield product_loader.load_item()
# edge case: product listing page with a single product
product_price = hxs.select('//h2[@id="productPrices"]/text()').extract()
if product_price:
# this product listing page contains a single product
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_xpath('name', '//h1[@id="productName"]/text()')
product_loader.add_value('url', response.url)
product_loader.add_value('price', product_price[0].split("-")[0].split(" ")[1].replace('.', '').replace(',', '.'))
yield product_loader.load_item()
```
#### File: scrapy/gitarhuset/forsounds_spider.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy import log
from product_spiders.items import Product, \
ProductLoaderWithNameStrip as ProductLoader
class ForSoundsSpider(BaseSpider):
name = "4sound.no"
allowed_domains = ['4sound.no']
start_urls = ['http://www.4sound.no',]
def parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
catlurls = hxs.select("//div[@id='l_nav']/ul[1]//a/@href").extract()
for catlurl in catlurls:
yield Request(urljoin_rfc(base_url, catlurl), \
callback=self.parse_section_maingroup)
def parse_section_maingroup(self, response):
hxs = HtmlXPathSelector(response)
sectionurls = hxs.select("//div[@id='l_nav2']//a/@href").extract()
for sectionurl in sectionurls:
yield Request(urljoin_rfc('http://www.4sound.no', sectionurl), \
callback=self.parse_section)
def parse_section(self, response):
hxs = HtmlXPathSelector(response)
producturls = hxs.select("//div[@id='mainContent']//table//a/@href").extract()
for producturl in producturls:
log.msg('product url %s' % producturl)
yield Request(urljoin_rfc('http://www.4sound.no', producturl), \
callback=self.parse_product)
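        # A __doPostBack link signals more results; replay the ASP.NET form to fetch the next batch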
if "javascript:__doPostBack('_ctl0$_ctl0$MainContent$MainContent$Linkbutton3','')" in producturls:
formname = 'aspNetForm'
formdata = {'__EVENTTARGET':
'_ctl0$_ctl0$MainContent$MainContent$Linkbutton3',
'__EVENTARGUMENT': ''}
request = FormRequest.from_response(response, formname=formname,
formdata=formdata,
dont_click=True, callback=self.parse_section)
yield request
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
loader = ProductLoader(response=response, item=Product())
log.msg("%s %s %s" % (response.url,
hxs.select("//span[@class='itempgHeadline']/text()").extract(),
hxs.select("//span[@class='pris_base']/text()").extract()))
price = hxs.select("//span[@class='pris_base']/text()").extract()
if not response.url.startswith('javascript') and price:
str_price = price[0].split(' ')[-1][:-2]
price = int(float(str_price) * 1000) if '.' \
in str_price else int(str_price)
loader.add_xpath('name', "//span[@class='itempgHeadline']/text()")
loader.add_value('url', response.url)
loader.add_value('price', price)
yield loader.load_item()
```
#### File: scrapy/graigfarm/donaldrussel_crawler.py
```python
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
class DonaldRusselSpider(BaseSpider):
name = 'donaldrussell.com'
allowed_domains = ['www.donaldrussell.com']
start_urls = (
'http://www.donaldrussell.com/meat.html',
'http://www.donaldrussell.com/poultry-and-game.html',
'http://www.donaldrussell.com/fish-seafood.html',
)
def parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
cat_urls = hxs.select('//div[@class="clist-title"]/a/@href').extract()
log.msg("Found %d categories in %s" % (len(cat_urls), response.url))
for cat_url in cat_urls:
yield Request(cat_url, callback=self.parse)
# First AJAX to load some sort of placeholder
#re_ajax_url1 = re.compile("\'(http://www\.donaldrussell\.com/personalmerchantextension/event/suggest/block_names/.+)\';")
#match_url1 = re_ajax_url1.search(response.body)
# Second AJAX to load the products
re_ajax_url2 = re.compile("\'(http://www.donaldrussell.com/cachemanager/block/index/block_names.+)\';")
match_url2 = re_ajax_url2.search(response.body)
# Get all required variables for AJAX requests
re_registry_data = re.compile("registry_data\[\'(\w+)\'] = \'([^\']+)\'")
match_registries = re_registry_data.findall(response.body)
#if match_url1 and match_url2 and match_registries:
if match_url2 and match_registries:
registry_data = {}
for m in match_registries:
registry_data[m[0]] = m[1]
#if registry_data.has_key('current_category'):
#url1 = match_url1.group(1) + "current_category/" + registry_data["current_category"] + "/"
#yield Request(url1)
url2 = match_url2.group(1)
for (key, val) in registry_data.items():
url2 += key+'/'+val+'/'
yield Request(url2, callback=self.parse_product)
def parse_product(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
products_url = hxs.select('//li[contains(@class,"item")]//h2/a/@href').extract()
for product_url in products_url:
yield Request(product_url, callback=self.parse_product_detail)
next_page = hxs.select('//div[@class="pager"]//a[@class="next"]/@href').extract()
if(next_page):
yield Request(next_page[0], callback=self.parse_product)
def parse_product_detail(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
name = hxs.select('//div[@class="product-name"]/h1/text()').extract()[0]
# hxs.select('//div[@class="product-name"]/h1/text()')
simple_product = hxs.select('//script[contains(text(), "catalog_product_view_type_simple")]')
if simple_product:
html = simple_product.re('jQuery\(.+catalog_product_view_type_simple.+\.html\(\"(.+)\"\);')[0]
html = self.stripslashes(html)
hxs_item = HtmlXPathSelector(text=html)
# Discounted price
price = hxs_item.select('//span[contains(@id,"product-price")]/span/text()')
if not price:
# Normal price
price = hxs_item.select('//span[contains(@id,"product-price")]/text()')
price = price.extract()[0].strip()
            # Strip the pound sign, left over as "u00a3" after JS unescaping
            price = price.replace("u00a3","")
loader = ProductLoader(item=Product(), selector=hxs_item)
loader.add_value('name', name)
loader.add_value('url', response.url)
loader.add_value('price', price)
loader.add_value('sku', simple_product.re('CODE: (\w+)'))
yield loader.load_item()
sub_products = hxs.select('//script[contains(text(), "catalog_product_view_type_grouped")]')
if sub_products:
html = sub_products.re('jQuery\(.+\.html\(\"(.+)\"\);')[0]
html = self.stripslashes(html)
hxs_item = HtmlXPathSelector(text=html)
sub_products = hxs_item.select('//table//tr[descendant::a]')
for sub_product in sub_products:
loader = ProductLoader(item=Product(), selector=sub_product)
                # prices are quoted in pounds (\u00a3), left as "u00a3" after JS unescaping
                price = sub_product.select('.//span[@class="price"]/text()').extract()[0].strip()
                price = price.replace("u00a3","")
                loader.add_value('name', name + ' ' + u'\u00a3' + price)
loader.add_value('url', response.url)
loader.add_value('price', price)
loader.add_value('sku', sub_product.re('Code: (\w+)')[0])
yield loader.load_item()
def stripslashes(self, s):
r = re.sub(r"\\(n|r)", "\n", s)
r = re.sub(r"\\", "", r)
return r
```
#### File: scrapy/guy_broadest/gardenlines.py
```python
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from productloader import load_product
from scrapy.http import FormRequest
from product_spiders.items import ProductLoader, Product
class gardenlinesSpider(BaseSpider):
name = "gardenlines.co.uk"
allowed_domains = ["www.gardenlines.co.uk"]
start_urls = ("http://www.gardenlines.co.uk/",)
def parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
content = hxs.select("//div[@class='Menu']/ul/li")
items = content.select(".//a/@href").extract()
for item in items:
yield Request(urljoin_rfc(base_url,item), callback=self.parse_subcat)
def parse_subcat(self,response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
content = hxs.select("//div[@class='Menu']/ul/li/ul[@class='SubMenu']/li")
items = content.select(".//a/@href").extract()
for item in items:
yield Request(urljoin_rfc(base_url,item), callback=self.parse_items)
def parse_items(self,response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
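        # Product URLs are embedded in the MoreInfo buttons' onclick handlers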
items = hxs.select("//div[@class='Content']/div/div/div/div/div[@class='MoreInfo']/@onclick").re(r'\'(.*)\'')
for item in items:
yield Request(urljoin_rfc(base_url,item), callback=self.parse_item)
def parse_item(self, response):
hxs = HtmlXPathSelector(response)
name = hxs.select("//div[@class='Content']/div/h1/text()").re(r'([a-zA-Z0-9\-\_\.\(\)\&\#\%\@\!\*][a-zA-Z0-9\-\_\.\(\)\&\#\%\@\!\* ]+)')
url = response.url
price = hxs.select("//div[@class='Content']/div/div//h5/text()").re(r'\xa3([\.0-9,]*)')
l = ProductLoader(item=Product(), response=response)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
```
#### File: scrapy/hifix/exceptionalavcouk.py
```python
import logging
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
class ExceptionalAvCoUkSpider(BaseSpider):
name = 'exceptional-av.co.uk'
allowed_domains = ['exceptional-av.co.uk']
start_urls = ('http://exceptional-av.co.uk',)
def parse(self, response):
URL_BASE = get_base_url(response)
hxs = HtmlXPathSelector(response)
# categories
category_urls = hxs.select('//div[@id="navbox"]/ul/li//a/@href').extract()
for url in category_urls:
url = urljoin_rfc(URL_BASE, url)
yield Request(url)
# one product
product = hxs.select("//td[@class='shopInformation']")
if product:
name = hxs.select("//span[@class='shopPageTitle']/text()[last()]").re(" > (.*)$")
if not name:
logging.error("ERROR!! NO NAME!! %s" % response.url)
url = response.url
price = product.select("form/span[1]//text()").extract()
if not price:
price = product.select("span[1]//text()").extract()
if not price:
logging.error("ERROR!! NO PRICE!! %s" % response.url)
if name and price:
name = name[0]
price = price[0]
product = Product()
loader = ProductLoader(item=product, response=response)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
yield loader.load_item()
# products list
products = hxs.select("//td[@class='shopSummary']")
for product_el in products:
name = product_el.select("table[1]/tr[1]/td[1]/a/text()").extract()
if not name:
continue
name = name[0]
url = product_el.select("table[1]/tr[1]/td[1]/a/@href").extract()
if not url:
logging.error("ERROR!! NO URL!! %s %s" % (response.url, name))
continue
url = url[0]
url = urljoin_rfc(URL_BASE, url)
price = product_el.select("table/form/tr/td/span[1]/text()").extract()
if not price:
price = product_el.select("table/tr[last()]/td/span[1]/text()").extract()
if not price:
logging.error("ERROR!! NO PRICE!! %s %s" % (response.url, name))
continue
product = Product()
loader = ProductLoader(item=product, response=response)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
yield loader.load_item()
if not products and not product:
logging.error("ERROR!! NO PRODUCTS!! %s " % response.url)
```
#### File: scrapy/hof_travel/houseoffraser_spider.py
```python
import os
import shutil
from scrapy import signals
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from scrapy.xlib.pydispatch import dispatcher
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class HouseOfFraserSpider(BaseSpider):
name = 'houseoffraser.co.uk-travel'
allowed_domains = ['houseoffraser.co.uk']
start_urls = ['http://www.houseoffraser.co.uk/Antler+Bags+Luggage/BRAND_ANTLER_17,default,sc.html&redirectQuery=antler?sz=200&spcl',
'http://www.houseoffraser.co.uk/Samsonite+Bags+Luggage/%20BRAND_SAMSONITE_17,default,sc.html&redirectQuery=samsonite?sz=200&spcl',
'http://www.houseoffraser.co.uk/Eastpak+Bags+Luggage/BRAND_EASTPAK_17,default,sc.html?sz=200&spcl',
'http://www.houseoffraser.co.uk/Wenger/BRAND_WENGER,default,sc.html?redirectQuery=wenger?sz=200&spcl']
def __init__(self, *a, **kw):
super(HouseOfFraserSpider, self).__init__(*a, **kw)
dispatcher.connect(self.spider_closed, signals.spider_closed)
def spider_closed(self, spider):
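        # Copy this crawl's product CSV to a stable filename alongside the spider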
if spider.name == self.name:
shutil.copy('data/%s_products.csv' % spider.crawl_id, os.path.join(HERE, 'houseoffraser_travel.csv'))
log.msg("CSV is copied")
def parse(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@class="mainColumn"]/ol[@class="productListing clearfix"]/li')
if products:
for product in products:
loader = ProductLoader(item=Product(), selector=product)
name = ' '.join(product.select('span[@class="productInfo"]/a/descendant::*/text()').extract())
loader.add_value('name', name)
loader.add_xpath('url', 'a/@href')
loader.add_xpath('price', 'span/span[@class="price" or @class="priceNow"]/text()')
yield loader.load_item()
next = hxs.select('//a[@class="pager nextPage"]/@href').extract()
if next:
            yield Request(next[0], callback=self.parse)
```
#### File: scrapy/hoptec/hoptec_amazon.py
```python
import csv
import os
import copy
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class AmazonfrSpider(BaseSpider):
name = 'hoptec-amazon.fr'
allowed_domains = ['amazon.fr']
def start_requests(self):
with open(os.path.join(HERE, 'products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku'].strip()
url = 'http://www.amazon.fr/s/ref=nb_sb_noss_1?' + \
'url=search-alias%%3Dwatches&field-keywords=%s&x=0&y=0'
yield Request(url % sku, meta={'sku': sku})
def parse(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@id="atfResults"]//div[starts-with(@id, "result_0")]')
pr = None
if products:
product = products[0]
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', './/h3[@class="title"]/a/text()')
product_name = loader.get_output_value('name').lower()
loader.add_xpath('url', './/h3[@class="title"]/a/@href')
price = product.select('.//div[@class="newPrice"]//span[@class="price"]/text()').extract()
if not price:
price = product.select('.//div[@class="usedNewPrice"]//span[@class="price"]/text()').extract()
if price:
loader.add_value('price', price[0].replace(',','.'))
loader.add_value('sku', response.meta['sku'])
pr = loader
if pr and response.meta['sku'].lower() in product_name:
yield pr.load_item()
```
#### File: scrapy/instrumart/tequipment.py
```python
import re
import json
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.utils import extract_price
HERE = os.path.dirname(os.path.abspath(__file__))
class TEquipmentSpider(BaseSpider):
name = 'tequipment.net'
allowed_domains = ['tequipment.net']
def start_requests(self):
with open(os.path.join(HERE, 'tequipmentcats')) as f:
urls = f.read().split()
for url in urls:
yield Request(url)
def parse(self, response):
hxs = HtmlXPathSelector(response)
'''
if response.url == self.start_urls[0]:
cats = hxs.select('//font[@size="2.5"]/../@href').extract()
for cat in cats:
url = urljoin_rfc(get_base_url(response), cat)
yield Request(url)
'''
subcats = hxs.select('//img[contains(@src, "orange-arrow.gif")]/../font/a/@href').extract()
subcats += hxs.select('//table[@class="categorytable"]//td[@class="categorymodelcell"]//a/@href').extract()
for subcat in subcats:
yield Request(urljoin_rfc(get_base_url(response), subcat))
'''
price_list = hxs.select('//a[contains(text(), "Price List")]/@href').extract()
if not price_list:
price_list = hxs.select('//a[contains(@href, "PriceList")]/@href').extract()
if price_list:
yield Request(urljoin_rfc(get_base_url(response), price_list[0]))
'''
next_page = hxs.select('//a/b[contains(text(), "Next Page")]/../@href').extract()
if next_page:
yield Request(urljoin_rfc(get_base_url(response), next_page[0]))
for product in self.parse_products(hxs, response):
yield product
def parse_products(self, hxs, response):
products = hxs.select('//table[@class="pricelisttable"]' +
'//td[@class="pricelistcelltitle"]/../../tr')[1:]
for product in products:
loader = ProductLoader(selector=product, item=Product())
price = product.select('.//img[contains(@src, "addtocart.gif")]' +
'/../../..//span[@class="pricelistcelldatatext"]/text()').extract()
if not price:
continue
loader.add_value('price', price[0])
loader.add_xpath('name', './/td[@class="pricelistcelldata" and position()=2]/text()')
url = response.url
model_url = product.select('.//td[@class="pricelistcelldata" and position()=1]' +
'//a/@href').extract()
if model_url:
url = urljoin_rfc(get_base_url(response), model_url[0])
loader.add_value('url', url)
yield loader.load_item()
```
#### File: scrapy/instrumart/testequity.py
```python
import re
import json
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.utils import extract_price
class TestEquitySpider(BaseSpider):
name = 'testequity.com'
allowed_domains = ['testequity.com']
start_urls = ('http://www.testequity.com/categories/',)
def parse(self, response):
hxs = HtmlXPathSelector(response)
cats = []
if response.url == self.start_urls[0]:
cats += hxs.select('//div[@id="categoryTree"]/ul/li/a[@class="CatPathLink"]/@href').extract()
for cat in cats:
yield Request(urljoin_rfc(get_base_url(response), cat))
products = hxs.select('//td//img[contains(@src, "CartIcon.gif")]/../@href').extract()
for product in products:
yield Request(urljoin_rfc(get_base_url(response), product), callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//td[@class="PriceColumn"]/..')
for product in products:
if product.select('.//td[@class="ItemAdCopy"]/b/a'):
continue
loader = ProductLoader(item=Product(), selector=product)
name = product.select('.//td[@class="ItemAdCopy"]//text()').extract()[:3]
name = name[1].strip() + ' ' + name[2].strip()
loader.add_value('name', name)
loader.add_xpath('price', './/td[@class="PriceColumn"]//text()')
loader.add_value('url', response.url)
yield loader.load_item()
```
#### File: scrapy/instrumart/transcat.py
```python
import re
import json
from collections import defaultdict
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.utils import extract_price
class TranscatSpider(BaseSpider):
name = 'transcat.com'
allowed_domains = ['transcat.com']
start_urls = ('http://www.transcat.com/Catalog/default.aspx',)
start_urls = ('http://www.transcat.com/Catalog/ProductSearch.aspx?SearchType=Combo&Mfg=&Cat=CL&SubCat=',)
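    # NOTE: the second assignment overrides the first, so only the product search URL is crawled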
def __init__(self, *args, **kwargs):
super(TranscatSpider, self).__init__(*args, **kwargs)
self.page_seen = defaultdict(dict)
def parse(self, response):
hxs = HtmlXPathSelector(response)
if response.url == self.start_urls[0]:
cats = hxs.select('//td[@class="catalog-list"]/strong/a/@href').extract()
for cat in cats:
yield Request(urljoin_rfc(get_base_url(response), cat))
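        # Pagination is driven by ASP.NET __doPostBack events; replay the form with each Page$N target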
pages = set(hxs.select('//tr[@class="Numbering"]//a/@href').re('_doPostBack\(.*,.*(Page\$\d+).*\)'))
for page in pages:
if not page in self.page_seen[response.url]:
self.page_seen[response.url][page] = True
r = FormRequest.from_response(response, formname='aspnetForm',
formdata={'__EVENTTARGET': 'ctl00$ContentPlaceHolderMiddle$TabContainer1$TabPanel2$grdSearch',
'__EVENTARGUMENT': page}, dont_click=True)
yield r
for product in self.parse_products(hxs, response):
yield product
def parse_products(self, hxs, response):
products = hxs.select('//table[@class="SearchGrid"]//td/a[contains(@href, "productdetail.aspx")]/../..')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
url = product.select('.//a[contains(@href, "productdetail.aspx")]/@href').extract()[0]
url = urljoin_rfc(get_base_url(response), url)
loader.add_value('url', url)
loader.add_xpath('name', './/td[position() = 2]//a[contains(@href, "productdetail.aspx")]/text()')
loader.add_xpath('price', './/td[position() = 3]//text()')
yield loader.load_item()
```
#### File: scrapy/loi/scheidegger_nl.py
```python
import re
import logging
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class ScheideggerNlSpider(BaseSpider):
name = 'scheidegger.nl'
allowed_domains = ['scheidegger.nl']
start_urls = ('http://www.scheidegger.nl',)
def parse(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//div[@id="sub"]/ul/li/a/@href').extract():
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_course_list)
def parse_course_list(self, response):
hxs = HtmlXPathSelector(response)
path = hxs.select(u'//ul[@id="breadcrumbs"]/li/a/text()').extract()
path.extend(hxs.select(u'//ul[@id="breadcrumbs"]/li[last()]/text()').extract())
path.pop(0)
for url in hxs.select(u'//h2[@class="coursetitle"]/a/@href').extract():
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, meta={'path':path}, callback=self.parse_course)
def parse_course(self, response):
hxs = HtmlXPathSelector(response)
path = response.meta['path'][:]
path.extend(hxs.select(u'//h1/text()').extract())
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('name', u' / '.join((p.strip() for p in path)))
product_loader.add_value('url', response.url)
costs = hxs.select(u'//div[@id="kostenspecificatie"]')
pricetxt = costs.select(u'./p[1]/text()').extract()
if len(pricetxt) == 1:
pricetxt = costs.select(u'./p[2]/text()').extract()
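        # The cost block is free text: 'Lesgeld'/'Cursusgeld' lines give a per-lesson price
        # (multiplied by the lesson count), 'Trainingskosten' lines a flat amount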
try:
for line in pricetxt:
if 'Lesgeld' in line or 'Cursusgeld' in line:
groups = re.search(u'([\d.,]+).*\( *([\d.,]+) lessen\)', line)
if groups:
price = float(groups.group(2)) * float(groups.group(1).replace('.', '').replace(',', '.'))
break
groups = re.search(u'([\d.,]+).*\(.*\)', line)
if groups:
price = float(groups.group(1).replace('.', '').replace(',', '.'))
break
elif 'Trainingskosten' in line:
price = line.split(':')[1].replace(u'\u20ac', '').replace('-', '')
price = float(price.replace('.', '').replace(',', '.'))
break
product_loader.add_value('price', price)
except Exception, e:
logging.error("Bad price [%s] found at URL [%s] (%s)" % (pricetxt, response.url, e))
yield product_loader.load_item()
```
#### File: scrapy/petsafe/cabelas.py
```python
import re
try:
import json
except ImportError:
import simplejson as json
from csv import DictReader
from petsafeconfig import CSV_FILENAME
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class CabelasComSpider(BaseSpider):
name = 'cabelas.com'
allowed_domains = ['cabelas.com']
start_urls = ()
site_name_csv = 'cabelas.com'
def start_requests(self):
products = []
with open(CSV_FILENAME, 'rb') as csv_file:
csv_reader = DictReader(csv_file)
for row in csv_reader:
if row['Retailer'] == self.site_name_csv and row['Link'] != '':
products.append((row['SKU'].strip(), row['Link'].strip(), row['Notes'].strip(), row['Name of Product'].strip().decode('utf-8')))
for sku, url, notes, name in products:
yield Request(url, self.parse, meta={'sku': sku, 'notes': notes, 'name': name}, dont_filter=True)
def parse(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
sku = response.meta['sku']
name = hxs.select("//div[@id='productInfo']/div[@class='labledContainer']/\
h1[@class='label']/text()").extract()
if not name:
logging.error('ERROR!! NO NAME!! %s "%s"' % (sku, url))
return
name = name[0].strip()
options_select = hxs.select("//div[@class='variantConfigurator']//select[@class='js-dropdown']")
if options_select:
options = hxs.select("//div[@class='variantConfigurator']/form/div/script[last()]/text()").extract()
options = options[0]
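            # The dropdown data is a JS object literal; quote its bare keys so json.loads can parse it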
options = re.search("ddWidgetEntries\[[^]]*\] = ([^;]*);", options).group(1)
options = options.replace("'", '"').replace('id', '"id"', ).\
replace('values', '"values"').replace('labels', '"labels"')
options = json.loads(options)
found = False
for option in options:
if option['values'][0] == response.meta['notes']:
found = True
label = option['labels'][0]
price = re.search("\$[\d.]+", label).group(0)
add_name = re.search("(.+) - (.+) - (.+)", label).group(1)
product = Product()
loader = ProductLoader(item=product, response=response, selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name + " " + add_name)
loader.add_value('price', price)
loader.add_value('sku', sku)
yield loader.load_item()
if not found:
                logging.error('ERROR!! OPTION NOT FOUND! %s "%s" "%s"' % (sku, name, url))
logging.error("ID: %s. OPTIONS: %s" % (response.meta['notes'], str(options)))
else:
price = hxs.select("//div[@id='productInfo']//div[@class='price']/dl/dd[1]/text()").extract()
if not price:
logging.error('ERROR!! NO PRICE!! %s "%s" "%s"' % (sku, name, url))
return
price = price[0].strip()
product = Product()
loader = ProductLoader(item=product, response=response, selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
loader.add_value('sku', sku)
yield loader.load_item()
```
#### File: scrapy/petsafe/petsafenet.py
```python
from csv import DictReader
from petsafeconfig import SKU_CSV_FILENAME
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class PetsafeNetSpider(BaseSpider):
name = 'petsafe.net'
allowed_domains = ['petsafe.net']
start_urls = ()
search_url = 'http://www.petsafe.net/search?q='
def start_requests(self):
skus = []
with open(SKU_CSV_FILENAME, 'rb') as csv_file:
csv_reader = DictReader(csv_file)
for row in csv_reader:
if row['SKU'] not in skus:
skus.append(row['SKU'])
logging.error("Number of products: %d" % len(skus))
for sku in skus:
            url = self.search_url + sku
yield Request(url, self.parse, meta={'sku': sku}, dont_filter=True)
def parse(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
sku = response.meta['sku']
products = hxs.select("//ul[@id='categoryproductWrapper']/li/div")
for product in products:
prod_sku = product.select(".//div[@class='modelNumber']/text()").extract()
if not prod_sku:
                logging.error('ERROR!! NO SKU!! %s "%s"' % (sku, url))
return
prod_sku = prod_sku[0].strip()
if prod_sku == sku:
name = product.select("header/a/text()").extract()
if not name:
logging.error('ERROR!! NO NAME!! %s "%s"' % (sku, url))
return
name = name[0].strip()
url = product.select("header/a/@href").extract()
if not url:
                    logging.error('ERROR!! NO URL!! %s "%s"' % (sku, url))
return
url = url[0].strip()
price = product.select(".//div[@class='descPrice']/div/text()").extract()
if not price:
logging.error('ERROR!! NO PRICE!! %s "%s" "%s"' % (sku, name, url))
return
price = price[0].strip()
product = Product()
loader = ProductLoader(item=product, response=response, selector=hxs)
loader.add_value('identifier', sku)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
loader.add_value('sku', sku)
yield loader.load_item()
break
```
#### File: scrapy/petsafe/petstreetmallcom.py
```python
from csv import DictReader
from petsafeconfig import CSV_FILENAME
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class PetstreetmallComSpider(BaseSpider):
name = 'petstreetmall.com'
allowed_domains = ['petstreetmall.com']
start_urls = ()
site_name_csv = 'petstreetmall.com'
def start_requests(self):
products = []
with open(CSV_FILENAME, 'rb') as csv_file:
csv_reader = DictReader(csv_file)
for row in csv_reader:
if row['Retailer'] == self.site_name_csv and row['Link'] != '':
products.append((row['SKU'].strip(), row['Link'].strip(), row['Notes'].strip(), row['Name of Product'].strip().decode('utf-8')))
for sku, url, notes, name in products:
yield Request(url, self.parse, meta={'sku': sku, 'notes': notes, 'name': name}, dont_filter=True)
def parse(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
sku = response.meta['sku']
sec_sku = response.meta['notes']
name = response.meta['name'].encode('ascii', 'ignore')
main_product = hxs.select("//div[@id='Product-MainProduct']")
main_products = hxs.select("//div[@id='Product-MainProductContainer']//div[@class='Product-SubProduct']")
secondary_products = hxs.select("//div[@id='Product-SubProductContainer']//div[@class='Product-SubProduct']")
main_product_sku = main_product.select("div[@id='Product-lblItem']/span[@id='lblItem']/text()").extract()
if not main_product_sku:
logging.error("NO MAIN SKU! %s" % url)
else:
main_product_sku = main_product_sku[0]
if main_product_sku == sku or main_product_sku == sec_sku:
# extract main product
price = main_product.select(".//div[@class='Product-Price']/span[@id='lblClubPrice']/b/font/text()").re("\$(.*)")
if not price:
logging.error('ERROR!! NO PRICE!! %s "%s" "%s"' % (sku, name, url))
return
price = price[0].strip()
product = Product()
loader = ProductLoader(item=product, response=response, selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
loader.add_value('sku', sku)
yield loader.load_item()
return
elif main_products:
for product in main_products:
product_sku = product.select("div[@class='Product-SubProductNumber']/font/text()").re("#(.+)")
if not product_sku:
logging.error("NO MAIN SKU! %s" % url)
else:
product_sku = product_sku[0]
if product_sku == sku or product_sku == sec_sku:
# extract secondary product
price = product.select(".//span[contains(@id, 'lblClubPrice')]/b/font/text()").re("\$(.*)")
if not price:
logging.error('ERROR!! NO SEC PRICE!! %s "%s" "%s"' % (sku, name, url))
return
price = price[0].strip()
product = Product()
loader = ProductLoader(item=product, response=response, selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
loader.add_value('sku', sku)
yield loader.load_item()
return
elif secondary_products:
for product in secondary_products:
product_sku = product.select("div[@class='Product-SubProductNumber']/text()").re("#(.+)")
if not product_sku:
logging.error("NO SECONDARY SKU! %s" % url)
else:
product_sku = product_sku[0]
if product_sku == sku or product_sku == sec_sku:
# extract secondary product
price = product.select(".//span[contains(@id, 'lblClubPrice2')]/b/font/text()").re("\$(.*)")
if not price:
logging.error('ERROR!! NO SEC PRICE!! %s "%s" "%s"' % (sku, name, url))
return
price = price[0].strip()
product = Product()
loader = ProductLoader(item=product, response=response, selector=hxs)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
loader.add_value('sku', sku)
yield loader.load_item()
return
else:
logging.error("No products found!")
```
#### File: scrapy/phonesdemo/phones4u_spider.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class Phones4uSpider(BaseSpider):
name = 'phones4u.co.uk'
allowed_domains = ['phones4u.co.uk']
start_urls = ['http://www.phones4u.co.uk/shop/shop_payg_main.asp?intcid=PAYG%20Phones']
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select('//*[@id="secBody"]/table/tbody/tr/td/a/@href').extract()
for category in categories:
url = urljoin_rfc(response.url, category.strip(), response.encoding)
yield Request(url, callback=self.parse_pages)
def parse_pages(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//*[@id="manphones"]/table/tbody/tr[not(@class="netempty")]/td[not(@class="manempty")]')
for product in products:
BASE_URL = 'http://www.phones4u.co.uk/'
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', 'a/text()')
relative_url = product.select('a/@href').extract()[0]
url = urljoin_rfc(BASE_URL, relative_url, response.encoding)
loader.add_value('url', url)
loader.add_xpath('price', 'text()')
yield loader.load_item()
```
#### File: scrapy/reliefspot/medlief.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
# spider includes
from product_spiders.items import Product, ProductLoader
# main class
class MedLiefSpider(BaseSpider):
# setup
name = "medlief.com" # Name must match the domain
allowed_domains = ["medlief.com"]
# start_urls = ["http://www.medlief.com/sitemap.html",]
start_urls = ["http://www.medlief.com/products-by-category.html",]
# main request
def parse(self, response):
base_url = get_base_url(response)
hxs = HtmlXPathSelector(response)
#categoryUrls = hxs.select("//a[text()='Category']/../div/ul/li/a/@href").extract()
#categoryUrls = hxs.select("//a[text()='Category']/../div/ul//a/@href").extract()
# categoryUrls = hxs.select('//div[@class="divContainer"]//ul[@class="left-menu"]//ul[@class="sf-vertical sf-menu sf-js-enabled sf-shadow"]//a/@href').extract()
# categoryUrls = hxs.select('//div[@class="content_l"]//li/a/@href').extract()
categoryUrls = hxs.select('//div[@class="content_l"]/div/ul[@class="left-menu"]/li/ul//a/@href').extract()
for categoryUrl in categoryUrls:
yield Request(categoryUrl, callback=self.parse_categories)
# enter categories
def parse_categories(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
# proceed only it is a products page and not an upper level category
hasSubCategories = hxs.select("//div[@class='listing-type-grid catalog-listing']")[0].select(".//a/@href")
if hasSubCategories:
subCatUrls = hasSubCategories.extract()
for subCatUrl in subCatUrls:
yield Request(subCatUrl, callback=self.parse_categories)
else:
# go to the next page
nextPageLink = hxs.select("//img[@alt='Next Page']/../@href")
nextPageLink2 = hxs.select("//a[text()='Next']/@href")
# if there is a next page... (the link has different formats in different pages)
if nextPageLink:
link = nextPageLink.extract()[0]
yield Request(urljoin_rfc(base_url, link), callback=self.parse_categories)
elif nextPageLink2:
link = nextPageLink2.extract()[0]
yield Request(urljoin_rfc(base_url, link), callback=self.parse_categories)
productUrls = hxs.select("//li[@class='item']/div[@class='product-image']/a/@href").extract()
for productUrl in productUrls:
yield Request(urljoin_rfc(base_url, productUrl), callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
prices = hxs.select('//span[@class="price"]/text()')
loader = ProductLoader(response=response, item=Product())
if prices:
loader.add_value('price', prices[len(prices) - 1])
loader.add_xpath('name', '//div[@class="product_l"]/h2/text()')
loader.add_value('url', response.url)
txt = hxs.select("//label[starts-with(text(), 'Manufacturers')]").extract()[0]
sku = txt[txt.find('/label>')+7:]
loader.add_value('sku', sku.strip())
yield loader.load_item()
```
#### File: scrapy/ropeandrescue/pksafety_com.py
```python
import re
import logging
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
def multiply(lst):
if not lst:
return [('', 0)]
while len(lst) > 1:
result = []
for name0, price0 in lst[0]:
for name1, price1 in lst[1]:
result.append((name0 + ' ' + name1, float(price0) + float(price1)))
lst = [result] + lst[2:]
return lst[0]
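# e.g. multiply([[('Size:S', 1), ('Size:L', 2)], [('Color:Red', 0)]])
#      -> [('Size:S Color:Red', 1.0), ('Size:L Color:Red', 2.0)]
# i.e. every combination of option names, with their price surcharges summed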
class PkSafetyComSpider(BaseSpider):
name = 'pksafety.com'
allowed_domains = ['pksafety.com']
start_urls = ('http://www.pksafety.com',)
def parse(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//div[@class="catNav"]/ul/li/div/a/@href').extract():
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product_list)
def parse_product_list(self, response):
hxs = HtmlXPathSelector(response)
cats = hxs.select(u'//div[@id="RightColumn"]/table/tr/td/center/div[@class="contentsName"]/a/@href').extract()
if cats:
for url in cats:
if url.split('.')[-1].lower() not in ('htm', 'html'):
# Contains links to PDFs as well
continue
url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product_list)
else:
opt_groups = []
def fix_options(what, o):
try:
return (what + ':' + o[0], o[1].replace(',', ''))
except:
return (what + ':' + o[0], '0')
for option in hxs.select(u'//div[@class="eyOptions"]//select'):
what = option.select(u'./@name').extract()[0]
opt_list = option.select(u'./option[@value!="PleaseSelect" and @value!="Please Select"]/text()').extract()
opt_list = [o.replace(')', '').split('(') for o in opt_list]
opt_groups.append([fix_options(what, o) for o in opt_list])
for opt_name, opt_price in multiply(opt_groups):
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('url', response.url)
product_loader.add_xpath('name', u'//h1/text()')
if hxs.select(u'//div[@class="bigSalePrice"]'):
product_loader.add_xpath('price', u'//div[@class="bigSalePrice"]/span/font/text()')
elif hxs.select(u'//span[@class="bigSalePrice"]'):
product_loader.add_xpath('price', u'//span[@class="bigSalePrice"]/font/text()')
else:
product_loader.add_xpath('price', u'//div[@class="itemRegPrice"]/span/font/text()')
product_loader.add_xpath('sku', u'normalize-space(substring-after(//div[@class="code"]/text(),":"))')
product_loader.add_xpath('category', u'//div[@class="eyBreadcrumbs"]/a[2]/text()')
product_loader.add_xpath('image_url', u'//img[@id="SwitchThisImage"]/@src')
# product_loader.add_xpath('brand', u'substring-after(//div[@class="product-meta"]/span[contains(text(),"Manufacturer:")]/text(),":")')
product_loader.add_value('shipping_cost', '')
product = product_loader.load_item()
product['name'] = (product['name'] + ' ' + opt_name).strip()
product['price'] = product['price'] + Decimal(opt_price)
yield product
```
#### File: scrapy/rosarioweb/caldaiemurali_it.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
from decimal import Decimal
import logging
class CaldaiemuraliItSpider(BaseSpider):
name = "caldaiemurali.it"
allowed_domains = ["caldaiemurali.it"]
start_urls = (
'http://www.caldaiemurali.it/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select("//ul[@id='nav']//a/@href").extract()
for category in categories:
yield Request(category, callback=self.parse)
pages = hxs.select("//div[@class='pages']/ol/li/a/@href").extract()
for page in pages:
yield Request(page, callback=self.parse)
items = hxs.select("//div[@class='product-list-block']//a[@class='product-image']/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
def parse_item(self, response):
url = response.url
hxs = HtmlXPathSelector(response)
name = hxs.select("//div[@class='product-shop']/div[@class='product-name']/h2/text()").extract()
if not name:
logging.error("NO NAME! %s" % url)
return
name = name[0]
# adding product
price = hxs.select("//div[@class='product-shop']/div[@class='price-box']//span[@class='price']/text()").extract()
if not price:
logging.error("NO PRICE! %s" % url)
return
price = price[0].replace(".", "").replace(",", ".")
# price_delivery = hxs.select("//div[@class='product-shop']//table[@id='product-attribute-specs-table']/tr/td[(preceding::th[text()='Spese Spedizione'])]/text()").extract()
# if not price_delivery:
# logging.error("NO PRICE DELIVERY! %s" % url)
# return
# price_delivery = price_delivery[0]
# price = Decimal(price) + Decimal(price_delivery)
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
```
#### File: scrapy/rosarioweb/fratellistrazzulloit.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class FratellistrazzulloItSpider(BaseSpider):
name = "fratellistrazzullo.it"
allowed_domains = ["fratellistrazzullo.it"]
start_urls = (
'http://www.fratellistrazzullo.it/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select("//div[@id='box_left_ctl02_livello_box']//table[@class='tabellaMenu']/tr/td[2]/a/@href").extract()
for category in categories:
yield Request(category, callback=self.parse)
pages = hxs.select("//div[@id='box_center2_span_navigazione']/a/@href").extract()
for page in pages:
yield Request(page, callback=self.parse)
items = hxs.select("//td[@class='centerPagina']/div[@id='box_center_ctl01_livello_box']/div[@class='tabMargini']/\
table[@class='tabellaBoxCentrale']/tr[2]/td/table/tr/td/table/tr/td/a/@href |\
//td[@class='centerPagina']/div[@id='box_center2_box_catalogo']/\
table[@class='tabellaBoxCentrale']/tr[2]/td/table/tr/td/table/tr/td/a/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
def parse_item(self, response):
hxs = HtmlXPathSelector(response)
content = hxs.select("//td[@class='centerPagina']/div[@class='tabMargini']/table[@class='tabellaBoxCentrale']/form/tr[2]/td/table/tr/td[2]")
name = content.select("//td[@class='centerPagina']/div[@class='tabMargini']/table[@class='tabellaBoxCentrale']/form/tr[1]/td/h1[@class='titolo']/text()").extract()
if not name:
logging.error("NO NAME!")
return
name = name[0]
url = response.url
# adding product
price = content.select("span[@id='box_center_span_prezzo']/span[@class='prezzo']/strong/text()").extract()
if not price:
logging.error("NO PRICE")
return
price = price[0]
l = ProductLoader(item=Product(), response=response)
        l.add_value('identifier', name.encode('ascii', 'ignore'))  # py2 encode() rejects keyword args
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
```
#### File: scrapy/rosarioweb/refrinclima_it.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
from product_spiders.utils import extract_price2uk
from decimal import Decimal
import logging
class RefrinclimaItSpider(BaseSpider):
name = "refrinclima.it"
allowed_domains = ["refrinclima.it"]
start_urls = (
'http://www.refrinclima.it/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select("//div[@class='main-menu']/div[@class='menu']/ul/li//a/@href").extract()
for category in categories:
yield Request(category, callback=self.parse)
pages = hxs.select("//div[@class='pagination']/ul[@class='pagination']/li/a/@href").extract()
for page in pages:
yield Request(page, callback=self.parse)
products = hxs.select("//ul[@id='product_list']/li")
for product in products:
url = product.select("div/h5/a/@href").extract()[0]
yield Request(url, callback=self.parse_item)
def parse_item(self, response):
url = response.url
hxs = HtmlXPathSelector(response)
name = hxs.select("//div[@id='primary_block']/div[@id='pb-left-column']/h2/text()").extract()
if not name:
logging.error("NO NAME! %s" % url)
return
name = name[0]
price = hxs.select("//p[@class='price']/span[@class='our_price_display']/span/text()").extract()
if not price:
logging.error("NO PRICE! %s" % url)
return
price = price[0]
price = Decimal(extract_price2uk(price))
eco_tax = hxs.select("//p[@class='price-ecotax']/span/text()").extract()
if eco_tax:
eco_tax[0] = eco_tax[0].encode('ascii', 'ignore')
print "Found eco tax %s" % eco_tax[0]
price -= Decimal(extract_price2uk(eco_tax[0]))
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', unicode(price))
yield l.load_item()
```
#### File: scrapy/rvpartssupplier/campingworld.py
```python
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class CampingWorldSpider(BaseSpider):
name = 'campingworld.com'
allowed_domains = ['www.campingworld.com']
start_urls = ('http://www.campingworld.com/',)
def __init__(self, *args, **kwargs):
super(CampingWorldSpider, self).__init__(*args, **kwargs)
self.URLBASE = 'http://www.campingworld.com/'
# parse the csv file to get the product ids
csv_file = csv.reader(open(os.path.join(HERE, 'monitored_products.csv')))
self.product_ids = [row[3] for row in csv_file]
self.product_ids = self.product_ids[1:]
def start_requests(self):
for id in self.product_ids:
url = self.URLBASE + 'search/index.cfm?Ntt=' + id + '&N=0&Ntx=mode+matchallpartial&Ntk=primary&Nty=1&Ntpc=1'
yield Request(url, callback=self.parse_product)
def parse(self, response):
return
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_xpath('name', '//h1[@itemprop="name"]/text()')
product_loader.add_xpath('price', '//div[@class="club"]/span[@itemprop="Price"]/text()',
re='.*\$(.*[0-9])')
product_loader.add_value('url', response.url)
return product_loader.load_item()
```
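The `monitored_products.csv` referenced above is not part of the source; the spider only consumes column index 3 and drops the first row as a header, so any file of this hypothetical shape would work:
```python
# Hypothetical monitored_products.csv layout (only the fourth column, the
# search id, is read; the first row is skipped as a header):
#
#   name,brand,style,product_id
#   Awning Light,AcmeRV,AL-100,10343
```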
#### File: scrapy/sagemcom/curryscouk_sagemcom.py
```python
__author__ = 'juraseg'
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from product_spiders.items import Product, ProductLoader
import logging
import re
class CurrysCoUkSpiderSagemcom(BaseSpider):
name = 'currys.co.uk_sagemcom'
allowed_domains = ['currys.co.uk']
start_urls = (
'http://www.currys.co.uk/',
)
search_url = 'http://www.currys.co.uk/gbuk/s_action/search_keywords/index.html'
keywords = ['Sagemcom']
products = [
'http://www.currys.co.uk/gbuk/humax-hdr-fox-t2-freeview-hd-recorder-500gb-07289192-pdt.html',
'http://www.currys.co.uk/gbuk/humax-hdr-fox-t2-freeview-hd-recorder-1tb-11502291-pdt.html',
'http://www.currys.co.uk/gbuk/humax-foxsat-hdr-freesat-hd-recorder-500gb-09785361-pdt.html',
'http://www.currys.co.uk/gbuk/panasonic-dmr-hw100-freeview-hd-recorder-320gb-10112707-pdt.html',
'http://www.currys.co.uk/gbuk/samsung-smt-s7800-freesat-hd-recorder-500gb-09933610-pdt.html',
'http://www.currys.co.uk/gbuk/sagemcom-rti-90-320-freeview-hd-recorder-320gb-05326751-pdt.html',
'http://www.currys.co.uk/gbuk/humax-pvr-9300t-500-freeview-recorder-500-gb-12290868-pdt.html',
'http://www.currys.co.uk/gbuk/sony-svr-hdt500-freeview-hd-recorder-500gb-10209414-pdt.html',
'http://www.currys.co.uk/gbuk/philips-picopix-ppx2480-pico-projector-12127328-pdt.html',
'http://www.currys.co.uk/gbuk/philips-picopix-ppx2055-pico-projector-12127320-pdt.html',
'http://www.currys.co.uk/gbuk/microvision-showwx-hdmi-pico-projector-12041449-pdt.html',
'http://www.currys.co.uk/gbuk/sagemcom-rti-95-320-freeview-hd-recorder-320-gb-14134720-pdt.html',
'http://www.currys.co.uk/gbuk/sagemcom-rti95-500-freeview-hd-recorder-500-gb-13406864-pdt.html',
'http://www.currys.co.uk/gbuk/philips-hdtp-8530-freeview-hd-recorder-500-gb-13985229-pdt.html',
]
def start_requests(self):
for keyword in self.keywords:
data = {
'subaction': 'keyword_search',
'search-field': keyword
}
url = self.search_url
request = FormRequest(url, formdata=data, callback=self.parse_search)
yield request
for url in self.products:
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
name = hxs.select("//h1[@class='pageTitle']/span/text()").extract()
if not name:
logging.error("ERROR! NO NAME! %s" % url)
return
name = " ".join(name)
name = re.sub("[\s]+", " ", name)
price = hxs.select("//div[contains(@class, 'productDetail')]//span[contains(@class, 'currentPrice')]/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! %s %s" % (url, name))
return
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
def parse_search(self, response):
hxs = HtmlXPathSelector(response)
# parse pages
pages = hxs.select("//ul[@class='pagination']//a/@href").extract()
for page in pages:
if page != '#':
request = Request(page, callback=self.parse_search)
yield request
# parse products
items = hxs.select("//article[contains(@class, 'product')]/div[contains(@class, 'desc')]")
for item in items:
name = item.select(".//div/header[@class='productTitle']/a/text()").extract()
if not name:
continue
name = name[0].strip()
name = re.sub("[\s]+", " ", name)
url = item.select(".//div/header[@class='productTitle']/a/@href").extract()
if not url:
logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name))
continue
url = url[0]
price = item.select(".//div//span[@class='currentPrice']/ins/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! URL: %s. NAME: %s" % (response.url, name))
continue
price = price[0].strip()
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
```
#### File: scrapy/sagemcom/johnlewiscom.py
```python
__author__ = 'juraseg'
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
import logging
class JohnlewisComSpider(BaseSpider):
name = 'johnlewis.com'
allowed_domains = ['johnlewis.com']
start_urls = (
'http://www.johnlewis.com/',
)
search_url = 'http://www.johnlewis.com/Search/Search.aspx?SearchTerm='
keywords = ['Sagemcom']
products = [
'http://www.johnlewis.com/230898937/Product.aspx?SearchTerm=Humax+HDR-+FOX+500GB+T2',
'http://www.johnlewis.com/231249320/Product.aspx?SearchTerm=Humax+HDR-+FOX+1TB+T2',
'http://www.johnlewis.com/230913445/Product.aspx?SearchTerm=Humax+FOXSAT+500GB',
'http://www.johnlewis.com/231395307/Product.aspx?SearchTerm=Panasonic+DMR+HW100+320GB',
'http://www.johnlewis.com/231462460/Product.aspx',
'http://www.johnlewis.com/230993658/Product.aspx',
'http://www.johnlewis.com/230562595/Product.aspx',
'http://www.johnlewis.com/231193482/Product.aspx?SearchTerm=sony+svr+hdt500',
'http://www.johnlewis.com/231520735/Product.aspx',
'http://www.johnlewis.com/231520734/Product.aspx',
'http://www.johnlewis.com/231520732/Product.aspx',
'http://www.johnlewis.com/231520733/Product.aspx',
'http://www.johnlewis.com/231659104/Product.aspx',
'http://www.johnlewis.com/231659103/Product.aspx',
]
def start_requests(self):
for keyword in self.keywords:
url = self.search_url + keyword
request = Request(url, callback=self.parse_search)
yield request
for url in self.products:
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
name = hxs.select("//div[@id='divContentContainer']//div[@id='divProdHead']/h1/text()").extract()
if not name:
logging.error("ERROR! NO NAME! %s" % url)
return
name = name[0]
price = hxs.select("//div[@id='divContentContainer']//div[@class='priceqty']/p[@class='price']/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! %s %s" % (url, name))
return
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
def parse_search(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
# parse pages
pages = hxs.select("//div[@class='pagenum']/a/@href").extract()
for page in pages:
request = Request(urljoin_rfc(base_url, page), callback=self.parse_search)
yield request
# parse products
items = hxs.select("//div[@class='grid-row']/div[@class='grid-item']")
for item in items:
name = item.select("div/a[@class='gridtitle']/text()").extract()
if not name:
continue
name = name[0]
url = item.select("div/a[@class='gridtitle']/@href").extract()
if not url:
logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name))
continue
url = url[0]
url = urljoin_rfc(base_url, url)
price = item.select("div/a[@class='price']/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! URL: %s. NAME: %s" % (response.url, name))
continue
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
```
#### File: scrapy/scentiments/beautyencounter.py
```python
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class BeautyEncounterSpider(BaseSpider):
name = 'beautyencounter.com'
allowed_domains = ['beautyencounter.com']
#start_urls = ('http://www.beautyencounter.com/',)
#start_urls = ('http://www.beautyencounter.com/discount/perfumes-and-colognes/4',)
def start_requests(self):
with open(os.path.join(HERE, 'beauty.txt')) as f:
for url in f:
yield Request(url.strip())
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_xpath('name', '//h1[@itemprop="name"]/text()')
loader.add_xpath('price', '//*[@itemprop="price"]/text()')
loader.add_value('url', response.url)
yield loader.load_item()
```
#### File: scrapy/seapets/completeaquatics.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request #, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
import logging as log
class OnlineShoesSpider(BaseSpider):
name = 'completeaquatics.co.uk'
allowed_domains = ['www.completeaquatics.co.uk', 'completeaquatics.co.uk']
start_urls = ['http://completeaquatics.co.uk']
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select(
'//div[@id="panel-navigation"]/a[@class="link-department"]/@href')
for cat in categories.extract():
yield Request(cat, callback=self.parse_category)
def parse_category(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
pages = hxs.select(
"//table[@class='pagination']//a[@class='button-small ']/@href"
).extract()
# handle current page
for p in self.get_products_from_page(response):
yield p
# handle other pages
for page in pages:
yield Request(
urljoin_rfc(base_url, page),
callback=self.get_products_from_page
)
def get_products_from_page(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select(
("//div[@class='catproducts']//"
"div[@class='prodlistboxbg']//a/@href")).extract()
for prod_url in products:
yield Request(
prod_url,
self.parse_product
)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
name = hxs.select("//div[@class='crumb_trail']/text()").extract()
if not name:
log.error("No product name! %s" % base_url)
return
name = name[-1].strip()
price = hxs.select("//div[@class='productheader']/span[@class='text-pricespecial']/text() | "
"//div[@class='productheader']/span[@class='text-price']/text()").extract()
if not price:
log.error("No product price! %s" % base_url)
return
price = price[0]
loader = ProductLoader(response=response, item=Product())
loader.add_value("name", name)
loader.add_value("url", base_url)
loader.add_value("price", price)
loader.add_xpath('sku', "//input[@name='prodcode']/@value")
yield loader.load_item()
```
#### File: scrapy/seapets/paddockfarm.py
```python
from scrapy.http import Request
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
class Paddockfarm(BaseSpider):
name = 'paddockfarm'
allowed_domains = ['paddockfarm.co.uk']
start_urls = ['http://www.paddockfarm.co.uk']
def parse(self, response):
hxs = HtmlXPathSelector(response)
urls = [urljoin_rfc(get_base_url(response), x.strip()) for x in hxs.select(".//*[@class='leftnavlist']/li/a/@href").extract() if x.strip()]
for url in urls:
yield Request(url, callback=self.parse_listing_page)
def parse_listing_page(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
prod_box = hxs.select(".//*[@class='prodbox']")
for product in prod_box:
loader = ProductLoader(item=Product(), selector=product)
name = ''.join(product.select(".//div[@class='prodtitle']/.//a/text()").extract())
price = ''.join(product.select(".//div[@class='prodprice']/strong/.//text()").extract())
loader.add_value('name', name)
loader.add_value('url', url)
loader.add_value('price', price)
yield loader.load_item()
next_page = hxs.select(".//a[contains(text(), 'Next')]/@href")
if next_page:
next_url = urljoin_rfc(get_base_url(response), next_page[0].extract())
yield Request(next_url, callback=self.parse_listing_page)
```
#### File: scrapy/seapets/pondsuperstores_spider.py
```python
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
class PondsuperstoresSpider(BaseSpider):
name = 'pondsuperstores.com'
allowed_domains = ['www.pondsuperstores.com']
start_urls = (
'http://www.pondsuperstores.com/brands/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
brand_urls = hxs.select('//form[@id="frmCompare"]//a[@class="brandlink"]/@href').extract()
for url in brand_urls:
yield Request(url, callback=self.parse_brand)
def parse_brand(self, response):
hxs = HtmlXPathSelector(response)
product_urls = hxs.select('//form[@id="frmCompare"]//div[@class="ProductDetails"]//a/@href').extract()
for url in product_urls:
yield Request(url, callback=self.parse_product)
next = hxs.select('//div[@class="CategoryPagination"]//a[contains(text(),"Next")]/@href').extract()
if next:
yield Request(next[0], callback=self.parse_brand)
def parse_product(self, response):
loader = ProductLoader(item=Product(), response=response)
loader.add_xpath('name', '//div[@id="ProductDetails"]//h2/text()')
loader.add_value('url', response.url)
loader.add_xpath('price', '//div[@id="ProductDetails"]//em[contains(@class,"ProductPrice")]/text()')
loader.add_xpath('sku', '//div[@id="ProductDetails"]//span[contains(@class,"VariationProductSKU")]/text()')
yield loader.load_item()
```
#### File: scrapy/seattlecoffeegear/amazonespresso.py
```python
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import open_in_browser
from product_spiders.items import Product, ProductLoader
import logging
#user_agent = 'Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.52 Safari/536.5'
class AmazonComSpider(BaseSpider):
name = 'amazon.com_espresso'
allowed_domains = ['amazon.com']
start_urls = ('http://www.amazon.com', )
headers = {
'User-agent': user_agent
}
form_headers = {
'User-agent': user_agent,
'Content-Type': 'application/x-www-form-urlencoded'
}
category_ids = [
'2251595011',
'2251593011',
'2251592011',
'915194'
]
search_url = "http://www.amazon.com/s/ref=nb_sb_noss?keywords=espresso&node=%%cat_id%%"
def start_requests(self):
for cat_id in self.category_ids:
yield Request(
self.search_url.replace("%%cat_id%%", cat_id),
headers=self.headers,
callback=self.parse
)
def parse(self, response):
URL_BASE = get_base_url(response)
hxs = HtmlXPathSelector(response)
total = hxs.select("//h2[@id='resultCount']//text()").re("Showing .*? - .*? of (.*?) Results")
bottom = hxs.select("//h2[@id='resultCount']//text()").re("Showing (.*?) - .*? of .*? Results")
top = hxs.select("//h2[@id='resultCount']//text()").re("Showing .*? - (.*?) of .*? Results")
if total:
total = int(total[0].replace(",", ""))
logging.error("Total: %d" % total)
if top and bottom:
top = int(top[0].replace(",", ""))
bottom = int(bottom[0].replace(",", ""))
else:
logging.error("No numbers!")
logging.error("Top: %s" % top)
logging.error("Bottom: %s" % bottom)
return
# parse products
items = hxs.select("//div[contains(@class, 'result') and contains(@class, 'product')]")
if not items:
logging.error("ERROR! No products %s" % response.url)
if top - bottom > 0:
logging.error("Products exist but not found!")
items_count = 0
counter = 0
for item in items:
counter += 1
name = item.select("div[@class='data']/h3/a/text()").extract()
if not name:
name = item.select("div[@class='data']/h3/a/span/@title").extract()
if not name:
logging.error("ERROR! NO NAME! URL: %s" % response.url)
continue
name = name[0]
logging.error("%d. Name: %s" % (counter, name))
url = item.select("div[@class='data']/h3/a/@href").extract()
if not url:
logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name))
continue
url = url[0]
url = urljoin_rfc(URL_BASE, url)
logging.error("%d. URL: %s" % (counter, url))
price = item.select("div/div[contains(@class,'newPrice')]/span[contains(@class, 'price')]/text()").extract()
if not price:
price = item.select("div/div[@class='usedNewPrice']/span[@class='subPrice']/span[@class='price']/text()").extract()
if not price:
external = hxs.select(".//div[@class='prodAds']")
if external:
logging.error("External site")
else:
logging.error("ERROR! No price! URL: %s. NAME: %s" % (response.url, name))
continue
price = price[0]
logging.error("%d. Price: %s" % (counter, price))
l = ProductLoader(item=Product(), response=response)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
items_count += 1
logging.error("Found %d items" % len(items))
logging.error("Processed %d items" % items_count)
# get current page number
m = re.search("page=([\d]*)&", response.url)
if not m:
current_page_number = 0
else:
current_page_number = int(m.group(1))
# pages
pages = hxs.select("//span[@class='pagnLink']/a/@href").extract()
for url in pages:
m = re.search("page=([\d]*)&", url)
if not m:
continue
else:
page_number = int(m.group(1))
if page_number > current_page_number:
request = Request(
urljoin_rfc(URL_BASE, url),
headers=self.headers,
callback=self.parse
)
yield request
## parse pages
#if len(items) > 0:
## process next page
#page_param_index = response.url.find("page=")
#if page_param_index > -1:
## page > 1
#page_param_index += len("page=")
#current_page = int(response.url[page_param_index:])
#next_page_url = response.url[:page_param_index] + str(current_page + 1)
#else:
#next_page_url = response.url + "&page=" + str(2)
#request = Request(urljoin_rfc(URL_BASE, next_page_url), callback=self.parse)
#yield request
```
#### File: scrapy/sexshops/simplypleasure.py
```python
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from productloader import load_product
from scrapy.http import FormRequest
class SimplyPleasure(BaseSpider):
name = 'simplypleasure.co.uk'
allowed_domains = ['simplypleasure.com', 'www.simplypleasure.com']
start_urls = ('http://www.simplypleasure.com/sitemap.aspx',)
def __init__(self, *args, **kwargs):
super(SimplyPleasure, self).__init__(*args, **kwargs)
self.URL_BASE = 'http://www.simplypleasure.com'
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
res = {}
try:
name = hxs.select('//div[@class="productInfo"]/h1/text()').extract()[0]
url = response.url
price = hxs.select('//span[@class="price last"]/text()').re('\xa3(.*)')
if price:
price = price[0]
else:
price = hxs.select('//span[@class="price"]/text()').re('\xa3(.*)')[0]
#sku = hxs.select('//div[@class="productInfo"]/p/text()').re(': (.*)')[0]
sku = hxs.select('//div[@class="productInfo"]/p/text()').re('Product code:\r\n(.*)')[0].strip()
res['url'] = url
res['description'] = name
res['price'] = price
res['sku'] = sku
yield load_product(res, response)
except IndexError:
return
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select('//div[@class="leftBox"]/div[@id="vNav"]//a/@href').extract()
categories = [urljoin_rfc(self.URL_BASE, cat) for cat in categories]
if categories:
yield Request(categories[0], meta={'categories': categories[1:]}, callback=self.parse_category)
def parse_category(self, response):
# next page
hxs = HtmlXPathSelector(response)
next_page = hxs.select('//a[text()=">"]/@href').re('k\(\'(.*?)\'')
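        # the ">" link is an ASP.NET javascript postback, so instead of
        # following an href we re-submit aspNetForm with the extracted
        # __EVENTTARGET to fetch the next page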
if next_page:
formname = 'aspNetForm'
formdata = {'__EVENTTARGET': next_page, '__EVENTARGUMENT': ''}
request = FormRequest.from_response(response, formname=formname,
formdata=formdata,
dont_click=True, callback=self.parse_category,
meta={'categories': response.meta['categories'][:]})
yield request
elif response.meta['categories']:
cat = response.meta['categories'][0]
yield Request(cat, meta={'categories': response.meta['categories'][1:]},
callback=self.parse_category)
# products
products = hxs.select('//div[@id="content" and @class="content"]//h3/a/@href').extract()
for product in products:
product = urljoin_rfc(self.URL_BASE, product)
yield Request(product, callback=self.parse_product)
```
#### File: scrapy/shoemetro/shoebacca.py
```python
import csv
import os
import copy
import shutil
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from scrapy import log
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class shoebaccaSpider(BaseSpider):
name = 'shoebacca.com'
allowed_domains = ['shoebacca.com','www.shoebacca.com']
def start_requests(self):
shutil.copy(os.path.join(HERE, 'shoemetroall.csv'),os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur'))
with open(os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku']
"""
brand = row['brand']
style = row['style']
query = (brand + ' ' + style).replace(' ', '%20')
"""
query = row['name'].replace(' ', '+')
url = 'http://www.shoebacca.com/finder/?query=%s&search_form=1&sort=price-low-high'
yield Request(url % query, meta={'sku': sku, 'name': query})
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
products = hxs.select('//ul[@id="finder-data"]/li')
if not products:
return
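        # results were requested sorted price-low-high, so the first hit is
        # the cheapest candidate for the monitored sku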
product = products[0]
loader = ProductLoader(item=Product(), selector=product)
name = "".join(product.select('./a/div/h5/span/text()').extract())
if name:
name2 = "".join(product.select('./a/div/h5/text()').extract())
url = product.select('./a/@href').extract()[0]
price = "".join(product.select('./a/div[@class="p-price"]/text()').re(r'([0-9\,\. ]+)')).strip()
if not price:
price = "".join(product.select('./a/div[@class="p-price"]/span[@class="sale-price"]/text()').re(r'([0-9\,\. ]+)')).strip()
loader.add_value('name', name.strip() + ' ' + name2.strip())
loader.add_value('url', urljoin_rfc(base_url,url))
loader.add_value('price', price)
loader.add_value('sku', response.meta['sku'])
if not 'apparelsave' in loader.get_output_value('name').lower():
yield loader.load_item()
```
#### File: scrapy/tigerchef/wasserstrom.py
```python
from urlparse import urlparse, parse_qs
from urllib import urlencode
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class WasserstromSpider(BaseSpider):
name = 'wasserstrom.com'
allowed_domains = ['wasserstrom.com']
start_urls = ('http://www.wasserstrom.com/',)
def parse(self, response):
hxs = HtmlXPathSelector(response)
#categories
'''
cats = hxs.select('//a[@class="category"]/@href').extract()
'''
cats = ['http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Winco_List_True_BrandDisplayView_1013200',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Thunder+Group_List_True_BrandDisplayView_204192',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Cardinal+International_List_True_BrandDisplayView_202571',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Libbey_List_True_BrandDisplayView_204083',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Victory+Refrigeration_List_True_BrandDisplayView_1013144',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Vollrath_List_True_BrandDisplayView_161583',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Dexter+Russell+Cutlery_List_True_BrandDisplayView_1012082',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Friedr.+Dick_List_True_BrandDisplayView_203090',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Cecilware+Corp_List_True_BrandDisplayView_1011943',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Turbo+Air_List_True_BrandDisplayView_1013106',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Eastern+Tabletop_List_True_BrandDisplayView_203074',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_G.E.T.+Enterprises_List_True_BrandDisplayView_181599',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_EMI+Yoshi+Inc._List_True_BrandDisplayView_1012141',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Beverage-Air_List_True_BrandDisplayView_1011848',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Amana_List_True_BrandDisplayView_1011732',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Chefwear_List_True_BrandDisplayView_1011960',
'http://www.wasserstrom.com/restaurant-supplies-equipment/Brand_Bakers+Pride+Oven_List_True_BrandDisplayView_202560']
for cat in cats:
yield Request(cat, cookies={}, meta={'dont_merge_cookies': True})
#pagination
'''
has_next = hxs.select('//form[@name="frmPagination"]//a[contains(text(), "Next")]')
if has_next:
total_results = hxs.select('//input[@name="totalResults"]/@value').extract()[0]
url_data = urlparse(response.url)
params = parse_qs(url_data.query)
current_page = int(params.get('currPgNum', ['1'])[0])
query = {'currPgNum': current_page + 1,
'pageSize': '20',
'pgStyle': 'Grid',
'results': '20',
'results2': '20',
'sortType': 'relevance',
'totalResults': total_results}
yield response.url.split('?')[0] + '?' + urlencode(query)
'''
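        # the "Next" link is a javascript call; pull its numeric arguments out
        # of the parentheses and re-post goToPageForm to page through results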
next = hxs.select('//ul[@class="results-pages"]//li/a[contains(text(), "Next")]/@href').extract()
if next:
args = re.search('\((.*)\)', next[0]).groups()[0].split(',')
page_number = int(args[0].strip())
page_size = 20
total = args[3].strip()
data = {'beginIndex': page_number * page_size,
'totalNoOfResult': total,
'stdPageSize': page_size,
'currPgNum': page_number,
'pgStyle': 'List',
'sort_results': 'relevance'}
yield FormRequest.from_response(response, formname='goToPageForm', formdata=data,
cookies={}, meta={'dont_merge_cookies': True})
products = hxs.select('//ul[@class="product_types"]//a/@href').extract()
products += hxs.select('//td[@class="searchproductdescription"]//a/@href').extract()
for product in products:
yield Request(product, callback=self.parse_product, cookies={}, meta={'dont_merge_cookies': True})
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
loader = ProductLoader(response=response, item=Product())
loader.add_xpath('name', '//h1[@id="partNameId"]/text()')
loader.add_value('url', response.url)
loader.add_xpath('price', '//font[@class="txt-purchaseprice20blue"]/text()')
sku = ''.join(hxs.select('//b[contains(text(), "Model #:")]/../text()').extract()).strip()
loader.add_value('sku', sku)
yield loader.load_item()
```
#### File: scrapy/topgeartrading/tyresdirectukcouk.py
```python
import re
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
class TyresDirectCoUk(BaseSpider):
name = 'tyresdirectukcouk'
allowed_domains = ['tyresdirectuk.co.uk', 'www.tyresdirectuk.co.uk']
start_urls = ('http://www.tyresdirectuk.co.uk/brands.php',)
def __init__(self, *args, **kwargs):
super(TyresDirectCoUk, self).__init__(*args, **kwargs)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories and subcategories
category_urls = hxs.select('//div[@class="content"]/div[@class="brandbox"]//a/@href').extract()
for url in category_urls:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url)
# next page
# next_page =
# if next_page:
# url = urljoin_rfc(self.URL_BASE, next_page[0])
# yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@class="shopprods"]')
for product in products:
product_loader = ProductLoader(item=Product(), selector=product)
product_loader.add_xpath('name', './/p/strong/a/text()')
url = product.select('.//p/strong/a/@href').extract()[0]
url = urljoin_rfc(get_base_url(response), url)
product_loader.add_value('url', url)
price = product.select('.//span[@class="price"]/text()').extract()[0]
price = Decimal(price) + Decimal(5)
price = str(price)
product_loader.add_value('price', price)
yield product_loader.load_item()
```
#### File: scrapy/wesco/grainger.py
```python
import csv
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class GraingerSpider(BaseSpider):
name = 'wesco-grainger.com'
allowed_domains = ['grainger.com']
start_urls = ('http://www.newark.com/jsp/search/advancedsearch.jsp',)
def start_requests(self):
with open(os.path.join(HERE, 'products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['Part #']
url = 'http://www.grainger.com/Grainger/wwg/search.shtml?'+\
'searchQuery=%(sku)s&op=search&Ntt=%(sku)s&N=0&sst=subset'
yield Request(url % {'sku': sku}, meta={'sku': sku})
def parse(self, response):
hxs = HtmlXPathSelector(response)
if hxs.select('//div[@id="noResultsMsg"]') \
or not hxs.select('//td[text()="Mfr. Model #"]/following-sibling::td/text()'):
return
loader = ProductLoader(item=Product(), response=response)
loader.add_xpath('name', '//div[@id="PageTitle"]/h1//text()')
loader.add_value('url', response.url)
loader.add_xpath('price', '//td[@class="tdrightalign"]/strong[starts-with(text(), "$")]/text()')
loader.add_xpath('sku', '//td[text()="Mfr. Model #"]/following-sibling::td/text()')
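        # keep the result only if the page's "Mfr. Model #" matches the sku we
        # searched for (guards against loose full-text matches)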
sku = loader.get_output_value('sku')
if sku.lower() != response.meta['sku'].lower():
return
yield loader.load_item()
```
#### File: scrapy/windowscleaning/windowcleaningsupply.py
```python
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from product_spiders.items import Product
from productloader import WindowsCleaningProductLoader, load_product
class WindowCleaningSupplySpider(BaseSpider):
name = 'window-cleaning-supply.com'
allowed_domains = ['www.window-cleaning-supply.com']
start_urls = ('http://www.window-cleaning-supply.com',)
def parse(self, response):
#categories
hxs = HtmlXPathSelector(response)
category_urls = hxs.select('//div[@class="Block CategoryList Moveable Panel"]//a/@href').extract()
for url in category_urls:
yield Request(url)
#next page
next_page = hxs.select('//div[@class="CategoryPagination"]//div[@class="FloatRight"]/a/@href').extract()
if next_page:
yield Request(next_page[0])
# products
product_links = hxs.select('//div[@class="ProductDetails"]//a/@href').extract()
for product_link in product_links:
yield Request(product_link, callback=self.parse_product)
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
# sub products
hxs = HtmlXPathSelector(response)
subproduct_urls = hxs.select('//div[@class="ProductDescriptionContainer"]//a/@href').extract()
if subproduct_urls:
for url in subproduct_urls:
try:
yield Request(url, callback=self.parse_product)
except ValueError:
pass
product = {}
try:
product['url'] = response.url
product['description'] = hxs.select('//h1/text()').extract()[0]
product['price'] = hxs.select('//em[@class="ProductPrice VariationProductPrice"]/text()').extract()[0]
try:
product['sku'] = hxs.select('//div[@id="sku"]/text()').extract()[0]
except IndexError:
product['sku'] = ''
yield load_product(product, response)
except IndexError:
return
``` |
{
"source": "0kim/bccard_email_parser",
"score": 3
} |
#### File: bccard_email_parser/BccardEmailParser/BccardParser.py
```python
import copy
import sys
from datetime import datetime
pyver = sys.version_info.major
if pyver == 2:
from HTMLParser import HTMLParser
elif pyver == 3:
from html.parser import HTMLParser
else:
raise Exception("Not supported Python version")
DATETIME_FORMAT_LOCAL = "%Y.%m.%d %H:%M"
DATETIME_FORMAT_INTL = "%Y년%m월%d일 %H:%M"
RECEIPT_FIELDS = {'amount': '사용금액',
'card_name': '카드명',
'card_installment': '사용구분',
'date': '사용일시',
'store_name': '가맹점명',
'type': '구분'}
RECEIPT_INIT = {'amount': None,
'currency': None,
'card_name': None,
'card_installment': None,
'date': None,
'store_name': None,
'type': None}
class BccardHtmlParser(HTMLParser):
STATUS_LIST = ('init',
'itemsearch', # status where searching 'filed name'
'valuesearch', # status where searching 'value of filed'
'done')
_receipt = None
_ctx = {'status': 'init'}
_curItem = ''
    _jump_next_tag_flag = False  # if True, capture the field value from the next td's text
def __init__(self, receipt):
self._receipt = receipt
self._ctx = {'status': 'init'}
HTMLParser.__init__(self)
# def reset(self):
# self._ctx = {'status': 'init'}
def handle_starttag(self, tag, attrs):
if tag.lower() == 'td':
# print("starttag:" + tag)
if self._ctx['status'] == 'init':
self._ctx['status'] = 'itemsearch'
# print("_ctx:" + "init -> itemsearch")
elif self._ctx['status'] == 'valuesearch':
self._ctx['status'] = 'valuesearch'
self._jump_next_tag_flag = True
# print("_ctx:" + self._ctx['status'] + " -> valuesearch")
else:
pass
else:
pass
def handle_data(self, data):
# print("data:" + data)
        if self._ctx['status'] == 'valuesearch' and self._jump_next_tag_flag:
self._receipt[self._curItem] = data.strip()
# print("_ctx:" + self._ctx['status'] + " -> init")
self._ctx['status'] = 'init'
self._curItem = ''
self._jump_next_tag_flag = False
if self._ctx['status'] == 'itemsearch':
if data == RECEIPT_FIELDS['amount']:
self._curItem = 'amount'
self._ctx['status'] = 'valuesearch'
elif data == RECEIPT_FIELDS['card_name']:
self._curItem = 'card_name'
self._ctx['status'] = 'valuesearch'
elif data == RECEIPT_FIELDS['card_installment']:
self._curItem = 'card_installment'
self._ctx['status'] = 'valuesearch'
elif data == RECEIPT_FIELDS['date']:
self._curItem = 'date'
self._ctx['status'] = 'valuesearch'
elif data == RECEIPT_FIELDS['store_name']:
self._curItem = 'store_name'
self._ctx['status'] = 'valuesearch'
else:
# print("_ctx:" + self._ctx['status'] + " -> init")
self._ctx['status'] = 'init'
self._curItem = ''
def handle_endtag(self, tag):
if tag == 'html':
self._ctx['status'] = 'done'
def _strip_amount(self, value):
return value.replace(',', '').strip()
class BccardParser(object):
_receipt = None
_bc_html_parser = None
_encoding = None
def __init__(self):
pass
def _validate_receipt(self):
# todo: check whether all fields of the receipt are populated
return
def _strip_amount(self, value):
return value.replace(',', '').strip()
def _post_process(self):
# populate 'currency' and 'type'
if pyver == 2:
amount_str = unicode(self._receipt['amount'], 'utf-8')
else:
amount_str = self._receipt['amount']
# print(unicode(amount_str, 'utf-8'))
        if amount_str[-1:] == u'원':
            amount = self._strip_amount(amount_str[:-1])
            currency = 'KRW'
            type = '국내'
        elif not amount_str[-3:].isdigit():
            amount = self._strip_amount(amount_str[:-3])
            currency = amount_str[-3:]
            type = '해외'
else:
raise Exception('Unable to parse \'amount\'. ' + amount_str)
self._receipt['amount'] = amount
self._receipt['currency'] = currency
self._receipt['type'] = type
        # convert the date string to a datetime; local and international receipts use different date formats
if type == '국내':
date = datetime.strptime(self._receipt['date'], DATETIME_FORMAT_LOCAL)
elif type == '해외':
if pyver == 2:
s = unicode(self._receipt['date'], 'utf-8')
else:
s = self._receipt['date']
date = datetime(year=int(s[0:4]),
month=int(s[5:7]),
day=int(s[8:10]),
hour=int(s[12:14]),
minute=int(s[15:17]),
second=int(s[18:20]))
else:
raise Exception('Unsupported type: ' + type)
self._receipt['date'] = date
def parse(self, html, encoding):
self._encoding = encoding
if pyver == 2:
if encoding != 'utf-8':
html = html.decode(encoding).encode('utf-8')
self._receipt = copy.deepcopy(RECEIPT_INIT)
self._bc_html_parser = BccardHtmlParser(self._receipt)
self._bc_html_parser.feed(html)
self._post_process()
self._validate_receipt()
self._bc_html_parser.close()
def get_result(self):
return self._receipt
``` |
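A minimal usage sketch for the parser above; the import path mirrors the file location, while the input file name and encoding are placeholders, not part of the source:
```python
# Hypothetical driver: feed a saved BC card e-mail receipt to the parser.
from BccardEmailParser.BccardParser import BccardParser

parser = BccardParser()
with open('receipt_mail.html') as f:    # assumed input file
    parser.parse(f.read(), 'utf-8')     # encoding is an assumption
receipt = parser.get_result()
print(receipt['amount'], receipt['currency'], receipt['store_name'])
```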
{
"source": "0kim/mysql-to-excel",
"score": 3
} |
#### File: 0kim/mysql-to-excel/main.py
```python
import pymysql
import xlsxwriter
mysql_host = '127.0.0.1'
mysql_user = 'root'
mysql_pass = '<PASSWORD>'
mysql_db = 'mydb'
mysql_charset = 'utf8'
mysql_query = 'select * from table;'
xls_filename = 'output.xlsx'
xls_sheetname = 'work1'
class XlsxSheetWriter():
MAX_ROW = 1048576
MAX_COL = 16384 # == 'XFD' @ excel
DEFAULT_ROW = 0
DEFAULT_COL = 0
_cur_col = 0 # A, B, C...
_cur_row = 0 # 1, 2, 3...
    _wb = None  # workbook (xlsx file)
    _ws = None  # worksheet
def __init__(self, filename, sheetname):
self._cur_col = self.DEFAULT_COL
self._cur_row = self.DEFAULT_ROW
self._wb = xlsxwriter.Workbook(filename)
self._ws = self._wb.add_worksheet(sheetname)
def reset_row(self):
self._cur_row = self.DEFAULT_ROW
def reset_col(self):
self._cur_col = self.DEFAULT_COL
    def inc_row(self):
        if self.MAX_ROW <= self._cur_row:
            raise Exception("Out of range: row " + str(self._cur_row))
        self._cur_row += 1
    def inc_col(self):
        if self.MAX_COL <= self._cur_col:
            raise Exception("Out of range: column " + str(self._cur_col))
        self._cur_col += 1
    def write(self, data):
        self._ws.write(self._cur_row, self._cur_col, data)
    def close(self):
        self._wb.close()
writer = XlsxSheetWriter(filename=xls_filename,
sheetname=xls_sheetname)
con = pymysql.connect(host=mysql_host,
user=mysql_user,
                      password=mysql_pass,
db=mysql_db,
charset=mysql_charset)
curs = con.cursor(pymysql.cursors.DictCursor)
curs.execute(mysql_query)
rows = curs.fetchall()
writer.reset_row()
for row in rows:
writer.reset_col()
for c in row:
writer.write(row[c])
writer.inc_col()
writer.inc_row()
con.close()
writer.close()
print("Done...")
``` |
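One caveat with the script above: `fetchall()` pulls the whole result set into memory. For large tables, a server-side cursor plus xlsxwriter's `constant_memory` mode keeps usage flat. A sketch reusing the same connection settings (the cursor class and workbook option are standard pymysql/xlsxwriter features, but this variant is untested here):
```python
# Streaming variant (sketch): SSDictCursor fetches rows lazily from the
# server, and constant_memory flushes worksheet rows to disk as they come.
import pymysql
import pymysql.cursors
import xlsxwriter

con = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_pass,
                      db=mysql_db, charset=mysql_charset,
                      cursorclass=pymysql.cursors.SSDictCursor)
wb = xlsxwriter.Workbook(xls_filename, {'constant_memory': True})
ws = wb.add_worksheet(xls_sheetname)
curs = con.cursor()
curs.execute(mysql_query)
for r, row in enumerate(curs):          # iterate instead of fetchall()
    for c, col in enumerate(row):
        ws.write(r, c, row[col])
wb.close()
con.close()
```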
{
"source": "0ki/PeakReader",
"score": 3
} |
#### File: 0ki/PeakReader/PeakWeb.py
```python
from datetime import datetime
import sys
if sys.version_info[0] <= 2: #dirty python2 hack to support utf8-sig
from io import open
class OverwriteReader:
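    # Expands a sparse CSV where a blank cell means "repeat the value from the
    # row above": each output row starts from the accumulated state and only
    # non-empty cells overwrite it (cells past `limit` always overwrite).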
def __init__(self,csv,separator=';',limit=-1):
self.index = -1
tmp = []
self.data=[]
for line in csv:
tmp2=line.split(separator)
for i in range(len(tmp2)):
if tmp2[i]!="" or (limit>0 and i>limit):
while i >= len(tmp):
tmp.append("")
tmp[i]=tmp2[i]
self.data.append(tmp[:])
def __iter__(self):
return self
def __next__(self):
self.index += 1
if self.index == len(self.data):
raise StopIteration
return self.data[self.index]
def __getitem__(self,key):
return self.data[key]
def next(self): # python 2 compat
return self.__next__()
class Peak:
def validateDate(self,specialdates,dfrom,dto,wd,target):
if type(target) is datetime:
target = target.strftime('%Y-%m-%d')
if target in specialdates: #unconditional match
return True
if dto is not None and target>dto:
return False
if dfrom is not None and target<dfrom:
return False
targetwd=datetime.strptime(target, '%Y-%m-%d').weekday() + 1
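        # NOTE: unreachable after the early return above; presumably meant to
        # substitute a weekday when specialdates is a date->weekday dict
        # (as built by PeakSpecialDates) rather than a plain list of dates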
if target in specialdates:
targetwd=specialdates[target]
return str(targetwd) in wd
def fill(self,dest,value,count,maxcount):
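        # append `value` to dest `count` times; a zero/empty count means
        # "pad dest out to maxcount entries"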
if not count:
count = max(0, maxcount-len(dest) )
for tmp in range(count):
dest.append(value)
def dateDeltaList(self,st):
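        # "a,b,c" -> cumulative day offsets a, a+b, a+b+c from the Unix epoch,
        # rendered as YYYY-MM-DD strings (non-positive offsets become None)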
try:
val=st.split(",")
for n in range(len(val)):
try:
val[n] = int(val[n])
except:
val[n] = 0
if n>0:
val[n] += val[n-1]
st=[self.date(x) for x in val]
except:
st=[]
return st
def clock(self,a):
try:
a=int(a)
e=""
            while(a>=1440): # past midnight: roll over and flag with (+1)
a -= 1440
e = " (+1)"
return "%02d:%02d%s" % ( a/60 , a%60, e)
except:
return "ErrTime(%s)" % a
def date(self,a):
try:
a=int(a)
if a<=0:
return None
return datetime.utcfromtimestamp(a*86400).strftime('%Y-%m-%d')
except:
return "ErrDate(%s)" % a
def __init__(self,*arg,**kwargs):
header=kwargs['header'] if 'header' in kwargs else None
self._valueindex=-1
Peak._mapping = [ ]
self.header=None
if header is not None:
self.header=header.lower().split(";")
if (len(arg) == 0):
raise TypeError("Class needs to be initialized with stop data.")
if any ( isinstance(arg[x],list) for x in range(len(arg))):
if len(arg) != 1:
raise TypeError("If list is passed, it must be the only argument")
else:
self.listinit(arg[0])
else:
self.listinit(arg)
def __repr__(self):
ra = []
for n in range(len(self._mapping)):
if getattr(self,self._mapping[n]) is None:
continue
r = " '"
r += self._mapping[n]
r += "': "
r += getattr(self,self._mapping[n]).__repr__()
ra.append(r)
return "<\n" + ",\n".join(ra) + "\n>\n"
def __iter__(self):
if hasattr(self,"value"):
return self
else:
raise TypeError("Object needs 'value' to be iterated")
def __next__(self):
self._valueindex += 1
if self._valueindex == len(self.value):
raise StopIteration
return list(self.value)[self._valueindex]
def __getitem__(self,key):
if hasattr(self,"value"):
return self.value[key]
else:
raise TypeError("Object needs 'value' to be indexed")
def next(self): # python 2 compat
return self.__next__()
def listinit(self,stopdata):
raise NotImplementedError
class PeakSpecialDates(Peak):
_mapping = [
"value",
]
def listinit(self,rt):
for i in range(len(rt),13):
rt.append(None)
rt[0]={x:rt[11] for x in self.dateDeltaList(rt[5]) if x is not None}
for n in range(len(PeakSpecialDates._mapping)):
setattr(self,PeakSpecialDates._mapping[n],rt[n])
class PeakStop(Peak):
_mapping = [
"id",
"direction",
"coord_lat", #lat
"coord_long", #lng
"neighbours", #stops
"name",
"info",
"street",
"area",
"city",
]
def listinit(self,stopdata):
if not self.header:
self.header = self._mapping
for i in range(len(stopdata),len(self.header)+5):
stopdata.append(None)
for n in range(len(PeakStop._mapping)):
setattr(self,PeakStop._mapping[n],None)
for n in range(len(self.header)):
setattr(self,self.header[n],stopdata[n])
if hasattr(self,"stops"): self.neighbours = self.stops
if hasattr(self,"lat"): self.coord_lat = self.lat
if hasattr(self,"lng"): self.coord_long = self.lng
try: self.neighbours=self.neighbours.split(",")
except: pass
        try: self.coord_lat=int(self.coord_lat)/100000.0   # float division, so py2 doesn't truncate coordinates
        except: pass
        try: self.coord_long=int(self.coord_long)/100000.0
        except: pass
class PeakRoute(Peak):
def timeConv(self,timestring):
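        # timestring appears to be a comma-separated, delta-encoded timetable:
        # stage 0 accumulates departure-minute deltas (sign/zero prefixes act
        # as tags); each empty field advances the stage, stages 1-3 read
        # (value, count) pairs filling validity_from, validity_to and
        # weekdays, and later stages clone earlier departures with an offset.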
parsed={
'departures':[],
'validity_from':[],
'validity_to':[],
'weekdays':[],
'tags':[],
}
numbers=timestring.split(",")
timebase=0
vehcount=0
stage=0
pairing=0
mem=None
relive_ptr=0
prevmem=0
for i in numbers:
empty = i==''
i_string = i
try:
i = int(i)
except:
i = 0
if stage>0:
pairing = (pairing+1) % 2
if pairing:
mem=i
continue
if stage==0 and not empty:
timebase += i
parsed['departures'].append(timebase)
vehcount=len(parsed['departures'])
if len(i_string) > 2 and (i_string[0:3] == '-00' or i_string[0:2] == '+0'):
tag = '4'
elif (len(i_string) > 1 and i_string[0:2] == '-0' or len(i_string)>0 and i_string[0] == '+'):
tag = '1'
else:
tag = '0'
parsed['tags'].append(tag)
elif stage==1:
self.fill(parsed['validity_from'],self.date(mem),i,vehcount)
elif stage==2:
self.fill(parsed['validity_to'],self.date(mem),i,vehcount)
elif stage==3:
self.fill(parsed['weekdays'],str(mem),i,vehcount)
elif stage>3: #can't have "else" here!
if empty:
i=vehcount - relive_ptr%vehcount
for tmp in range(i):
parsed['departures'].append(parsed['departures'][relive_ptr]+mem+prevmem)
relive_ptr += 1
prevmem += mem-5
if empty:
stage += 1
prevmem = 0
parsed['departures']=[self.clock(x) for x in parsed['departures']]
return parsed
_mapping = [
"number", #routenum
"authority",
"city",
"transport",
"operator",
"validity", #validityperiods
"specialdates",
"tag",
"direction", #routetype
"commercial",
"name",
"weekdays",
"streets",
"stops",
#"timetables",
#"timetables_by_stops",
#"timetables_by_vehicles",
"realstreets",
"weekday_list",
"special",
"id",
]
def listinit(self,rt):
if not self.header:
self.header = self._mapping
for i in range(len(rt),len(self.header)+5):
rt.append(None)
for n in range(len(PeakRoute._mapping)):
setattr(self,PeakRoute._mapping[n],None)
for n in range(len(self.header)):
setattr(self,self.header[n],rt[n])
try: self.routestops=self.routestops.split(",")
except: pass
if hasattr(self,"routenum"): self.number = self.routenum
if hasattr(self,"routetype"): self.direction = self.routetype
if hasattr(self,"routestops"): self.stops = self.routestops
if hasattr(self,"routetag"): self.tag = self.routetag
if hasattr(self,"routename"): self.name = self.routename
if hasattr(self,"validityperiods"): self.validity = self.validityperiods
self.validity=[x for x in self.dateDeltaList(self.validity)]
self.specialdates=[x for x in self.dateDeltaList(self.specialdates) if x is not None] #probably wrong
self.timetables=rt[14]
# expressbuss, if starts with 3
if self.transport.lower() == 'minibus' and len(self.number)==3 and self.number[0]>='3' and self.city.lower() == 'riga':
self.transport = 'expressbus'
self.id = self.transport+"/"+self.number+"/"+self.direction
self.timetables=self.timeConv(self.timetables)
# night bus parse to move to proper weekdays for Riga
if self.transport.lower() == 'nightbus' and self.weekdays =='67' and self.city.lower() == 'riga':
self.weekdays = '56'
self.timetables["weekdays"] = ['56' if x == '67' else x for x in self.timetables["weekdays"] ]
if ('s' in self.weekdays):
self.special = "medium"
self.weekday_list=sorted(set(self.timetables["weekdays"]))
vehcount=len(self.timetables["weekdays"])
self.timetables_by_stops=[]
self.timetables_by_vehicles=[{'schedule':[]} for i in range(vehcount)]
d = 0
for nextstop in self.stops:
n = 0
stopval=[]
for time in self.timetables["departures"][0+d:vehcount+d]:
if d==0:
self.timetables_by_vehicles[n]['weekdays'] =self.timetables["weekdays"][n]
self.timetables_by_vehicles[n]['date_from'] = self.timetables["validity_from"][n]
self.timetables_by_vehicles[n]['date_to'] =self.timetables["validity_to"][n]
self.timetables_by_vehicles[n]['specialdates'] =self.specialdates
self.timetables_by_vehicles[n]['route'] =self.id
stopval.append({
'stop': nextstop,
'departure': time,
'weekdays': self.timetables["weekdays"][n],
'date_from': self.timetables["validity_from"][n],
'date_to': self.timetables["validity_to"][n],
'tag': self.timetables["tags"][n],
'specialdates': self.specialdates,
'route': self.id,
})
self.timetables_by_vehicles[n]["schedule"].append({
'stop': nextstop,
'departure': time,
'tag': self.timetables["tags"][n],
})
n += 1
self.timetables_by_stops.append(stopval)
d += vehcount
class PeakWebReader(Peak):
def __init__(self,routesin,stopsin):
self.stops={}
self.routes={}
self.subroutes={}
self.specialdates={}
self.addStops(stopsin)
self.addRoutes(routesin)
def addStops(self,stopsin):
for stop in OverwriteReader(stopsin[1:]):
st=PeakStop(stop,header=stopsin[0])
self.stops[st.id]=st
def addRoutes(self,routesin):
routesjoined=[]
for i in routesin:
if(i.find(';SpecialDates;',0,14)==0):
if self.specialdates != {}:
print ("Warning! Overwriting global special dates with new data.")
self.specialdates=PeakSpecialDates(OverwriteReader([i])[0])
break
for i in range(3 if routesin[2][0] == ';' else 2 if routesin[1][0] == ';' else 1,len(routesin),2):
try:
routesjoined.append(routesin[i]+routesin[i+1])
except:
print ("Warning! Wrong line count in routes.")
for route in OverwriteReader(routesjoined,limit=11):
rt=PeakRoute(route,header=routesin[0])
self.subroutes[rt.id]=rt
try:
self.routes[rt.transport+"/"+rt.number]
except:
self.routes[rt.transport+"/"+rt.number]=[]
self.routes[rt.transport+"/"+rt.number].append(rt.id)
def FindRoutesAtStop(self,stopid):
ra=[]
for r in self.subroutes:
if stopid in self.subroutes[r].stops:
ra.append(self.subroutes[r].id)
return ra
def GetRoutes(self,base="",every=False):
if(base.count('/')==2): # 3 parts
if base in self.subroutes:
return [base]
else:
return []
if(base.count('/')==1): #2 parts
every=True
if (every):
if base == "": #0 parts
return self.subroutes
return [i for i in self.subroutes if self.subroutes[i].id.find(base+"/") == 0] #1 and 2 parts
else:
if(base == ""): #0 parts
return list(set([self.subroutes[i].transport for i in self.subroutes ]))
else:
return list(set([base+"/"+self.subroutes[i].number for i in self.subroutes if self.subroutes[i].transport==base])) #1 parts
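# (Added note) Route ids follow the "transport/number/direction" scheme
# built in PeakRoute.listinit, so GetRoutes accepts a prefix with zero to
# three of those parts, e.g. "bus", "bus/3" or "bus/3/a-b" (the concrete
# id values here are illustrative only).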
def GetDeparturesForRouteAtStop(self,routeid,stopid):
ra=[]
for r in self.subroutes[routeid].timetables_by_stops:
for r1 in r:
if r1['stop'] == stopid:
ra.append(r1)
return ra
def GetDeparturesAtStop(self,stopid):
ra=[]
for r in self.FindRoutesAtStop(stopid):
ra = ra + (self.GetDeparturesForRouteAtStop(r,stopid))
return ra
def FilterByDate(self,fullresult,date=None):
ra=[]
if date is None:
date=datetime.now().strftime('%Y-%m-%d')
for r in fullresult:
if(self.validateDate(r['specialdates'],r['date_from'],r['date_to'],r['weekdays'],date)):
ra.append(r)
return ra
def SortByDeparture(self,fullresult):
return sorted(fullresult, key = lambda i:
("99" if (i['departure'].find('(+1)')!=-1) else "0")+" "+i['departure'])
def PopulateRouteStreets(self):
for ro in self.subroutes:
realstreets=[]
thisroute=self.subroutes[ro]
striter = thisroute.streets.split(",")
stopcopy=thisroute.stops[:]
striter = striter + ['']*(len(stopcopy)-len([x for x in striter if x=='']))
for i in range(len(striter)):
if striter[i] == '':
try:
striter[i]=self.stops[stopcopy[0]].street
stopcopy.pop(0)
except:
striter[i]=""
for st in striter:
if st == '0' or st=='' or st is None:
continue
if len(realstreets)==0 or realstreets[-1]!=st:
realstreets.append(st)
self.subroutes[ro].realstreets=",".join(realstreets)
def PopulateAll(self):
self.PopulateRouteStreets()
class PeakWebFileReader(PeakWebReader):
def __init__(self,froutes,fstops):
PeakWebReader.__init__(self,self.open(froutes),self.open(fstops))
def open(self,filename):
return open(filename,encoding='utf-8-sig').read().strip().split('\n')
def addFile(self,routes=None,stops=None):
if stops is None and routes is None:
raise TypeError("File for stops or routes need to be specified.")
if stops is not None:
self.addStops(self.open(stops))
if routes is not None:
self.addRoutes(self.open(routes))
``` |
{
"source": "0k/kids.common",
"score": 4
} |
#### File: kids/common/exc.py
```python
import traceback
def format_last_exception(prefix=" | "):
"""Format the last exception for display it in tests.
This allows raising a custom exception without losing the context of what
caused the problem in the first place:
>>> def f():
... raise Exception("Something terrible happened")
>>> try: ## doctest: +ELLIPSIS
... f()
... except Exception:
... formatted_exception = format_last_exception()
... raise ValueError('Oops, an error occurred:\\n%s' % formatted_exception)
Traceback (most recent call last):
...
ValueError: Oops, an error occurred:
| Traceback (most recent call last):
...
| Exception: Something terrible happened
"""
return '\n'.join(str(prefix + line)
for line in traceback.format_exc().strip().split('\n'))
``` |
{
"source": "0k/kids.data",
"score": 4
} |
#### File: kids/data/format.py
```python
import math
import datetime
import sact.epoch
## Python 3 compatibility layer
try:
unicode = unicode
except NameError: ## pragma: no cover
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
else: ## pragma: no cover
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
class Formatter(object):
r"""Generic formatter factory.
A Formatter take a context and returns a callable which will use
the context to format a value.
context is optional.
>>> from __future__ import print_function
>>> f = Formatter()
>>> assert f('hello') == 'hello'
Context at init time
--------------------
Lets create a formatter that use the context:
>>> from pprint import pformat
>>> class MyFormatter(Formatter):
... def format(self, value, context):
... if isinstance(context, basestring): return "%r" % context
... if context is None: return 'None'
... return "value: %s\ncontext:\n%s" % (value, pformat(context))
...
You can set up the context at instantiation time or at call time:
>>> fmt = Formatter({'data': 'foo'})
>>> fmt.context['data']
'foo'
To add convenience, you can specify some value in named arguments:
>>> fmt = Formatter(data='foo')
>>> fmt.context['data']
'foo'
These two ways can be combined, but beware of overwriting!
>>> fmt = Formatter({'data': 'foo'}, data='bar')
>>> fmt.context['data']
'bar'
Non dict-like values in context are not allowed if you use keyword
arguments:
>>> fmt = Formatter(3, data='bar')
Traceback (most recent call last):
...
TypeError: ... you cannot init your Formatter with named args.
While any Python object is allowed if you do not use keyword args:
>>> fmt = Formatter(3)
>>> fmt.context
3
Context at call time
--------------------
You can provide a context at call-time
>>> fmt = Formatter()
>>> assert fmt('hello', data='bar') == 'hello'
Context can be explicitly named and set:
>>> fmt = MyFormatter(context={'data': 'foo', 'ginko': 'biloba'},
... data='bar')
Then set
>>> print(fmt('hop', context={'data': 'bar2'}))
value: hop
context:
{'_': <function ... at ...>,
'data': 'bar2',
'gettextargs': {},
'ginko': 'biloba',
'trans': <function ...trans at ...>}
Support of old API:
-------------------
>>> class MyFormatter(Formatter):
... def format(self, value):
... return "%s" % value
...
>>> fmt = MyFormatter()
>>> print(fmt('hop'))
hop
"""
def __init__(self, context=None, **kwargs):
self.context = self._get_context(context, **kwargs)
def _get_context(self, context=None, **kwargs):
"""This function returns a context from keyword args and context
Note: to be thread safe, this function does not store anything in
the current object.
Note2: to be compatible with default overriding classes we must
check the prototype of the function format.
"""
## Get a dict copy of self.context if existent
try:
if hasattr(self, 'context'):
_context = dict(self.context) ## copy to be thread safe
else:
_context = {}
except (ValueError, TypeError):
_context = {}
## Merge with context values if any
if context is None:
context = {}
if isinstance(context, dict):
try:
_context.update(context) ## Update with new values
context = _context
## Put i18n information into the context
if '_' not in context:
def trans(msg, *args, **kwargs):
return unicode(msg)
context['_'] = trans
context.setdefault('gettextargs', {})
def trans(msg, *args, **kwargs):
"""Shortcut function for translation"""
kwargs.update(context['gettextargs'])
return context['_'](msg, *args, **kwargs)
context['trans'] = trans
except TypeError: ## Context is not a dict
pass ## context must be unchanged
## Merge with keyword arguments
if kwargs:
if not hasattr(context, '__setitem__'):
raise TypeError(
'Your context %r has no __setitem__ and thus '
'you cannot init your Formatter with named args.'
% context)
for key, value in kwargs.items():
context[key] = value
return context
def __call__(self, value, context=None, **kwargs):
## XXXvlab: ---- COMPATIBILITY CODE BEGIN (to delete)
self._formatting_structure = self.context
import inspect
from warnings import warn
(posargs, _, _, defaults) = inspect.getargspec(self.format)
if posargs[0] == 'self':
posargs = posargs[1:]
if len(posargs) == 1: ## OLD API
##XXXgsa: finally we keep the old API
# warn('Formatter.format(..) should use 3 arguments with new API.',
# DeprecationWarning, stacklevel=2)
return self.format(value)
## --- COMPATIBILITY CODE END
return self.format(value, self._get_context(context, **kwargs))
def format(self, value, context=None):
# Default formatting does nothing
return unicode(value)
class CompoundFormatter(Formatter):
"""Compose Formatter from several subformatter
The formatters are passed the last ones result and thus
are chained one to the next in the list.
XXXvlab: should we migrate this behavior in the main formatter ?
Usage
=====
>>> from kids.data.format import Formatter, CompoundFormatter
Let's creates two simple formatters:
>>> class AddX(Formatter):
... def format(self, value, context=None):
... return value + context['nb']
This formatter factory will simply create a formatter that adds
'nb' to the value formatted:
>>> myAdd2 = AddX(nb=2)
>>> myAdd2(10)
12
This formatter factory will embrace the formatted value with given
characters in the 'sep' variable:
>>> class Embrace(Formatter):
... def format(self, value, context=None):
... return "%s%s%s" % (context['sep'][0], value, context['sep'][1])
>>> myParenEmbrace = Embrace(sep='()')
>>> myParenEmbrace('hello')
'(hello)'
Let's create a compound formatter factory:
>>> myCompoundFormatter = CompoundFormatter(compound=[myAdd2,
... myParenEmbrace])
>>> myCompoundFormatter(20)
'(22)'
Similar uses:
>>> CompoundFormatter(compound=[AddX(nb=20), Embrace(sep='<>')])(10)
'<30>'
"""
def format(self, value, context=None):
for subformatter in self.context['compound']:
value = subformatter(value, context)
return value
def Chain(chain):
"""Shorctut to create a CompoundFormatter
Usage
=====
>>> from kids.data.format import Formatter, Chain
>>> class AddX(Formatter):
... def format(self, value, context=None):
... return value + context['nb']
This formatter factory will simply create a formatter that adds
'nb' to the value formatted:
>>> myAdd2 = AddX(nb=2)
>>> myAdd2(10)
12
This formatter factory will embrace the formatted value with given
characters in the 'sep' variable:
>>> class Embrace(Formatter):
... def format(self, value, context=None):
... return "%s%s%s" % (context['sep'][0], value, context['sep'][1])
>>> myParenEmbrace = Embrace(sep='()')
>>> myParenEmbrace('hello')
'(hello)'
Let's create a compound formatter factory:
>>> myfmt = Chain([AddX(nb=20), Embrace(sep='<>')])
>>> myfmt(1)
'<21>'
"""
return CompoundFormatter(compound=chain)
Formatter.__or__ = lambda self, value: Chain([self, value])
class TimeStampFormatter(Formatter):
"""Format a date to a short string representation
If a timeframe is provided to the constructor, the output
will be reduced to the minimum output.
>>> format_timestamp = TimeStampFormatter()
>>> assert format_timestamp(1226929090) == '2008-11-17 13:38:10'
timeframe usage
~~~~~~~~~~~~~~~
You can provide a structure to the constructor to declare
what are the allowed segments to display:
>>> s = ((0, 2), (1, 4), (3, 5))
The structure must be a iterable containing couples of
(min, max) value indicating the beginning and the ending
of the segment allowed.
These min and max are an integer that represent a specific
field in the ISO format representation :
0 1 2 3 4 5
ISO representation : YYYY-MM-DD HH:MM:SS
So Y is field 0, M is field 1, D is field 2, H is field 3, M is field 4, and S is field 5.
Declaring (0, 2) in the structure means that displaying
0 1 2
'YYYY-MM-DD' is allowed.
Example:
>>> w = (1226909090, 1226959090)
>>> format_timestamp = TimeStampFormatter(structure=s,timeframe=w)
>>> assert format_timestamp(w[0]) == '08:04:50'
which is the '(3, 5)' segment that matched best
>>> assert format_timestamp(w[1]) == '21:58:10'
>>> assert format_timestamp(1226919090) == '10:51:30'
>>> w=(120000000,1300000000)
>>> format_timestamp = TimeStampFormatter(structure=s,timeframe=w)
>>> assert format_timestamp(w[0]) == '1973-10-20'
>>> assert format_timestamp(w[1]) == '2011-03-13'
>>> assert format_timestamp(1290000000) == '2010-11-17'
>>> w=(123000000,124000000)
>>> format_timestamp = TimeStampFormatter(structure=s,timeframe=w)
>>> assert format_timestamp(w[0]) == '11-24 14:40'
>>> assert format_timestamp(w[1]) == '12-06 04:26'
>>> assert format_timestamp(123500000) == '11-30 09:33'
Exceptions
~~~~~~~~~~
>>> format_timestamp(1290000000)
Traceback (most recent call last):
...
ValueError: ...
>>> format_timestamp = TimeStampFormatter(structure=s,
... timeframe=(123500000,123500000))
...
>>> format_timestamp(123500000)
Traceback (most recent call last):
...
ValueError: ...
>>> format_timestamp = TimeStampFormatter(structure=s,
... timeframe=(123500001,123500000))
...
>>> format_timestamp(123500000)
Traceback (most recent call last):
...
ValueError: ...
"""
def format(self, value, context=None):
dt = sact.epoch.Time.fromtimestamp(value)
if 'timeframe' not in context:
return unicode(dt.short)
first, last = context['timeframe']
# raises TypeError if not castable:
first, last = int(first), int(last)
if not first <= last:
raise ValueError("timeframe has lower bound greater than "
"its greated bound (%d, %d)" % (first, last))
if not first <= value <= last:
raise ValueError("value %d is not in provided time "
"window (%d, %d)" % (value, first, last))
first = sact.epoch.Time.fromtimestamp(first).short
last = sact.epoch.Time.fromtimestamp(last).short
# find index of first char that differs in ``first`` and ``last``
diff = False
i = 0
for i, c in enumerate(first):
if c != last[i]:
diff = True
break
if not diff:
raise ValueError("Time frame cannot be equal "
"to the second (%d, %d)"
% context['timeframe'])
# uniformize separation char
last = last.replace("-", " ").replace(":", " ")
last_tuple = last.split(' ')
# now get the index of the previous non-digit char
cpt = 0
for cpt in range(i):
char = last[i - 1 - cpt]
if char == u' ':
break
# next char should be our first index
start_from = i - cpt
cutlist = last[start_from:].split(' ')
first_elmt = 6 - len(cutlist)
# finds the smallest format that contains first_elmt
candidates = [(lmin, lmax) for lmin, lmax in context['structure']
if lmin <= first_elmt <= lmax]
# take the one having the best precision
best_lmax = 0
for lmin, lmax in candidates:
if lmax > best_lmax:
best_lmax = lmax
# filter out candidates that have not lmax = best_lmax
candidates = [(lmin, lmax) for lmin, lmax in candidates
if lmax == best_lmax]
# get the smallest
smallest = None
smallest_size = 7
for lmin, lmax in candidates:
if (lmax - lmin) < smallest_size:
smallest_size = lmax - lmin
smallest = (lmin, lmax)
lmin, lmax = smallest
# get index in iso string of start and end
if lmin == 0:
start_from = 0
else:
start_from = len(' '.join(last_tuple[0:lmin])) + 1
end_at = len(' '.join(last_tuple[0:lmax + 1]))
# final cut !
return unicode(dt.isoformat(" ")[start_from:end_at])
class FancyNumberFormatter(Formatter):
"""Fancy number formatter factory.
This Formatter uses a specific structure to format numbers that
allows differents type of formatting base on variable scales...
Sample::
>>> from __future__ import print_function
>>> formatter = FancyNumberFormatter(structure=(
... # limit base format
... (10**3, 1, (u'< 1 ms' , {} )),
... (10**3, 10**3, (u'%(ms)d ms' , {'ms': 1,} )),
... (60 , 10**3, (u'%(sec).1f s' , {'sec': 1,} )),
... (60 , 1, (u'%(min)d m %(sec)d s' , {'min': 60, 'sec':1,} )),
... (24 , 60, (u'%(hour)d h %(min)d m' , {'hour': 60, 'min':1,} )),
... (None , 60, (u'%(day)d d %(hour)d h' , {'day': 24, 'hour':1,} )),
... ))
...
Build a new Formatter:
----------------------
The given structure must be a list of (limit,base,format) tuples.
Limit
^^^^^
Each tuple defines a way to display the value. The first value of
the tuple (called limit) triggers the choice of the tuple used to
display the value :
>>> formatter = FancyNumberFormatter(structure=(
... # limit base format
... ( 2, 1, (u'A' , {} )),
... ( 3, 1, (u'B' , {} )),
... ( None, 1, (u'C' , {} )),
... ))
...
You can see that limit will trigger a particular line depending
on the limit parameter... each new limit parameter is multiplied
by the preceding.
So in our example, the first line applies when the value is below
'2', the next line will apply between 2 < v <= 2*3. And the third
line will apply with no limit on 2*3 < v.
So:
>>> for i in range(10): print('%d => %s' % (i,formatter(i)))
0 => A
1 => A
2 => B
3 => B
4 => B
5 => B
6 => C
7 => C
8 => C
9 => C
Base
^^^^
The second parameter in each tuple defines the base 'resolution'
accessible through the format:
>>> formatter = FancyNumberFormatter(structure=(
... # limit base format
... ( 2, 1, (u'%(value)d single' , {'value': 1} )),
... ( 2, 2, (u'%(value)d duo' , {'value': 1} )),
... (None, 2, (u'%(value)d quartet' , {'value': 1} )),
... ))
...
This means, for the first tuple, that for 0 <= v <= 2, v/1 will
be sent to the 'format' structure.
For the second tuple, which displays our value for 2 < v <= 2*2, v/2 will
be sent to the 'format' structure. And at last:
for the last tuple, which displays our value for 2*2 < v, v/(2*2) will
be sent to the 'format' structure.
So::
>>> for i in range(10): print('%d => %s' % (i,formatter(i)))
0 => 0 single
1 => 1 single
2 => 1 duo
3 => 1 duo
4 => 1 quartet
5 => 1 quartet
6 => 1 quartet
7 => 1 quartet
8 => 2 quartet
9 => 2 quartet
Format
^^^^^^
format will take care of displaying the value. It only receives
the 'based' value!
>>> formatter = FancyNumberFormatter(structure=(
... # limit base format
... ( 2, 1, (u'%(value)d single' , {'value': 1})),
... ( 2, 1, (u'%(value_1)d duo %(value_2)d single' , {'value_1': 2, 'value_2': 1})),
... (None, 2, (u'%(value_1)d quartet %(value_2)d duo', {'value_1': 2, 'value_2': 1})),
... ))
the format is a list of couples: each couple contains a formatting string
and a reduction value. The reduction value applies to the base.
So::
>>> for i in range(10): print('%d => %s' % (i,formatter(i)))
0 => 0 single
1 => 1 single
2 => 1 duo 0 single
3 => 1 duo 1 single
4 => 1 quartet 0 duo
5 => 1 quartet 0 duo
6 => 1 quartet 1 duo
7 => 1 quartet 1 duo
8 => 2 quartet 0 duo
9 => 2 quartet 0 duo
Please note that some black magic is in action. Let's make it explicit: the keys of the dict that
provides the scales (here {'value_1': 2, 'value_2': 1}) do not carry any order information! The order
of scales is alphabetical by default. If you want another order, you can specify a tuple of ordered
scale keys as the third part of the structure
>>> formatter = FancyNumberFormatter(structure=(
... # limit base format
... ( 2, 1, (u'%(value)d single' , {'value': 1})),
... ( 2, 1, (u'%(value_2)d duo %(value_1)d single' , {'value_2': 2, 'value_1': 1}, ('value_2', 'value_1'))),
... (None, 2, (u'%(value_2)d quartet %(value_1)d duo', {'value_2': 2, 'value_1': 1}, ('value_2', 'value_1'))),
... ))
the format is a list of couples: each couple contains a formatting string
and a reduction value. The reduction value applies to the base.
So::
>>> for i in range(10): print('%d => %s' % (i,formatter(i)))
0 => 0 single
1 => 1 single
2 => 1 duo 0 single
3 => 1 duo 1 single
4 => 1 quartet 0 duo
5 => 1 quartet 0 duo
6 => 1 quartet 1 duo
7 => 1 quartet 1 duo
8 => 2 quartet 0 duo
9 => 2 quartet 0 duo
Generic Example
---------------
>>> format_timedelta = FancyNumberFormatter(structure=(
... # limit base format
... (10**3, 1, (u'< 1 ms' , {} )),
... (10**3, 10**3, (u'%(value)d ms' , {'value': 1} )),
... (60 , 10**3, (u'%(value).1f s' , {'value': 1} )),
... (60 , 1, (u'%(value_1)d m %(value_2)d s' , {'value_1': 60, 'value_2': 1})),
... (24 , 60, (u'%(value_1)d h %(value_2)d m' , {'value_1': 60, 'value_2': 1})),
... (None , 60, (u'%(value_1)d d %(value_2)d h' , {'value_1': 24, 'value_2': 1})),
... ))
>>> assert format_timedelta(2500 * 10**3 * 60 * 60 * 24) == '2 d 12 h'
>>> assert format_timedelta(2500 * 10**3 * 60 * 60 * 24 * 10) == '25 d 0 h'
>>> assert format_timedelta(2500) == '2 ms'
>>> assert format_timedelta(2500 * 10**3) == '2.5 s'
>>> assert format_timedelta(2500 * 10**3 * 60) == '2 m 30 s'
>>> assert format_timedelta(2500 * 10**3 * 60 * 60) == '2 h 30 m'
>>> assert format_timedelta(1226929090) == '20 m 26 s'
>>> assert format_timedelta(800) == '< 1 ms'
>>> format_timedelta = FancyNumberFormatter(structure=(
... # limit base format
... (10**3, 1, ('%(value)d microsecs', {'value': 1})),
... (10**3, 10**3, ('%(value)d ms' , {'value': 1})),
... (60 , 10**3, ('%(value).1f s' , {'value': 1})),
... (60 , 1, ('%(value_1)d m %(value_2)d s' , {'value_1': 60, 'value_2': 1})),
... (24 , 60, ('%(value_1)d h %(value_2)d m' , {'value_1': 60, 'value_2': 1})),
... (None , 60, ('%(value_1)d d %(value_2)d h' , {'value_1': 24, 'value_2': 1})),
... ))
...
>>> assert format_timedelta(800) == '800 microsecs'
Float support
-------------
The first base permits scaling up the displayed value: this sample
displays '80 ms' when you provide 0.08
>>> format_timedelta = FancyNumberFormatter(structure=(
... # limit base format
... (10**3,10**-6, ('%(value)d microseconds' , {'value': 1})),
... (10**3, 10**3, ('%(value).2f ms' , {'value': 1})),
... (60 , 10**3, ('%(value).2f s' , {'value': 1})),
... (60 , 1, ('%(value_1)d m %(value_2)d s' , {'value_1': 60, 'value_2': 1})),
... (24 , 60, ('%(value_1)d h %(value_2)d m' , {'value_1': 60, 'value_2': 1})),
... (7 , 60, ('%(value_1)d wk %(value_2)d d %(value_3)d h' , {'value_1': 7*24, 'value_2': 24, 'value_3': 1})),
... (None , 24*7, ('%(value_1)d y %(value_2)d wk' , {'value_1': 52, 'value_2': 1})),
... ))
>>> assert format_timedelta(0.08) == '80.00 ms'
>>> assert format_timedelta(0.0008) == '800 microseconds'
Exception
---------
You must provide a 'None' value as the last limit in the formatting
structure. If you forget it, an exception will be raised::
>>> format_timedelta = FancyNumberFormatter(structure=(
... # limit base format
... (10**3,10**-6, (u'%(value)d microseconds', {'value': 1})),
... (10**3, 10**3, (u'%(value).2f ms' , {'value': 1})),
... ))
>>> format_timedelta(832)
Traceback (most recent call last):
...
TypeError: formatting structure is incorrect
"""
def format(self, value, context=None):
value = float(value) ## generates exception if not castable to a float
base = value
def _format(base, format):
if len(format) == 3:
msg, scales, order = format
else:
msg, scales = format
order = sorted(scales.keys())
values = {}
for key in order:
values[key] = base / scales[key]
base %= scales[key]
msg = context['trans'](msg)
return msg % (values)
try:
value /= context['structure'][0][1]
for limit, factor, fmt in context['structure']:
base = float(base) / factor
if not limit or 0 <= value < limit:
return _format(base, fmt)
value = float(value) / limit
raise TypeError ## raise error
except TypeError:
raise TypeError("formatting structure is incorrect")
class LogNumberFormatter(Formatter):
"""Logarithmic number formatter factory.
This Formatter uses a specific structure to format numbers on
a logarithmique basis. Typically, you should use this to suffix
numbers depending on a fixed scale : (ie: 1000 is a fixed scale
for lots of units, and 'K' is the common suffix, 'M' the next,
...)
Result of the Formatter will be a couple of unicode strings: the
value, and the unit.
Let's show this through a simple byte formatter::
>>> format_size = LogNumberFormatter(
... units=(["B", "KiB", "MiB", "GiB", "TiB"], 2**10))
Notice that 2**10 == 1024, which is the base for the calculation if we
want true binary Bytes::
>>> assert format_size(0) == ('0', 'B')
>>> assert format_size(1024) == ('1.0', 'KiB')
>>> assert format_size(1024**4) == ('1.0', 'TiB')
>>> assert format_size(1024**4) == ('1.0', 'TiB')
>>> assert format_size(1024**2.5) == ('32.0', 'MiB')
Precision
---------
The precision is the number of digits after the period; rounding is to the nearest value:
>>> assert format_size(1500) == ('1.5', 'KiB') ## 1500/1024 = 1.46
Note that :
>>> assert format_size(2047) == ('2.0', 'KiB') ## 1.999 KiB
Even with the default precision of 1, the size should not be
written with a trailing ".0"
>>> assert format_size(35) == ('35', 'B')
>>> format_size = LogNumberFormatter(
... units=([u"B", u"KiB", u"MiB", u"GiB", u"TiB"], 2**10),
... precision=6
... )
...
>>> assert format_size(2366) == ('2.311', 'KiB')
Number over the scale
---------------------
Big numbers over the scale are written in the last unit specified
in the scale:
>>> format_size = LogNumberFormatter(
... units=([u"B", u"KiB", u"MiB", u"GiB", u"TiB"], 2**10))
>>> assert format_size(1024**6) == ('1048576.0', 'TiB')
Internationalisation
--------------------
We can use the factory to create the international formatting function
that will use 1000 as scale factor and "KB" instead of "KiB"
>>> format_size = LogNumberFormatter(
... units=([u"B", u"KB", u"MB", u"GB", u"TB"], 10**3))
>>> assert format_size(5000) == ('5.0', 'KB')
>>> assert format_size(234) == ('234', 'B')
Exceptions
----------
>>> format_size = LogNumberFormatter(
... units=([u"B", u"KB", u"MB", u"GB", u"TB"], 10**3),
... precision='s'
... )
...
>>> format_size(234)
Traceback (most recent call last):
...
ValueError: ...
>>> format_size = LogNumberFormatter(
... units=([u"B", u"KB", u"MB", u"GB", u"TB"], 10**3))
...
>>> format_size('s')
Traceback (most recent call last):
...
ValueError: ...
>>> format_size = LogNumberFormatter(
... units=([u"B", u"KB", u"MB", u"GB", u"TB"], 10**3),
... precision='-5'
... )
...
>>> format_size(234)
Traceback (most recent call last):
...
ValueError: ...
"""
def format(self, value, context=None):
precision = int(context.get('precision', 1))
## generates exception if not integer
value = int(value)
if precision < 0:
raise ValueError("Negative precision '%d' is not coherent."
% precision)
def _format(value, suffix, precision, auth_prec):
"""Takes care of the final formatting ..."""
prec = min(auth_prec, precision)
if prec == 0:
format_string = u"%i"
else:
format_string = u"%." + str(prec) + "f"
return (format_string % value, suffix)
suffixes, factor = context['units']
## raise error if suffixes is not iterable
iter(suffixes)
auth_prec = 0
suffix = u'' ## default value if suffixes is empty
for suffix in suffixes:
if 0 <= value < factor:
return _format(value, suffix, precision, auth_prec)
last_value = value ## keep in case of last loop
value = float(value) / factor
auth_prec += int(math.log(factor, 10))
return _format(last_value, suffix, precision, auth_prec)
class PercentFormatter(Formatter):
"""Percent number formatter
Convert a number (float or int) to a percentage, or to whatever scale you
want by giving a precision factor: percent, per thousand, ...
>>> perNumber = PercentFormatter()
>>> perNumber(0.2)
20.0
>>> perNumber(-1)
-100
>>> perNumber(5)
500
We can specify the precision, for example if you want a per-thousand value
>>> perNumber = PercentFormatter(precision=1000)
>>> perNumber(0.2)
200.0
>>> perNumber(-1)
-1000
>>> perNumber(5)
5000
>>> perNumber('e')
Traceback (most recent call last):
...
ValueError: Need an integer or float value.
"""
def format(self, value, context=None):
precision = int(context.get('precision', 100))
if isinstance(value, int) or isinstance(value, float):
return value * precision
raise ValueError("Need an integer or float value.")
def mk_fmt(fun):
class _Formatter(Formatter):
def format(self, value, context=None):
return fun(value, context)
return _Formatter
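# Added usage sketch (illustrative, not part of the original module):
# ``mk_fmt`` wraps a plain ``f(value, context)`` function into a Formatter
# subclass, so it gains context handling and ``|`` chaining.
#
# >>> Upper = mk_fmt(lambda value, context: str(value).upper())
# >>> Upper()('hello')
# 'HELLO'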
```
#### File: kids/data/lib.py
```python
def partition(elts, predicate):
"""Splits elts depending on their result.
Partitions set of elts. Result of the predicate must be hashable.
>>> partition("qweryuop ", lambda x: "How are you ?".count(x))
{0: ['q', 'p'], 1: ['w', 'e', 'r', 'y', 'u'], 2: ['o'], 3: [' ']}
"""
heaps = {}
for elt in elts:
key = predicate(elt)
heaps[key] = heaps.get(key, []) + [elt]
return heaps
def half_split_on_predicate(elts, predicate):
"""Splits elts in two thanks to a predicate function.
Partitions set of elts. Result of the predicate must be boolean.
>>> half_split_on_predicate("qweryuop ", lambda x: x in "How are you ?")
(['q', 'p'], ['w', 'e', 'r', 'y', 'u', 'o', ' '])
"""
heaps = partition(elts, predicate)
return heaps.get(False, []), heaps.get(True, [])
Null = object()
def first(elts, predicate, default=Null):
"""Returns the first elt of elts that matches predicate
>>> first([3, 7, 11, 15, 33], predicate=lambda x: x % 11 == 0)
11
But if no element matches the predicate it should raise an
exception::
>>> first([3, 7, 11, 15, 33], predicate=lambda x: x % 13 == 0)
Traceback (most recent call last):
...
ValueError: No value matches predicate
We can also set a default value in case of no match::
>>> first([3, 7, 11, 15, 33], predicate=lambda x: x % 13 == 0,
... default=0)
0
"""
for elt in elts:
if predicate(elt):
return elt
if default is Null:
raise ValueError("No value matches predicate")
else:
return default
```
#### File: kids/data/match.py
```python
import kids.cache
try:
import distance
except ImportError: ## pragma: no cover
distance = None
##
## Criterias
##
if distance: ## pragma: no cover
levenstein = lambda a, b: 1 - distance.nlevenshtein(a, b)
else: ## pragma: no cover
def levenstein(a, b):
raise Exception(
"'levenstein' function needs python module 'distance' "
"to be available.")
equal = lambda a, b: a == b
size = lambda a, b: len(a) == len(b)
##
## Criteria Factory
##
def weighted(criterias_weights):
"""Return callable criteria with weighted sub-criteria values
This allows you to create a new criteria function by mixing others.
>>> weighted([(equal, 1)])("foo", "bar")
0.0
>>> weighted([(equal, 1)])("foo", "foo")
1.0
>>> weighted([(equal, 1), (size, 1)])("foo", "bar")
0.5
>>> weighted([(equal, 3), (size, 1)])("foo", "bar")
0.25
"""
def wc(a, b):
s = 0
tot_wh = 0
for cr, wh in criterias_weights:
s += float(cr(a, b)) * wh
tot_wh += wh
return s / tot_wh
return wc
def avg(criterias):
"""Return average matching for given criteria
>>> avg([equal])("foo", "bar")
0.0
>>> avg([equal])("foo", "foo")
1.0
>>> avg([levenstein])("bar", "baz") ## doctest: +ELLIPSIS
0.66...
>>> avg([levenstein])("bar", "foo")
0.0
>>> avg([levenstein, size, equal])("bar", "barb")
0.25
>>> avg([levenstein, size, equal])("bar", "baz") ## doctest: +ELLIPSIS
0.55...
"""
return weighted([(cr, 1) for cr in criterias])
##
## Using criterias
##
@kids.cache.cache
def match(a, b, criteria):
"""Return a float between 0 and 1. 1 is perfect match.
Could Store result in cache.
"""
return criteria(a, b)
def first_match(elt, targets, criteria):
"""Returns False or perfect match in targets matching criterias if found
Suppose you have one elt, and you want to find the first perfect match for
this elt in list of other elts.
>>> first_match("foo", ["bar", "barb", "fooz", "foo", "zob"],
... criteria=avg([levenstein, size, equal]))
'foo'
If no element matches, it should return False::
>>> first_match("foo", ["bar", "barb", "fooz", "zob"],
... criteria=equal)
False
"""
for target in targets:
if match(elt, target, criteria) == 1:
return target
return False
def close_matches(elt, targets, criteria, min_ratio=0.):
"""Return only matches above min_ratio, first matches are the best.
>>> close_matches("foo",
... ["bar", "barb", "fooz", "foo", "zob"],
... criteria=avg([levenstein, size, equal]),
... min_ratio=0.1) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[('foo', 1.0), ('zob', 0.44...), ('bar', 0.33...), ('fooz', 0.25)]
Notice that 'barb' has disappeared as its ratio is below ``min_ratio``.
They are also sorted with the best match first.
"""
candidates = [(target, match(elt, target, criteria)) for target in targets]
candidates = [(target, ratio)
for target, ratio in candidates
if ratio > min_ratio]
candidates.sort(key=lambda tr: tr[1], reverse=True)
return candidates
``` |
{
"source": "0k/kids.sh",
"score": 2
} |
#### File: kids/sh/__init__.py
```python
from __future__ import unicode_literals
from .sh import wrap, cmd, ShellError, ShellOutput, set_env
from kids.test import Test
from kids.cache import cache
class BaseShTest(Test):
COMMAND = ""
DEFAULT_ENV = {
}
@cache
@property
def cmd(self):
return set_env(**self.DEFAULT_ENV)(cmd)
@cache
@property
def w(self):
return set_env(**self.DEFAULT_ENV)(wrap)
```
#### File: sh/tests/test_cmd.py
```python
from .. import ShellError, BaseShTest
class CmdSimpleTest(BaseShTest):
def test_shell_call(self):
res = self.cmd("true")
self.assertEquals(res.out, "")
self.assertEquals(res.err, "")
self.assertEquals(res.errlvl, 0)
def test_full_bash_construct(self):
res = self.cmd("(false || true) && test -z "" > /dev/null")
self.assertEquals(res.out, "")
self.assertEquals(res.err, "")
self.assertEquals(res.errlvl, 0)
def test_fail(self):
res = self.cmd("false")
self.assertEquals(res.out, "")
self.assertEquals(res.err, "")
self.assertEquals(res.errlvl, 1)
def test_fail_and_out_err_catching(self):
res = self.cmd("echo -n 1 >&1 ; echo -n 2 >&2 ; exit 12")
self.assertEquals(res.out, "1")
self.assertEquals(res.err, "2")
self.assertEquals(res.errlvl, 12)
class CmdEnvTest(BaseShTest):
DEFAULT_ENV = {"MYVALUE": "XYXY"}
def test_shell_env(self):
res = self.cmd("echo -n $MYVALUE")
self.assertEquals(res.out, "XYXY")
def test_shell_inherit_main_process_env(self):
import os
os.environ["MYVALUE2"] = "ABAB"
res = self.cmd("echo -n $MYVALUE2")
self.assertEquals(res.out, "ABAB")
```
#### File: sh/tests/test_wrap.py
```python
from .. import ShellError, BaseShTest
class WrapSimpleTest(BaseShTest):
def test_shell_call(self):
out = self.w("true")
self.assertEquals(out, "")
def test_full_bash_construct(self):
out = self.w("(false || true) && test -z "" > /dev/null")
self.assertEquals(out, "")
def test_fail(self):
self.assertRaises(ShellError, self.w, ("false", ))
class WrapEnvTest(BaseShTest):
DEFAULT_ENV = {"MYVALUE": "XYXY"}
def test_shell_env(self):
out = self.w("echo $MYVALUE")
self.assertEquals(out, "XYXY")
def test_shell_inherit_main_process_env(self):
import os
os.environ["MYVALUE2"] = "ABAB"
out = self.w("echo $MYVALUE2")
self.assertEquals(out, "ABAB")
``` |
{
"source": "0k/kids.test",
"score": 3
} |
#### File: kids/test/__init__.py
```python
from __future__ import unicode_literals
from __future__ import print_function
import os
import tempfile
import unittest
import re
import shutil
class Test(unittest.TestCase):
## XXXvlab: it seems it's already there in PY3 and maybe PY2,
## so why keep it ?
def assertContains(self, haystack, needle, msg=None):
if not msg:
msg = "%r should contain %r." % (haystack, needle)
self.assertTrue(needle in haystack, msg)
def assertNotContains(self, haystack, needle, msg=None):
if not msg:
msg = "%r should not contain %r." % (haystack, needle)
self.assertTrue(needle not in haystack, msg)
def assertRegex(self, text, regex, msg=None):
if not msg:
msg = "%r should match regex %r." % (text, regex)
self.assertTrue(re.search(regex, text, re.MULTILINE) is not None, msg)
class BaseTmpDirTest(Test):
def setUp(self):
## put an empty tmp directory up
self.old_cwd = os.getcwd()
self.tmpdir = tempfile.mkdtemp()
os.chdir(self.tmpdir)
def tearDown(self):
## delete the tmp directory
os.chdir(self.old_cwd)
shutil.rmtree(self.tmpdir)
def run(test):
from pprint import pprint
try:
from StringIO import StringIO
except ImportError: ## PY3
from io import StringIO
stream = StringIO()
runner = unittest.TextTestRunner(stream=stream)
runner.run(unittest.makeSuite(test))
stream.seek(0)
print(stream.read())
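# Added usage sketch (illustrative): run a single test case class and get
# its report printed, e.g. from an interactive session.
#
# >>> class MyTest(Test):
# ...     def test_contains(self):
# ...         self.assertContains("haystack", "hay")
# >>> run(MyTest)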
``` |
{
"source": "0k/kids.txt",
"score": 4
} |
#### File: kids/txt/txt.py
```python
from __future__ import print_function
import textwrap
import re
def dedent(txt):
"""Dedent a txt, tolerating first line not indented.
>>> from __future__ import print_function
Various issues should be tackled:
>>> print(dedent(
... '''This is a doc
...
... with fancy indentation, that should just work also.
... Without removing too much neither as:
... - more space.'''))
This is a doc
<BLANKLINE>
with fancy indentation, that should just work also.
Without removing too much neither as:
- more space.
Note that the first line doesn't have indentation and neither the
second (which is empty).
Of course, ``dedent`` should not fail on empty string neither:
>>> dedent("")
''
"""
if "\n" not in txt:
return txt.lstrip()
first_line, end = txt.split('\n', 1)
return "%s\n%s" % (first_line, textwrap.dedent(end))
## Note that a quite equivalent function was added to textwrap in python 3.3
def indent(text, prefix=" ", first=None):
"""Return text string indented with the given prefix
>>> string = 'This is first line.\\nThis is second line\\n'
>>> print(indent(string, prefix="| "))
| This is first line.
| This is second line
|
>>> print(indent(string, first="- "))
- This is first line.
This is second line
>>> print(indent(string, first="")) ## doctest: -NORMALIZE_WHITESPACE
This is first line.
This is second line
<BLANKLINE>
"""
if first is not None:
first_line = text.split("\n")[0]
rest = '\n'.join(text.split("\n")[1:])
return '\n'.join([first + first_line,
indent(rest, prefix=prefix)])
return '\n'.join([prefix + line
for line in text.split('\n')])
def paragraph_wrap(text, regexp="\n\n"):
r"""Wrap text by making sure that paragraph are separated correctly
>>> string = 'This is first paragraph which is quite long don\'t you \
... think ? Well, I think so.\n\nThis is second paragraph\n'
>>> print(paragraph_wrap(string)) # doctest: +NORMALIZE_WHITESPACE
This is first paragraph which is quite long don't you think ? Well, I
think so.
This is second paragraph
Notice that that each paragraph has been wrapped separately.
"""
regexp = re.compile(regexp, re.MULTILINE)
return "\n".join("\n".join(textwrap.wrap(paragraph.strip()))
for paragraph in regexp.split(text)).strip()
def ucfirst(msg):
"""Return altered msg where only first letter was forced to uppercase."""
return msg[0].upper() + msg[1:]
## Provided in textwrap.shorten in python 3
def shorten(s, l, index=-1, token="..", token_length=None):
"""Return given string truncated to given length.
>>> shorten('bonjour', 10)
'bonjour'
>>> shorten('bonjour tout le monde', 10)
'bonjour ..'
>>> shorten('bonjour tout le monde', 10, index=4)
'bonj..onde'
>>> shorten('bonjour tout le monde', 10, index=4, token="/../")
'bonj/../de'
>>> shorten('bonjour tout le monde', 10, index=4,
... token="<span class='more'>..</span>",
... token_length=2)
"bonj<span class='more'>..</span>onde"
"""
ltoken = token_length if token_length else len(token)
ls = len(s)
if ls <= l:
return s
to_del = ls - l + ltoken
## get start
start = index if index >= 0 else ls - l
end = start + to_del
if end > ls:
## push start to the left
start = start - (end - ls)
if start < 0:
start = 0
end = ls
return s[:start] + token + s[end:]
truncate = shorten
``` |
{
"source": "0-k/prosumerpolicy",
"score": 3
} |
#### File: prosumerpolicy/prosumerPolicy/input.py
```python
from paths import *
import pandas as pd
import logging
__all__=['import_Load', 'import_PV', 'import_Prices']
def import_Load(path=path_Load):
try:
absPath = gen_Path(path)
totalload = pd.read_csv(absPath, header=None, delimiter=';')
logging.info("Load Successfully Imported from {}".format(path))
return totalload
except:
logging.warning("Load Input from {} Error".format(path))
def import_PV(path=path_PvGen):
try:
absPath = gen_Path(path)
totalPvGen = pd.read_csv(absPath, header=None)
logging.info("PV Gen Successfully Imported from {}".format(path))
return totalPvGen
except:
logging.warning("Pv Gen Input from {} Error".format(path))
def import_Prices(path=path_Prices):
try:
absPath = gen_Path(path)
totalPrices = pd.read_csv(absPath, sep=';')
logging.info("Prices Successfully Imported from {}".format(path))
return totalPrices
except:
logging.warning("Price Input from {} Error".format(path))
```
#### File: prosumerpolicy/prosumerPolicy/inputSetter.py
```python
from __future__ import print_function
import logging
import numpy as np
from paths import *
from input import *
from pv import PV
from battery import Battery
totalPrices = import_Prices(path_Prices)
totalPvGen = import_PV(path_PvGen)
totalLoad = import_Load(path_Load)
class _InputSetter:
def __init__(self, duration=24, day=1, loadRow=0):
self.PV = PV()
self.Battery = Battery()
self.totalPrices = totalPrices
self.totalPvGen = totalPvGen
self.totalLoad = totalLoad
self.totalAverageLoad = totalLoad.mean(axis=1)
self.__timeDuration = duration
self.__day = day
self.loadRow = loadRow
@property
def pvGenList(self):
return self.get_pvGen_list()
@property
def priceList(self):
return self.get_price_list()
@property
def loadList(self):
return self.get_load_list()
@property
def timeDuration(self):
return self.__timeDuration
@timeDuration.setter
def timeDuration(self, time):
self.__timeDuration = time
@property
def day(self):
return self.__day
@day.setter
def day(self, day):
self.__day = day
@property
def loadRow(self):
return self.__loadRow
@loadRow.setter
def loadRow(self, loadrow):
logging.info('Load Row Changed to {}'.format(loadrow))
self.__loadRow = loadrow
def get_price_list(self, day=None, duration=None):
''' returns price list as np.array for specified day and duration'''
if day is None:
day = self.day
if duration is None:
duration = self.timeDuration
if day is None or duration is None:
raise ValueError("Please Specify a day and time series duration")
try:
hours = (day - 1) * 24
price = np.array(self.totalPrices['Price'][hours:hours + duration])
if np.isnan(price).any():
raise IOError
return price / 1000 # in kWh
except IOError:
logging.warning('Price list in day {} contains missing values'.format(day))
def get_load_list(self, day=None, duration=None, loadRow=None):
if day is None:
day = self.day
if duration is None:
duration = self.timeDuration
if loadRow is None:
loadRow = self.loadRow
if day is None or duration is None or loadRow is None:
raise ValueError("Please set day, duration and load Row")
try:
if loadRow == -1: ## LOAD ROW -1 gives average load row
hours = (day - 1) * 24
load = np.array(self.totalAverageLoad[:][hours:hours + duration])
if np.isnan(load).any():
raise IOError
return load / 1000 # kWh
else:
hours = (day - 1) * 24
load = np.array(self.totalLoad[loadRow][hours:hours + duration])
if np.isnan(load).any():
raise IOError
return load / 1000 # kWh
except IOError:
logging.warning('Load list in day {} contains missing values'.format(day))
def get_pvGen_list(self, day=None, duration=None):
if day is None:
day = self.day
if duration is None:
duration = self.timeDuration
if day is None or duration is None:
raise ValueError("Please Specify a day and time series duration")
try:
hours = (day - 1) * 24
result = np.array(self.PV._calculatedPvGen[0][hours:hours + duration])
if np.isnan(result).any():
raise IOError
return result / 1000 # kW
except IOError:
logging.warning('PV generation list in day {} contains missing values.'.format(day))
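# Added usage sketch (illustrative; _InputSetter is wired up by Model in
# normal use): fetch one day of hourly data.
#
# setter = _InputSetter(duration=24, day=10, loadRow=0)
# prices = setter.priceList # per kWh, 24 values
# load = setter.loadList # kWh per hour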
```
#### File: prosumerpolicy/prosumerPolicy/model.py
```python
import warnings
import numpy as np
from paths import *
from optimize import _Optimization
from inputSetter import _InputSetter
from economics import Economics
from policy import Policy
class Model:
def __init__(self):
self.__input_setter = _InputSetter()
self.policy = Policy(self.__input_setter)
self.__optimization = _Optimization(self.__input_setter, self.policy)
self._economics = Economics(self.__input_setter, self.policy, self.__optimization)
self.PV = self.__input_setter.PV
self.Battery = self.__input_setter.Battery
@property
def day(self):
return self.__input_setter.day
@day.setter
def day(self,d):
self.__input_setter.day=d
self._economics._isOptimizeYear=False
@property
def loadRow(self):
return self.__input_setter.loadRow
@loadRow.setter
def loadRow(self,l):
self.__input_setter.loadRow=l
self._economics._isOptimizeYear = False
@property
def timeDuration(self):
return self.__input_setter.timeDuration
@timeDuration.setter
def timeDuration(self,t):
self.__input_setter.timeDuration=t
self._economics._isOptimizeYear = False
@property
def avoidedNetworkFees(self):
return self._economics._calculateAvoidedNetworkFees()
@property
def CSC(self):
return self._economics._calculate_CSC()
@property
def NPV(self):
if not self._economics._isOptimizeYear:
warnings.warn("Optimization for year automatically calculated")
self._economics.optimizeYear()
return self._economics._calculateNPV()
@property
def IRR(self):
if not self._economics._isOptimizeYear:
warnings.warn("Optimization for year automatically calculated")
self._economics.optimizeYear()
return self._economics._calculateIRR()
@property
def pvGenList(self):
return self.__input_setter.pvGenList
@property
def priceList(self):
return self.__input_setter.priceList
@property
def loadList(self):
return self.__input_setter.loadList
@property
def opt(self):
if self.policy.isRTP or self.policy.isVFIT:
return self.__optimization.optimize()[0]
else:
return self.__optimization.optimize()
@property
def revenue(self):
if self.policy.isRTP or self.policy.isVFIT:
return self.__optimization.optimize()[1]
else:
self.__optimization.optimize()
return self.__optimization.revenue
@property
def selfConsumption(self):
if not self._economics._isOptimizeYear:
warnings.warn("Optimization for year automatically calculated")
self._economics.optimizeYear()
if self.__optimization._optimizationStatus == 1: # BAU
return 1 - sum(self.__optimization.energyToGridBAU) / self._economics.pvTotal
else:
return 1 - (self._economics.fedin + max(sum(self._economics.deltaBattGrid), 0)) / self._economics.pvTotal
@property
def autarky(self):
if not self._economics._isOptimizeYear:
warnings.warn("Optimization for year automatically calculated")
self._economics.optimizeYear()
if self.__optimization._optimizationStatus == 1: # BAU
return 1 - sum(self.__optimization.energyFromGridBAU) / self._economics.consumptionYear
else:
return 1 - (self._economics.fromGrid - min(sum(self._economics.deltaBattGrid), 0)) / self._economics.consumptionYear
@property
def MAI(self):
return self._economics._calculate_MAI()
@property
def optimizationState(self):
return self.__optimization.optimizationState
@property
def storageDispatch(self):
return np.array(self._economics.batteryTotal)
@property
def storageDispatchArbitrage(self):
return self.__optimization.energyStorageArbitrage
```
#### File: prosumerpolicy/prosumerPolicy/optimize.py
```python
import logging
from pandas import DataFrame
import pandas as pd
import numpy as np
from gurobipy import *
from inputSetter import _InputSetter
from battery import Battery
from policy import Policy
pd.set_option('display.expand_frame_repr', False)
class _Optimization:
def __init__(self, InputSetter, Policy):
self.__InputSetter = InputSetter
self.__Policy = Policy
self._optimizationStatus = None
def optimize(self, rtp=None, vfit=None, capacity=None):
if rtp is not None:
self.__Policy.isRTP = rtp
if vfit is not None:
self.__Policy.isVFIT = vfit
if capacity is not None:
self.__Policy.isFixedNetworkCharges = capacity
if not self.__Policy.isRTP and not self.__Policy.isVFIT:
return self.__BAU()
else:
return self.__optimizerDispatch()
def _optimize_arbitrage(self):
'''
Optimizes battery arbitrage against the hourly price curve, using the battery's
maximum charge/discharge capacity, size and efficiencies from the input setter.
Returns the hourly energy dispatch from the grid, energy dispatch to the grid and the battery state.
Complete foresight; linear optimization solved with the GUROBI solver.
'''
self.arbitrageState = True
model = Model("Arbitrage") # Create Gurobi Model
prices = self.__InputSetter.priceList
N = self.__InputSetter.timeDuration
model.setParam('OutputFlag', 0)
e_charge, e_discharge, e_storage = {}, {}, {} # Initialize variable dictionaries
# All Efficiencies are taken with reference to the battery. if battery discharges 1kwh, this means it actually gives
# etaDischarge*1kwh to the grid...if battery charges by 1 kwh, this means it took 1/etacharge from the grid/pv
for j in range(N): # creates decision variables with lower and upper bounds
e_charge[j] = model.addVar(vtype='C', lb=0,
ub=self.__InputSetter.Battery.maximumChargeDischargeCapacity / self.__InputSetter.Battery.chargeEfficiency,
name="ECharge[%s]" % j)
e_discharge[j] = model.addVar(vtype='C', lb=0,
ub=self.__InputSetter.Battery.maximumChargeDischargeCapacity * self.__InputSetter.Battery.dischargeEfficiency,
name="EDischarge[%s]" % j)
e_storage[j] = model.addVar(vtype='C', lb=0, ub=self.__InputSetter.Battery.size, name="EStorage[%s]" % j)
model.update()
# EDischarge and Echarge are directly to the grid
# sets objective function
model.setObjective(sum(e_discharge[j] * prices[j] - (e_charge[j]) * prices[j] for j in range(N)),
GRB.MAXIMIZE)
for i in range(N): # Adding constraints for length of N
if i == 0:
model.addConstr(
e_storage[i] - 0 * self.__InputSetter.Battery.size * (
1 - self.__InputSetter.Battery.selfDischarge) - e_charge[
i] * self.__InputSetter.Battery.chargeEfficiency +
e_discharge[
i] / self.__InputSetter.Battery.dischargeEfficiency == 0)
else:
model.addConstr(
e_storage[i] - e_storage[i - 1] * (1 - self.__InputSetter.Battery.selfDischarge) - e_charge[
i] * self.__InputSetter.Battery.chargeEfficiency + e_discharge[
i] / self.__InputSetter.Battery.dischargeEfficiency == 0)
model.update()
model.optimize()
efrom_grid, eto_grid, battery_state = [], [], []
# data wrangling to extract solution
for i in range(N):
variables = model.getVarByName("ECharge[%s]" % i)
efrom_grid.append(variables.x)
variables = model.getVarByName("EDischarge[%s]" % i)
eto_grid.append(variables.x)
variables = model.getVarByName("EStorage[%s]" % i)
battery_state.append(variables.x)
self.energyStorageArbitrage = np.array(battery_state)
self.energyToGridArbitrage = np.array(eto_grid)
self.energyFromGridArbitrage = np.array(efrom_grid)
ans = DataFrame({"Prices": prices,
'Battery State (kW)': battery_state,
'Energy from the grid (kW)': efrom_grid,
'Energy into the grid (kW)': eto_grid}, )
# ans = ans.round(2)
ans = ans[['Prices', 'Battery State (kW)', 'Energy from the grid (kW)', 'Energy into the grid (kW)']]
# self.numOfCyclesArb=self.batteryCountsArb()
return ans
# return ans, model.objVal # function returns results as DataFrame and the value of objective function
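# (Added sketch) The LP above maximizes sum_t p_t * (e_discharge_t - e_charge_t)
# subject to the storage balance
#     e_storage_t = (1 - selfDischarge) * e_storage_{t-1}
#                   + chargeEfficiency * e_charge_t
#                   - e_discharge_t / dischargeEfficiency,
# with 0 <= e_storage_t <= size and charge/discharge bounded by the
# converter rating. This restates the constraints coded above; the
# subscripted symbols are shorthand, not attributes of the class.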
def __BAU(self):
logging.info(
"Business as Usual: Day {}, Time Duration {}, PV Size {} ".format(self.__InputSetter.day,
self.__InputSetter.timeDuration,
self.__InputSetter.PV.size))
self._optimizationStatus = 1
if self.__Policy.isFixedNetworkCharges:
self.optimizationState = 'BAU Capacity'
else:
self.optimizationState = 'BAU Volumetric'
length = min(len(self.__InputSetter.pvGenList),
len(self.__InputSetter.loadList)) # in case the inputs are not the same length, use the smaller.
battState, energyFromGrid, energyToGrid, cases = [], [], [], [] # Create Return Variables
xi = self.__InputSetter.pvGenList[:length] - self.__InputSetter.loadList[:length]
battStateATBeg = []
# All Efficiencies are taken with reference to the battery. if battery discharges 1kwh, this means it actually gives
# etaDischarge*1kwh to the grid...if battery charges by 1 kwh, this means it took 1kwh/etacharge from the grid/pv
batteryState = self.__InputSetter.Battery.initialBatteryCapacity
for item in xi:
battAtBeg = batteryState * (1 - self.__InputSetter.Battery.selfDischarge)
batteryState *= (1 - self.__InputSetter.Battery.selfDischarge)
if item <= 0:
EtoGrid = 0
if abs(item) <= min(batteryState,
self.__InputSetter.Battery.maximumChargeDischargeCapacity) * self.__InputSetter.Battery.dischargeEfficiency:
batteryState = batteryState - (abs(item) / self.__InputSetter.Battery.dischargeEfficiency)
EfromGrid = 0
elif abs(item) > min(batteryState,
self.__InputSetter.Battery.maximumChargeDischargeCapacity) * self.__InputSetter.Battery.dischargeEfficiency:
EfromGrid = abs(item) - min(batteryState,
self.__InputSetter.Battery.maximumChargeDischargeCapacity) * self.__InputSetter.Battery.dischargeEfficiency
batteryState = batteryState - (
min(batteryState, self.__InputSetter.Battery.maximumChargeDischargeCapacity))
else:
EfromGrid = 0
if item >= min((self.__InputSetter.Battery.size - batteryState),
self.__InputSetter.Battery.maximumChargeDischargeCapacity) / self.__InputSetter.Battery.chargeEfficiency:
EtoGrid = item - min((self.__InputSetter.Battery.size - batteryState),
self.__InputSetter.Battery.maximumChargeDischargeCapacity) / self.__InputSetter.Battery.chargeEfficiency
batteryState = batteryState + min((self.__InputSetter.Battery.size - batteryState),
self.__InputSetter.Battery.maximumChargeDischargeCapacity)
else:
batteryState = batteryState + item * self.__InputSetter.Battery.chargeEfficiency
EtoGrid = 0
battState.append(batteryState)
energyFromGrid.append(EfromGrid)
energyToGrid.append(EtoGrid)
battStateATBeg.append(battAtBeg)
ans = DataFrame({'Price': self.__Policy.retailElectricity,
'load (kW)': self.__InputSetter.loadList,
'PV Generation': self.__InputSetter.pvGenList,
'Battery State (kW)': battState,
'Energy from the grid (kW)': energyFromGrid,
'Energy into the grid (kW)': energyToGrid
, 'Bat at beg': battStateATBeg},
)
# ans = ans.round(2)
ans = ans[
['Price', 'load (kW)', 'PV Generation', 'Battery State (kW)', 'Energy from the grid (kW)',
'Energy into the grid (kW)', 'Bat at beg']]
energyToGrid = np.array(energyToGrid)
energyFromGrid = np.array(energyFromGrid)
revenue = np.dot(self.__Policy.FIT, energyToGrid) - np.dot(self.__Policy.retailElectricity, energyFromGrid) + \
self.__Policy.FIT[0] * batteryState
self.energyToGridBAU = energyToGrid
self.energyFromGridBAU = energyFromGrid
self.energyStorage = battState
self.revenue = revenue
self.referenceRevenue = np.dot(-self.__Policy.retailElectricity, self.__InputSetter.loadList)
self.directUse = self.__InputSetter.pvGenList - energyToGrid
self.directUse = sum(self.directUse) - batteryState
return ans
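    # Editor's note (hedged worked example of the greedy rule above, using toy
    # numbers): with selfDischarge=0.01, dischargeEfficiency=0.9, a large power
    # cap and batteryState=10 kWh, a deficit item=-3 kWh first self-discharges
    # the state to 9.9 kWh, then draws 3/0.9 = 3.33 kWh from the battery,
    # leaving about 6.57 kWh with EfromGrid = 0 - i.e. self-consumption first,
    # grid import only once the battery cannot cover the deficit.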
def __optimizerDispatch(self):
self._optimizationStatus = 2
if self.__Policy.isFixedNetworkCharges:
capacity = ' Capacity'
else:
capacity = ' Volumetric'
if self.__Policy.isRTP and not self.__Policy.isVFIT:
self.optimizationState = 'RTP and Fixed FIT' + capacity
elif self.__Policy.isRTP and self.__Policy.isVFIT:
self.optimizationState = 'RTP and Variable FIT' + capacity
elif not self.__Policy.isRTP and self.__Policy.isVFIT:
self.optimizationState = 'Fixed Price and Variable FIT' + capacity
logging.info(
"Real Time Pricing Optimization: Day {}, Time Duration {}, PV Size {} ".format(self.__InputSetter.day,
self.__InputSetter.timeDuration,
self.__InputSetter.PV.size))
# Getting Parameters
wholesalepri = self.__InputSetter.priceList
pri = self.__Policy.retailElectricity
load = self.__InputSetter.loadList
PV = self.__InputSetter.pvGenList
FeedIn = self.__Policy.FIT
optimization_duration = self.__InputSetter.timeDuration
model = Model("RTP_withForesight") # Create Gurobi Model
model.setParam('OutputFlag', 0)
eStorage, ePVtoBatt, ePVtoLoad, ePVtoGrid, eBatttoGrid, eBatttoLoad, eGridtoLoad, eGridtoBatt = {}, {}, {}, {}, {}, {}, {}, {} # Intialize Constraint Dictionary
y = {}
        ''' All efficiencies are taken with reference to the battery: if the battery
        discharges 1 kWh, it actually delivers etaDischarge * 1 kWh to the grid; if it
        charges by 1 kWh, it draws 1 kWh / etaCharge from the grid/PV.
        '''
for j in range(optimization_duration): # creates variables along with lower and upper bounds
y[j] = model.addVar(vtype='b')
ePVtoBatt[j] = model.addVar(vtype='C', lb=0,
ub=self.__InputSetter.Battery.maximumChargeDischargeCapacity / self.__InputSetter.Battery.chargeEfficiency,
name="ePVtoBatt[%s]" % j)
eBatttoLoad[j] = model.addVar(vtype='C', lb=0,
ub=self.__InputSetter.Battery.maximumChargeDischargeCapacity * self.__InputSetter.Battery.dischargeEfficiency,
name="eBatttoLoad[%s]" % j)
ePVtoLoad[j] = model.addVar(vtype='C', lb=0, name="ePVtoLoad[%s]" % j)
ePVtoGrid[j] = model.addVar(vtype='C', lb=0, name="ePVtoGrid[%s]" % j)
eBatttoGrid[j] = model.addVar(vtype='C', lb=0,
ub=self.__InputSetter.Battery.maximumChargeDischargeCapacity * self.__InputSetter.Battery.dischargeEfficiency,
name="eBatttoGrid[%s]" % j)
eGridtoBatt[j] = model.addVar(vtype='C', lb=0,
ub=self.__InputSetter.Battery.maximumChargeDischargeCapacity / self.__InputSetter.Battery.chargeEfficiency,
name="eGridtoBatt[%s]" % j)
eStorage[j] = model.addVar(vtype='C', lb=0, ub=self.__InputSetter.Battery.size, name="eStorage[%s]" % j)
eGridtoLoad[j] = model.addVar(vtype='C', lb=0, name="eGridtoLoad[%s]" % j)
model.update()
model.setObjective(sum(
eBatttoGrid[j] * wholesalepri[j] - eGridtoBatt[j] * pri[j] + FeedIn[j] * ePVtoGrid[j] - pri[j] *
eGridtoLoad[j]
for j in range(optimization_duration)),
GRB.MAXIMIZE)
# set objective function maximizing revenue
for i in range(optimization_duration): # Adding energy constraints for length of optimization_duration
if i == 0: # intial value
model.addConstr(
eStorage[i] - self.__InputSetter.Battery.initialBatteryCapacity * (
1 - self.__InputSetter.Battery.selfDischarge) - ePVtoBatt[
i] * self.__InputSetter.Battery.chargeEfficiency
- eGridtoBatt[i] * self.__InputSetter.Battery.chargeEfficiency + eBatttoLoad[
i] / self.__InputSetter.Battery.dischargeEfficiency + eBatttoGrid[
i] / self.__InputSetter.Battery.dischargeEfficiency == 0)
else:
model.addConstr(
eStorage[i] - eStorage[i - 1] * (1 - self.__InputSetter.Battery.selfDischarge) - ePVtoBatt[
i] * self.__InputSetter.Battery.chargeEfficiency -
eGridtoBatt[i] * self.__InputSetter.Battery.chargeEfficiency + eBatttoLoad[
i] / self.__InputSetter.Battery.dischargeEfficiency + eBatttoGrid[
i] / self.__InputSetter.Battery.dischargeEfficiency == 0)
model.addConstr(ePVtoLoad[i] + ePVtoBatt[i] + ePVtoGrid[i] == PV[i])
model.addConstr(eGridtoLoad[i] + eBatttoLoad[i] + ePVtoLoad[i] == load[i])
model.addConstr(eBatttoGrid[i] <= self.__InputSetter.Battery.maximumChargeDischargeCapacity * y[
i] * self.__InputSetter.Battery.dischargeEfficiency)
model.addConstr(eGridtoBatt[i] <= self.__InputSetter.Battery.maximumChargeDischargeCapacity * (
1 - y[i]) / self.__InputSetter.Battery.chargeEfficiency)
model.update()
model.optimize()
if model.status == GRB.Status.INF_OR_UNBD:
# Turn presolve off to determine whether model is infeasible
# or unbounded
model.setParam(GRB.Param.Presolve, 0)
model.optimize()
print(model.status)
# #extracting optimization results. Pretty ugly
PVtoGrid, PVtoLoad, PVtoBatt, BatttoLoad, BatttoGrid, BatteryState, GridtoLoad, GridtoBatt = [], [], [], [], [], [], [], []
for i in range(optimization_duration):
vars = model.getVarByName("ePVtoBatt[%s]" % i)
PVtoBatt.append(vars.x)
vars = model.getVarByName("eBatttoLoad[%s]" % i)
BatttoLoad.append(vars.x)
vars = model.getVarByName("ePVtoLoad[%s]" % i)
PVtoLoad.append(vars.x)
vars = model.getVarByName("ePVtoGrid[%s]" % i)
PVtoGrid.append(vars.x)
vars = model.getVarByName("eBatttoGrid[%s]" % i)
BatttoGrid.append(vars.x)
vars = model.getVarByName("eGridtoBatt[%s]" % i)
GridtoBatt.append(vars.x)
vars = model.getVarByName("eStorage[%s]" % i)
BatteryState.append(vars.x)
vars = model.getVarByName("eGridtoLoad[%s]" % i)
GridtoLoad.append(vars.x)
ans = DataFrame({"Prices": pri,
"load": load,
"PV": PV,
"Feed in": FeedIn,
'Battery State (kW)': BatteryState,
'Energy PV to Batt (kW)': PVtoBatt,
'Energy PV to Load (kW)': PVtoLoad,
'Energy PV to Grid (kW)': PVtoGrid,
'Energy Battery to Grid (kW)': BatttoGrid,
'Energy Battery to Load (kW)': BatttoLoad,
'Energy Grid to Load (kW)': GridtoLoad,
'Energy Grid to Batt (kW)': GridtoBatt
}, )
ans = ans[['Prices', 'load', 'PV', 'Feed in', 'Battery State (kW)', 'Energy PV to Batt (kW)',
'Energy PV to Load (kW)',
'Energy PV to Grid (kW)', 'Energy Battery to Grid (kW)', 'Energy Battery to Load (kW)',
'Energy Grid to Load (kW)', 'Energy Grid to Batt (kW)']]
self.energyStorage = BatteryState # used for SFI
self.revenue = (np.dot(self.__Policy.FIT, PVtoGrid) + np.dot(self.__InputSetter.priceList, BatttoGrid) - np.dot(
self.__Policy.retailElectricity, GridtoLoad) -
np.dot(self.__Policy.retailElectricity, GridtoBatt))
self.sumEnergyFromGrid = np.array(GridtoLoad) + np.array(GridtoBatt) # used for avoided network costs
self.sumEnergyToGrid = np.array(PVtoGrid) + np.array(BatttoGrid)
self.deltaBatt = sum(np.array(BatttoGrid) - np.array(GridtoBatt))
self.PVtoGrid = sum(PVtoGrid)
self.GridtoLoad = sum(GridtoLoad)
self.referenceRevenue = np.dot(-self.__Policy.retailElectricity, self.__InputSetter.loadList)
# ans = ans.round(3)
return ans, model.objVal # function returns results as DataFrame and the value of objective function
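    # Editor's note (hedged sketch, not part of the original file): the binary
    # y[j] above makes charging and discharging mutually exclusive, since
    # eBatttoGrid is bounded by cap * y and eGridtoBatt by cap * (1 - y).  A
    # self-contained toy illustration of that trick (toy numbers, not the
    # model's real data):
    #
    #   from gurobipy import Model, GRB
    #   m = Model("exclusivity_toy")
    #   m.setParam('OutputFlag', 0)
    #   cap = 5.0
    #   y = m.addVar(vtype=GRB.BINARY)
    #   charge = m.addVar(lb=0, vtype=GRB.CONTINUOUS)
    #   discharge = m.addVar(lb=0, vtype=GRB.CONTINUOUS)
    #   m.addConstr(discharge <= cap * y)
    #   m.addConstr(charge <= cap * (1 - y))
    #   m.setObjective(discharge - 0.5 * charge, GRB.MAXIMIZE)
    #   m.optimize()
    #   # at the optimum, charge.x * discharge.x == 0 always holds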
``` |
{
"source": "0-krish/rock-paper-scissors-exercise",
"score": 4
} |
#### File: 0-krish/rock-paper-scissors-exercise/game.py
```python
def determine_winner(user_decision, computer_decision):
if user_decision == computer_decision:
winning_player = None
elif user_decision == "rock":
if computer_decision == "paper":
winning_player = computer_decision
if computer_decision == "scissors":
winning_player = user_decision
elif user_decision == "paper":
if computer_decision == "rock":
winning_player = user_decision
if computer_decision == "scissors":
winning_player = computer_decision
elif user_decision == "scissors":
if computer_decision == "paper":
winning_player = user_decision
if computer_decision == "rock":
winning_player = computer_decision
return winning_player
if __name__ == "__main__":
# Importing the Environment Variable
import os
player_name = os.getenv("PLAYER_NAME", default="Player One")
# Welcome player to game
print("----------")
print("Welcome to the Rock-Paper-Scissors Game, " + player_name + "! Let's get started.")
print("----------")
# Processing User Inputs
user_input = input("Play your hand: rock, paper, or scissors? ")
# Validating User Inputs
validated_user_input = user_input.lower()
if ((validated_user_input != "rock") and
(validated_user_input != "paper") and
(validated_user_input != "scissors")):
print("Sorry! Invalid input detected.")
print("This game accepts only three inputs:",
"rock, paper, and scissors")
print("This game will now quit.")
exit()
else:
print("You chose:", validated_user_input)
# Simulating Computer Selection
from random import choice
valid_choices = ["rock", "paper", "scissors"]
computer_choice = choice(valid_choices)
print("Computer chose:", computer_choice)
# Determining the Winner
winner = determine_winner(validated_user_input, computer_choice)
# Displaying Results
print("----------")
if winner is None:
print("It's a tie!")
elif winner == computer_choice:
print("Oops! Computer won.")
elif winner == validated_user_input:
if player_name == "Player One":
print("Yay! You won.")
else:
print("Yay!", player_name, "won.")
print("----------")
print("Thanks for playing! Play again soon.")
```
#### File: 0-krish/rock-paper-scissors-exercise/game_test.py
```python
from game import determine_winner
def test_determination_of_the_winner():
assert determine_winner("rock", "rock") == None # represents a tie
assert determine_winner("rock", "paper") == "paper"
assert determine_winner("rock", "scissors") == "rock"
assert determine_winner("paper", "rock") == "paper"
assert determine_winner("paper", "paper") == None # represents a tie
assert determine_winner("paper", "scissors") == "scissors"
assert determine_winner("scissors", "rock") == "rock"
assert determine_winner("scissors", "paper") == "scissors"
assert determine_winner("scissors", "scissors") == None # represents a tie
``` |
{
"source": "0k/sact.epoch",
"score": 3
} |
#### File: sact/epoch/interfaces.py
```python
from zope.interface import Interface, Attribute
class ITimeZone(Interface):
"""Standard TimeZone interface as defined in datetime module"""
def utcoffset(dt):
"""Return offset of local time from UTC, in minutes"""
def dst(dt):
"""Return the daylight saving time (DST) adjustment, in minutes"""
def tzname(dt):
"""Return the time zone name corresponding to the datetime object dt"""
class ITime(Interface):
"""Factory to make time object.
minimal interface
"""
def now():
"""Return a time object that represent the current time"""
class IClock(Interface):
"""Factory to make time object.
minimal interface
"""
def time():
"""Return a time object that represent the current time"""
class IManageableClock(IClock):
    is_running = Attribute(
        u"Whether the clock is currently running (False while frozen)")
def set(date):
"""Set the result of now() command to date"""
def stop():
"""Freeze the return result of now() command"""
def start():
"""Unfreeze the return result of now() command"""
def wait(timelapse=None, days=0, hours=0, minutes=0, seconds=0):
"""Shortcut to alter_now relative
Should accept negative value also.
"""
``` |
{
"source": "0L-Analytics/ol-analytical-research",
"score": 3
} |
#### File: 0L-Analytics/ol-analytical-research/test_animate.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
def create_video(n):
global X
X = np.random.binomial(1, 0.3, size = (n, n))
fig = plt.figure()
im = plt.imshow(X, cmap = plt.cm.gray)
def animate(t):
global X
X = np.roll(X, +1, axis = 0)
im.set_array(X)
return im,
anim = FuncAnimation(
fig,
animate,
frames = 100,
interval = 1000/30,
blit = True
)
plt.show()
return anim
anim = create_video(10)
from matplotlib import animation
import networkx as nx  # required by nx.DiGraph()/nx.draw() below but missing in the original
import random
plt.rcParams["figure.figsize"] = [7.50, 3.50]
plt.rcParams["figure.autolayout"] = True
fig = plt.figure()
G = nx.DiGraph()
G.add_nodes_from([0, 1, 2, 3, 4])
nx.draw(G, with_labels=True)
def animate(frame):
fig.clear()
num1 = random.randint(0, 4)
num2 = random.randint(0, 4)
G.add_edges_from([(num1, num2)])
nx.draw(G, with_labels=True)
ani = animation.FuncAnimation(fig, animate, frames=range(0,6), interval=10, repeat=True)
plt.show()
```
#### File: 0L-Analytics/ol-analytical-research/utils.py
```python
import requests
import csv
def load_addresses_list(path) -> list:
"""
Read in csv of address keys
Output a list, ready for querying API of explorer
"""
all_addresses = []
with open(path) as f:
reader = csv.reader(f)
for row in reader:
all_addresses.append(row[0])
return all_addresses
def get_permission_tree(account_list):
"""
Input a list of address keys
Queries API for permission tree of validator
Returns a dictionary for that address list
"""
web_address = "https://0l.interblockcha.in:444/permission-tree/validator/"
genesis_dict = {}
for account in account_list:
# print(web_address+account)
response = requests.get(web_address+account)
genesis_dict[str(account)] = response.json()
# print(response.json())
return genesis_dict
def get_epoch():
"""
get current epoch
"""
web_address = "https://0l.interblockcha.in:444/epochs"
response = requests.get(web_address)
epochs = response.json()
return epochs
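# Editor's note: a hedged usage sketch, not part of the original file;
# "addresses.csv" is a hypothetical input path, the endpoints are the ones
# hard-coded above.
if __name__ == "__main__":
    addresses = load_addresses_list("addresses.csv")
    trees = get_permission_tree(addresses[:5])  # query a small sample only
    print(len(trees), "permission trees fetched")
    print("current epochs:", get_epoch())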
``` |
{
"source": "0lddriv3r/djangobbs_nmb",
"score": 2
} |
#### File: djangobbs_nmb/devproject/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(models.Model):
    '''User model class'''
avator = models.URLField('头像')
password = models.CharField('密码', max_length=255)
ipaddr = models.GenericIPAddressField('地址', null=True)
email = models.CharField('邮箱', max_length=255, unique=True)
username = models.CharField('昵称', max_length=128, unique=True)
createtime = models.DateTimeField(auto_now_add=True, verbose_name='创建')
lastpublishtime = models.DateTimeField(auto_now=True, verbose_name='最后')
class Meta:
ordering = ['createtime']
verbose_name = '用户'
verbose_name_plural = verbose_name
def __str__(self):
return self.username
class Category(models.Model):
    '''Category model class'''
name = models.CharField('类别', max_length=20)
index = models.IntegerField('排序', default=0)
nickname = models.CharField('匿名', max_length=20)
createtime = models.DateTimeField(auto_now_add=True, verbose_name='创建')
class Meta:
ordering = ['createtime']
verbose_name = '类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Thread(models.Model):
    '''Thread model class'''
body = models.TextField('正文', default='')
commentnum = models.IntegerField('评论', default=0)
title = models.CharField('标题', max_length=100, null=True)
attachment = models.TextField('附件', blank=True, null=True)
updatetime = models.DateTimeField(auto_now=True, verbose_name='更新')
createtime = models.DateTimeField(auto_now_add=True, verbose_name='创建')
musicurl = models.CharField('音乐', max_length=300, blank=True, null=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作者')
category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name='类别')
class Meta:
ordering = ['createtime']
verbose_name = '线程'
verbose_name_plural = verbose_name
def __str__(self):
return self.title
class Comment(models.Model):
    '''Comment model class'''
body = models.TextField('正文', default='')
floor = models.IntegerField('楼层', default=1)
title = models.CharField('标题', max_length=100, null=True)
attachment = models.TextField('附件', blank=True, null=True)
createtime = models.DateTimeField(auto_now_add=True, verbose_name='时间')
musicurl = models.CharField('音乐', max_length=300, blank=True, null=True)
author = models.ForeignKey(User, verbose_name='作者', on_delete=models.CASCADE, )
thread = models.ForeignKey(Thread, verbose_name='线程', on_delete=models.CASCADE, )
class Meta:
ordering = ['createtime']
verbose_name = '评论'
verbose_name_plural = verbose_name
def __str__(self):
return self.body
class Black(models.Model):
    '''Blacklist model class'''
ipaddr = models.GenericIPAddressField('地址')
createtime = models.DateTimeField(auto_now_add=True, verbose_name='创建')
class Meta:
ordering = ['createtime']
verbose_name = '黑单'
verbose_name_plural = verbose_name
def __str__(self):
return self.ipaddr
```
#### File: djangobbs_nmb/devproject/views.py
```python
import os
import re
import time
import glob
import hashlib
import requests
import cv2 as cv
from .forms import *
from .models import *
import urllib.request
from PIL import Image
from django.shortcuts import render
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.views.decorators.cache import cache_page
from django.http import HttpResponseRedirect, JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
pagesize = 10
def index(request):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
threads = Thread.objects.all().order_by('-updatetime')
paginator = Paginator(threads, pagesize, )
threads = paginator.get_page(1)
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': 'comprehensive'})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center,
'categoryactive': 'comprehensive'})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def page(request, categorynick, pageindex):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
if categorynick == 'comprehensive':
threads = Thread.objects.all().order_by('-updatetime')
else:
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).order_by('-updatetime')
paginator = Paginator(threads, pagesize, )
try:
threads = paginator.get_page(pageindex)
except PageNotAnInteger:
threads = paginator.page(1)
except EmptyPage:
threads = paginator.page(paginator.num_pages)
categorys = Category.objects.all().order_by('index')
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def detail(request, categorynick, threadid):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
nexttopicid = 0
thread = Thread.objects.get(id=threadid)
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).all()
if threads:
for threadtemp in threads:
if threadtemp.id > threadid:
nexttopicid = threadtemp.id
break
comments = Comment.objects.filter(thread=thread).order_by('createtime')
paginator = Paginator(comments, pagesize, )
comments = paginator.page(1)
if not authorid:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
else:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def detailnext(request, categorynick, threadid):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
nexttopicid = 0
thread = Thread.objects.get(id=threadid)
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).all()
if threads:
for threadtemp in threads:
if threadtemp.id > threadid:
nexttopicid = threadtemp.id
break
comments = Comment.objects.filter(thread=thread).order_by('createtime')
paginator = Paginator(comments, pagesize, )
comments = paginator.page(1)
if not authorid:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
else:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments, 'nexttopicid': nexttopicid})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def detailpage(request, categorynick, threadid, pageindex):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
categorys = Category.objects.all().order_by('index')
thread = Thread.objects.get(id=threadid)
comments = Comment.objects.filter(thread=thread).order_by('createtime')
paginator = Paginator(comments, pagesize, )
try:
comments = paginator.get_page(pageindex)
except PageNotAnInteger:
comments = paginator.page(1)
except EmptyPage:
comments = paginator.page(paginator.num_pages)
if not authorid:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments})
else:
rend = render(request, 'detail.html',
{'thread': thread, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick,
'comments': comments})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def category(request, categorynick):
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
errmsg_sider = request.session.get('errmsg_sider', None)
errmsg_center = request.session.get('errmsg_center', None)
if categorynick == 'comprehensive':
threads = Thread.objects.all().order_by('-updatetime')
else:
threads = Thread.objects.filter(category=Category.objects.get(nickname=categorynick)).order_by('-updatetime')
paginator = Paginator(threads, pagesize, )
threads = paginator.page(1)
categorys = Category.objects.all().order_by('index')
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'errmsg_sider': errmsg_sider, 'errmsg_center': errmsg_center, 'categoryactive': categorynick})
request.session['errmsg_sider'] = None
request.session['errmsg_center'] = None
return rend
def login(request):
form = UserForm(request.POST)
if form.is_valid():
data = form.cleaned_data
username = data['username']
password = data['password']
ipaddr = request.META['REMOTE_ADDR']
flag = True
try:
Black.objects.get(ipaddr=ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', username):
request.session['errmsg_sider'] = '不可以包含非法字符!'
return HttpResponseRedirect('/')
try:
userobj = User.objects.get(username=str(username))
except User.DoesNotExist:
request.session['errmsg_sider'] = '用户名或密码错误!'
return HttpResponseRedirect('/')
if userobj.password == password:
request.session['authorid'] = userobj.id
request.session['username'] = userobj.username
else:
request.session['errmsg_sider'] = '用户名或密码错误!'
return HttpResponseRedirect('/')
def register(request):
form = UserForm(request.POST)
if form.is_valid():
data = form.cleaned_data
email = data['email'].strip()
username = data['username'].strip()
password = data['password'].strip()
ipaddr = request.META['REMOTE_ADDR']
flag = True
try:
Black.objects.get(ipaddr=ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
        if len(username) < 4 or len(username) > 14:
            request.session['errmsg_sider'] = '用户名长度只能在4到14个字符之间!'
            return HttpResponseRedirect('/')
        if len(password) < 4 or len(password) > 14:
            request.session['errmsg_sider'] = '密码长度只能在4到14个字符之间!'
            return HttpResponseRedirect('/')
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', username):
request.session['errmsg_sider'] = '不可以包含非法字符!'
return HttpResponseRedirect('/')
try:
validate_email(email)
except ValidationError:
request.session['errmsg_sider'] = '邮箱格式错误!'
return HttpResponseRedirect('/')
m = hashlib.md5()
m.update(email.encode("utf-8"))
avator = 'http://www.gravatar.com/avatar/' + m.hexdigest() + '?s=50'
flag = 0
try:
User.objects.get(username=str(username))
except User.DoesNotExist:
flag += 1
try:
User.objects.get(email=email)
except User.DoesNotExist:
flag += 1
if flag == 2:
userobj = User.objects.create(username=str(username), password=str(password), email=email, avator=avator,
ipaddr=ipaddr)
request.session['authorid'] = userobj.id
request.session['username'] = userobj.username
else:
request.session['errmsg_sider'] = '用户名或邮箱已存在!'
return HttpResponseRedirect('/')
request.session['errmsg_sider'] = '填写的数据有误!'
return HttpResponseRedirect('/')
def logout(request):
if not request.session.get('username', None):
request.session['errmsg_sider'] = '未登录!'
return HttpResponseRedirect('/')
request.session.flush()
return HttpResponseRedirect('/')
def search(request):
form = SearchForm(request.POST)
if form.is_valid():
data = form.cleaned_data
keyword = data['keyword']
if not re.search(u'^[_a-zA-Z0-9\u4e00-\u9fa5]+$', keyword):
request.session['errmsg_keyword'] = '不可以包含非法字符!'
return HttpResponseRedirect('/')
threads = Thread.objects.filter(title__icontains=keyword)
if len(threads) > 10:
threads = threads[:10]
authorid = request.session.get("authorid", None)
username = request.session.get("username", None)
categorys = Category.objects.all().order_by('index')
if not authorid:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'authorid': None, 'username': None,
'categoryactive': 'comprehensive'})
else:
rend = render(request, 'index.html',
{'threads': threads, 'categorys': categorys, 'username': username, 'authorid': authorid,
'categoryactive': 'comprehensive'})
return rend
request.session['errmsg_keyword'] = '输入关键词错误!'
return HttpResponseRedirect('/')
def searchphoto(request):
form = SearchPhotoForm(request.POST, request.FILES)
if form.is_valid():
imgkey = form.cleaned_data['imgkey']
ext = os.path.splitext(imgkey.name)[1]
if ext != '.jpg' and ext != '.png':
return JsonResponse({'res': '图片格式不支持!'})
if imgkey.size > 6291456:
return JsonResponse({'res': '图片大小不能超过6兆!'})
flag = False
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ext = os.path.splitext(imgkey.name)[1]
dir = '/static/chimg/'
filename = str(int(time.time()))
filepath = dir + filename + ext
f = open(basepath + filepath, 'wb')
for line in imgkey.chunks():
f.write(line)
f.close()
if imgkey.size > 1572864:
if ext == '.png':
png2jpg(basepath + filepath)
realpath = dir + filename + '.jpg'
for infile in glob.glob(basepath + realpath):
im = Image.open(infile)
size = im.size
im.thumbnail(size, Image.ANTIALIAS)
im.save(basepath + realpath, 'jpeg')
flag = True
if flag:
path = realpath
else:
path = filepath
filename, ext2 = os.path.splitext(path)
files = None
if ext2 == '.jpg':
files = {'file': (filename + ext2, open(basepath + path, 'rb'), 'image/jpeg', {})}
else:
files = {'file': (filename + ext2, open(basepath + path, 'rb'), 'image/png', {})}
res = requests.post(url='http://saucenao.com/search.php', files=files)
obj = re.search(r'"https://danbooru.donmai.us/(.*?)"', res.text)
if obj:
return JsonResponse({'res': obj.group(0).replace(r'"', '')})
else:
return JsonResponse({'res': '没有发现这张图片呢~'})
return JsonResponse({'res': '上传出现错误!'})
def publish(request):
username = request.session.get('username', None)
if not username:
request.session['errmsg_center'] = '未登录!'
return HttpResponseRedirect('/')
flag = True
try:
userobj = User.objects.get(username=str(username))
Black.objects.get(ipaddr=userobj.ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
category = None
form = ThreadForm(request.POST, request.FILES)
if form.is_valid():
data = form.cleaned_data
body = data['body']
title = data['title']
authorid = data['authorid']
attachment = form.cleaned_data['attachment']
category = request.POST.get('category')
musicurl = data['musicurl']
if len(title) >= 50:
request.session['errmsg_center'] = '标题长度不能大于50个字符!'
return HttpResponseRedirect('/category/' + category)
if len(body) >= 10000:
request.session['errmsg_center'] = '内容长度不能大于10000个字符!'
return HttpResponseRedirect('/category/' + category)
if musicurl:
ext = os.path.splitext(musicurl)[1]
if ext != '.mp3':
request.session['errmsg_center'] = 'MP3链接格式错误!'
return HttpResponseRedirect('/category/' + category)
try:
with urllib.request.urlopen(musicurl) as file:
flag = False
except urllib.request.URLError:
flag = True
if flag:
request.session['errmsg_center'] = 'MP3链接可能失效了!'
return HttpResponseRedirect('/category/' + category)
if attachment:
ext = os.path.splitext(attachment.name)[1]
if ext != '.jpg' and ext != '.png':
request.session['errmsg_center'] = '图片格式不支持!'
return HttpResponseRedirect('/category/' + category)
if attachment.size > 6291456:
request.session['errmsg_center'] = '图片大小不能超过6兆!'
return HttpResponseRedirect('/category/' + category)
if not title:
title = '无标题'
path = None
if attachment:
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ext = os.path.splitext(attachment.name)[1]
dir = '/static/img/'
filename = str(int(time.time()))
filepath = dir + filename + ext
f = open(basepath + filepath, 'wb')
for line in attachment.chunks():
f.write(line)
f.close()
if attachment.size > 1572864:
if ext == '.png':
png2jpg(basepath + filepath)
realpath = dir + filename + '.jpg'
for infile in glob.glob(basepath + realpath):
im = Image.open(infile)
size = im.size
im.thumbnail(size, Image.ANTIALIAS)
im.save(basepath + realpath, 'jpeg')
flag = True
if flag:
path = realpath
else:
path = filepath
author = User.objects.get(id=authorid)
category = Category.objects.get(nickname=category)
Thread.objects.create(title=title, body=body, author=author, attachment=path, category=category,
musicurl=musicurl)
return HttpResponseRedirect('/category/' + category.nickname)
    request.session['errmsg_center'] = '信息输入错误'
    # category may still be None here (invalid form), so redirect home instead of dereferencing it
    return HttpResponseRedirect('/')
def comment(request):
username = request.session.get('username', None)
if not username:
request.session['errmsg_center'] = '未登录!'
return HttpResponseRedirect('/')
flag = True
try:
userobj = User.objects.get(username=str(username))
Black.objects.get(ipaddr=userobj.ipaddr)
except Black.DoesNotExist:
flag = False
if flag:
return render(request, 'black.html')
thread = None
category = None
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
data = form.cleaned_data
body = data['body']
title = data['title']
threadid = data['threadid']
authorid = data['authorid']
attachment = form.cleaned_data['attachment']
categoryactive = data['categoryactive']
musicurl = data['musicurl']
if len(title) >= 50:
request.session['errmsg_center'] = '标题长度不能大于50个字符!'
return HttpResponseRedirect('/category/' + categoryactive + '/thread/' + str(threadid))
if len(body) >= 10000:
request.session['errmsg_center'] = '内容长度不能大于10000个字符!'
return HttpResponseRedirect('/category/' + categoryactive + '/thread/' + str(threadid))
if musicurl:
ext = os.path.splitext(musicurl)[1]
if ext != '.mp3':
request.session['errmsg_center'] = 'MP3链接格式错误!'
return HttpResponseRedirect('/category/' + categoryactive + '/thread/' + str(threadid))
flag = False
try:
with urllib.request.urlopen(musicurl) as file:
flag = False
except urllib.request.URLError:
flag = True
if flag:
request.session['errmsg_center'] = 'MP3链接可能失效了!'
return HttpResponseRedirect('/category/' + categoryactive + '/thread/' + str(threadid))
if attachment:
ext = os.path.splitext(attachment.name)[1]
if ext != '.jpg' and ext != '.png':
request.session['errmsg_center'] = '图片格式不支持!'
return HttpResponseRedirect('/category/' + categoryactive + '/thread/' + str(threadid))
if attachment.size > 6291456:
request.session['errmsg_center'] = '图片大小不能超过6兆!'
return HttpResponseRedirect('/category/' + categoryactive + '/thread/' + str(threadid))
if not title:
title = '无标题'
path = None
if attachment:
basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ext = os.path.splitext(attachment.name)[1]
dir = '/static/img/'
filename = str(int(time.time()))
filepath = dir + filename + ext
f = open(basepath + filepath, 'wb')
for line in attachment.chunks():
f.write(line)
f.close()
if attachment.size > 1572864:
if ext == '.png':
png2jpg(basepath + filepath)
realpath = dir + filename + '.jpg'
for infile in glob.glob(basepath + realpath):
im = Image.open(infile)
size = im.size
im.thumbnail(size, Image.ANTIALIAS)
im.save(basepath + realpath, 'jpeg')
flag = True
if flag:
path = realpath
else:
path = filepath
author = User.objects.get(id=authorid)
thread = Thread.objects.get(id=threadid)
thread.commentnum = thread.commentnum + 1
thread.save()
category = Category.objects.get(nickname=categoryactive)
Comment.objects.create(title=title, body=body, author=author, attachment=path, thread=thread,
musicurl=musicurl, floor=thread.commentnum)
return HttpResponseRedirect('/category/' + category.nickname + '/thread/' + str(thread.id))
    request.session['errmsg_center'] = '信息输入错误'
    # thread/category may still be None here (invalid form), so redirect home instead of dereferencing them
    return HttpResponseRedirect('/')
def png2jpg(path):
img = cv.imread(path, 0)
w, h = img.shape[::-1]
infile = path
outfile = os.path.splitext(infile)[0] + ".jpg"
img = Image.open(infile)
try:
if len(img.split()) == 4:
r, g, b, a = img.split()
img = Image.merge("RGB", (r, g, b))
img.convert('RGB').save(outfile, quality=100)
os.remove(path)
else:
img.convert('RGB').save(outfile, quality=100)
os.remove(path)
return outfile
except Exception as e:
pass
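# Editor's note (hedged usage sketch, hypothetical path): png2jpg() converts a
# PNG to JPEG in place and deletes the source file, so callers must use the
# returned path afterwards:
#
#   jpg_path = png2jpg('/tmp/upload.png')  # -> '/tmp/upload.jpg' on success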
``` |
{
"source": "0ldm0s/base_mio",
"score": 3
} |
#### File: util/crontabs/crontabs.py
```python
import datetime
import functools
import time
import traceback
import warnings
import daiquiri
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from fleming import fleming
from .processes import ProcessMonitor
import logging
daiquiri.setup(level=logging.INFO)
class Cron:
    @classmethod
    def get_logger(cls, name='crontab_log'):
        logger = daiquiri.getLogger(name)
        return logger
def __init__(self):
"""
A Cron object runs many "tabs" of asynchronous tasks.
"""
self.monitor = ProcessMonitor()
self._tab_list = []
def schedule(self, *tabs):
self._tab_list = list(tabs)
return self
def go(self, max_seconds=None):
for tab in self._tab_list:
target = tab._get_target()
self.monitor.add_subprocess(tab._name, target, tab._robust, tab._until)
try:
self.monitor.loop(max_seconds=max_seconds)
except KeyboardInterrupt: # pragma: no cover
pass
class Tab:
_SILENCE_LOGGER = False
def __init__(self, name, robust=True, verbose=True, memory_friendly=False):
"""
Schedules a Tab entry in the cron runner
:param name: Every tab must have a string name
        :param robust: A robust tab will be restarted if an error occurs.
                       A non-robust tab will not be restarted, but all other
                       non-errored tabs should continue running
        :param verbose: Set the verbosity of log messages.
        :param memory_friendly: If set to True, each iteration will be run in a separate process
"""
if not isinstance(name, str):
raise ValueError('Name argument must be a string')
self._name = name
self._robust = robust
self._verbose = verbose
self._starting = None
self._every_kwargs = None
self._func = None
self._func_args = None
self._func_kwargs = None
self._exclude_func = lambda t: False
self._during_func = lambda t: True
self._memory_friendly = memory_friendly
self._until = None
self._lasting_delta = None
def _log(self, msg):
if self._verbose and not self._SILENCE_LOGGER: # pragma: no cover
logger = daiquiri.getLogger(self._name)
logger.info(msg)
def _process_date(self, datetime_or_str):
if isinstance(datetime_or_str, str):
return parse(datetime_or_str)
elif isinstance(datetime_or_str, datetime.datetime):
return datetime_or_str
else:
raise ValueError('.starting() and until() method can only take strings or datetime objects')
def starting(self, datetime_or_str):
"""
Set the starting time for the cron job. If not specified, the starting time will always
be the beginning of the interval that is current when the cron is started.
:param datetime_or_str: a datetime object or a string that dateutil.parser can understand
:return: self
"""
self._starting = self._process_date(datetime_or_str)
return self
def starting_at(self, datetime_or_str):
        warnings.warn('.starting_at() is deprecated. Use .starting() instead')
return self.starting(datetime_or_str)
def until(self, datetime_or_str):
"""
Run the tab until the specified time is reached. At that point, deactivate the expired
tab so that it no longer runs.
:param datetime_or_str: a datetime object or a string that dateutil.parser can understand
:return: self
"""
self._until = self._process_date(datetime_or_str)
return self
def lasting(self, **kwargs):
"""
Run the tab so that it lasts this long. The argument structure is exactly the same
as that of the .every() method
"""
relative_delta_kwargs = {k if k.endswith('s') else k + 's': v for (k, v) in kwargs.items()}
self._lasting_delta = relativedelta(**relative_delta_kwargs)
return self
def excluding(self, func, name=''):
"""
Pass a function that takes a timestamp for when the function should execute.
It inhibits running when the function returns True.
Optionally, add a name to the exclusion. This name will act as an explanation
in the log for why the exclusion was made.
"""
self._exclude_func = func
self._exclude_name = name
return self
def during(self, func, name=''):
"""
Pass a function that takes a timestamp for when the function should execute.
It will only run if the function returns true.
Optionally, add a name. This name will act as an explanation in the log for why
any exclusions were made outside the "during" specification.
"""
self._during_func = func
self._during_name = name
return self
def every(self, **kwargs):
"""
Specify the interval at which you want the job run. Takes exactly one keyword argument.
        That argument's name must be one of [second, minute, hour, day, week, month, year] or
their plural equivalents.
:param kwargs: Exactly one keyword argument
:return: self
"""
if len(kwargs) != 1:
raise ValueError('.every() method must be called with exactly one keyword argument')
self._every_kwargs = self._clean_kwargs(kwargs)
return self
    def run(self, func, *func_args, **func_kwargs):
        """
        Specify the function to run at the scheduled times
        :param func: a callable
        :param func_args: the args to the callable
        :param func_kwargs: the kwargs to the callable
        :return:
        """
        self._func = func
        self._func_args = func_args
        self._func_kwargs = func_kwargs
return self
def _clean_kwargs(self, kwargs):
allowed_key_map = {
'seconds': 'second',
'second': 'second',
'minutes': 'minute',
'minute': 'minute',
'hours': 'hour',
'hour': 'hour',
'days': 'day',
'day': 'day',
'weeks': 'week',
'week': 'week',
'months': 'month',
'month': 'month',
'years': 'year',
'year': 'year',
}
kwargs = {k if k.endswith('s') else k + 's': v for (k, v) in kwargs.items()}
out_kwargs = {}
for key in kwargs.keys():
out_key = allowed_key_map.get(key.lower())
if out_key is None:
raise ValueError('Allowed time names are {}'.format(sorted(allowed_key_map.keys())))
out_kwargs[out_key] = kwargs[key]
return out_kwargs
def _is_uninhibited(self, time_stamp):
can_run = True
msg = 'inhibited: '
if self._exclude_func(time_stamp):
if self._exclude_name:
msg += self._exclude_name
can_run = False
if can_run and not self._during_func(time_stamp):
if self._during_name:
msg += self._during_name
can_run = False
if not can_run:
self._log(msg)
return can_run
def _loop(self, max_iter=None):
if not self._SILENCE_LOGGER: # pragma: no cover don't want to clutter tests
logger = daiquiri.getLogger(self._name)
logger.info('Starting {}'.format(self._name))
# fleming and dateutil have arguments that just differ by ending in an "s"
fleming_kwargs = self._every_kwargs
relative_delta_kwargs = {}
# build the relative delta kwargs
for k, v in self._every_kwargs.items():
relative_delta_kwargs[k + 's'] = v
# if a starting time was given use the floored second of that time as the previous time
if self._starting is not None:
previous_time = fleming.floor(self._starting, second=1)
# otherwise use the interval floored value of now as the previous time
else:
previous_time = fleming.floor(datetime.datetime.now(), **fleming_kwargs)
# keep track of iterations
n_iter = 0
# this is the infinite loop that runs the cron. It will only be stopped when the
# process is killed by its monitor.
while True:
n_iter += 1
if max_iter is not None and n_iter > max_iter:
break
# everything is run in a try block so errors can be explicitly handled
try:
# push forward the previous/next times
next_time = previous_time + relativedelta(**relative_delta_kwargs)
previous_time = next_time
# get the current time
now = datetime.datetime.now()
# if our job ran longer than an interval, we will need to catch up
if next_time < now:
continue
# sleep until the computed time to run the function
sleep_seconds = (next_time - now).total_seconds()
time.sleep(sleep_seconds)
# See what time it is on wakeup
timestamp = datetime.datetime.now()
# If passed until date, break out of here
if self._until is not None and timestamp > self._until:
break
# If not inhibited, run the function
if self._is_uninhibited(timestamp):
self._log('Running {}'.format(self._name))
self._func(*self._func_args, **self._func_kwargs)
except KeyboardInterrupt: # pragma: no cover
pass
except: # noqa
# only raise the error if not in robust mode.
if self._robust:
s = 'Error in tab\n' + traceback.format_exc()
logger = daiquiri.getLogger(self._name)
logger.error(s)
else:
raise
self._log('Finishing {}'.format(self._name))
def _get_target(self):
"""
returns a callable with no arguments designed
to be the target of a Subprocess
"""
        if None in [self._func, self._func_args, self._func_kwargs, self._every_kwargs]:
raise ValueError('You must call the .every() and .run() methods on every tab.')
if self._memory_friendly: # pragma: no cover TODO: need to find a way to test this
target = functools.partial(self._loop, max_iter=1)
else: # pragma: no cover TODO: need to find a way to test this
target = self._loop
if self._lasting_delta is not None:
self._until = datetime.datetime.now() + self._lasting_delta
return target
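# Editor's note (hedged usage sketch, not part of the original module), built
# only from the API defined above:
#
#   def heartbeat(msg):
#       print(msg)
#
#   Cron().schedule(
#       Tab('beat').every(seconds=2).lasting(seconds=10).run(heartbeat, 'tick')
#   ).go(max_seconds=15)
#
# .every() fixes the interval, .lasting()/.until() bound the tab's lifetime,
# and go(max_seconds=...) bounds the whole monitor loop.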
```
#### File: web/main/views.py
```python
import os
import sys
from flask import send_from_directory, render_template
from typing import Optional
from mio.util.Helper import get_root_path, get_real_ip
from mio.util.Logs import LogHandler
from mio.util.Local import I18n
from . import main
logger = LogHandler('view')
@main.app_template_filter("get_local_text")
def get_local_text(text: str, lang: Optional[str] = None):
tt = I18n(lang)
return tt.get_text(text)
@main.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(get_root_path(), 'web', 'static'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@main.route('/')
def index():
    # logger.debug(u'Current visitor IP address: {}'.format(get_real_ip()))
sys_ver = sys.version
return render_template('index.html', sys_ver=sys_ver)
@main.route('/client.cfm')
def client_page():
return render_template('client.html')
``` |
{
"source": "0ldm0s/pymio",
"score": 2
} |
#### File: pymio/util/Local.py
```python
import os
import gettext
from flask import request, current_app
from typing import Optional
from mio.util.Helper import get_root_path
class I18n(object):
_tran_: Optional[gettext.GNUTranslations] = None
@staticmethod
def __get_language__() -> str:
language: Optional[str] = request.accept_languages.best_match(current_app.config['LANGUAGES'])
if language is None:
return current_app.config['DEFAULT_LANGUAGE']
return language
def __init__(self, language: Optional[str] = None, domain: str = 'messages'):
if language is None or len(language) <= 0:
language = self.__get_language__()
try:
localedir: str = os.path.join(get_root_path(), 'translations')
self._tran_ = gettext.translation(domain, localedir, languages=[language])
except FileNotFoundError:
pass
def get_text(self, text: str) -> str:
try:
if self._tran_ is None:
return text
text = self._tran_.gettext(text)
except Exception as e:
str(e)
return text
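# Editor's note (hedged usage sketch): outside a Flask request context an
# explicit language must be passed, otherwise __get_language__() needs an
# active request.  'zh_CN' and the default 'messages' domain are assumptions
# about the compiled catalogs under <root>/translations.
#
#   tt = I18n('zh_CN')
#   print(tt.get_text('Hello'))  # falls back to 'Hello' when no catalog is found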
``` |
{
"source": "0ldm0s/zto-api-helper",
"score": 2
} |
#### File: zto-api-helper/zto_api_helper/__init__.py
```python
import requests
import simplejson as json
from hashlib import md5
from base64 import b64encode
from typing import Dict, Any, Optional, Tuple
class ZtoApiHelper(object):
VERSION: str = '0.1'
api_host: str
app_key: str
app_secret: str
def __init__(self, app_key: str, app_secret: str, is_sandbox: bool = True):
self.api_host = 'japi-test.zto.com' if is_sandbox else 'japi.zto.com'
self.app_key = app_key
self.app_secret = app_secret
def gen_digest(self, data: Dict[str, Any]) -> Optional[str]:
try:
json_str: str = json.dumps(data)
        except (TypeError, ValueError):  # dumps() raises these for non-serializable input, not JSONDecodeError
return None
plan_text = f'{json_str}{self.app_secret}'
result = md5(plan_text.encode('UTF-8'))
crypto: bytes = b64encode(result.digest())
return crypto.decode('utf-8')
def get_remote_data(self, api_uri: str, data: Dict[str, Any], company_id: Optional[str] = None) \
-> Tuple[Optional[Dict[str, Any]], str]:
digest: Optional[str] = self.gen_digest(data)
if digest is None:
return None, '生成签名错误'
headers = {
'User-Agent': f'pymio-zto-sdk/{self.VERSION}',
'Content-Type': 'application/json',
'x-dataDigest': digest
}
if company_id is not None:
headers['x-companyId'] = company_id
else:
headers['x-appKey'] = self.app_key
try:
r = requests.post(f'https://{self.api_host}/{api_uri}', data=json.dumps(data),
headers=headers)
return r.json(), 'OK'
except Exception as e:
return None, str(e)
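# Editor's note (hedged usage sketch; the keys and the API URI below are
# placeholders, not real credentials or a documented endpoint):
#
#   helper = ZtoApiHelper('my_app_key', 'my_app_secret', is_sandbox=True)
#   body, msg = helper.get_remote_data('zto.open.someMethod', {'field': 'value'})
#   if body is None:
#       print('request failed:', msg)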
``` |
{
"source": "0lever/loghub_exporter",
"score": 3
} |
#### File: 0lever/loghub_exporter/loghub.py
```python
import aliyun.log as log
class LogHub(object):
endpoint = None
access_key_id = None
access_key = None
client = None
def __init__(self, endpoint, access_key_id, access_key):
self.endpoint = endpoint
self.access_key_id = access_key_id
self.access_key = access_key
self._init_client()
def _init_client(self):
self.client = log.LogClient(self.endpoint, self.access_key_id, self.access_key)
def get_projects(self):
projects = self.client.list_project()
return [i["projectName"] for i in projects.body["projects"]]
def get_logstores(self, project):
logstores = self.client.list_logstore(project_name=project)
return logstores.body["logstores"]
def get_consumer_groups(self, project, logstore):
consumer_groups = self.client.list_consumer_group(project, logstore)
return [i["name"] for i in consumer_groups.body]
def get_check_point(self, project, logstore, consumer_group):
check_points = self.client.get_check_point(project, logstore, consumer_group)
return check_points.body
def test():
endpoint = 'cn-beijing.log.aliyuncs.com'
# endpoint = "us-west-1.log.aliyuncs.com"
access_key_id = 'xxx'
access_key = 'xxx'
loghub = LogHub(endpoint,access_key_id,access_key)
for i in loghub.get_projects():
for j in loghub.get_logstores(i):
for k in loghub.get_consumer_groups(i, j):
                print(i, j, loghub.get_check_point(i, j, k))
# for l in loghub.get_check_point()
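# Editor's note (hedged exporter sketch, not part of the original file): to
# expose these checkpoints to Prometheus one could wrap them in a Gauge from
# prometheus_client; the label set below is an assumption, and the exact shape
# of get_check_point()'s body is not shown here:
#
#   from prometheus_client import Gauge, start_http_server
#   g = Gauge('loghub_consumer_checkpoint', 'LogHub consumer group checkpoint',
#             ['project', 'logstore', 'consumer_group'])
#   start_http_server(9000)
#   # inside a poll loop: g.labels(project, logstore, group).set(value)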
``` |
{
"source": "0lidaxiang/data-analysis",
"score": 3
} |
#### File: data-analysis/homework3/main (1).py
```python
import pandas as pd
import csv
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
# In[ ]:
df = pd.read_excel('./data.xlsx')
df.head()
# In[48]:
df['Description'] = df['Description'].str.strip()
df.dropna(axis=0, subset=['InvoiceNo'], inplace=True)
df['InvoiceNo'] = df['InvoiceNo'].astype('str')
df = df[~df['InvoiceNo'].str.contains('C')]
# In[81]:
basket = (df[df['Country'] =="United Kingdom"]
.groupby(['InvoiceNo', 'Description'])['Quantity']
.sum().unstack().reset_index().fillna(0)
.set_index('InvoiceNo'))
# In[82]:
def encode_units(x):
if x <= 0:
return 0
if x >= 1:
return 1
basket_sets = basket.applymap(encode_units)
basket.drop('POSTAGE', inplace=True, axis=1)
# In[83]:
df['Description'].shape
# In[84]:
# basket[0:1].shape
# print(basket[0:1]["<NAME>,PINK"])
# print()
# for i in basket[0:1][:40]:
# print(i)
# In[88]:
frequent_itemsets = apriori(basket_sets, min_support=0.01, use_colnames=True)
# In[100]:
# support min threshold(0.01),confidence min threshold(0.5)
rules1 = association_rules(frequent_itemsets, metric="support", min_threshold= 0.01)
rules1.head()
# In[101]:
rules2 = association_rules(frequent_itemsets, metric="confidence", min_threshold= 0.5)
rules2.head()
# In[105]:
result1 = rules2[ (rules2['support'] >= 0.01) &
(rules2['confidence'] >= 0.5) ]
print(result1.shape)
result1.head()
# In[177]:
result2 = result1
# test_res_1 = result2[:1]["antecedants"].values[0]
# slipt_str = test_res_1.split("{'")[1].split("'}")[0]
# teststr = "60 CAKE CASES DOLLY GIRL DESIGN"
# print( slipt_str, teststr, "\n", slipt_str == teststr, "\n")
# print(result1["consequents"][:1])
# In[200]:
# create the result csv
import numpy as np
submit_data = pd.read_csv('submit.csv')
print(submit_data.shape)
submit_data.head()
print(submit_data["Association Rule antecedants"][:3][0])
print(submit_data["Association Rule antecedants"][:3])
index = 1
final_result = np.zeros(3055)
print(type(result2), result2[:3], result2.shape)
for ant_res2,con_res2 in zip(result2["antecedants"], result2["consequents"]):
# for sub in submit_data:
print("\n\n res2", type(ant_res2), ant_res2)
print("\n\n res2", type(con_res2), con_res2 )
# test_res_1 = res2["antecedants"].values[0]
# slipt_str1 = test_res_1.split("{'")[1].split("'}")[0]
# test_res_2 = res2["consequents"].values[0]
# slipt_str2 = test_res_1.split("{'")[1].split("'}")[0]
# for sub1 in sub["Association Rule antecedants"]:
# if sub1 == slipt_str1:
# for sub2 in sub["Association Rule consequents"]:
# if sub2 == slipt_str2:
# final_result[index] = 1
# print("1")
index += 1
# In[20]:
basket2 = (df[df['Country'] =="Germany"]
.groupby(['InvoiceNo', 'Description'])['Quantity']
.sum().unstack().reset_index().fillna(0)
.set_index('InvoiceNo'))
basket_sets2 = basket2.applymap(encode_units)
basket_sets2.drop('POSTAGE', inplace=True, axis=1)
frequent_itemsets2 = apriori(basket_sets2, min_support=0.05, use_colnames=True)
rules2 = association_rules(frequent_itemsets2, metric="lift", min_threshold=1)
rules2[ (rules2['lift'] >= 4) &
(rules2['confidence'] >= 0.5)]
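# In[ ]:
# Editor's note (hedged recap, added for clarity): the association-rule
# metrics filtered above are defined as
#   support(A->B)    = P(A and B)
#   confidence(A->B) = P(B | A) = support(A and B) / support(A)
#   lift(A->B)       = confidence(A->B) / support(B)
# so lift >= 4 with confidence >= 0.5 keeps rules whose consequent becomes at
# least four times more likely once the antecedent is in the basket.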
# In[23]:
``` |
{
"source": "0lidaxiang/detection-car-pedestrian",
"score": 3
} |
#### File: detection-car-pedestrian/python-camera/tegra-cam-caffe.py
```python
import os
import sys
import argparse
import cv2
import numpy as np
import socket
CAFFE_ROOT = "/home/nvidia/caffe/"
sys.path.insert(0, CAFFE_ROOT + "python")
import caffe
from caffe.proto import caffe_pb2
DEFAULT_PROTOTXT = CAFFE_ROOT + "models/bvlc_reference_caffenet/deploy.prototxt"
DEFAULT_MODEL = CAFFE_ROOT + "models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"
DEFAULT_LABELS = CAFFE_ROOT + "data/ilsvrc12/synset_words.txt"
DEFAULT_MEAN = CAFFE_ROOT + "data/ilsvrc12/imagenet_mean.binaryproto"
windowName = "CameraCaffeDemo"
helpText = "'Esc' to Quit, 'H' to Toggle Help, 'F' to Toggle Fullscreen"
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description="Capture and display live camera video, and do real-time image classification with Caffe on Jetson TX2/TX1")
parser.add_argument("--rtsp", dest="use_rtsp",
help="use IP CAM (remember to also set --uri)",
action="store_true")
parser.add_argument("--uri", dest="rtsp_uri",
help="RTSP URI string, e.g. rtsp://192.168.1.64:554",
default=None, type=str)
parser.add_argument("--latency", dest="rtsp_latency",
help="latency in ms for RTSP [200]",
default=200, type=int)
parser.add_argument("--usb", dest="use_usb",
help="use USB webcam (remember to also set --vid)",
action="store_true")
parser.add_argument("--vid", dest="video_dev",
help="video device # of USB webcam (/dev/video?) [1]",
default=1, type=int)
parser.add_argument("--width", dest="image_width",
help="image width [640]",
default=640, type=int)
parser.add_argument("--height", dest="image_height",
help="image width [480]",
default=480, type=int)
parser.add_argument("--cpu", dest="cpu_mode",
help="use CPU mode for Caffe (GPU mode is used by default)",
action="store_true")
parser.add_argument("--crop", dest="crop_center",
help="crop the square at center of image for Caffe inferencing [False]",
action="store_true")
parser.add_argument("--prototxt", dest="caffe_prototxt",
help="[{}]".format(DEFAULT_PROTOTXT),
default=DEFAULT_PROTOTXT, type=str)
parser.add_argument("--model", dest="caffe_model",
help="[{}]".format(DEFAULT_MODEL),
default=DEFAULT_MODEL, type=str)
parser.add_argument("--labels", dest="caffe_labels",
help="[{}]".format(DEFAULT_LABELS),
default=DEFAULT_LABELS, type=str)
parser.add_argument("--mean", dest="caffe_mean",
help="[{}]".format(DEFAULT_MEAN),
default=DEFAULT_MEAN, type=str)
parser.add_argument("--output", dest="caffe_output",
help='name of Caffe output blob [prob]',
default="prob", type=str)
args = parser.parse_args()
return args
def get_caffe_mean(filename):
mean_blob = caffe_pb2.BlobProto()
with open(filename, "rb") as f:
mean_blob.ParseFromString(f.read())
mean_array = np.asarray(mean_blob.data, dtype=np.float32).reshape(
(mean_blob.channels, mean_blob.height, mean_blob.width))
return mean_array.mean(1).mean(1)
def open_cam_rtsp(uri, width, height, latency):
gst_str = ("rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! "
"nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! "
"videoconvert ! appsink").format(uri, latency, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
# We want to set width and height here, otherwise we could just do:
# return cv2.VideoCapture(dev)
gst_str = ("v4l2src device=/dev/video{} ! "
"video/x-raw, width=(int){}, height=(int){}, format=(string)RGB ! "
"videoconvert ! appsink").format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
# On versions of L4T previous to L4T 28.1, flip-method=2
# Use Jetson onboard camera
gst_str = ("nvcamerasrc ! "
"video/x-raw(memory:NVMM), width=(int)2592, height=(int)1458, format=(string)I420, framerate=(fraction)30/1 ! "
"nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! "
"videoconvert ! appsink").format(width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_window(windowName, width, height):
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(windowName, width, height)
cv2.moveWindow(windowName, 0, 0)
#cv2.setWindowTitle(windowName, "Camera Caffe Classification Demo for Jetson TX2/TX1")
def show_top_preds(img, top_probs, top_labels):
x = 10
y = 40
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(top_probs)):
pred = "{:.4f} {:20s}".format(top_probs[i], top_labels[i])
#cv2.putText(img, pred,
# (x+1,y), font, 1.0, (32,32,32), 4, cv2.LINE_AA)
cv2.putText(img, pred,
(x,y), font, 1.0, (0,0,240), 1, cv2.LINE_AA)
y += 20
def read_cam_and_classify(windowName, cap, net, transformer, labels, caffe_output, crop):
showHelp = True
showFullScreen = False
font = cv2.FONT_HERSHEY_PLAIN
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create the TCP socket
    s.bind(('', 12345))  # bind the listening port
    s.listen(1)  # wait for a client connection
    print('The server is ready to receive')
    c, addr = s.accept()  # accept the client connection
    print('Connection address: ' + str(addr))
    c.send("Welcome to the AI recognition service!")
oldObject = ""
while True:
if cv2.getWindowProperty(windowName, 0) < 0: # Check to see if the user closed the window
# This will fail if the user closed the window; Nasties get printed to the console
            break
ret_val, img = cap.read()
if crop:
height, width, channels = img.shape
if height < width:
img_crop = img[:, ((width-height)//2):((width+height)//2), :]
else:
img_crop = img[((height-width)//2):((height+width)//2), :, :]
else:
            img_crop = img
# inferencing the image
net.blobs["data"].data[...] = transformer.preprocess("data", img_crop)
output = net.forward()
output_prob = output[caffe_output][0] # output["prob"][0]
top_inds = output_prob.argsort()[::-1][:1]
top_probs = output_prob[top_inds]
top_labels = labels[top_inds]
#show_top_preds(img, top_probs, top_labels)
#for i in range(len(top_probs)):
newObject = top_labels[0]
if newObject != oldObject:
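            # strip the leading synset ID from the label (assumes entries like "n01440764 tench", i.e. a 9-char ID prefix)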
pred = "{:.1f}{:2s} {:20s}".format(top_probs[0] *100, "%", top_labels[0][9:])
c.send(pred)
print(pred)
oldObject = newObject
c.close()
#print("\n")
#if showHelp == True:
#cv2.putText(img, helpText, (11,20), font, 1.0, (32,32,32), 4, cv2.LINE_AA)
#cv2.putText(img, helpText, (10,20), font, 1.0, (240,240,240), 1, cv2.LINE_AA)
#cv2.imshow(windowName, img)
#key = cv2.waitKey(10)
#elif key == ord('H') or key == ord('h'): # toggle help message
#showHelp = not showHelp
#elif key == ord('F') or key == ord('f'): # toggle fullscreen
# showFullScreen = not showFullScreen
#if showFullScreen == True:
#cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
#else:
#cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
if __name__ == "__main__":
args = parse_args()
#print("Called with args:")
#print(args)
#print("OpenCV version: {}".format(cv2.__version__))
    if not os.path.isfile(args.caffe_prototxt):
        sys.exit("File not found: {}".format(args.caffe_prototxt))
    if not os.path.isfile(args.caffe_model):
        sys.exit("File not found: {}".format(args.caffe_model))
    if not os.path.isfile(args.caffe_labels):
        sys.exit("File not found: {}".format(args.caffe_labels))
    if not os.path.isfile(args.caffe_mean):
        sys.exit("File not found: {}".format(args.caffe_mean))
# initialize Caffe
if args.cpu_mode:
print("Running Caffe in CPU mode")
caffe.set_mode_cpu()
else:
print("Running Caffe in GPU mode")
caffe.set_device(0)
caffe.set_mode_gpu()
net = caffe.Net(args.caffe_prototxt, caffe.TEST, weights=args.caffe_model)
mu = get_caffe_mean(args.caffe_mean)
print("Mean-subtracted values:", zip('BGR', mu))
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose("data", (2,0,1))
transformer.set_mean("data", mu)
# no need to to swap color channels since captured images are already BGR
labels = np.loadtxt(args.caffe_labels, str, delimiter='\t')
# initialize camera
if args.use_rtsp:
cap = open_cam_rtsp(args.rtsp_uri, args.image_width, args.image_height, args.rtsp_latency)
elif args.use_usb:
cap = open_cam_usb(args.video_dev, args.image_width, args.image_height)
else: # by default, use the Jetson onboard camera
cap = open_cam_onboard(args.image_width, args.image_height)
if not cap.isOpened():
sys.exit("Failed to open camera!")
# start capturing live video and do inference
open_window(windowName, args.image_width, args.image_height)
read_cam_and_classify(windowName, cap, net, transformer, labels,
args.caffe_output, args.crop_center)
cap.release()
cv2.destroyAllWindows()
```
#### File: detection-car-pedestrian/python-camera/tegra-cam.py
```python
import sys
import argparse
import cv2
import numpy as np
windowName = "CameraDemo"
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description=
"Capture and display live camera video on Jetson TX2/TX1")
parser.add_argument("--rtsp", dest="use_rtsp",
help="use IP CAM (remember to also set --uri)",
action="store_true")
parser.add_argument("--uri", dest="rtsp_uri",
help="RTSP URI string, e.g. rtsp://192.168.1.64:554",
default=None, type=str)
parser.add_argument("--latency", dest="rtsp_latency",
help="latency in ms for RTSP [200]",
default=200, type=int)
parser.add_argument("--usb", dest="use_usb",
help="use USB webcam (remember to also set --vid)",
action="store_true")
parser.add_argument("--vid", dest="video_dev",
help="video device # of USB webcam (/dev/video?) [1]",
default=1, type=int)
parser.add_argument("--width", dest="image_width",
help="image width [1920]",
default=1920, type=int)
parser.add_argument("--height", dest="image_height",
help="image height [1080]",
default=1080, type=int)
args = parser.parse_args()
return args
def open_cam_rtsp(uri, width, height, latency):
gst_str = ("rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! "
"nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! "
"videoconvert ! appsink").format(uri, latency, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
# We want to set width and height here, otherwise we could just do:
# return cv2.VideoCapture(dev)
gst_str = ("v4l2src device=/dev/video{} ! "
"video/x-raw, width=(int){}, height=(int){}, format=(string)RGB ! "
"videoconvert ! appsink").format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
# On versions of L4T previous to L4T 28.1, flip-method=2
# Use Jetson onboard camera
gst_str = ("nvcamerasrc ! "
"video/x-raw(memory:NVMM), width=(int)2592, height=(int)1458, format=(string)I420, framerate=(fraction)30/1 ! "
"nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! "
"videoconvert ! appsink").format(width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_window(windowName, width, height):
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(windowName, width, height)
cv2.moveWindow(windowName, 0, 0)
cv2.setWindowTitle(windowName, "Camera Demo for Jetson TX2/TX1")
def read_cam(windowName, cap):
showHelp = True
showFullScreen = False
helpText = "'Esc' to Quit, 'H' to Toggle Help, 'F' to Toggle Fullscreen"
font = cv2.FONT_HERSHEY_PLAIN
while True:
if cv2.getWindowProperty(windowName, 0) < 0: # Check to see if the user closed the window
# This will fail if the user closed the window; Nasties get printed to the console
            break
        ret_val, displayBuf = cap.read()
if showHelp == True:
cv2.putText(displayBuf, helpText, (11,20), font, 1.0, (32,32,32), 4, cv2.LINE_AA)
cv2.putText(displayBuf, helpText, (10,20), font, 1.0, (240,240,240), 1, cv2.LINE_AA)
cv2.imshow(windowName, displayBuf)
key = cv2.waitKey(10)
if key == 27: # ESC key: quit program
break
elif key == ord('H') or key == ord('h'): # toggle help message
showHelp = not showHelp
elif key == ord('F') or key == ord('f'): # toggle fullscreen
showFullScreen = not showFullScreen
if showFullScreen == True:
cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
else:
cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
if __name__ == "__main__":
args = parse_args()
print("Called with args:")
print(args)
print("OpenCV version: {}".format(cv2.__version__))
if args.use_rtsp:
cap = open_cam_rtsp(args.rtsp_uri, args.image_width, args.image_height, args.rtsp_latency)
elif args.use_usb:
cap = open_cam_usb(args.video_dev, args.image_width, args.image_height)
else: # by default, use the Jetson onboard camera
cap = open_cam_onboard(args.image_width, args.image_height)
if not cap.isOpened():
sys.exit("Failed to open camera!")
open_window(windowName, args.image_width, args.image_height)
read_cam(windowName, cap)
cap.release()
cv2.destroyAllWindows()
``` |
{
"source": "0lidaxiang/IR",
"score": 3
} |
#### File: IR/homework1/documentTF.py
```python
import os
import dictionary
import getFileList
def createDocumentTF():
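    # Build the raw term-frequency table: one output line per dictionary term, followed by its count in every document.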
allDictionary = dictionary.getDictionary()
fileList = getFileList.getFilesListFromFile()
fname = './initialResult/documentTFResult.txt'
res = os.path.isfile(fname)
if res:
pass
else:
f = open(fname, 'w')
for sub in allDictionary:
strWrite = sub + " "
for fv in fileList:
strWrite = strWrite + " " + str(fv.split().count(sub))
f.write(strWrite + "\n")
f.close()
# print " Write to " +fname + " file over."
def getDocumentTF():
createDocumentTF()
res = []
with open('./initialResult/documentTFResult.txt') as f:
lines = f.read().splitlines()
for line in lines:
strTemp = ''.join(line.split("\r\n"))
            res.append(list(map(int, strTemp.split())))
return res
# res = getDocumentTF()
# print "len(res): " , len(res)
#
# k =0
# for v in res:
# if k < 10:
# print v
# k = k + 1
```
#### File: homework3-EM/code/base.py
```python
import numpy as np
import math
from datetime import datetime
import dictionary
import document
import wordDocsCount
import wordDocsNoCount
startInitial = datetime.now()
docList = document.getFilesName()
wordList = dictionary.getDictionary()
wNumber = len(wordList)
dNumber = len(docList) # 2265
topicNum = 10
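# Randomly initialize the EM probability tables; each Dirichlet draw sums to 1,
# so every row starts out as a valid probability distribution.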
P_d = np.random.dirichlet(np.ones(dNumber),size=1).tolist()[0]
P_w_T = np.random.dirichlet(np.ones(topicNum),size= wNumber)
P_T_d = np.random.dirichlet(np.ones(dNumber),size= topicNum)
P_T_wd = np.zeros(shape=(topicNum * dNumber,wNumber))
count_w_d = wordDocsCount.getWordDocCount()
noCount_w_d = wordDocsNoCount.getWordDocNoCount()
dict_DocCountWord = wordDocsCount.getDict_DocCountWord()
def get_BG():
fname = "./BGLM.txt"
with open(fname) as f:
lines = f.read().splitlines()
res = []
for line in lines:
res.append(line.split())
return res
bgList = get_BG()
def getQuery():
fname = "./Query.txt"
with open(fname) as f:
lines = f.read().splitlines()
res = []
for line in lines:
res.append(line.split())
return res
queryList = getQuery()
```
#### File: homework3-EM/code/docLength.py
```python
import os
import document
def createDocLength():
fname = "../initialResult/docLength.txt"
allFilesContent = document.getAllFilesContent()
res = os.path.isfile(fname)
if res:
pass
else:
f = open(fname, 'w')
for doc in allFilesContent:
docLength = str(len(doc.split(" "))) + "\r\n"
f.write(docLength)
f.close()
def getDocLength():
fname = "../initialResult/docLength.txt"
res1 = os.path.isfile(fname)
if res1:
pass
else:
createDocLength()
res = []
with open(fname) as f:
lines = f.read().splitlines()
for line in lines:
strTemp = ''.join(line.split("\r\n"))
res.append(strTemp)
return res
res = getDocLength()
print("len(res): " , len(res))
k =0
for v in res:
if k < 10:
print(v)
k = k + 1
```
#### File: IR/homework5/idfResult.py
```python
import os
import dictionary
import getFileList
def createIDFFile():
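    # For each dictionary term, count the documents containing it and write N/df (no log), one value per line.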
allDocumentNumber = 2265.0
fname = "./initialResult/idfResult.txt"
res = os.path.isfile(fname)
if res:
# print fname + ' file has exists.'
pass
else:
fileList = getFileList.getIntsFromFile()
f = open(fname, 'w')
dIDFs = []
allDictionary = dictionary.getDictionary()
# fileList = getFileList.getFilesListFromFile()
for sub in allDictionary:
            dIDFsub = 0
for fv in fileList:
if sub in fv:
dIDFsub = dIDFsub + 1
idfNum = allDocumentNumber / dIDFsub
strWrite = str(idfNum)
f.write(strWrite + "\n")
f.close()
# print "Write to " + fname + "file over."
def getIDF():
createIDFFile()
res = []
with open('./initialResult/idfResult.txt') as f:
lines = f.read().splitlines()
for line in lines:
strTemp = ''.join(line.split("\r\n"))
res.append(float(strTemp) )
# res.append(list(map(float, strTemp.split() )) )
return res
# res = getIDF()
# print ("len(res): " , len(res))
# # print ("len(res): " , len(res[0]))
#
# k =0
# for v in res:
# if k < 10:
# print( v)
# k = k + 1
``` |
{
"source": "0lidaxiang/leetcode",
"score": 3
} |
#### File: algorithm/simple/roman-to-integer.py
```python
```
Roman numerals are made up of seven symbols: I, V, X, L, C, D and M.
Symbol   Value
I        1
V        5
X        10
L        50
C        100
D        500
M        1000
For example, 2 is written as II in Roman numerals, two ones side by side. 12 is written as XII, i.e. X + II. 27 is written as XXVII, i.e. XX + V + II.
Roman numerals are usually written largest to smallest from left to right, but there are exceptions: 4 is not written IIII, it is IV. The 1 before the 5 means the larger value minus the smaller, i.e. 5 - 1 = 4. Likewise, 9 is written as IX. This subtraction rule applies in exactly six cases:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a Roman numeral, convert it to an integer. The input is guaranteed to be in the range 1 to 3999.
Example 1:
Input: "III"
Output: 3
Example 2:
Input: "IV"
Output: 4
Example 3:
Input: "IX"
Output: 9
Example 4:
Input: "LVIII"
Output: 58
Explanation: L = 50, V = 5, III = 3.
Example 5:
Input: "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90, IV = 4.
Source: LeetCode
Link: https://leetcode-cn.com/problems/roman-to-integer
```
# Answer 1: 116 ms
class Solution:
def romanToInt(self, s: str) -> int:
ret = 0
to_int = {"I": 1,"V": 5, "X": 10, "L": 50, "C" : 100, "D": 500, "M" : 1000}
special = {"IV": 4,"IX": 9, "XL": 40, "XC": 90, "CD" : 400, "CM": 900 }
new_s = s
for i in range(0, len(s) -1):
part = s[i:i+2]
if part in special.keys():
ret += special[part]
new_s = new_s.replace(part, "")
for x in new_s:
x_int = to_int[x]
ret += x_int
# ret += 999
return ret
# Answer 2: 64 ms
class Solution:
def romanToInt(self, s: str) -> int:
ret = 0
convert = {"I": 1,"V": 5, "X": 10, "L": 50, "C" : 100, "D": 500, "M" : 1000, "IV": 4,"IX": 9, "XL": 40, "XC": 90, "CD" : 400, "CM": 900 }
length = len(s)
i = 0
while i < length:
part = s[i:i+2]
if part in convert.keys():
ret += convert[part]
i+=2
else:
ret += convert[s[i:i+1]]
i+=1
return ret
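# hypothetical quick check: Solution().romanToInt("MCMXCIV") -> 1994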
```
#### File: algorithm/simple/two-sum.py
```python
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
for i in range(0,len(nums)-1):
x = nums[i]
for j in range(i+1, len(nums)):
y = nums[j]
if x+y == target:
return [i, j]
# Answer 2:
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
tul = dict()
for i in range(0,len(nums)):
x = nums[i]
y = target -x
if y not in tul.keys():
tul[x] = i
else:
return [tul[y], i]
# Answer 3: fastest (single pass with a hash map)
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dic = dict()
for i, x in enumerate(nums):
if dic.get(target -x) is None:
dic[x] = i
else:
return [dic.get(target -x) , i]
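# hypothetical quick check: Solution().twoSum([2, 7, 11, 15], 9) -> [0, 1]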
```
#### File: algorithm/simple/valid-parentheses.py
```python
```
Given a string containing only the characters '(', ')', '{', '}', '[' and ']', determine whether it is valid.
A string is valid when:
Open brackets are closed by the same type of bracket.
Open brackets are closed in the correct order.
Note that an empty string is considered valid.
Example 1:
Input: "()"
Output: true
Example 2:
Input: "()[]{}"
Output: true
Example 3:
Input: "(]"
Output: false
Example 4:
Input: "([)]"
Output: false
Example 5:
Input: "{[]}"
Output: true
Source: LeetCode
Link: https://leetcode-cn.com/problems/valid-parentheses
```
# Answer 1: 68ms
# stack-based design: push opening brackets, pop on a matching close (last in, first out)
class Solution:
def isValid(self, s: str) -> bool:
convert = {"(" : ")", ")" : "(", "{" : "}","}" : "{", "[" : "]", "]" : "[" }
lefts = ["(", "[", "{"]
stack = ["?"]
if len(s) == 1:
return False
for i,x in enumerate(s):
if x in lefts:
stack.append(x)
elif stack[-1] == convert[x]:
stack.pop()
else:
stack.append(x)
stack.remove("?")
if stack:
return False
return True
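# hypothetical quick check: Solution().isValid("{[]}") -> True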
``` |
{
"source": "0lidaxiang/offline-friends",
"score": 2
} |
#### File: home/view/compute.py
```python
import sys
import time
import random
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.views.decorators import csrf
from user.models import User
# from django.http import JsonResponse
#
# def computeGPS(request):
# context = {}
# # if "userName" not in request.POST:
# # return render(request, 'home/index.html')
#
# try:
# id = request.POST['id']
# nickname = request.POST['nickname']
# longitude = request.POST['longitude']
# latitude = request.POST['latitude']
#
# # userId = userName + "_" + str(int(time.time())) + "_" + str(random.randint(1000))
# # print(userName,password,email)
# user = User.getValueByUserId(userId, "all")
# context["status"] = False
# context["resCode"] = 11
# context["mes"] = " fanRegister error "
#
# # name_dict = {'twz': 'Love python and Django', 'zqxt': 'I am teaching Django'}
# return JsonResponse(context)
# # return context
# except Exception as e:
# print(str(e))
# return JsonResponse(context)
```
#### File: offlineFriends/view/index.py
```python
from user.models import User
from django.shortcuts import render
from django.http import JsonResponse
import requests
from bs4 import BeautifulSoup
def sendGPS(request):
context = {}
try:
id = request.POST['id']
nickname = request.POST['nickname']
longitude = float(request.POST['longitude'])
latitude = float(request.POST['latitude'])
print("gps index.py", longitude, latitude)
# userId = userName + "_" + str(int(time.time())) + "_" + str(random.randint(1000))
# print(userName,password,email)
status, message = User.modifyLL(id, longitude, latitude)
# context["status"] = True
# context["longitude"] = longitude
# context["latitude"] = latitude
res_dict = {'status': status, 'message': message}
return JsonResponse(res_dict)
# return context
except Exception as e:
        res_dict = {'status': False, 'message': str(e)}
print(str(e))
return JsonResponse(res_dict)
``` |
{
"source": "0lidaxiang/WeArt",
"score": 2
} |
#### File: WeArt/book/models.py
```python
from django.db import models
from tool.tools import createId
from time import localtime,strftime
from reader.models import reader
# Create your models here.
class book(models.Model):
class Meta:
app_label = "book"
db_table = 'book'
verbose_name = "書籍"
verbose_name_plural = "書籍列表管理"
STATUS_CHOICES = (
("active", 'active'),
("inactive", 'inactive'),
("locked", 'locked'),
)
id = models.CharField("編號", max_length=20,primary_key=True,blank=False,null=False)
name = models.CharField("書名", max_length=100,blank=False,null=False)
# remoteIP = models.CharField("遠端倉庫IP", max_length=20)
remoteIP = models.GenericIPAddressField("遠端倉庫IP",default='192.168.0.1',blank=False,null=False)
location = models.CharField("存儲位置", max_length=100,blank=False,null=False)
chapterCount = models.IntegerField("章節數量",blank=False,null=False)
status = models.CharField("書籍狀態", max_length=20,blank=False,null=False, choices = STATUS_CHOICES)
createTime = models.DateTimeField("創建時間", max_length=50,blank=False,null=False)
idReader_id = models.CharField("作者編號", max_length=30,blank=False,null=False)
def accountStatus(self):
return self.status == 'active'
accountStatus.boolean = True
accountStatus.short_description = "允許讀者查看"
def accountCreateTime(self):
return self.createTime.strftime('%Y-%m-%d %H:%M:%S')
accountCreateTime.short_description = '申請時間'
@classmethod
def getIdByNameAndAuthor(self, nameArg, idReaderArg):
try:
obj = self.objects.get(name = nameArg, idReader_id = idReaderArg)
return True, 130000, obj.id
except self.DoesNotExist:
return False, 130004, "錯誤: getIdByNameAndAuthor 讀取 book 表錯誤。"
except Exception as e:
return False, 130005, str(e)
@classmethod
def getValue(self, idBookArg, returnArg):
try:
obj = self.objects.get(id=idBookArg)
if returnArg == "id":
return True, 130000, obj.id
elif returnArg == "name":
return True, 130000, obj.name
elif returnArg == "remoteIP":
return True, 130000, obj.remoteIP
elif returnArg == "location":
return True, 130000, obj.location
elif returnArg == "status":
return True, 130000, obj.status
elif returnArg == "chapterCount":
return True, 130000, obj.chapterCount
elif returnArg == "createTime":
return True, 130000, obj.createTime
elif returnArg == "idReader_id":
return True, 130000, obj.idReader_id
elif returnArg == "all":
return True, 130000, obj
else:
return False, 130001, "錯誤: book 表中不存在該屬性,returnArg錯誤。"
except self.DoesNotExist:
return False, 130002, "錯誤: book 表不存在該數據。"
except Exception as e:
return False, 130003, str(e)
@classmethod
def add(self, bookName, remoteIP, location, idReader_id):
try:
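            # primary key is a 20-char hash of the book name (tool.tools.createId); new books start active with 0 chapters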
nowTime = strftime("%Y-%m-%d %H:%M:%S", localtime())
idVal = createId(20, bookName)
obj = self(id=idVal, name=bookName, remoteIP = remoteIP, location=location, chapterCount = 0, status = "active", createTime=nowTime, idReader_id=idReader_id)
obj.save()
return True, idVal
except Exception as e:
return False, str(e)
@classmethod
def modify(self, idBookArg, property, value):
try:
obj = self.objects.get(id=idBookArg)
if property == "id":
obj = self(id=idBookArg)
obj.id = value
obj.save(update_fields=["id"])
return True, obj.id
elif property == "name":
obj = self(id=idBookArg)
obj.name = value
obj.save(update_fields=["name"])
return True, obj.name
elif property == "remoteIP":
obj = self(id=idBookArg)
obj.remoteIP = remoteIP
obj.save(update_fields=["remoteIP"])
return True, obj.remoteIP
elif property == "location":
obj = self(id=idBookArg)
obj.location = value
obj.save(update_fields=["location"])
return True, obj.location
elif property == "status":
obj = self(id=idBookArg)
obj.status = value
obj.save(update_fields=["status"])
return True, obj.status
elif property == "chapterCount":
obj = self(id=idBookArg)
obj.chapterCount = value
obj.save(update_fields=["chapterCount"])
return True, obj.chapterCount
elif property == "createTime":
obj = self(id=idBookArg)
obj.createTime = value
obj.save(update_fields=["createTime"])
return True, obj.createTime
elif property == "idReader_id":
obj = self(id=idBookArg)
obj.idReader_id = value
obj.save(update_fields=["idReader_id"])
return True, obj.idReader_id
else:
return False, "錯誤1001: book表中不存在該屬性,returnArg錯誤。"
except self.DoesNotExist:
return False, "錯誤1002: 讀取book表錯誤。"
@classmethod
def deleteObj(self, idBookArg):
try:
obj = self.objects.get(id=idBookArg)
location = obj.location
obj.delete()
return True,130100, location
except self.DoesNotExist:
return False, 130102, "delete 讀取 book 表錯誤"
except Exception as e:
return False, 130103, str(e)
@classmethod
def getAll(self, amount):
try:
# get all data when amount=0
if amount == 0:
obj = self.objects.all()
return True, 130200, obj
obj = self.objects.all()[:amount]
return True, 130200, obj
except self.DoesNotExist:
return False, 130201, "book 表不存在該數值"
except Exception as e:
return False, 130202, str(e)
@classmethod
def getAllByAuthor(self, idReaderArg):
try:
obj = self.objects.all().filter(idReader_id=idReaderArg)
return True, 130300, obj
except self.DoesNotExist:
return False, 130301, "book 表不存在該數值"
except Exception as e:
return False, 130302, str(e)
```
#### File: book/view/deleteBook.py
```python
import os
import subprocess
import paramiko
from django.shortcuts import render
from django.http import JsonResponse
from book.models import book
from django.conf import settings
def deleteObj(request):
if "readerId" not in request.session:
return render(request, 'reader/login.html')
idBook = request.GET["idBook"]
idReader = request.session["readerId"]
context = {}
try:
# delete book's data in database and get its directory location
res, statusNumber, mes = book.deleteObj(idBook)
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = mes
return JsonResponse(context)
path = mes + "/"
operateDirName = idBook
# rm the repo directory in webServer
res, statusNumber, mes = rmDirOfWebServer(path, operateDirName)
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = mes
return JsonResponse(context)
# rm the repo directory in gitServer
gitserver_ip = settings.GIT_SERVER_IP
gitserver_user = settings.GIT_SERVER_USER
gitserver_userPasswd = settings.GIT_SERVER_USERPASSWD
res, statusNumber, mes = rmDirOfGitServer(gitserver_ip, gitserver_user, gitserver_userPasswd, path, operateDirName)
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = mes
return JsonResponse(context)
context['res'] = "success"
context['statusNumber'] = 130600
context['message'] = ""
except Exception as e:
context['res'] = "fail"
context['statusNumber'] = 130601
context['message'] = str(e)
        print(str(e))
return JsonResponse(context)
def rmDirOfWebServer(path, operateDir):
try:
cmd = "rm -rf " + path + operateDir
p = subprocess.Popen(cmd, shell=True)
(stdoutput,erroutput) = p.communicate()
return True, 130700, ""
except Exception as e:
        print(str(e))
return False, 130701, str(e)
def rmDirOfGitServer(gitserver_ip, gitserver_user, gitserver_userPasswd, path, operateDir):
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(gitserver_ip,22,gitserver_user, gitserver_userPasswd,timeout=5)
cmd = "rm -rf " + path + operateDir + ".git"
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)
ssh.close()
return True, 130800, ""
except Exception as e:
        print(str(e))
return False, 130801, str(e)
```
#### File: book/view/getRecommendArts.py
```python
import sys
import json
from django.http import JsonResponse
from django.shortcuts import render
from book.models import book
from reader.models import reader
def getRecommendArts(request):
context = {}
reload(sys)
sys.setdefaultencoding('utf8')
# get the new book name of user input if it is not null
if 'amount' not in request.GET:
context['status'] = "fail"
context['message'] = "The amount variable is not in request.GET."
return JsonResponse(context)
    inputAmount = int(request.GET['amount'])
res, statusNumber, mes = book.getAll(inputAmount)
if not res:
context['status'] = "fail"
context['message'] = "錯誤: " + mes
return JsonResponse(context)
context['status'] = "success"
response_data = []
for m in mes:
response_record = {}
response_record['id'] = m.id
response_record['name'] = m.name
response_record['chapterCount'] = m.chapterCount
response_record['authorName'] = reader.getValueById(m.idReader_id, "name")[2]
response_data.append(response_record)
context["message"] = response_data
return JsonResponse(context)
```
#### File: collection/view/getCollection.py
```python
from django.shortcuts import render
from django.http import JsonResponse
import sys
from book.models import book
from collection.models import collection
def getMyCollection(request):
# dealing with Chinese questions
reload(sys)
sys.setdefaultencoding('utf8')
if "readerId" not in request.session:
return render(request, 'reader/login.html')
idReader = request.session["readerId"]
    context = {}
    collections = []
    try:
        res, status, mes = collection.getAll(idReader)
if not res:
coll = {}
coll["id"] = 0
coll["idReader"] = "Server Error"
coll["idBook"] = str(status)
coll["bookName"] = str(mes)
coll["createTime"] = str(mes)
coll["operation"] = str(mes)
collections.append(coll)
context['data'] = collections
return JsonResponse(context)
idx = 0
for v in mes:
coll = {}
res,status,mes = book.getValue(v.idBook_id, "name")
if res:
coll["id"] = idx
coll["idReader"] = v.idReader_id
coll["idBook"] = v.idBook_id
coll["bookName"] = str(mes)
coll["createTime"] = v.createTime
coll["operation"] = "<a href=javascript:deleteCollection('" + v.id + "');> delete" + "</a>"
collections.append(coll)
idx+=1
context['data'] = collections
return JsonResponse(context)
except Exception as e:
coll = {}
coll["id"] = 0
coll["idReader"] = "Server Exception"
coll["idBook"] = str(170501)
coll["bookName"] = str(e)
coll["createTime"] = str(e)
coll["operation"] = str(e)
collections.append(coll)
context['data'] = collections
return JsonResponse(context)
```
#### File: content/view/createContent.py
```python
from django.http import JsonResponse
from django.shortcuts import render
from django.conf import settings
from git import Repo
from book.models import book
from chapter.models import chapter
from version.models import version
import paramiko
import sys
import json
import os
import datetime
import subprocess
def createAContent(request):
if "readerId" not in request.session:
return render(request, 'reader/login.html')
if request.session["authorStatus"] != "active":
return render(request, 'author/authorStatus/')
reload(sys)
sys.setdefaultencoding('utf8')
context = {}
# get the new book name of user input if it is not null
if 'bookName' not in request.GET:
context['status'] = "fail"
context['message'] = "The bookname variable is not in request.GET."
return JsonResponse(context)
userInputBookName = request.GET['bookName']
if userInputBookName == "":
context['status'] = "fail"
context['message'] = "您還沒有填寫書名稱。請重載後重新嘗試。"
return JsonResponse(context)
# get the new chapter name of user input if it is not null
if 'chapterOrder' not in request.GET:
context['status'] = "fail"
context['message'] = "The chapter name variable is not in request.GET."
return JsonResponse(context)
userInputChpaterOrder = request.GET['chapterOrder']
if userInputChpaterOrder == "":
context['status'] = "fail"
context['message'] = "您還沒有填寫章節序號。請重載後重新嘗試。"
return JsonResponse(context)
# get the new commit content of user input if it is not null
if 'commitContent' not in request.GET:
context['status'] = "fail"
context['message'] = "The commitContent variable is not in request.GET."
return JsonResponse(context)
userInputCommitContent = request.GET['commitContent']
if userInputBookName == "":
context['status'] = "fail"
context['message'] = "您還沒有填寫更新摘要名稱。請重載後重新嘗試。"
return JsonResponse(context)
# get the new content of user input even if it is null
if 'content' not in request.GET:
context['status'] = "fail"
context['message'] = "The content variable is not in request.GET."
return JsonResponse(context)
userInputContent = request.GET['content']
try:
# step1: get idBook and check whether a book is exist by bookname
readerId = request.session['readerId']
res, statusNumber, mes = book.getIdByNameAndAuthor(userInputBookName, readerId)
if not res:
if statusNumber == 130004:
context['status'] = "fail"
context['message'] = "不存在該本書!請重新輸入其它書名或登錄其它賬號"
elif statusNumber == 130005:
context['status'] = "fail"
context['message'] = "未知服務器錯誤:" + str(statusNumber) + mes
else:
context['status'] = "fail"
context['message'] = "其它服務器錯誤:" + str(statusNumber) + mes
return JsonResponse(context)
# step2: get new book name and create file in webServer
idBook = mes
locationBook = ""
res, statusNumber, mes = book.getValue(idBook, "location")
if res:
locationBook = mes
myhome_path = locationBook + "/" + idBook
filePath = myhome_path + "/" + idBook + "_" + str(userInputChpaterOrder) + ".txt"
f = open(filePath, "w")
f.write(request.GET['content'] + "\n")
f.close()
# step3: modify the git config info to this author
cmd1 = "cd " + myhome_path
cmd2 = "; git config --local user.name " + readerId
cmd3 = "; git config --local user.email " + readerId + "@weart.com; "
cmd = cmd1 + cmd2 + cmd3
p = subprocess.Popen(cmd, shell=True)
(stdoutput,erroutput) = p.communicate()
# step4: git commit to gitServer
        # open the existing local repository
        repo = Repo(myhome_path)
        # grab the repository's staging area (index)
        index = repo.index
        # stage the modified chapter file
        index.add([idBook + "_" + str(userInputChpaterOrder) + ".txt"])
        # commit the change to the local repository
        index.commit(userInputCommitContent)
        # look up the remote repository
        remote = repo.remote()
        origin = repo.remotes.origin
        # push the local commits to the remote
        origin.push(refspec="master:master")
# step5: write data into version table
res, statusNumber, mes = chapter.getValueByIdBookAndOrder(idBook, userInputChpaterOrder, "id")
idChapter = mes
if res:
res, statusNumber, mes = version.add(idChapter, 0, 0, readerId)
if res:
context['status'] = "success"
context['message'] = '您已經成功更新 《' + userInputBookName + "》的第 " + userInputChpaterOrder + " 章節內容。"
else:
context['status'] = "fail"
context['message'] = str(statusNumber) + " , " + mes
return JsonResponse(context)
context['status'] = "fail"
context['message'] = str(statusNumber) + " , " + mes
except Exception as e:
context['status'] = "fail"
context['message'] = "異常錯誤: " + str(e)
return JsonResponse(context)
```
#### File: content/view/readerGetContent.py
```python
from django.http import HttpResponse
from django.http import JsonResponse
from django.shortcuts import render
from django.conf import settings
from git import Repo
from book.models import book
from chapter.models import chapter
from version.models import version
from reader.models import reader
import paramiko
import sys
import json
import os
import datetime
import subprocess
import re
def chapterContent(request):
context = {}
# get the book id of user input if it is not null
if 'idBook' not in request.GET:
context['status'] = "fail"
context['message'] = "The idBook variable is not in request.GET."
return JsonResponse(context)
inputIdBook = request.GET['idBook']
# get the book name of user input if it is not null
if 'bookName' not in request.GET:
context['status'] = "fail"
context['message'] = "The bookName variable is not in request.GET."
return JsonResponse(context)
bookName = request.GET['bookName']
# get the chapter order of user input if it is not null
if 'chapterOrder' not in request.GET:
context['status'] = "fail"
context['message'] = "The chapterOrder variable is not in request.GET."
return JsonResponse(context)
chapterOrder = request.GET['chapterOrder']
# get the chapter name of user input if it is not null
if 'chapterName' not in request.GET:
context['status'] = "fail"
context['message'] = "The chapterName variable is not in request.GET."
return JsonResponse(context)
chapterName = request.GET['chapterName']
return render(request, 'content/chapterContent.html', context={'idBook': inputIdBook,'chapterOrder': chapterOrder, "chapterName":chapterName, "bookName":bookName})
def readerGetContent(request):
context = {}
idBook = request.GET['idBook'];
chapterOrder = request.GET['chapterOrder'];
locationBook = ""
res, statusNumber, mes = book.getValue(idBook, "location")
if not res:
context['status'] = "fail"
context['message'] = str(statusNumber) + " 錯誤: " + mes
return JsonResponse(context)
locationBook = mes
cmd1 = "cd " + locationBook + "/" + idBook
cmd2= ";cat " + idBook + "_" + chapterOrder + ".txt"
cmd = cmd1 + cmd2
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
content = list(p.stdout.readlines())
contentList = []
for v in content:
contentList.append(v)
context["status"] = "success"
context["message"] = contentList
return JsonResponse(context)
# return HttpResponse(json.dumps(contentList), content_type="application/json")
def showHistory(request):
reload(sys)
sys.setdefaultencoding('utf8')
idBook = request.GET['idBook'];
chapterOrder = request.GET['chapterOrder'];
context = {}
locationBook = ""
res, statusNumber, mes = book.getValue(idBook, "location")
if not res:
context['status'] = "fail"
context['message'] = str(statusNumber) + " 錯誤: " + mes
return JsonResponse(context)
locationBook = mes
# authorList: get authors list, just for html to list
cmd1 = "cd " + locationBook + "/" + idBook
cmd2= ";git log --all --format='%aN' | sort -u"
cmd = cmd1 + cmd2
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
authors = p.stdout.readlines()
authorNames = []
authorIds = []
for authorId in authors:
authorNames.append(reader.getValueById(authorId.rstrip("\n"), "name")[2])
context['authorList'] = authorNames
    for authorId in authors:
        authorIds.append(authorId.rstrip("\n"))
context['authorIds'] = authorIds
# step1: get version and score info of every author from databases
idAuthorsAndVotes = []
res, statusNumber, mes = chapter.getValueByIdBookAndOrder(idBook,chapterOrder ,"id")
idChapterArg = mes
if res:
res,statusNumber,ver = version.getVersionsByIdChapter(idChapterArg)
for v in ver:
idAuthorAndVote = {"idVersion" : "", "voteCount": 0, "score": 0, "idAuthor": ""}
idAuthorAndVote["idVersion"] = v.id
idAuthorAndVote["voteCount"] = int(v.voteCount)
idAuthorAndVote["score"] = float(v.score)
idAuthorAndVote["idAuthor"] = v.idAuthor_id
idAuthorsAndVotes.append(idAuthorAndVote)
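    # rank the versions best-first: score is the primary sort key, vote count breaks ties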
idAuthorsAndVotes.sort(reverse = True, key=lambda x:(x['score'],x['voteCount']))
# lastest content of the author which you want to search
idAuthorToSearch = ""
if "idAuthor" in request.GET :
idAuthorToSearch = request.GET["idAuthor"]
else:
idAuthorToSearch = idAuthorsAndVotes[0]["idAuthor"]
for idAuthorsAndVote in idAuthorsAndVotes:
if idAuthorsAndVote["idAuthor"] == idAuthorToSearch:
context['idVersion'] = idAuthorsAndVote["idVersion"]
# step2: lastest content of the author which you want to search
status,mes = getLastestContent(idBook, chapterOrder, idAuthorToSearch, locationBook)
if status:
context["status"] = "success"
else:
context["status"] = "fail"
context['content'] = mes
return JsonResponse(context)
def getLastestContent(idBook, chapterOrder, idAuthorToSearch, locationBook):
try:
# get latest log sha1Vals of the author which you want to search
cmd1 = "cd " + locationBook + "/" + idBook
cmd2= ";git log --date=format:'%Y-%m-%d %H:%M:%S' --author " + idAuthorToSearch + " -1 --pretty=format:'%H'"
# + " -1 --pretty=format:'%H %an %ad %s'"
cmd = cmd1 + cmd2
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
sha1Vals = list(p.stdout.readlines())
# get content of every latest log sha1Vals for the file which you want to search
sha1ValToSearch = sha1Vals[0]
cmd1 = "cd " + locationBook + "/" + idBook
cmd2= ";git show " + sha1ValToSearch + ":" + idBook + "_" + chapterOrder + ".txt"
cmd = cmd1 + cmd2
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
content = list(p.stdout.readlines())
return True, content
except Exception as e:
return False, str(e)
```
#### File: reader/view/registerView.py
```python
import socket
from time import localtime, strftime
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings as django_settings
from django.core.mail import send_mail
from django.views.decorators import csrf
from time import localtime,strftime
# from django.conf import settings
from tool.Token import *
from tool.tools import *
from reader.models import reader
#this need catch the id
#catch the "name"
def registerReader(request):
if "userName" not in request.POST:
return render(request, 'reader/register.html')
if "password" not in request.POST:
return render(request, 'reader/register.html')
if "email" not in request.POST:
return render(request, 'reader/register.html')
userName = unicode(request.POST['userName'])
password = request.POST['password']
email = unicode(request.POST['email'])
nowTime = strftime("%Y-%m-%d %H:%M:%S", localtime())
print(userName,password,email)
userName = userName.encode('utf8')
try:
res, statusNumber, message = reader.getValueByEmail(email, "status")
if not res:
idVal = createId(20, userName + email)
passwordEncrypted = createId(96,password)
res, statusNumber, message = reader.add(idVal, userName, passwordEncrypted, email, "abuse", nowTime)
if not res:
return render(request, 'reader/registerFail.html', {'message': u"註冊失敗!請重新嘗試或聯絡管理員" + str(statusNumber) + " : " + message})
if sendVerifyEmail(userName, email):
return render(request, 'reader/registerVerificating.html')
else:
return render(request, 'reader/registerFail.html', {'message': u"發送郵件失敗!請更換郵箱地址重新注冊或聯繫管理員!"})
if message == "abuse":
if sendVerifyEmail(userName, email):
return render(request, 'reader/registerVerificating.html')
else:
return render(request, 'reader/registerFail.html', {'message': u"發送郵件失敗!請更換郵箱地址重新注冊或聯繫管理員!"})
else:
return render(request, 'reader/registerFail.html', {'message': u"您填寫的郵箱已經被注冊!請更換郵箱地址重新注冊!"})
except Exception as e:
        print(e)
return render(request, 'reader/registerFail.html', {'message': u"註冊失敗!請重新嘗試或聯絡管理員" + str(e)})
def sendVerifyEmail(userName, email):
try:
token_confirm = Token(django_settings.SECRET_KEY)
token = token_confirm.generate_validate_token(email)
# port 80
ipAddress = django_settings.REGISTER_SERVER_DOMAIN
message = "\n".join([u'{0} , 歡迎加入 WeArt !'.format(userName), u'\n\n請訪問以下鏈接,完成用戶驗證:', '/'.join([ipAddress,'reader/activate',token]), u'\n\n如果您沒有注冊 WeArt,請忽略該郵件!',])
send_mail(
'WeArt注冊身份驗證',
message,
'<EMAIL>',
[email],
fail_silently=False,
)
return True
except Exception as e:
        print(e)
return False
def activeReader(request, token):
token_confirm = Token(django_settings.SECRET_KEY)
try:
emailSignature = token_confirm.confirm_validate_token(token)
except:
#delete this user's Token
emailSignature = token_confirm.remove_validate_token(token)
# users = reader.objects.filter(email=emailSignature)
res, statusNumber, message = reader.deleteObj("email", emailSignature)
if not res:
return render(request, 'reader/registerFail.html', {'message': u"驗證中獲取 reader info 失敗!請重新嘗試或聯絡管理員" + str(statusNumber) + " : " + message})
return render(request, 'reader/registerFail.html', {'message': u'對不起,驗證鏈接已經過期!請重新注冊!'})
try:
readerObj = reader.objects.get(email=emailSignature)
except reader.DoesNotExist:
return render(request, 'reader/registerFail.html', {'message': u"對不起,您所驗證的用戶不存在!請重新注冊!"})
readerObj.status = "allowed"
readerObj.save()
return render(request, 'reader/registerSuccess.html')
```
#### File: WeArt/tool/Token.py
```python
from itsdangerous import URLSafeTimedSerializer as utsr
import base64
class Token():
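    # Thin wrapper around itsdangerous' URLSafeTimedSerializer: issues and verifies time-limited email-confirmation tokens.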
def __init__(self,security_key):
self.security_key = security_key
self.salt = base64.encodestring(security_key)
def generate_validate_token(self,username):
serializer = utsr(self.security_key)
return serializer.dumps(username,self.salt)
def confirm_validate_token(self,token,expiration=600):
serializer = utsr(self.security_key)
return serializer.loads(token,salt=self.salt, max_age=expiration)
def remove_validate_token(self, token):
serializer = utsr(self.security_key)
return serializer.loads(token, salt=self.salt)
```
#### File: WeArt/tool/tools.py
```python
from django.http import HttpResponse
import hashlib
import sys
def createId(length, str):
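    # deterministic ID: SHA-384 hex digest of the input string, truncated to the requested length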
reload(sys)
sys.setdefaultencoding('utf8')
str = str.encode('utf8')
res = hashlib.sha384(str).hexdigest()
return res[0:length]
# def createGitRepository(request):
# pass
```
#### File: WeArt/version/models.py
```python
from django.db import models
from tool.tools import createId
from time import localtime,strftime
from chapter.models import chapter
from reader.models import reader
from book.models import book
# Create your models here.
class version(models.Model):
class Meta:
app_label = "version"
db_table = 'version'
id = models.CharField(max_length=50,primary_key=True, blank=False,null=False)
idChapter_id = models.CharField(max_length=30,blank=False,null=False)
voteCount = models.PositiveIntegerField(blank=False,null=False)
score = models.FloatField(max_length=11,blank=False,null=False)
idAuthor_id = models.CharField(max_length=20,blank=False,null=False)
createTime = models.DateTimeField(max_length=50,blank=False,null=False)
modifyTime = models.DateTimeField(max_length=50,blank=False,null=False)
@classmethod
def getValueById(self, idVersion, returnArg):
try:
obj = self.objects.get(id=idVersion)
if returnArg == "id":
return True, 160400, obj.id
elif returnArg == "idChapter":
return True, 160400, obj.idChapter
elif returnArg == "voteCount":
return True, 160400, obj.voteCount
elif returnArg == "score":
return True, 160400, obj.score
elif returnArg == "idAuthor":
return True, 160400, obj.idAuthor
elif returnArg == "createTime":
return True, 160400, obj.createTime
elif returnArg == "modifyTime":
return True, 160400, obj.modifyTime
elif returnArg == "all":
return True, 160400, obj
else:
return False, 160401, "錯誤 : version 表中不存在該屬性,returnArg錯誤。"
except self.DoesNotExist:
return False, 160402, "錯誤 : version 表不存在該數據。"
except Exception as e:
return False, 160403, str(e)
@classmethod
def getValuesByIdChapter(self, idChapterArg, idAuthorArg, returnArg):
try:
obj = self.objects.get(idChapter_id=idChapterArg, idAuthor_id=idAuthorArg)
if returnArg == "id":
return True, 160000, obj.id
elif returnArg == "idChapter":
return True, 160000, obj.idChapter
elif returnArg == "voteCount":
return True, 160000, obj.voteCount
elif returnArg == "score":
return True, 160000, obj.score
elif returnArg == "idAuthor":
return True, 160000, obj.idAuthor
elif returnArg == "createTime":
return True, 160000, obj.createTime
elif returnArg == "modifyTime":
return True, 160000, obj.modifyTime
else:
return False, 160001, "錯誤 : version 表中不存在該屬性,returnArg錯誤。"
except self.DoesNotExist:
return False, 160002, "錯誤 : version 表不存在該數據。"
except Exception as e:
return False, 160003, str(e)
@classmethod
def getVersionsByIdChapter(self, idChapterArg):
try:
obj = self.objects.filter(idChapter_id=idChapterArg)
return True, 160200, obj
except self.DoesNotExist:
return False, 160202, "錯誤 : version 表不存在該數據。"
except Exception as e:
return False, 160203, str(e)
@classmethod
def add(self, idChapter, voteCount, score, idAuthor):
try:
nowTime = strftime("%Y-%m-%d %H:%M:%S", localtime())
idVal = createId(50, idChapter + idAuthor) # every author writing is a version
# idVal = createId(50, idChapter + idAuthor + nowTime) # every update is a version
obj = self(id=idVal, idChapter_id=idChapter, voteCount = voteCount, score=score, idAuthor_id = idAuthor, createTime=nowTime, modifyTime=nowTime,)
obj.save()
return True,160101, ""
except Exception as e:
return False,160102, str(e)
@classmethod
# def modifyObj(self, idChapter, voteCount, score, idAuthor):
def modifyObj(self, idVersion, argName, value):
try:
obj = self.objects.get(id=idVersion)
            if argName == "voteCount":
                obj.voteCount = value
                obj.save()
            elif argName == "score":
                obj.score = value
                obj.save()
            elif argName == "modifyTime":
                obj.modifyTime = value
                obj.save()
else:
return False, 160301, "modify 讀取 reader 表錯誤"
return True,160300, ""
except self.DoesNotExist:
return False, 160302, "modify 讀取 reader 表錯誤"
except Exception as e:
            print(str(e))
return False,160303, str(e)
```
#### File: voteChapter/view/chapterVersionVote.py
```python
from django.http import JsonResponse
from django.shortcuts import render
# from django.shortcuts import redirect
# from book.models import book
from voteChapter.models import voteChapter
from version.models import version
def getRating(request):
context = {}
if 'idVersion' not in request.GET:
context['status'] = "fail"
context['message'] = "The idVersion variable is not in request.GET."
return JsonResponse(context)
idVersion = request.GET['idVersion']
if idVersion == "":
context['status'] = "fail"
context['message'] = "投票评分为空"
return JsonResponse(context)
try:
# get old rating
res,statusNumber,mes = version.getValueById(idVersion, "score")
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = '錯誤: ' + str(statusNumber) + " , " + mes
return JsonResponse(context)
context['res'] = "success"
context['statusNumber'] = 180600
context['message'] = mes
except Exception as e:
context['res'] = "fail"
context['statusNumber'] = 180601
context['message'] = "異常錯誤: " + str(180501) + " " + str(e)
return JsonResponse(context)
def chapterVersionVote(request):
context = {}
# if "readerId" not in request.session:
# return render(request, 'reader/login.html')
if "readerId" not in request.session:
# lastUrl = request.GET["lastUrl"]
context['res'] = "fail1"
context['message'] = "/reader/login/"
# print(context)
return JsonResponse(context)
idReader = request.session["readerId"]
if 'idVersion' not in request.GET:
context['res'] = "fail"
context['message'] = "The idVersion variable is not in request.GET."
return JsonResponse(context)
idVersion = request.GET['idVersion']
if idVersion == "":
context['status'] = "fail"
context['message'] = "投票评分为空"
return JsonResponse(context)
if 'rating' not in request.GET:
context['status'] = "fail"
context['message'] = "The rating variable is not in request.GET."
return JsonResponse(context)
ratingUser = request.GET['rating']
if ratingUser == "":
context['status'] = "fail"
context['message'] = "投票评分为空"
return JsonResponse(context)
if 'chapterFileName' not in request.GET:
context['status'] = "fail"
context['message'] = "The idChapter variable is not in request.GET."
return JsonResponse(context)
chapterFileNameUser = request.GET['chapterFileName']
if chapterFileNameUser == "":
context['status'] = "fail"
context['message'] = "章节编号为空"
return JsonResponse(context)
try:
# write data(every vote history) to voteChapter
res,statusNumber,mes = voteChapter.add(idReader, idVersion, float(ratingUser))
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = '錯誤: ' + str(statusNumber) + " , " + mes
return JsonResponse(context)
# get old rating
res,statusNumber,mes = version.getValueById(idVersion, "all")
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = '錯誤: ' + str(statusNumber) + " , " + mes
return JsonResponse(context)
# check is or not this chapter
# modify the voteCount and score into database
voteCountOld = int(mes.voteCount)
voteCountNew = voteCountOld + 1
print(float(mes.score), voteCountOld , float(ratingUser), voteCountNew)
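        # running average: new_score = (old_score * old_votes + new_rating) / (old_votes + 1)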
scoreNew = (float(mes.score)* voteCountOld + float(ratingUser)) / voteCountNew
res, statusNumber, mes = version.modifyObj(idVersion, "voteCount", voteCountNew)
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = '錯誤: ' + str(statusNumber) + " , " + mes
return JsonResponse(context)
res, statusNumber, mes = version.modifyObj(idVersion, "score", scoreNew)
if not res:
context['res'] = "fail"
context['statusNumber'] = statusNumber
context['message'] = '錯誤: ' + str(statusNumber) + " , " + mes
return JsonResponse(context)
context['res'] = "success"
context['statusNumber'] = 180500
context['message'] = str(statusNumber) + " : " + mes
except Exception as e:
context['res'] = "fail"
context['message'] = "異常錯誤: " + str(180501) + " " + str(e)
return JsonResponse(context)
``` |
{
"source": "0lionelzhang0/chat_plays_go",
"score": 3
} |
#### File: 0lionelzhang0/chat_plays_go/lionel_bot.py
```python
import requests
import json
import socket, string
import time
import re
import threading
import chat_plays_go as cpg
import queue
import time
CHANNEL = "t4rquin"
USERNAME = ""  # your bot's nick (fill in)
OAUTH = ""  # your OAuth token (fill in)
class Chatbot:
def __init__(self):
# Some basic variables used to configure the bot
self.server = "irc.chat.twitch.tv" # Server
self.channel = "#" + CHANNEL # Channel
self.botnick = USERNAME
        self.password = OAUTH  # assumption: the OAuth token is what gets sent as the IRC PASS value
self.connected = False
self.start = time.time()
self.streamers = []
self.connect()
self.last_ping = time.time()
self.cpg_flag = 1
if self.cpg_flag:
self.cpgUi = cpg.ChatPlaysGoUi()
# self.joinchan()
def connect(self):
self.ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ircsock.connect((self.server, 6667)) # Here we connect to the server using the port 6667
self.send_data("PASS " + self.password)
self.send_data("NICK "+ self.botnick)
def send_data(self,msg):
msg = msg + "\r\n"
self.ircsock.send(msg.encode())
def check_buffer(self):
buffer = self.ircsock.recv(2048)
buffer = buffer.decode()
buffer = buffer.rstrip("\r\n")
msg = buffer.split()
if not self.connected:
if buffer.endswith("End of /NAMES list"):
print("Connected to channel %s" % self.channel)
self.connected = True
if not msg:
print("ERROR in channel %s" % self.channel)
self.connected = False
self.connect()
# self.joinchan()
elif msg[0] == "PING":
self.send_data("PONG %s" % msg[1])
print("PONG %s" % self.channel)
def ping(self): # respond to server Pings.
self.send_data("PONG :pingis\n")
def sendmsg(self, msg): # sends messages to the channel.
self.send_data("PRIVMSG "+ self.channel +" :"+ msg +"\n")
def joinchan(self, chan): # join channel(s).
self.send_data("JOIN "+ "#" + chan +"\n")
print("Joining channel: " + chan)
def partchan(self, chan): # part channel(s).
self.send_data("PART "+ "#" + chan +"\n")
def whisper(self, msg, user): # whisper a user
self.send_data("PRIVMSG " + user + ' :' + msg.strip('\n\r') + '\n')
def getUserAndMessage(self, buffer):
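        # Twitch IRC PRIVMSG lines look like ":nick!nick@nick.tmi.twitch.tv PRIVMSG #chan :text";
        # pull the username from the host prefix and rejoin the message body (dropping the leading ':').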
msg = buffer.split()
user = re.findall("(?<=@)(.*)(?=.tmi.twitch)", msg[0])
message = (" ").join(msg[3:])
return user[0], message[1:]
def parseMessage(self, buffer):
user, msg = self.getUserAndMessage(buffer)
print(user + "::" + msg)
if self.cpg_flag:
list_coords = self.cpgUi.getCoordsFromMsg(msg)
if list_coords:
self.cpgUi.addToHighlightQueue(list_coords)
if self.cpgUi.isCoordinate(msg) or self.cpgUi.isCommand(msg):
self.cpgUi.addToQueue(user, msg)
def bot_main(self):
        self.joinchan(CHANNEL)  # self.channel already includes '#', which joinchan prepends again
# start infinite loop to continually check for and receive new info from server
while 1:
if time.time() - self.last_ping > 500:
print("sending ping")
self.send_data("PING :tmi.twitch.tv")
self.last_ping = time.time()
try:
buffer = self.ircsock.recv(2048)
buffer = buffer.decode()
buffer = buffer.rstrip("\r\n")
msg = buffer.split()
except:
continue
try:
self.parseMessage(buffer)
except:
pass
if buffer.endswith(" :End of /NAMES list"):
buffer.rstrip(" :End of /NAMES list")
msg = buffer.split()
print("Connected to channel %s" % msg[-5])
elif msg[0] == "PING":
self.send_data("PONG %s" % msg[1])
print("PONG %s" % msg[1])
self.last_ping = time.time()
#print(time.time() - self.start)
def sendmsg_main(self):
time.sleep(1)
while (1):
msg = self.cpgUi.getChatbotMsgFromQueue()
if msg:
self.sendmsg(msg)
time.sleep(0.2)
def time_elapsed(self):
return time.time() - self.start
def run_bot(self):
if self.cpg_flag:
cb_sendmsg_thread = threading.Thread(target=cb.sendmsg_main)
cb_sendmsg_thread.start()
self.bot_main()
if __name__ == "__main__":
time.sleep(1)
cb = Chatbot()
cb_thread = threading.Thread(target=cb.run_bot)
cb_thread.start()
if cb.cpg_flag:
cb.cpgUi.run()
``` |
{
"source": "0liu/Gradient-Free-Optimizers",
"score": 3
} |
#### File: Gradient-Free-Optimizers/gradient_free_optimizers/memory.py
```python
import numpy as np
import pandas as pd
class Memory:
def __init__(self, warm_start, conv, dict_proxy=None):
self.memory_dict = {}
self.memory_dict_new = {}
self.conv = conv
if dict_proxy is not None:
self.memory_dict = dict_proxy
if warm_start is None:
return
if not isinstance(warm_start, pd.DataFrame):
print("Memory warm start must be of type pandas.DataFrame")
print("Optimization will continue without memory warm start")
return
if len(warm_start) == 0:
print("Memory warm start has no values in current search space")
print("Optimization will continue without memory warm start")
return
self.memory_dict.update(self.conv.dataframe2memory_dict(warm_start))
def memory(self, objective_function):
def wrapper(para):
value = self.conv.para2value(para)
position = self.conv.value2position(value)
pos_tuple = tuple(position)
if pos_tuple in self.memory_dict:
return self.memory_dict[pos_tuple]
else:
score = objective_function(para)
self.memory_dict[pos_tuple] = score
self.memory_dict_new[pos_tuple] = score
return score
return wrapper
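    # Hypothetical usage sketch: wrap an objective function so repeated
    # positions are served from the cache instead of being re-evaluated:
    #   mem = Memory(warm_start=None, conv=conv)
    #   cached_objective = mem.memory(objective_function)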
```
#### File: tests/test_optimizers/test_max_time.py
```python
import time
import pytest
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
from ._parametrize import optimizers
def objective_function(para):
score = -para["x1"] * para["x1"]
return score
search_space = {
"x1": np.arange(0, 10, 1),
}
@pytest.mark.parametrize(*optimizers)
def test_max_time_0(Optimizer):
c_time1 = time.time()
opt = Optimizer(search_space)
opt.search(objective_function, n_iter=1000000, max_time=0.1)
diff_time1 = time.time() - c_time1
assert diff_time1 < 1
@pytest.mark.parametrize(*optimizers)
def test_max_time_1(Optimizer):
c_time1 = time.time()
opt = Optimizer(search_space)
opt.search(objective_function, n_iter=1000000, max_time=1)
diff_time1 = time.time() - c_time1
assert 0.3 < diff_time1 < 2
``` |
{
"source": "0LL13/advent",
"score": 3
} |
#### File: advent/15/5_naughty.py
```python
def first_set_of_rules():
data = get_data()
vowel_strings = []
for line in data.split('\n'):
if has_3_vowels(line):
vowel_strings.append(line.strip())
double_strings = []
for line in vowel_strings:
if has_double(line):
double_strings.append(line)
nice_strings = []
for line in double_strings:
if has_no_naughty(line):
nice_strings.append(line)
for i, line in enumerate(nice_strings):
print(i, line)
return len(nice_strings)
def has_3_vowels(line) -> bool:
counter = 0
for ch in line:
if ch in ['a', 'e', 'i', 'o', 'u']:
counter += 1
if counter == 3:
return True
return False
def has_double(line) -> bool:
    for i, ch in enumerate(line):
        if i < len(line) - 1:
            if ch == line[i + 1]:
                return True
    return False
def has_no_naughty(line) -> bool:
for i, ch in enumerate(line):
if i >= 1:
if line[i-1] + ch in ['ab', 'cd', 'pq', 'xy']:
return False
return True
def get_data():
with open('./input/5_input.txt', 'r') as fin:
data = fin.read()
return data
def second_set_of_rules():
data = 'qjhvhtzxzqqjkmpb\nxxyxx\nuurcxstgmygtbstg\nieodomkazucvgmuy' # noqa
data = get_data()
double_pairs = list()
for line in data.split('\n'):
if has_double_pair(line):
double_pairs.append(line.strip())
sandwiches = []
for line in double_pairs:
if has_sandwich(line):
sandwiches.append(line)
for i, line in enumerate(sandwiches):
print(i, line)
print(len(sandwiches))
def has_double_pair(line) -> bool:
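    # rule: some pair of letters appears at least twice in the string without the two occurrences overlapping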
pairs = []
for i, ch in enumerate(line):
        if i > 0:
pair = line[i-1] + ch
pairs.append(pair)
check = pairs[:]
for i, pair in enumerate(pairs):
if i < len(pairs):
for j, el in enumerate(check):
if pair == el and j > i+1:
# print(i, line, pairs, pair)
return True
return False
def has_sandwich(line) -> bool:
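    # rule: a letter repeats with exactly one letter between them (like "xyx" or even "aaa")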
for i, ch in enumerate(line):
if i >= 2:
            if line[i-2] == ch:
return True
return False
if __name__ == '__main__':
second_set_of_rules()
```
#### File: advent/15/7_assembly.py
```python
from typing import Dict
INPUT = """123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i"""
with open('15/input/7_input.txt', 'r') as fin:
INPUT = fin.read()
def and_gate(sig_left: int, sig_right: int) -> int:
sig = sig_left & sig_right
if sig < 0:
sig = 65536 + sig
return sig
def or_gate(sig_left: int, sig_right: int) -> int:
sig = sig_left | sig_right
if sig < 0:
sig = 65536 + sig
return sig
def lshift(sig_left: int, shift: int) -> int:
sig = sig_left << shift
if sig < 0:
sig = 65536 + sig
return sig
def rshift(sig_left: int, shift: int) -> int:
sig = sig_left >> shift
if sig < 0:
sig = 65536 + sig
return sig
def bitwise_not(sig_: int) -> int:
sig = ~sig_
if sig < 0:
sig = 65536 + sig
return sig
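# Sanity check for the 16-bit fold used above: ~123 is -124 in Python, and
# 65536 + (-124) == 65412, so bitwise_not(123) == 65412 as the puzzle expects.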
def is_known(wires, id_) -> bool:
for k, v in wires.items():
if k == id_:
return True
return False
def is_int(sig) -> bool:
try:
if isinstance(int(sig), int):
return True
except ValueError:
pass
return False
def assign_sig(wires: dict, sig: str, id_res: str) -> Dict[str, int]:
wires[id_res] = sig
return wires
def get_sig(wires: dict, id_: str) -> int:
for k, v in wires.items():
if k == id_:
return v
raise Exception
def return_sig(wires: dict, id_) -> int:
if is_int(id_):
return int(id_)
else:
return get_sig(wires, id_)
def get_wires(wires, line):
fields = line.split(' ')
id_res = fields[-1]
if len(fields) == 3:
wires_ = assign_at_3(wires, fields, id_res)
elif len(fields) == 4:
wires_ = assign_at_4(wires, fields, id_res)
elif len(fields) == 5:
wires_ = assign_at_5(wires, fields, id_res)
else:
wires_ = wires
# print("Nothing to declare!!!!!!!!!!!!!!!!!!!!!!!!")
# sys.exit()
return wires_
def assign_at_3(wires, fields, id_res):
sig_ = fields[0]
if is_int(sig_):
wires_ = assign_sig(wires, int(sig_), id_res)
elif is_known(wires, sig_):
sig = get_sig(wires, sig_)
wires_ = assign_sig(wires, sig, id_res)
else:
wires_ = wires
return wires_
def assign_at_4(wires, fields, id_res):
sig_ = fields[1]
if is_known(wires, sig_):
sig = get_sig(wires, sig_)
sig = bitwise_not(sig)
wires_ = assign_sig(wires, sig, id_res)
else:
wires_ = wires
return wires_
def assign_at_5(wires, fields, id_res):
id_left, command, id_right, sep, id_res = fields
if command == 'LSHIFT' and is_known(wires, id_left):
shift = int(id_right)
sig_left = get_sig(wires, id_left)
sig = lshift(sig_left, shift)
wires_ = assign_sig(wires, sig, id_res)
elif command == 'RSHIFT' and is_known(wires, id_left):
shift = int(id_right)
if id_left == 'b':
sig_left = 956
else:
sig_left = get_sig(wires, id_left)
sig = rshift(sig_left, shift)
wires_ = assign_sig(wires, sig, id_res)
elif command == 'AND' and id_left == '1' and is_known(wires, id_right):
sig_right = get_sig(wires, id_right)
if sig_right % 2 == 0:
wires_ = assign_sig(wires, 0, id_res)
else:
wires_ = assign_sig(wires, 1, id_res)
elif command == 'AND' and is_known(wires, id_left) and is_known(wires, id_right): # noqa
if id_left == 'b':
sig_left = 956
else:
sig_left = get_sig(wires, id_left)
sig_right = get_sig(wires, id_right)
sig = and_gate(sig_left, sig_right)
wires_ = assign_sig(wires, sig, id_res)
elif command == 'OR' and is_known(wires, id_left) and is_known(wires, id_right): # noqa
if id_left == 'b':
sig_left = 956
else:
sig_left = get_sig(wires, id_left)
sig_right = get_sig(wires, id_right)
sig = or_gate(sig_left, sig_right)
wires_ = assign_sig(wires, sig, id_res)
else:
wires_ = wires
return wires_
if __name__ == '__main__':
wires: Dict[str, int] = {}
INPUT_ = [inp for inp in INPUT.split('\n') if inp]
len_INPUT = len(INPUT_)
    i = 0
    # sweep over the instructions repeatedly; lines whose input wires are not
    # known yet simply do nothing and get retried on a later pass
    while INPUT_:
        if i >= len_INPUT:
            i = 0
        line = INPUT_[i]
        wires = get_wires(wires, line)
        if len(wires.keys()) == len_INPUT:
            break
        i += 1
for wire, sig in wires.items():
print(wire, sig)
```
#### File: advent/20/day_4.py
```python
import re
from typing import List
INPUT_1 = """ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in"""
INPUT_2 = """pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719"""
INPUT_invalid = """eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007"""
INPUT_valid = """pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719"""
with open('./20/input/day_4.txt', 'r') as fin:
INPUT = fin.read()
def one_pp_one_line(INPUT):
lines = INPUT.splitlines()
passports = []
tmp_line = ''
for i, line in enumerate(lines):
# print(i, line)
if line == '':
passports.append(tmp_line.strip())
tmp_line = ''
else:
tmp_line = tmp_line + ' ' + line
passports.append(tmp_line.strip())
return passports
def check_passport(passport: List[str]) -> bool:
if len(passport) < 7:
return False
elif len(passport) == 7:
for item in passport:
if 'cid' in item.split(':'):
return False
for item in passport:
k, v = item.split(':')
if k == 'ecl':
if v not in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
return False
elif k == 'byr':
if not 1920 <= int(v) <= 2002:
return False
elif k == 'iyr':
if not 2010 <= int(v) <= 2020:
return False
elif k == 'eyr':
if not 2020 <= int(v) <= 2030:
return False
elif k == 'hcl':
if not re.match(r'^#[0-9a-f]{6}$', v):
return False
elif k == 'pid':
if not re.match(r'^\d{9}$', v) or len(v) != 9:
return False
elif k == 'hgt':
if not re.match(r'1[5-8]\dcm$|19[0-3]cm$|59in$|6\din$|7[0-6]in$', v): # noqa
return False
return True
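# Illustrative spot-checks (hypothetical field values, not from the input):
#   check_passport(["ecl:brn", "byr:1980", "iyr:2015", "eyr:2025",
#                   "hcl:#123abc", "pid:000000001", "hgt:74in"])   # -> True
#   the same passport with "hgt:190in" fails the height regex      # -> False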
if __name__ == '__main__':
pp = one_pp_one_line(INPUT)
print('# of passports:', len(pp))
count = 0
for i, passport in enumerate(sorted(pp)):
valid = True
fields = passport.split(' ')
print(i, fields, len(fields), end=' ')
if len(fields) == 8:
count += 1
print('count:', count)
elif len(fields) == 7:
for field in fields:
if 'cid' in field.split(':'):
valid = False
if valid:
count += 1
print('count:', count)
else:
print()
print('part 1:', count)
count = 0
for i, passport in enumerate(pp):
if check_passport(passport.split(' ')):
count += 1
print()
print('part 2:', count)
``` |
{
"source": "0LL13/open_speech",
"score": 3
} |
#### File: 0LL13/open_speech/agenda_and_speaker_list.py
```python
import bs4
import os
import re
import sys
from parse_data import create_parser
from parse_data import clean_tag_text
from settings import (
PROTOCOL_DIR,
PROTOCOL_FILE_TEMPLATE,
)
# RE used by find_agenda_start() that actually marks the END of the agenda!
BEGIN_RE = re.compile(r'Beginn:|Beginn \d\d[:\.]\d\d|Seite 3427')
# RE for attachments
ATTACHMENT_RE = re.compile(r'Anlage \w*')
# RE for either only a number or a number and agenda item
RE_AGENDA_ITEM = re.compile(r'^\d+[^\/^.^:]*\w+$')
# RE for member of parliament (name + party) or minister
NAME_DEF = r'[^Gesetz][\w\-‑.’\' ]+'
PARTY_RE = r'\([ACDEfFGINPRSTUÜ]*\)|\(fraktionslos\)'
PAGE_NO_RE = r'\d*'
MOP_RE = re.compile(NAME_DEF + PARTY_RE + PAGE_NO_RE)
MINISTER_RE = re.compile('((?:geschäftsführender? )?minister(?:in)?) (.+)', re.I) # noqa
# RE for results and absent
RESULT_RE = re.compile(r'Ergebnis[\.]*', re.I)
ABSENT_RE = re.compile(r'Entschuldigt[\w]*', re.I)
# parties
PARTIES = ["(AfD)", "(CDU)", "(FDP)", "(GRÜNE)", "(PIRATEN)", "(SPD)",
"(fraktionslos)"]
def find_agenda_start(soup: bs4.BeautifulSoup) -> bs4.element.Tag:
"""
Special cases:
- "Regierungserklärung" (16/7)
- no speeches (16/10)
- only one TOP without number (17/138)
- only one TOP with number (17/139)
"""
agenda_end_index = find_agenda_end(soup)
paragraphs = soup.find_all('p')[:agenda_end_index]
for i, p in enumerate(paragraphs):
if p.text.split() and p.text.split()[0] == "1":
agenda_start = p
return agenda_start
for i, p in enumerate(paragraphs):
if "Regierungserklärung" in p.text:
agenda_start = p
return agenda_start
# if no agenda start was found, try without number:
agenda_start = find_agenda_start_wo_number(paragraphs)
return agenda_start
def find_agenda_end(soup: bs4.BeautifulSoup) -> int:
paragraphs = soup.find_all('p')
for i, p in enumerate(paragraphs):
if "Anlage" in p.text and "siehe Anlage" not in p.text:
return i
elif "Entschuldigt" in p.text:
return i
elif "Beginn:" in p.text:
return i
print("Did not find proper ending for agenda, return default index: 30")
return 30
def find_agenda_start_wo_number(paragraphs: list) -> bs4.element.Tag: # noqa
classes_w_agenda_items = ["MsoToc9", "MsoToc7", "MsoToc1"]
for p in paragraphs:
try:
class_ = p['class'][0]
if class_ in classes_w_agenda_items:
agenda_start = p
# print("found agenda start wo number:", agenda_start)
return agenda_start
except KeyError:
pass
print("Did not find agenda start wo number")
continue_()
def is_numbered_agenda_item(text: str) -> bool:
CHARS_TO_REMOVE = [':', '-', '!', '?', '„', '“', ',', '–', '.']
items = text.split()
try:
agenda_no = items[0]
if agenda_no.isnumeric():
if int(agenda_no) <= 99:
try:
topic = items[1]
# https://stackoverflow.com/a/10017169/6597765
rx = '[' + re.escape(''.join(CHARS_TO_REMOVE)) + ']'
topic = re.sub(rx, '', topic)
if topic.isalpha():
return True
elif topic.isnumeric():
topic = items[2]
if topic.isalpha():
return True
except (IndexError):
pass
except IndexError:
pass
return False
def process_protocol(html_filename: str) -> dict:
# Parse file
soup = create_parser(html_filename)
agenda = parse_agenda(soup)
if 1:
print("process protocol")
print()
for key, val in agenda.items():
print(f"{key}:\n{val}")
print()
return agenda
def mk_html_filename(period=None, index=None) -> str:
if period is None:
period = int(sys.argv[1])
if index is None:
index = int(sys.argv[2])
html_filename = os.path.join(
PROTOCOL_DIR,
PROTOCOL_FILE_TEMPLATE % (period, index, 'html'))
return html_filename
def parse_agenda(soup: bs4.BeautifulSoup) -> dict:
session_agenda = {}
speakers = []
actual_key = None
found_agenda_item = False
agenda_start = find_agenda_start(soup)
# print("agenda_start:", agenda_start)
agenda_start_text = clean_tag_text(agenda_start)
if agenda_start:
if 0:
print("item:", agenda_start_text)
print("agenda_start:", agenda_start)
actual_key = agenda_start_text
found_agenda_item = True
session_agenda[actual_key] = speakers
else:
print("no agenda found!")
continue_()
return None
for tag in agenda_start.find_all_next('p'):
if 0:
print("agenda:")
print(session_agenda)
continue_()
tag_text = clean_tag_text(tag)
# print("tag_text:", tag_text)
if not tag_text:
continue
match_speaker_mop = MOP_RE.match(tag_text)
match_result = RESULT_RE.match(tag_text)
match_absent = ABSENT_RE.match(tag_text)
match_begin = BEGIN_RE.match(tag_text)
match_attachment = ATTACHMENT_RE.match(tag_text)
if is_numbered_agenda_item(tag_text):
found_agenda_item = True
if actual_key is not None:
actual_key = tag_text
speakers = []
session_agenda[actual_key] = speakers
else:
actual_key = tag_text
session_agenda[actual_key] = speakers
elif found_agenda_item and "inister" in tag_text:
if "konferenz" not in tag_text and "Einzelplan" not in tag_text:
# print("tag_text:", tag_text)
speakers.append(tag_text)
elif found_agenda_item and match_speaker_mop:
# print("tag_text:", tag_text)
if "Abgeordneten" not in tag_text and "Einzelplan" not in tag_text: # noqa
speakers.append(tag_text)
elif found_agenda_item and match_result:
pass
elif match_absent:
break
elif match_begin:
break
elif match_attachment:
break
elif found_agenda_item and tag_text.split()[-1].isnumeric():
words = tag_text.split()[:-1]
if words and words[-1] in PARTIES:
# print("mop that wasnt caught by RE:", tag_text)
speakers.append(tag_text)
if 0:
print("parse agenda")
print()
for key, val in session_agenda.items():
print(f"{key}:\n{val}")
print()
return session_agenda
def parse_agenda_wo_numbers(soup: bs4.BeautifulSoup) -> dict:
session_agenda = {}
speakers = []
    agenda_start = find_agenda_start_wo_number(soup.find_all('p'))  # helper expects a list of <p> tags
agenda_start_text = clean_tag_text(agenda_start)
only_item = agenda_start_text # no numbering means that there is only one item # noqa
for tag in agenda_start.find_all_next('p'):
# print(session_agenda)
tag_text = clean_tag_text(tag)
# print("tag_text:", tag_text)
if not tag_text:
continue
match_speaker_mop = MOP_RE.match(tag_text)
match_result = RESULT_RE.match(tag_text)
match_begin = BEGIN_RE.match(tag_text)
match_absent = ABSENT_RE.match(tag_text)
match_attachment = ATTACHMENT_RE.match(tag_text)
if "inister" in tag_text and "konferenz" not in tag_text:
speakers.append(tag_text)
elif match_speaker_mop and "Abgeordneten" not in tag_text: # noqa
speakers.append(tag_text)
elif match_result:
pass
elif match_absent:
break
elif match_begin:
break
elif match_attachment:
# no more speeches (except written statements)
break
session_agenda[only_item] = speakers
if 0:
for key, val in session_agenda.items():
print(f"{key}:\n{val}")
print()
return session_agenda
def mk_agenda_list_w_speakers(period: str, index: str) -> dict:
html_filename = mk_html_filename(period, index)
agenda = process_protocol(html_filename)
return agenda
def continue_() -> None:
inp = input("")
if inp == "n":
sys.exit()
if __name__ == "__main__":
html_filename = mk_html_filename()
process_protocol(html_filename)
```
#### File: 0LL13/open_speech/sentiment_speech_analysis_w_bert.py
```python
import json
import os
import sys
import time
from load_data import load_period_data
from settings import (
PROTOCOL_FILE_TEMPLATE,
NLTK_DIR,
BERT_DIR,
)
def load_json_file_nltk(period: str, index: str) -> dict:
"""
Marc's loading function using a template.
"""
json_filename = os.path.join(
NLTK_DIR,
PROTOCOL_FILE_TEMPLATE % (period, index, 'json'))
with open(json_filename) as json_file:
session = json.load(json_file)
return session
def load_json_file_bert(period: str, index: str) -> dict:
"""
Marc's loading function using a template.
"""
json_filename = os.path.join(
BERT_DIR,
PROTOCOL_FILE_TEMPLATE % (period, index, 'json'))
with open(json_filename) as json_file:
session = json.load(json_file)
return session
def save_json_protocol_bert(period, index, protocol):
""" Save protocol data to JSON file for period and index.
"""
filename = os.path.join(
BERT_DIR,
PROTOCOL_FILE_TEMPLATE % (period, index, 'json'))
json.dump(protocol, open(filename, 'w', encoding='utf-8'))
def show_session(session: dict) -> None:
for key, val in session.items():
if key == "content":
for el in val:
for item in el[:5]:
print(item)
for i, sent in enumerate(el[5]):
print(i, sent)
continue_()
print(key)
print(val)
def walk_through_session_w_sentiments(session: dict) -> None:
"""
After adding sentiments.
"""
print("Walking through speeches, sentences marked with sentiment.\n")
for key, val in session.items():
if key == "content":
print()
for speech in val:
print("Datum:", speech[0])
print("Protokollnr.:", speech[1])
print("Tagesordnungspunkt:", speech[2])
print("Redner:", speech[3])
print("Partei/Ministerium:", speech[4])
print()
text = speech[5]
sentiment_res = speech[6]
for i, sent in enumerate(text):
print(i, sent)
sentiment = sentiment_res[i]
if sentiment == "positive":
print("+++ positiv +++")
elif sentiment == "negative":
print("--- negativ ---")
print()
pos = speech[7]
neg = speech[8]
neutral = speech[9]
print(f"positive: {pos}, negative: {neg}, neutral: {neutral}")
continue_()
def collect_speech_sentiments(session: dict) -> dict:
start_time = time.time()
print("start time:", time.ctime(start_time))
from germansentiment import SentimentModel
model = SentimentModel()
session_with_sentiments = {}
speeches_w_sentiments = []
for key, val in session.items():
if key == "content":
for speech in val:
text = speech[5]
sentiment_res = model.predict_sentiment(text)
pos = sentiment_res.count("positive")
neg = sentiment_res.count("negative")
neutral = sentiment_res.count("neutral")
speech.append(sentiment_res)
speech.append(pos)
speech.append(neg)
speech.append(neutral)
speeches_w_sentiments.append(speech)
else:
print(f"{key}: {val}")
session_with_sentiments[key] = val
session_with_sentiments["content"] = speeches_w_sentiments
if 0:
for speech in speeches_w_sentiments:
print(speech)
continue_()
if 0:
end_time = time.time()
print("end time:", time.ctime(end_time))
exec_time = end_time - start_time
minutes = exec_time // 60
seconds = exec_time % 60
print(f"total exec time: {minutes:.0f} minutes, {seconds:.0f} seconds")
return session_with_sentiments
def process_whole_period_bert(period) -> None:
file_data = load_period_data(period)
for filename, protocol in sorted(file_data.items()):
if os.path.splitext(filename)[1] != '.html':
continue
index = protocol['index']
file_name = os.path.splitext(filename)[0] + '.json'
file_name = file_name.split('/')[-1]
file_name = os.path.join(NLTK_DIR, file_name)
print('-' * 72)
print(f'Processing {period}-{index}: {file_name}')
session = load_json_file_nltk(period, index)
session_w_sentiments = collect_speech_sentiments(session)
save_json_protocol_bert(period, index, session_w_sentiments)
def continue_() -> None:
inp = input("")
if inp == "n":
sys.exit()
def check_bert():
period = int(sys.argv[1])
if len(sys.argv) > 2:
index = int(sys.argv[2])
session_w_sentiments = load_json_file_bert(period, index)
walk_through_session_w_sentiments(session_w_sentiments)
else:
print("Needs period and index.")
def main():
if 1:
check_bert()
period = int(sys.argv[1])
if len(sys.argv) > 2:
index = int(sys.argv[2])
session = load_json_file_nltk(period, index)
show_session(session)
if 0: # sentiment collection
session_w_sentiments = collect_speech_sentiments(session)
save_json_protocol_bert(period, index, session_w_sentiments)
elif 0:
process_whole_period_bert(period)
if __name__ == "__main__":
main()
```
#### File: 0LL13/open_speech/speaker_names.py
```python
import json
import feed_opensearch
QUERY = json.dumps(
{
'size': 10000,
'query': {
'match_all': {}
},
'collapse': {
'field' : 'speaker_name.keyword'
},
}
)
def find_all_speaker_names(os_index_name=feed_opensearch.INDEX_NAME):
with feed_opensearch.opensearch_client() as client:
result = client.search(
QUERY,
index=os_index_name,
)
#print (f'result={result!r}')
names = [
hit['_source']
for hit in result['hits']['hits']
]
return sorted(names, key=lambda x: x['speaker_name'])
if __name__ == '__main__':
speakers = find_all_speaker_names()
for data in speakers:
        print(f'{data["speaker_name"]!r} ({data["speaker_party"]!r}) '
              f'({data["protocol_period"]}-{data["protocol_index"]}): {data!r}')
``` |
{
"source": "0LL13/person",
"score": 2
} |
#### File: person/personroles/mop_role.py
```python
import os
import sys
from dataclasses import asdict, dataclass, field
from typing import List, Optional
PACKAGE_PARENT = ".."
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
) # isort:skip # noqa # pylint: disable=wrong-import-position
sys.path.append(
os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT))
) # isort: skip # noqa # pylint: disable=wrong-import-position
from personroles.politician_role import Politician # type: ignore # noqa
from personroles.resources.helpers import AttrDisplay # type: ignore # noqa
from personroles.resources.helpers import NotInRange # type: ignore # noqa
from personroles.resources.mop_tinyDB import Mops_TinyDB # type: ignore # noqa
@dataclass
class _MoP_default:
mop_id: int = field(default=0)
electoral_ward: str = field(default="ew")
ward_no: Optional[int] = field(default=None)
voter_count: Optional[int] = field(default=None)
parl_pres: bool = field(default=False)
parl_vicePres: bool = field(default=False)
parliament_entry: str = field(default="unknown") # date string: "11.3.2015" # noqa
parliament_exit: str = field(default="unknown") # dto.
speeches: List[str] = field(
default_factory=lambda: []
) # identifiers for speeches # noqa
reactions: List[str] = field(
default_factory=lambda: []
) # identifiers for reactions
def renamed_wards(self):
"""Some electoral wards have been renamed in the Wikipedia."""
wards = {
"<NAME>": "<NAME>",
"Hochsauerlandkreis II – Soest III": "Hochsauerlandkreis II",
"<NAME>": "Aachen IV"
if self.last_name in ["Wirtz", "Weidenhaupt"]
else "<NAME>",
}
if self.electoral_ward in wards.keys():
self.electoral_ward = wards[self.electoral_ward]
def scrape_wiki_for_ward(self) -> None:
"""Find tables in Wikipedia containing informations about electoral wards.""" # noqa
import requests
from bs4 import BeautifulSoup # type: ignore
URL_base = "https://de.wikipedia.org/wiki/Landtagswahlkreis_{}"
URL = URL_base.format(self.electoral_ward)
req = requests.get(URL)
bsObj = BeautifulSoup(req.text, "lxml")
table = bsObj.find(class_="infobox float-right toptextcells")
self.scrape_wiki_table_for_ward(table)
def scrape_wiki_table_for_ward(self, table) -> None:
for td in table.find_all("td"):
if "Wahlkreisnummer" in td.text:
ward_no = td.find_next().text.strip()
ward_no = ward_no.split(" ")[0]
self.ward_no = int(ward_no)
elif "Wahlberechtigte" in td.text:
voter_count = td.find_next().text.strip()
voter_count = self.fix_voter_count(voter_count)
self.voter_count = int(voter_count)
def fix_voter_count(self, voter_count):
if voter_count[-1] == "]":
voter_count = voter_count[:-3]
if " " in voter_count:
voter_count = "".join(voter_count.split(" "))
else:
voter_count = "".join(voter_count.split("."))
return voter_count
@dataclass
class _MoP_base:
legislature: str
state: str
@dataclass
class MoP(_MoP_default, Politician, _MoP_base, AttrDisplay):
"""
Module mop_role.py covers the role as member of parliament.
The role integrates the role of politician and adds a federal state (like
"NRW" or "BY") and legislature (legislative term) as obligatory
informations to define the role. More informations like speeches held or
offices (like president) filled can be added. Call politician's
__post_init__ to initialize wards and voters.
"""
def __post_init__(self):
"""
Check if legislature is correct for NRW and add legislature into the
mop's list of memberships (in case more than one term is spent in
parliament.
"""
if int(self.legislature) not in range(14, 18):
raise NotInRange("Number for legislature not in range")
Politician.__post_init__(self)
self.change_ward()
def change_ward(self, ward=None):
if ward:
self.electoral_ward = ward
if self.electoral_ward not in ["ew", "Landesliste"]:
self.renamed_wards()
self.scrape_wiki_for_ward()
else:
self.electoral_ward = "ew"
if __name__ == "__main__":
mop_1 = MoP("14", "NRW", "SPD", "Tom", "Schwadronius", party_entry="1990",
peer_title="Junker von", date_of_birth="1950")
mop_2 = MoP("15", "NRW", "Grüne", "Sabine", "Dingenskirchen",
electoral_ward="Essen II", peer_preposition="von")
mop_3 = MoP("15", "NRW", "Grüne", "Sammy", "Goodwill",
electoral_ward="Duisburg II", academic_title="Dr")
mop_4 = MoP("15", "NRW", "FDP", "Ralf", "Witzel",
electoral_ward="Essen III")
mop_5 = MoP("16", "NRW", "SPD", "Horst", "Schmitt",
electoral_ward="Düsseldorf III")
print(mop_1)
mop_1.add_Party("Grüne", party_entry="30.11.1999")
mop_1.change_ward("Düsseldorf II")
print(mop_1)
print()
print(mop_1.__dict__)
print()
# Dataclasses come with an asdict module:
# https://stackoverflow.com/a/35282286/6597765
print(asdict(mop_1))
db = Mops_TinyDB(".")
db.delete_all()
db.add_mop(asdict(mop_1))
print(mop_2)
db.add_mop(asdict(mop_2))
field = "last_name" # type: ignore
value = "Schwadronius" # type: ignore
print(f"print(db.all(field={field}, value={value})):")
print(db.list_mops(field=field, value=value))
print()
print("-" * 50)
print("for item in db.list_mops()")
for item in db.list_mops():
# convert dict back to dataclass:
# https://www.reddit.com/r/learnpython/comments/9h74no/convert_dict_to_dataclass/e69p8m8?utm_source=share&utm_medium=web2x&context=3 # noqa
mop = MoP(**item)
print(mop)
db.add_mop(asdict(mop_3))
db.add_mop(asdict(mop_4))
db.add_mop(asdict(mop_5))
field = "party_name" # type: ignore
value = "Grüne"
print("-" * 50)
print("for item in db.list_mops(field=party_name, value=Grüne):")
for item in db.list_mops(field=field, value=value):
# convert dict back to dataclass:
# https://www.reddit.com/r/learnpython/comments/9h74no/convert_dict_to_dataclass/e69p8m8?utm_source=share&utm_medium=web2x&context=3 # noqa
mop = MoP(**item)
print(mop)
field = "party_name" # type: ignore
value = "SPD"
print("-" * 50)
print("for item in db.list_mops(field=party_name, value=SPD):")
for item in db.list_mops(field=field, value=value):
# convert dict back to dataclass:
# https://www.reddit.com/r/learnpython/comments/9h74no/convert_dict_to_dataclass/e69p8m8?utm_source=share&utm_medium=web2x&context=3 # noqa
mop = MoP(**item)
print(mop)
print("-" * 50)
print("db.get_mop(mop_id=2):")
item = db.get_mop(mop_id=2)
mop = MoP(**item)
print(mop)
os.remove("./.mops_db.json")
```
#### File: personroles/resources/mop_tinyDB.py
```python
import os
import sys
from typing import Optional
import tinydb # type: ignore # isort: skip # noqa # pylint: disable=wrong-import-position
PACKAGE_PARENT = ".."
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
) # isort:skip # noqa # pylint: disable=wrong-import-position
sys.path.append(
os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT))
) # isort: skip # noqa # pylint: disable=wrong-import-position
class Mops_TinyDB():
"""Wrapper class for TinyDB."""
def __init__(self, db_path, db_name=None):
"""Connect to DB."""
if db_name is None:
self._db = tinydb.TinyDB(db_path + "db.json")
else:
self._db = tinydb.TinyDB(db_path + db_name)
def add_mop(self, mop: dict) -> int:
"""Add a mop dict to DB."""
mop_id = self._db.insert(mop)
mop["mop_id"] = mop_id
self._db.update(mop, doc_ids=[mop_id])
return mop_id
def get_mop(self, mop_id: int) -> Optional[dict]:
"""Return a mop dict with matching id."""
if self._db.contains(doc_id=mop_id):
return self._db.get(doc_id=mop_id)
else:
return None
def list_mops(self, field=None, value=None): # type (str) -> list[dict]
"""Return list of mops."""
if field is None:
return self._db.all()
else:
return self._db.search(tinydb.where(field) == value)
def count(self) -> int:
"""Return number of mops in DB."""
return len(self._db)
def update_mop(self, mop_id: int, field=None, value=None) -> None:
"""Modify mop in DB with given mop_id."""
self._db.update({field: value}, doc_ids=[mop_id])
if field in ["party_entry", "party_exit"]:
self._update_parties(mop_id, field, value)
def _update_parties(self, mop_id: int, field, value) -> None:
mop = self._db.get(doc_id=mop_id)
party_name = mop["party_name"]
index = None
for i, party in enumerate(mop["parties"]):
if party["party_name"] == party_name:
index = i
if index is not None:
party_entry, party_exit = self._assign_party_details(mop, party, field, value) # noqa
mop["parties"][index] = {"party_name": party_name,
"party_entry": party_entry,
"party_exit": party_exit}
self.delete(mop_id)
self.add_mop(mop)
def _assign_party_details(self, mop, party, field, value):
if field == "party_entry":
party_entry = value
party_exit = party["party_exit"]
elif field == "party_exit":
party_entry = party["party_entry"]
party_exit = value
return party_entry, party_exit
def delete(self, mop_id: int) -> None:
"""Remove a mop from DB with given mop_id."""
self._db.remove(doc_ids=[mop_id])
def delete_all(self):
"""Remove all mops from DB."""
self._db.truncate()
def unique_id(self): # type () -> int
"""Return an integer that does not exist in the db."""
i = 1
while self._db.contains(doc_id=i):
i += 1
return i
def stop_mops_db(db_path: str) -> None:
"""Disconnect from DB."""
pass
def start_mops_db(db_path: str) -> Mops_TinyDB:
"""Connect to DB."""
return Mops_TinyDB(db_path)
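# Usage sketch (illustrative path and data):
#   db = start_mops_db("./")                 # opens ./db.json
#   mop_id = db.add_mop({"last_name": "Schmitt", "party_name": "SPD"})
#   db.update_mop(mop_id, field="party_name", value="Grüne")
#   db.delete(mop_id)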
```
#### File: tests/func/test_add.py
```python
from dataclasses import asdict
from context import mop_role
new_mop = mop_role.MoP("17", "NRW", "Grüne", "<NAME>.", "Heinz")
def test_add_returns_valid_id(mops_db_fixture):
"""Test mop_db.add_mop(<valid mop>) should return an integer."""
db = mops_db_fixture
mop_id = db.add_mop(asdict(new_mop))
assert isinstance(mop_id, int) # nosec
def test_add_increases_count(db_with_3_mops):
"""Test add_mop() affect on mop_db.count()."""
db = db_with_3_mops
mop_id = db.add_mop(asdict(new_mop))
assert db.count() == mop_id # nosec
def test_add_returns_correct_id(db_with_3_mops):
"""Test add_mop() affect on mop_db.count()."""
db = db_with_3_mops
mop_id = db.add_mop(asdict(new_mop))
assert mop_id == 4 # nosec
```
#### File: tests/func/test_get.py
```python
from context import mop_role
new_mop = mop_role.MoP("17", "NRW", "Grüne", "<NAME>.", "Heinz")
def test_get_returns_valid_mop(db_with_3_mops, three_mops_fixture):
"""Test mop_db.get_mop(<valid mop_id>) should return the right mop."""
db = db_with_3_mops
mop_1 = three_mops_fixture[0]
mop_1.mop_id = 1
mop_dict = db.get_mop(1)
mop = mop_role.MoP(**mop_dict)
assert mop == mop_1 # nosec
mop_2 = three_mops_fixture[1]
mop_2.mop_id = 2
mop_dict = db.get_mop(2)
mop = mop_role.MoP(**mop_dict)
assert mop == mop_2 # nosec
def test_get_returns_None(db_with_3_mops):
db = db_with_3_mops
mop_dict = db.get_mop(100)
assert mop_dict is None # nosec
```
#### File: person/tests/test_person.py
```python
from dataclasses import dataclass
import pytest
from context import helpers # noqa
from context import person
# pylint: disable=redefined-outer-name
names = [
["<NAME>", "Boeselager"],
["<NAME>.", "Pimpernell"],
["<NAME>", "<NAME>"],
]
def equivalent_names(n1, n2):
fn = n2[0].split()[0]
ln = n2[-1]
try:
mn_2 = n2[0].split()[2]
except IndexError:
mn_2 = None
try:
mn_1 = n2[0].split()[1]
except IndexError:
mn_1 = None
return (
(n1.first_name == fn)
and (n1.middle_name_1 == mn_1)
and (n1.middle_name_2 == mn_2)
and (n1.last_name == ln)
)
@pytest.mark.parametrize("n", names)
def test_person_Name_para(n):
name = person.Name(*n)
assert equivalent_names(name, n) # nosec
def test_person_Name():
# pylint: disable=W0612, W0613
name = person.Name("Alfons-Reimund <NAME>", "Boeselager")
assert name.first_name == "Alfons-Reimund" # nosec
assert name.middle_name_1 == "Horst" # nosec
assert name.middle_name_2 == "Emil" # nosec
assert name.last_name == "Boeselager" # nosec
def test_person_Academic():
# pylint: disable=W0612, W0613
academic = person.Academic(
"Horatio",
"Pimpernell",
middle_name_1="R.",
academic_title="Prof.Dr. Dr", # noqa
)
assert academic.first_name == "Horatio" # nosec
assert academic.middle_name_1 == "R." # nosec
assert academic.last_name == "Pimpernell" # nosec
assert academic.academic_title == "Prof. Dr. Dr." # nosec
academic = person.Academic(
"<NAME>.", "Pimpernell", academic_title="Prof.Dr.Dr"
)
assert academic.first_name == "Horatio" # nosec
assert academic.middle_name_1 == "Rübennase" # nosec
assert academic.middle_name_2 == "D." # nosec
assert academic.last_name == "Pimpernell" # nosec
assert academic.academic_title == "Prof. Dr. Dr." # nosec
academic = person.Academic("Horatio", "Pimpernell", academic_title="B.A.")
assert academic.academic_title == "B. A." # nosec
def test_person_Noble():
# pylint: disable=W0612, W0613
noble = person.Noble("<NAME>", "Müller", peer_title="von und zu")
assert noble.first_name == "Sepp" # nosec
assert noble.middle_name_1 == "Theo" # nosec
assert noble.last_name == "Müller" # nosec
assert noble.peer_preposition == "von und zu" # nosec
noble = person.Noble("Seppl", "Müller", peer_title="<NAME>")
assert noble.first_name == "Seppl" # nosec
assert noble.last_name == "Müller" # nosec
assert noble.peer_title == "Junker" # nosec
assert noble.peer_preposition == "van" # nosec
noble = person.Noble("<NAME>", "Müller", peer_title="Graf Eumel von")
assert noble.first_name == "Sven" # nosec
assert noble.middle_name_1 == "Oskar" # nosec
assert noble.last_name == "Müller" # nosec
assert noble.peer_title == "Graf" # nosec
assert noble.peer_preposition == "von" # nosec
def test_person_Person():
# pylint: disable=W0612, W0613
pers = person.Person(
"Hugo", "Berserker", academic_title="MBA", date_of_birth="2000"
) # noqa
assert pers.gender == "male" # nosec
assert pers.academic_title == "MBA" # nosec
assert pers.age == "20" # nosec
pers = person.Person(
"<NAME>", "Berserker", date_of_birth="1980-2010"
) # noqa
assert pers.gender == "unknown" # nosec
assert pers.middle_name_1 == "Mathilde" # nosec
assert pers.year_of_birth == "1980" # nosec
assert pers.deceased is True # nosec
assert pers.year_of_death == "2010" # nosec
pers = person.Person("Sigrid", "Berserker", date_of_birth="10.1.1979") # noqa
assert pers.gender == "female" # nosec
assert pers.year_of_birth == "1979" # nosec
pers = person.Person(
"Sigrid", "Berserker", date_of_birth="10.1.1979 - 22.10.2019"
) # noqa
assert pers.date_of_birth == "10.1.1979" # nosec
assert pers.date_of_death == "22.10.2019" # nosec
def test_person_TooManyFirstNames():
# pylint: disable=W0612, W0613
name = person.Name
with pytest.raises(helpers.TooManyFirstNames):
name("<NAME>", "Schulze")
def test_person_AttrDisplay(capsys):
# pylint: disable=W0612, W0613
@dataclass
class MockClass(helpers.AttrDisplay):
a: str
b: str
c: str
var_1 = "späm"
var_2 = "ham"
var_3 = "ew"
mock_instance = MockClass(var_1, var_2, var_3)
print(mock_instance)
captured = capsys.readouterr()
expected = """MockClass:\na=späm\nb=ham\n\n"""
assert expected == captured.out # nosec
``` |
{
"source": "0LL13/persontitles",
"score": 3
} |
#### File: src/persontitles/academic_degrees.py
```python
import json
import os
import importlib.resources as pkg_resources
import sys
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(),
os.path.expanduser(__file__))),
) # isort:skip # noqa # pylint: disable=wrong-import-position
sys.path.append(
os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)),
) # isort: skip # noqa # pylint: disable=wrong-import-position
from persontitles.academic_german import degrees_ger # noqa
from persontitles.academic_german import german_abbrevs # noqa
from persontitles.academic_uk import degrees_uk # noqa
from persontitles.academic_us import degrees_us # noqa
def degrees() -> dict:
print(os.getcwd())
try:
with open('./src/persontitles/data/degrees.json', mode='r', encoding='utf-8') as fin: # noqa
DEGREES = json.load(fin)
except FileNotFoundError:
try:
DEGREES = collect_degrees()
with open('./src/persontitles/data/degrees.json', mode='w', encoding='utf-8') as fout: # noqa
json.dump(DEGREES, fout)
except FileNotFoundError:
DEGREES = load_file_within_package()
return DEGREES
def load_file_within_package():
from . import data
print("Now in load_file_within_package() - degrees")
with pkg_resources.open_text(data, 'degrees.json') as fin:
DATA_FILE = json.load(fin)
return DATA_FILE
def collect_degrees():
DEGREES = dict()
degrees = []
for degree in degrees_ger():
degrees.append(degree)
DEGREES['D'] = degrees
degrees = []
degrees_d = degrees_ger()
for degree in german_abbrevs(degrees_d):
degrees.append(degree)
DEGREES['german_abbrevs'] = degrees
degrees = []
for degree in degrees_uk():
degrees.append(degree)
DEGREES['UK'] = degrees
degrees = []
for degree in degrees_us():
degrees.append(degree)
DEGREES['US'] = degrees
return DEGREES
if __name__ == '__main__':
DEGREES = collect_degrees()
for k, v in DEGREES.items():
print(k)
print(v)
print()
print(os.getcwd())
```
#### File: src/persontitles/academic_uk.py
```python
import importlib.resources as pkg_resources
import unicodedata
import requests
from bs4 import BeautifulSoup
def degrees_uk() -> list:
try:
with open('./src/persontitles/data/academic_uk.txt', mode='r', encoding='utf-8') as fin: # noqa
DEGREES = fin.read().split('\n')
except FileNotFoundError:
try:
DEGREES = uk_degrees()
with open('./src/persontitles/data/academic_uk.txt', mode='a', encoding='utf-8') as fout: # noqa
fout.write('\n'.join(item for item in DEGREES))
except FileNotFoundError:
DEGREES = load_file_within_package()
return DEGREES
def load_file_within_package():
from . import data
with pkg_resources.open_text(data, 'academic_uk.txt') as fin:
DATA_FILE = fin.read().split('\n')
return DATA_FILE
def uk_degrees():
data = requests.get('https://en.wikipedia.org/wiki/British_degree_abbreviations') # noqa
soup = BeautifulSoup(data.text, 'lxml')
lines = get_lines(soup)
uk_degrees = []
for i, degree in enumerate(lines):
if i > 19 and i < 440:
uk_degrees.append(degree)
abbrevs = strip_degrees(uk_degrees)
fnl_degrees = final_degrees(abbrevs)
degrees = normalize_degrees(fnl_degrees)
return degrees
def get_lines(soup):
lines = []
for li in soup.find_all('li'):
values = [li.get_text(strip=True)]
lines.append(values[0])
return lines
def strip_degrees(degrees) -> list:
abbrevs = []
for i, degree in enumerate(degrees):
abbr = degree.split('-')[0].strip()
abbrevs.append(abbr.strip())
return abbrevs
def final_degrees(abbrevs):
final_degrees = []
for abbr in abbrevs:
if ' or ' in abbr:
abbrs = abbr.split('or')
for ab in abbrs:
final_degrees.append(ab.strip())
elif ',' in abbr:
abbrs = abbr.split(',')
for ab in abbrs:
final_degrees.append(ab.strip())
elif ') ' in abbr:
abbrs = abbr.split(') ')
ab = abbrs[0].strip()
if ab[-1] != ')':
ab = ab + ')'
final_degrees.append(ab)
else:
final_degrees.append(abbr.strip())
return final_degrees
def normalize_degrees(degrees):
DEGREES = []
for abbr in set(degrees):
abbr = unicodedata.normalize('NFKD', abbr)
DEGREES.append(abbr)
return DEGREES
if __name__ == '__main__':
ACADEMIC_UK = degrees_uk()
for i, degree in enumerate(ACADEMIC_UK):
print(i, degree)
```
#### File: src/persontitles/gov_jobs.py
```python
import bs4
import importlib.resources as pkg_resources
import requests
import unicodedata
from bs4 import BeautifulSoup
# from pprint import pprint
urls = [
# this url was locked away from public:
# 'http://www.besoldungstabelle.de/besoldungsgruppen_amtsbezeichnungen_besoldungstabelle'
'https://www.future-beamtenkredit.de/beamtenberufe/',
]
def gov_jobs() -> list:
try:
with open('./src/persontitles/data/gov_jobs.txt', mode='r', encoding='utf-8') as fin: # noqa
GOV_JOBS = fin.read().split('\n')
except FileNotFoundError:
try:
GOV_JOBS = gov_job_titles()
with open('./src/persontitles/data/gov_jobs.txt', mode='a', encoding='utf-8') as fout: # noqa
fout.write('\n'.join(item for item in GOV_JOBS))
except FileNotFoundError:
GOV_JOBS = load_file_within_package()
return GOV_JOBS
def load_file_within_package():
from . import data
with pkg_resources.open_text(data, 'gov_jobs.txt') as fin:
DATA_FILE = fin.read().split('\n')
return DATA_FILE
def gov_job_titles() -> list:
titles_1 = titles_url_1()
print('titles_1')
print(titles_1)
job_titles = [ttle for ttle in set(titles_1)] # noqa
job_titles = change_first_word_to_female(job_titles)
fem_male_collection = change_2_or_more_words_to_female(job_titles)
return fem_male_collection
def get_soup(url) -> bs4.element.NavigableString:
data = requests.get(url)
soup = BeautifulSoup(data.text, 'lxml')
return soup
def titles_url_1() -> list:
url = urls[0]
soup = get_soup(url)
lines = []
for p in soup.find_all('p'):
lines.append(p.get_text(strip=True))
print(lines)
lines = lines[12:47]
title_collection = []
for line in lines:
title = line.split(':')[-1]
if '-Besoldung' not in title:
title_collection.append(title.strip())
ttle_collection = title_collection[:]
title_collection = []
for title in ttle_collection:
titles = title.split(',')
for ttle in titles:
title_collection.append(ttle.strip())
ttle_collection = title_collection[:]
title_collection = []
for title in ttle_collection:
title = title.split('(')[0]
title_collection.append(title.strip())
ttle_collection = title_collection[:]
title_collection = []
for title in ttle_collection:
if 'usw.' in title:
title = title.split('usw.')[0]
elif ' einer' in title:
title = title.split(' einer')[0]
elif 'ehem.' in title:
title = title.split('ehem.')[-1]
elif ' und' in title:
title = title.split(' und')[0]
title_collection.append(title.strip())
title_collection = normalize_titles(title_collection)
return title_collection
def normalize_titles(titles) -> list:
normalized_titles = []
for title in titles:
title = unicodedata.normalize('NFKD', title.strip())
title = title.strip()
if title not in normalized_titles:
normalized_titles.append(title)
return normalized_titles
def change_first_word_to_female(titles) -> list:
female_vers_added = []
for title in titles:
female_vers_added.append(title.strip())
fields = title.split(' ')
end = ' '.join(fields[1:])
ttle = fields[0]
fem_title = change_to_female_variant(ttle) + ' ' + end
while ' ' in fem_title:
fem_title = fem_title.replace(' ', ' ')
fem_title = fem_title.strip()
if fem_title not in female_vers_added:
female_vers_added.append(fem_title)
return female_vers_added
def change_to_female_variant(title) -> str:
# need to normalize with NFC because otherwise "Präsident" will not be
# recognized and even counted with 10 letters!
ttle = unicodedata.normalize('NFC', title.strip())
if ttle in ['Direktor', 'Konsul', 'Präsident', 'Chef', 'Kanzler', 'Sekretär']: # noqa
fem_title = ttle + 'in '
elif ttle.endswith('ektor') or ttle.endswith('onsul') or\
ttle.endswith('räsident') or ttle.endswith('hef') or\
ttle.endswith('anzler') or ttle.endswith('ekretär') or\
ttle.endswith('rofessor') or ttle.endswith('fleger') or\
ttle.endswith('eister') or ttle.endswith('ührer') or\
ttle.endswith('ommissar') or ttle.endswith('rinär') or\
ttle.endswith('theker') or ttle.endswith('konservator') or\
ttle.endswith('schafter') or ttle.endswith('stent'): # noqa
fem_title = ttle + 'in '
elif ttle.endswith('rat'):
fem_title = ttle[:-3] + 'rätin'
elif ttle.endswith('arzt'):
fem_title = ttle[:-4] + 'ärztin'
elif ttle.endswith('walt'):
fem_title = ttle[:-4] + 'wältin'
elif ttle == 'Rat':
fem_title = 'Rätin '
elif ttle == 'Arzt':
fem_title = 'Ärztin '
else:
fem_title = title
return fem_title
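# Illustrative inputs/outputs (trailing spaces get collapsed by the callers):
#   change_to_female_variant("Direktor")  -> "Direktorin "
#   change_to_female_variant("Oberrat")   -> "Oberrätin"
#   change_to_female_variant("Amtsarzt")  -> "Amtsärztin"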
def change_2_or_more_words_to_female(titles) -> list:
female_vers_added = []
for title in titles:
# append the male versions and the female versions so far
female_vers_added.append(title)
fields = title.split(' ')
if len(fields) > 1:
male_ttle = fields[-1]
male_ttle = unicodedata.normalize('NFC', male_ttle.strip())
fem_ttle = change_to_female_variant(male_ttle)
if male_ttle != fem_ttle:
fem_title = change_to_female(fields) + ' ' + fem_ttle.strip()
female_vers_added.append(fem_title)
return female_vers_added
def change_to_female(fields) -> str:
fields_wo_last_field = fields[:-1]
fem_words = ''
for field in fields_wo_last_field:
if field.endswith('r'):
field = field[:-1]
fem_words = fem_words + field + ' '
return fem_words.strip()
if __name__ == '__main__':
titles = gov_job_titles()
for i, title in enumerate(sorted(titles)):
print(i, title)
```
#### File: src/persontitles/job_titles.py
```python
import bs4
import json
import importlib.resources as pkg_resources
import requests
import unicodedata
from bs4 import BeautifulSoup
urls = [
'https://www.nachrichten.at/wirtschaft/karriere/tipps/bewerbung/das-steckt-hinter-englischen-berufsbezeichnungen;art215506,3298147', # noqa
'https://www.xing.com/campus/de/job-search',
'https://zety.com/blog/job-titles',
]
def job_titles() -> dict:
JOB_TITLES = {}
try:
with open('./src/persontitles/data/german_jobtitles.txt', mode='r', encoding='utf-8') as fin: # noqa
german_jobtitles = fin.read().split('\n')
JOB_TITLES["German"] = german_jobtitles
with open('./src/persontitles/data/english_jobtitles.txt', mode='r', encoding='utf-8') as fin: # noqa
english_jobtitles = fin.read().split('\n')
JOB_TITLES["English"] = english_jobtitles
except FileNotFoundError:
try:
german_jobtitles, english_jobtitles = job_titles_mix()
with open('./src/persontitles/data/german_jobtitles.txt', mode='a', encoding='utf-8') as fout: # noqa
fout.write('\n'.join(item for item in german_jobtitles))
with open('./src/persontitles/data/english_jobtitles.txt', mode='a', encoding='utf-8') as fout: # noqa
fout.write('\n'.join(item for item in english_jobtitles))
except FileNotFoundError:
JOB_TITLES = load_file_within_package()
return JOB_TITLES
def load_file_within_package():
from . import data
DATA_FILE = {}
with pkg_resources.open_text(data, 'german_jobtitles.txt') as fin:
jobtitles = fin.read().split('\n')
DATA_FILE["German"] = jobtitles
with pkg_resources.open_text(data, 'english_jobtitles.txt') as fin:
jobtitles = fin.read().split('\n')
DATA_FILE["English"] = jobtitles
return DATA_FILE
def job_titles_mix() -> list:
titles_1 = titles_url_1()
titles_2 = titles_url_2()
english_jobtitles = english_job_titles()
german_jobtitles = [ttle for ttle in set(titles_1) | set(titles_2)] # noqa
return german_jobtitles, english_jobtitles
def get_soup(url) -> bs4.element.NavigableString:
data = requests.get(url)
soup = BeautifulSoup(data.text, 'lxml')
return soup
def titles_url_1() -> list:
url = urls[0]
soup = get_soup(url)
lines = []
for p in soup.find_all('p'):
lines.append(p.get_text(strip=True))
lines = lines[5:87]
title_collection = []
for line in lines:
titles = line.split('-')
for title in titles:
title_collection.append(title.strip())
ttle_collection = title_collection[:]
title_collection = []
for title in ttle_collection:
titles = title.split('/')
for ttle in titles:
title_collection.append(ttle.strip())
ttle_collection = title_collection[:]
title_collection = []
for title in ttle_collection:
titles = title.split(',')
for ttle in titles:
if ttle == 'Consultat':
ttle = 'Consultant'
elif 'Worker' in ttle:
pass
title_collection.append(ttle.strip())
ttle_collection = title_collection[:]
title_collection = []
for title in ttle_collection:
ttle = title.split('(')[0]
title_collection.append(ttle.strip())
title_collection = normalize_titles(title_collection)
title_collection.remove('Head of...')
return title_collection
def titles_url_2() -> list:
titles = []
url = urls[1]
soup = get_soup(url)
rawJ = soup.find('script')
# this ignores the json stuff and only makes use of splitting strings
J = str(rawJ)
J1 = J.split('var _env=')[-1]
J2 = J1.split(';')[4]
J3 = J2.split('var REDUX_STATE=')[-1]
for line in J3.split(','):
if line.startswith('"name"'):
title = line.split(':')[-1]
title = title[1:].split('"')[0]
if title not in ['Arbeiter', 'Absolvent', 'Satz']:
titles.append(title)
return titles
def english_job_titles() -> list:
titles = []
url = urls[2]
soup = get_soup(url)
# take a peek at the structure of those nested json dicts:
# https://stackoverflow.com/a/63151716/6597765
# for i, rawJ in enumerate(soup.find_all('script', type="application/ld+json")): # noqa
# print("index:", i)
# data = json.loads(rawJ.string)
# for k, v in data.items():
# print("key:", k)
# print("value")
# pprint(v)
# print()
rawJs = soup.find_all('script', type='application/ld+json')
J1 = json.loads(rawJs[1].string)
J2 = J1['@graph'][2]
J3 = J2['articleBody']
titles_ = J3.split('&nbsp;')
for i, title in enumerate(titles_):
if i not in [17, 35, 42, 51, 58, 66, 74, 81, 85, 92, 98, 103, 108, 113, 118, 123, 128, 133, 138, 144, 149, 152, 156, 159, 162, 165, 169, 172, 175, 179]: # noqa
titles.append(title.strip())
titles_ = []
for title in titles:
title = title.split('\n')
for ttle in title:
titles_.append(ttle)
titles = titles_[2:]
titles_ = []
titles_ = normalize_titles(titles)
extracted_titles = extract_titles(titles_)
titles = [title for title in set(extracted_titles)]
for i, title in enumerate(sorted(titles)):
print(i, title)
return titles
def normalize_titles(titles) -> list:
normalized_titles = []
for title in titles:
# print(title)
title = unicodedata.normalize('NFKD', title.strip())
title = title.strip()
# print(title)
if title not in normalized_titles:
normalized_titles.append(title)
return normalized_titles
def extract_titles(titles_):
extracted_titles = []
for title in titles_:
if "Resume" in title:
pass
elif "resume" in title:
pass
elif "guide:" in title:
pass
elif "Positions" in title:
pass
elif "Key Takeaway" in title:
pass
elif "job" in title:
pass
elif title.startswith("Top"):
pass
elif title.startswith("450"):
pass
elif title.endswith("!"):
pass
elif title.endswith("?"):
pass
elif title.endswith(";"):
pass
elif title.endswith("."):
pass
elif title.endswith(":"):
pass
elif title.endswith("Jobs"):
pass
elif title.endswith("Sample"):
pass
elif title.endswith("Titles"):
pass
elif title in ["", "A", "Head", "Lead"]:
pass
elif '&mdash;' in title:
extracted_titles.append(title.split('&mdash;')[0])
extracted_titles.append(title.split('&mdash;')[-1])
elif '/' in title:
extracted_titles.append(title.split('/')[0].strip())
extracted_titles.append(title.split('/')[-1].strip())
elif '(' in title:
extracted_titles.append(title.split('(')[0].strip())
extracted_titles.append(title.split('(')[-1][:-1].strip())
elif '&rsquo;' in title:
title = title.replace('&rsquo;', "'")
extracted_titles.append(title.strip())
elif '&amp;' in title:
extracted_titles.append(title.split('&amp;')[0].strip())
extracted_titles.append(title.split('&amp;')[-1].strip())
elif " or " in title:
or_titles = []
if title.split()[-1] in ["Cleaner", "Producer"]:
or_titles.append("Vehicle Cleaner")
or_titles.append("Equipment Cleaner")
or_titles.append("Film Producer")
or_titles.append("Video Producer")
elif title.endswith("Taxis"):
or_titles.append("Dispatcher for Taxis")
or_titles.append("Dispatcher for Trucks")
else:
or_titles.append("Caretaker")
or_titles.append("House Sitter")
for ttle in or_titles:
extracted_titles.append(ttle)
extracted_titles.append(ttle)
else:
extracted_titles.append(title)
return extracted_titles
if __name__ == '__main__':
JOB_TITLES = job_titles()
for i, title in enumerate(sorted(JOB_TITLES["German"])):
print(i, title)
# for i, title in enumerate(sorted(JOB_TITLES["English"])):
# print(i, title)
```
#### File: persontitles/tests/test_degrees.py
```python
import os
import sys
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(
os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))),
) # isort:skip # noqa # pylint: disable=wrong-import-position
sys.path.append(
os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)),
) # isort: skip # noqa # pylint: disable=wrong-import-position
from context import academic_degrees # noqa
def test_no_file():
try:
os.remove('./src/persontitles/data/degrees.json')
except FileNotFoundError:
pass
ACADEMIC = academic_degrees.degrees_ger()
assert isinstance(ACADEMIC, list)
def test_degrees_is_dict():
DEGREES = academic_degrees.degrees()
assert isinstance(DEGREES, dict)
def test_keys():
DEGREES = academic_degrees.degrees()
for k, v in DEGREES.items():
assert k in ['D', 'UK', 'US', 'german_abbrevs']
```
#### File: persontitles/tests/test_uk.py
```python
import os
from context import academic_uk
def test_filenotfound():
try:
os.remove('./src/persontitles/data/academic_uk.txt')
except FileNotFoundError:
pass
ACADEMIC = academic_uk.degrees_uk()
assert isinstance(ACADEMIC, list)
def test_academic_is_set():
ACADEMIC = academic_uk.degrees_uk()
assert isinstance(ACADEMIC, list)
def test_degree_in_list():
ACADEMIC = academic_uk.degrees_uk()
assert 'MBA' in ACADEMIC
assert 'BSc(HealthSc)' in ACADEMIC
assert 'Dr. <NAME>' not in ACADEMIC
``` |
{
"source": "0lmi/deviceauth",
"score": 2
} |
#### File: tests/tests/test_token.py
```python
import base64
import bravado
import json
import pytest
import requests
from contextlib import contextmanager
from common import (
Device,
DevAuthorizer,
device_auth_req,
explode_jwt,
clean_migrated_db,
clean_db,
get_fake_tenantadm_addr,
make_fake_tenant_token,
mongo,
cli,
management_api,
internal_api,
device_api,
)
import orchestrator
import mockserver
@contextmanager
def mock_tenantadm_auth(tenant_addons=[]):
def tenantadm_handler(req):
auth = req.headers["Authorization"]
# jwt = <header (base64)>.<claims (base64)>.<signature (base64)>
jwt_b64 = auth.split(".")
if len(jwt_b64) > 1:
print(jwt_b64)
# Convert base64 from url- to std-encoding and append padding
claims_b64 = jwt_b64[1].replace("+", "-").replace("?", "_")
# Add padding
claims_b64 += "=" * (-len(claims_b64) % 4)
# Decode claims
claims = base64.b64decode(claims_b64)
d = json.loads(claims)
tenant_id = d["mender.tenant"]
return (
200,
{},
{
"id": tenant_id,
"name": "Acme",
"addons": [
{"name": addon, "enabled": True} for addon in tenant_addons
],
},
)
else:
return (500, {}, {})
with mockserver.run_fake(
get_fake_tenantadm_addr(),
handlers=[
("POST", "/api/internal/v1/tenantadm/tenants/verify", tenantadm_handler)
],
) as srv:
yield srv
def request_token(device, dev_auth, url, tenant_addons=[]):
with mock_tenantadm_auth(tenant_addons):
rsp = device_auth_req(url, dev_auth, device)
assert rsp.status_code == 200
dev_auth.parse_rsp_payload(device, rsp.text)
return device.token
@pytest.yield_fixture(scope="function")
def accepted_device(device_api, management_api, clean_migrated_db):
"""Fixture that sets up an accepted device. Yields a tuple:
(device ID, instance of Device, instance of DevAuthorizer)"""
yield accept_device(device_api, management_api)
def accept_device(device_api, management_api, tenant_token=None):
d = Device()
da = DevAuthorizer(tenant_token)
url = device_api.auth_requests_url
kwargs = {}
if tenant_token is not None:
kwargs["Authorization"] = "Bearer " + tenant_token
try:
with orchestrator.run_fake_for_device_id(1) as server:
with mock_tenantadm_auth():
# poke devauth so that device appears
rsp = device_auth_req(url, da, d)
assert rsp.status_code == 401
# try to find our devices in all devices listing
dev = management_api.find_device_by_identity(d.identity, **kwargs)
assert dev is not None
print("found matching device with ID", dev.id)
devid = dev.id
# extract authentication data set ID
aid = dev.auth_sets[0].id
with orchestrator.run_fake_for_device_id(devid) as server:
management_api.accept_device(devid, aid, **kwargs)
except bravado.exception.HTTPError as e:
assert e.response.status_code == 204
return devid, d, da
@pytest.yield_fixture(scope="function")
def device_token(accepted_device, device_api):
devid, d, da = accepted_device
try:
with orchestrator.run_fake_for_device_id(devid) as server:
token = request_token(d, da, device_api.auth_requests_url)
except bravado.exception.HTTPError as e:
assert e.response.status_code == 204
print("device token:", token)
assert token
yield token
@pytest.yield_fixture(scope="session")
def token_verify_url(internal_api):
verify_url = internal_api.make_api_url("/tokens/verify")
print("verify URL:", verify_url)
yield verify_url
class TestToken:
def test_token_claims(self, accepted_device, management_api, device_api):
devid, d, da = accepted_device
try:
with orchestrator.run_fake_for_device_id(devid) as server:
token = request_token(d, da, device_api.auth_requests_url)
except bravado.exception.HTTPError as e:
assert e.response.status_code == 204
assert len(token) > 0
print("device token:", d.token)
thdr, tclaims, tsign = explode_jwt(d.token)
assert "typ" in thdr and thdr["typ"] == "JWT"
assert "jti" in tclaims
assert "exp" in tclaims
assert "sub" in tclaims and tclaims["sub"] == devid
assert "iss" in tclaims and tclaims["iss"] == "Mender"
assert "mender.device" in tclaims and tclaims["mender.device"] == True
def test_token_verify_ok(self, device_token, token_verify_url):
# verify token; the token is to be placed in the Authorization header
# and it looks like bravado cannot handle a POST request with no data
# in body, hence we fall back to sending request directly
auth_hdr = "Bearer {}".format(device_token)
# successful verification
rsp = requests.post(
token_verify_url, data="", headers={"Authorization": auth_hdr}
)
assert rsp.status_code == 200
def test_token_verify_none(self, token_verify_url):
# no auth header should raise an error
rsp = requests.post(token_verify_url, data="")
assert rsp.status_code == 401
def test_token_verify_bad(self, token_verify_url):
# use a bogus token that is not a valid JWT
rsp = requests.post(
token_verify_url, data="", headers={"Authorization": "bogus"}
)
assert rsp.status_code == 401
def test_token_verify_corrupted(self, device_token, token_verify_url):
auth_hdr = "Bearer {}".format(device_token)
rsp = requests.post(
token_verify_url, data="", headers={"Authorization": auth_hdr + "==foo"}
)
assert rsp.status_code == 401
def test_token_delete(self, device_token, token_verify_url, management_api):
_, tclaims, _ = explode_jwt(device_token)
# bravado cannot handle DELETE requests either
# self.client.tokens.delete_tokens_id(id=tclaims['jti'])
# use requests instead
rsp = requests.delete(
management_api.make_api_url("/tokens/{}".format(tclaims["jti"]))
)
assert rsp.status_code == 204
auth_hdr = "Bearer {}".format(device_token)
# unsuccessful verification
rsp = requests.post(
token_verify_url, data="", headers={"Authorization": auth_hdr}
)
assert rsp.status_code == 401
class TestTokenEnterprise:
@pytest.mark.parametrize(
"test_case",
[
{
"addons": ["troubleshoot", "configure"],
"forwarded_uri": "/api/devices/v1/deviceconfig/configuration",
"method": "PUT",
"status_code": 200,
},
{
"addons": ["troubleshoot"],
"forwarded_uri": "/api/devices/v1/deviceconfig/configuration",
"method": "PUT",
"status_code": 403,
},
],
)
def test_token_addons(
self, test_case, clean_migrated_db, device_api, management_api, internal_api
):
tenant_token = make_fake_tenant_token(
"123456789012345678901234",
)
dev_auth = DevAuthorizer(tenant_token=tenant_token)
jwt = None
dev = accept_device(device_api, management_api, tenant_token)[1]
with orchestrator.run_fake_for_device_id(1) as server:
jwt = request_token(
dev, dev_auth, device_api.auth_requests_url, test_case["addons"]
)
assert len(jwt) > 0
rsp = requests.post(
internal_api.api_url + "tokens/verify",
data="",
headers={
"Authorization": "Bearer " + jwt,
"X-Forwarded-Uri": test_case.get("forwarded_uri"),
"X-Forwarded-Method": test_case.get("method"),
},
)
assert rsp.status_code == test_case.get("status_code", 200)
``` |
{
"source": "0lmi/deviceconnect",
"score": 2
} |
#### File: deviceconnect/tests/common.py
```python
import base64
import json
import uuid
import re
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone
from websocket import create_connection
import devices_api
import internal_api
import management_api
@contextmanager
def ws_session(url, **kwargs):
conn = create_connection(url, **kwargs)
yield conn
conn.close()
class Device:
def __init__(self, device_id=None, plan=None, tenant_id=None):
if device_id is None:
device_id = str(uuid.uuid4())
self.id = device_id
self.tenant_id = tenant_id
if tenant_id is None:
tenant_id = ""
self.plan = plan
client = internal_api.InternalAPIClient()
r = client.provision_device_with_http_info(
tenant_id=tenant_id,
device=internal_api.Device(device_id=device_id),
_preload_content=False,
)
assert r.status == 201
def connect(self):
return ws_session(
devices_api.Configuration.get_default_copy().host.replace(
"http://", "ws://"
)
+ "/connect",
cookie="JWT=%s" % self.jwt,
)
@property
def jwt(self):
claims = {
"jti": str(uuid.uuid4()),
"sub": self.id,
"exp": int((datetime.now(tz=timezone.utc) + timedelta(days=7)).timestamp()),
"mender.device": True,
}
if self.tenant_id is not None:
claims["mender.tenant"] = self.tenant_id
if self.plan is not None:
claims["mender.plan"] = self.plan
return ".".join(
[
base64.urlsafe_b64encode(b'{"alg":"RS256","typ":"JWT"}')
.decode("ascii")
.strip("="),
base64.urlsafe_b64encode(json.dumps(claims).encode())
.decode("ascii")
.strip("="),
base64.urlsafe_b64encode(b"Signature").decode("ascii").strip("="),
]
)
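    # Note: the "signature" above is just base64("Signature"); such a token
    # is only usable because the services under test are assumed not to
    # verify JWT signatures in this setup.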
@property
def api(self):
# Setup device api with token
api_conf = devices_api.Configuration.get_default_copy()
api_conf.access_token = self.jwt
return devices_api.DeviceAPIClient(devices_api.ApiClient(api_conf))
def make_user_token(user_id=None, plan=None, tenant_id=None):
if user_id is None:
user_id = str(uuid.uuid4())
claims = {
"jti": str(uuid.uuid4()),
"sub": user_id,
"exp": int((datetime.now(tz=timezone.utc) + timedelta(days=7)).timestamp()),
"mender.user": True,
}
if tenant_id is not None:
claims["mender.tenant"] = tenant_id
if plan is not None:
claims["mender.plan"] = plan
return ".".join(
[
base64.urlsafe_b64encode(b'{"alg":"RS256","typ":"JWT"}')
.decode("ascii")
.strip("="),
base64.urlsafe_b64encode(json.dumps(claims).encode())
.decode("ascii")
.strip("="),
base64.urlsafe_b64encode(b"Signature").decode("ascii").strip("="),
]
)
def management_api_with_params(user_id, plan=None, tenant_id=None):
api_conf = management_api.Configuration.get_default_copy()
api_conf.access_token = make_user_token(user_id, plan, tenant_id)
return management_api.ManagementAPIClient(management_api.ApiClient(api_conf))
def management_api_connect(
device_id: str,
user_id: str = None,
tenant_id: str = None,
plan: str = None,
api_conf: management_api.Configuration = None,
**sess_args,
):
if api_conf is None:
api_conf = management_api.Configuration.get_default_copy()
jwt = make_user_token(user_id=user_id, tenant_id=tenant_id, plan=plan)
url = (
re.sub(r"^http(s?://.+$)", r"ws\1", api_conf.host).rstrip("/")
+ f"/devices/{device_id}/connect"
)
return ws_session(url, cookie=f"JWT={jwt}", **sess_args)
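# Usage sketch (hypothetical IDs, assumes a provisioned device and a running
# deviceconnect):
#
#     with management_api_connect("device-1", user_id="user-1") as ws:
#         ws.send_binary(b"...")  # forwarded over the device session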
```
#### File: deviceconnect/tests/conftest.py
```python
import base64
import json
import os
import uuid
import pytest
import signal
import bson
import pymongo
import devices_api
import internal_api
import management_api
def pytest_addoption(parser):
parser.addoption(
"--host",
action="store",
default=os.environ["TESTING_HOST"]
if "TESTING_HOST" in os.environ
else "localhost",
help="Address for host hosting deviceconnect API (env: TEST_HOST)",
)
def pytest_configure(config):
host = config.getoption("host")
devices_api.Configuration.set_default(
devices_api.Configuration(
host="http://" + host + "/api/devices/v1/deviceconnect"
)
)
internal_api.Configuration.set_default(
internal_api.Configuration(
host="http://" + host + "/api/internal/v1/deviceconnect"
)
)
management_api.Configuration.set_default(
management_api.Configuration(
host="http://" + host + "/api/management/v1/deviceconnect"
)
)
@pytest.fixture(scope="session")
def mongo():
return pymongo.MongoClient("mongodb://mender-mongo")
def mongo_cleanup(client):
dbs = client.list_database_names()
for db in dbs:
if db in ["local", "admin", "config"]:
continue
client.drop_database(db)
@pytest.fixture(scope="function")
def clean_mongo(mongo):
mongo_cleanup(client=mongo)
yield mongo
mongo_cleanup(client=mongo)
@pytest.fixture(scope="function")
def tenant(tenant_id=None):
"""
This fixture provisions a new tenant database.
    :param tenant_id: can be overridden with indirect parametrization
        (@pytest.mark.parametrize(..., indirect=True)).
"""
if tenant_id is None:
tenant_id = str(bson.objectid.ObjectId())
client = internal_api.InternalAPIClient()
client.provision_tenant(new_tenant=internal_api.NewTenant(tenant_id=tenant_id))
yield tenant_id
def _test_timeout(signum, frame):
raise TimeoutError("TestConnect did not finish in time")
@pytest.fixture(scope="function")
def timeout(request, timeout_sec=30):
""""""
alrm_handler = signal.getsignal(signal.SIGALRM)
def timeout(signum, frame):
raise TimeoutError("%s did not finish in time" % request.function.__name__)
signal.signal(signal.SIGALRM, timeout)
signal.alarm(timeout_sec)
yield
signal.signal(signal.SIGALRM, alrm_handler)
``` |
{
"source": "0lmi/integration",
"score": 2
} |
#### File: backend-tests/tests/test_api_endpoints.py
```python
import glob
import json
import logging
import os
import re
import subprocess
import tempfile
import pytest
import requests
import yaml
from testutils.api.client import GATEWAY_HOSTNAME
logging.basicConfig(format="%(asctime)s %(message)s")
logger = logging.getLogger("test_decomission")
logger.setLevel(logging.INFO)
REPO_TO_ENV_VARIABLE = {
"auditlogs": "AUDITLOGS_REV",
"deployments": "DEPLOYMENTS_REV",
"deployments-enterprise": "DEPLOYMENTS_ENTERPRISE_REV",
"deviceauth": "DEVICEAUTH_REV",
"deviceconfig": "DEVICECONFIG_REV",
"deviceconnect": "DEVICECONNECT_REV",
"inventory": "INVENTORY_REV",
"inventory-enterprise": "INVENTORY_ENTERPRISE_REV",
"tenantadm": "TENANTADM_REV",
"useradm": "USERADM_REV",
"useradm-enterprise": "USERADM_ENTERPRISE_REV",
"workflows": "WORKFLOWS_REV",
"workflows-enterprise": "WORKFLOWS_ENTERPRISE_REV",
}
def get_api_docs(repo):
# do not proceed if the SSH_PRIVATE_KEY env variable is not set
if not bool(os.environ.get("SSH_PRIVATE_KEY")):
return
git_repository = f"<EMAIL>:mendersoftware/{repo}.git"
with tempfile.TemporaryDirectory() as tmp:
tmp_repo = os.path.join(tmp, repo)
subprocess.check_output(["git", "clone", git_repository, tmp_repo])
env_var_name = REPO_TO_ENV_VARIABLE.get(repo)
ref_name = env_var_name and os.getenv(env_var_name) or "master"
if ref_name != "master":
tag_match = re.match(r"^[0-9]+\.[0-9]+\.[0-9]+(?:-build[0-9]+)?", ref_name)
if tag_match:
subprocess.check_output(
["git", "checkout", "-b", "prtest", ref_name], cwd=tmp_repo,
)
else:
subprocess.check_output(
["git", "fetch", "origin", ref_name + ":prtest"], cwd=tmp_repo,
)
subprocess.check_output(
["git", "checkout", "prtest"], cwd=tmp_repo,
)
files = glob.glob(os.path.join(tmp_repo, "docs", "*.yml"))
for file in files:
basename = os.path.basename(file)
            if basename.startswith("management_"):
                kind = "management"
            elif basename.startswith("devices_"):
                kind = "devices"
            else:
                kind = "internal"
with open(file) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
yield kind, data
def get_api_endpoints(repo):
for kind, data in get_api_docs(repo):
if data.get("swagger"):
scheme, host, base_path = (
data["schemes"][0],
data["host"],
data.get("basePath", "/"),
)
elif data.get("openapi"):
parts = data["servers"][0]["url"].split("/", 3)
scheme = parts[0].rstrip(":")
host, base_path = parts[2:4]
else:
logger.error(f"unknown specification file: {json.dumps(data)}")
raise ValueError(
"Unknown specification file, only swagger and openapi 3 are supported!"
)
for path, path_value in data["paths"].items():
for method, definition in path_value.items():
requires_auth = (
len(definition.get("security") or ()) > 0
or len(data.get("security") or ()) > 0
or path.rstrip("/").endswith("/verify") # JWT token verifications
or path.rstrip("/").endswith("/2faqr") # 2FA QR code
or path.rstrip("/").endswith("/2faverify") # 2FA code verification
)
yield {
"auth": requires_auth,
"kind": kind,
"method": method,
"scheme": scheme,
"host": host,
"path": base_path.rstrip("/") + path,
}
def get_all_api_endpoints(repos):
for repo in repos:
for endpoint in get_api_endpoints(repo):
yield (
endpoint["kind"],
endpoint["auth"],
endpoint["method"],
endpoint["scheme"],
endpoint["host"],
endpoint["path"],
)
class BaseTestAPIEndpoints:
def do_test_api_endpoints(
self, kind, auth, method, scheme, host, path, get_endpoint_url
):
assert method in ("get", "post", "put", "delete", "patch")
requests_method = getattr(requests, method)
if host == "hosted.mender.io" or kind in ("management", "devices"):
base_url = f"{scheme}://{GATEWAY_HOSTNAME}"
else:
base_url = get_endpoint_url(f"{scheme}://{host}")
r = requests_method(
base_url + "/" + path.lstrip("/"), verify=False, timeout=2.0
)
if auth:
assert 401 == int(r.status_code)
else:
assert 401 != int(r.status_code)
assert (
int(r.status_code) >= 200
and int(r.status_code) < 500
and int(r.status_code) != 405
)
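        # Rationale: secured endpoints must reject the unauthenticated
        # request with 401; public ones may answer anything well-formed
        # except 401, 405 or a 5xx.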
class TestAPIEndpoints(BaseTestAPIEndpoints):
REPOS = (
"deployments",
"deviceauth",
"deviceconfig",
"deviceconnect",
"inventory",
"useradm",
"workflows",
)
@pytest.mark.skipif(
not bool(os.environ.get("SSH_PRIVATE_KEY")),
reason="SSH_PRIVATE_KEY not provided",
)
@pytest.mark.parametrize(
"kind,auth,method,scheme,host,path", get_all_api_endpoints(REPOS),
)
def test_api_endpoints(
self, kind, auth, method, scheme, host, path, get_endpoint_url
):
self.do_test_api_endpoints(
kind, auth, method, scheme, host, path, get_endpoint_url
)
class TestAPIEndpointsEnterprise(BaseTestAPIEndpoints):
REPOS = (
"auditlogs",
"deployments-enterprise",
"deviceauth",
"deviceconfig",
"deviceconnect",
"devicemonitor",
"inventory-enterprise",
"tenantadm",
"useradm-enterprise",
"workflows-enterprise",
)
@pytest.mark.skipif(
not bool(os.environ.get("SSH_PRIVATE_KEY")),
reason="SSH_PRIVATE_KEY not provided",
)
@pytest.mark.parametrize(
"kind,auth,method,scheme,host,path", get_all_api_endpoints(REPOS),
)
def test_api_endpoints(
self, kind, auth, method, scheme, host, path, get_endpoint_url
):
self.do_test_api_endpoints(
kind, auth, method, scheme, host, path, get_endpoint_url
)
``` |
{
"source": "0lmi/inventory",
"score": 2
} |
#### File: tests/tests/test_inventory_searching.py
```python
from common import inventory_attributes, management_client, internal_client, clean_db, mongo
import requests
import pytest
import os
@pytest.mark.usefixtures("clean_db")
class TestInventorySearching:
def test_inventory_searching(self, management_client, internal_client, inventory_attributes):
extra_inventory_items = {
"users_logged_in": 100,
"open_connections": 1231,
"open_ports": 523,
}
for i in extra_inventory_items.keys():
it = list(inventory_attributes)
it.append(management_client.inventoryAttribute(name=i,
value=extra_inventory_items[i]))
did = "".join([ format(i, "02x") for i in os.urandom(128)])
internal_client.create_device(did, it)
r = requests.get(management_client.client.swagger_spec.api_url + "/devices",
params=({"users_logged_in": 100}),
verify=False)
assert len(r.json()) == 1
r = requests.get(management_client.client.swagger_spec.api_url + "/devices",
params=({"open_connections": 1231}),
verify=False)
assert len(r.json()) == 1
``` |
{
"source": "0lru/p3ui",
"score": 3
} |
#### File: demos/canvas/text.py
```python
from p3ui import *
import asyncio
font_mgr = skia.FontMgr.RefDefault()
family_names = [font_mgr.getFamilyName(i) for i in range(font_mgr.countFamilies())]
class TextSurface(Surface):
def __init__(self):
super().__init__(
width=(100 | percent, 0, 0),
height=(len(family_names) * 50.0 + 50. | px, 0, 0)
)
offset = 50
with self as canvas:
for name in family_names:
typeface = skia.Typeface(name)
font = skia.Font(typeface, 50.0, 1.0, 0.0)
paint = skia.Paint(AntiAlias=True, Color=skia.ColorWHITE)
canvas.drawString(f'{name}: ABC abc Xx 123', 0, offset, font, paint)
offset += 50
async def main():
window = Window(title='Skia Fonts')
window.position = (50, 50)
window.size = (1024, 768)
await window.serve(UserInterface(content=ScrollArea(content=TextSurface())))
asyncio.run(main())
```
#### File: demos/matplotlib/filled_chart.py
```python
from p3ui import *
import numpy as np
class FilledChart(MatplotlibSurface):
def __init__(self):
self.shift = 0.0
super().__init__(width=(auto, 1, 1), height=(auto, 1, 1))
self._update()
def _update(self):
with self as figure:
figure.clear()
ax = figure.add_subplot()
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2 * np.pi * x + self.shift)
ax.fill_between(x, y1)
ax.set_title('fill between y1 and 0')
async def update(self):
self._update()
```
#### File: demos/matplotlib/gradient_chart.py
```python
from p3ui import *
import matplotlib.pyplot as plt
import numpy as np
def gradient_image(ax, extent, direction=0.3, cmap_range=(0, 1), **kwargs):
phi = direction * np.pi / 2
v = np.array([np.cos(phi), np.sin(phi)])
X = np.array([[v @ [1, 0], v @ [1, 1]],
[v @ [0, 0], v @ [0, 1]]])
a, b = cmap_range
X = a + (b - a) / X.max() * X
im = ax.imshow(X, extent=extent, interpolation='bicubic',
vmin=0, vmax=1, **kwargs)
return im
def gradient_bar(ax, x, y, width=0.5, bottom=0):
for left, top in zip(x, y):
right = left + width
gradient_image(ax, extent=(left, right, bottom, top),
cmap=plt.cm.Blues_r, cmap_range=(0, 0.8))
class GradientChart(MatplotlibSurface):
# https://matplotlib.org/stable/gallery/lines_bars_and_markers/bar_stacked.html#sphx-glr-gallery-lines-bars-and-markers-bar-stacked-py
def __init__(self, **kwargs):
width = kwargs.pop('width', (auto, 1, 1))
height = kwargs.pop('height', (auto, 1, 1))
super().__init__(width=width, height=height, **kwargs)
self._update()
def _update(self):
with self as figure:
np.random.seed(19680801)
figure.clear()
ax = figure.add_subplot()
ax.set(xlim=(0, 10), ylim=(0, 1), autoscale_on=False)
gradient_image(ax, direction=1, extent=(0, 1, 0, 1), transform=ax.transAxes,
cmap=plt.cm.RdYlGn, cmap_range=(0.2, 0.8), alpha=0.5)
N = 10
x = np.arange(N) + 0.15
y = np.random.rand(N)
gradient_bar(ax, x, y, width=0.7)
ax.set_aspect('auto')
async def update(self):
self._update()
```
#### File: p3ui/mpl/_units.py
```python
dpi = 92
def points_to_pixels(points):
return points * dpi / 72
def pixels_to_points(pixels):
return pixels * 72 / dpi
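# Example: with dpi = 92, a 12 pt font maps to 12 * 92 / 72 ≈ 15.3 px.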
``` |
{
"source": "0lucasmoura/dog_breed_clf",
"score": 2
} |
#### File: dog_breed_clf/source/train.py
```python
import argparse
import json
import os
import time
import copy
import pickle
import sys
import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.utils.data
from torch.optim import lr_scheduler
from torchvision import datasets
from torchvision import transforms
from model import ConvNet
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.DataParallel(ConvNet(model_info["hidden_dim"], model_info["output_dim"]))
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
    # Move the model to the evaluation device and set eval mode.
model.to(device).eval()
print("Done loading model.")
return model
def _get_train_data_loader(batch_size, data_dir, num_workers):
"""Returns the data loader for training, uses data augmentation to try to make the model to generalize better"""
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(data_dir, transform=transforms.Compose([
transforms.RandomResizedCrop(size=312, scale=(0.6, 1.0)),
transforms.RandomRotation(10, expand=True),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
return train_loader
def _get_test_data_loader(batch_size, data_dir, num_workers):
"""Returns the data loader for testing the model during its training"""
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(data_dir, transform=transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=batch_size, shuffle=True, num_workers=num_workers)
return train_loader
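# Both loaders above normalize with the standard ImageNet mean/std,
# presumably because the backbone inside ConvNet is ImageNet-pretrained;
# only the training loader applies augmentation (random crop/rotation/flip).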
def train(model, dataloaders, num_epochs, optimizer, criterion, device):
"""
This is the training method that is called by the PyTorch training script. The parameters
passed are as follows:
model - The PyTorch model that we wish to train.
dataloaders - A Dict with DataLoaders for train and test.
num_epochs - The total number of epochs to train for.
optimizer - The optimizer to use during training.
criterion - The loss function used for training.
device - Where the model and data should be loaded (gpu or cpu).
"""
start = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print(f'Epoch {epoch}/{num_epochs - 1}')
print('-' * 30)
for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
running_loss = 0.0
running_corrects = 0
for batch_X, batch_y in dataloaders[phase]:
batch_X = batch_X.to(device)
batch_y = batch_y.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = model(batch_X)
loss = criterion(outputs, batch_y)
_, preds = torch.max(outputs, 1)
if phase == 'train':
loss.backward()
optimizer.step()
running_loss += loss.item() * batch_X.size(0)
running_corrects += torch.sum(preds == batch_y.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - start
model.load_state_dict(best_model_wts)
print(f'Training complete in {(time_elapsed // 60):.0f}m {(time_elapsed % 60):.0f}s')
print(f'Best val Acc: {best_acc:4f}')
def save_model_params(args):
"""Takes args to save model parameters to specified location"""
model_info_path = os.path.join(args.model_dir, 'model_info.pth')
with open(model_info_path, 'wb') as f:
model_info = {
'hidden_dim': args.hidden_dim,
'output_dim': args.output_dim
}
torch.save(model_info, f)
def save_model(model, model_path):
"""Receives model and the path to save it to"""
with open(model_path, 'wb') as f:
torch.save(model.cpu().state_dict(), f)
if __name__ == '__main__':
# All of the model parameters and training parameters are sent as arguments when the script
# is executed. Here we set up an argument parser to easily access the parameters.
parser = argparse.ArgumentParser()
# Training Parameters
    parser.add_argument('--batch-size', type=int, default=30, metavar='N',
                        help='input batch size for training (default: 30)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# Model Parameters
parser.add_argument('--output_dim', type=int, default=120, metavar='N',
help='number of classes of the problem')
parser.add_argument('--hidden_dim', type=int, default=120, metavar='N',
help='hidden dim serving as outputs of trained models')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate of optimizer')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum of sgd optmizer')
# SageMaker Parameters
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--test-dir', type=str, default=os.environ['SM_CHANNEL_TEST'])
parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--num-cpus', type=int, default=os.environ['SM_NUM_CPUS'])
args = parser.parse_args()
model_path = os.path.join(args.model_dir, 'model.pth')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {}.".format(device))
torch.manual_seed(args.seed)
# Load the training data.
data_loaders = {'train': _get_train_data_loader(args.batch_size, args.train_dir, args.num_cpus),
'val': _get_test_data_loader(args.batch_size, args.test_dir, args.num_cpus)}
# Build the model.
model = ConvNet(args.hidden_dim, args.output_dim).to(device)
model = torch.nn.DataParallel(model) # recommended by sagemaker sdk python devs
optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=args.lr)
criterion = torch.nn.CrossEntropyLoss()
# train model
train(model, data_loaders, args.epochs, optimizer, criterion, device)
# Save the model and its parameters
save_model_params(args)
save_model(model, model_path)
``` |
{
"source": "0luhancheng0/dql",
"score": 2
} |
#### File: src/dql/environment.py
```python
import gym
import torch
from torchvision import transforms as T
from PIL import Image
class ScreenProcessor:
def __init__(self, env, image_size):
self.env = env
self.transform = T.Compose([T.ToPILImage(),
T.Resize(
image_size, interpolation=Image.CUBIC),
T.functional.to_grayscale,
T.ToTensor()])
return
def get_screen(self):
screen = self.env.render(mode='rgb_array')
return self.transform(screen)
class Environment:
def __init__(self, hps):
self.hps = hps
self.env = gym.make(hps['envname'])
self.screen_processor = ScreenProcessor(self.env, self.hps['image_size'])
self.action_space = self.env.action_space
return
def get_init_state(self):
self.env.reset()
current_screen = self.screen_processor.get_screen()
state = torch.cat([current_screen] * self.hps['frame_length'], dim=0)
return state
def get_screen(self):
return self.screen_processor.get_screen()
def step(self, *args, **kwargs):
return self.env.step(*args, **kwargs)
def reset(self, *args, **kwargs):
return self.env.reset(*args, **kwargs)
def render(self, *args, **kwargs):
return self.env.render(*args, **kwargs)
def close(self, *args, **kwargs):
return self.env.close(*args, **kwargs)
```
#### File: src/dql/logger.py
```python
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
import torch
import psutil
class Logger:
def __init__(self, hps):
self.hps = hps
self.log_dir = (hps['log_dir']/ hps['envname']).resolve()
self.log_dir.mkdir(exist_ok=True, parents=True)
self.checkpoint_dir = self.log_dir / 'checkpoints'
self.saved_episode_dir = self.log_dir / 'episodes'
self.saved_episode_dir.mkdir(exist_ok=True)
self.checkpoint_dir.mkdir(exist_ok=True)
self.summary_writter = SummaryWriter(log_dir=str(self.log_dir / 'tensorboard_events'))
self.current_process = psutil.Process()
self.episode_offset = 0
self.log_hps()
return
def log_hps(self):
for k,v in self.hps.items():
if isinstance(v, int) or isinstance(v, float):
self.summary_writter.add_scalar(
'hyperparameter/{}'.format(k), v)
else:
self.summary_writter.add_text(
'hyperparameter/{}'.format(k), str(v))
return
    def save_episodes(self, episodes):
        # save to a file inside the episodes directory; torch.save needs a
        # file path, not a directory (the file name here is an assumed choice)
        torch.save(episodes, str(self.saved_episode_dir / 'episodes.pt'))
        return
def add_episode_offset(self, current_episode):
return self.episode_offset + current_episode
def set_episode_offset(self, offset):
self.episode_offset = offset
return
def save_checkpoint(self, model, i_episode):
# print(self.add_episode_offset(i_episode))
saved_dict = {
'policynet': model.policy_net.state_dict(),
'optimizer': model.optimizer.state_dict()
}
torch.save(saved_dict, str(self.checkpoint_dir / 'saved_model.ckpt-{}'.format(self.add_episode_offset(i_episode))))
return
def add_sys_info(self, i_episode):
self.summary_writter.add_scalar('sys/cpu_percentage', self.current_process.cpu_percent(), self.add_episode_offset(i_episode))
self.summary_writter.add_scalar(
'sys/memory_percent', self.current_process.memory_percent(), self.add_episode_offset(i_episode))
return
def add_scalar(self, tag, scalar_value, i_episode):
return self.summary_writter.add_scalar(tag, scalar_value, global_step=self.add_episode_offset(i_episode))
def add_graph(self, model, **kwargs):
if self.episode_offset != 0:
return
self.summary_writter.add_graph(model.policy_net, **kwargs)
self.summary_writter.add_graph(model.target_net, **kwargs)
return
def close(self):
return self.summary_writter.close()
```
#### File: src/dql/model.py
```python
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dql.helper import Transition
from dql.helper import get_latest_model_path
# from torch.utils.tensorboard import SummaryWriter
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class ReplayMemory:
def __init__(self, capacity, batch_size):
self.capacity = capacity
self.memory = []
self.batch_size = batch_size
self.position = 0
def push(self, *args):
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
    # did not find a torch equivalent to np.random.choice :(
def sample(self):
random_idx = np.random.choice(
np.arange(len(self.memory)), size=self.batch_size)
res = [self.memory[i] for i in random_idx]
return res
def __len__(self):
return len(self.memory)
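    # Usage sketch (Transition comes from dql.helper; push args must follow
    # the Transition field order):
    #   memory = ReplayMemory(capacity=10000, batch_size=32)
    #   memory.push(state, action, reward, next_state, done)
    #   if len(memory) >= memory.batch_size:
    #       batch = Transition(*zip(*memory.sample()))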
# the architecture for both policy net and target net
class DQN(nn.Module):
def __init__(self, h, w, n_actions, frame_length):
super(DQN, self).__init__()
self.n_actions = n_actions
self.conv1 = nn.Conv2d(frame_length, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
def conv2d_size_out(size, kernel_size=5, stride=2):
return (size - kernel_size) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
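        # e.g. with the default 84x84 input each conv (k=5, s=2) maps
        # 84 -> 40 -> 18 -> 7 per side, so linear_input_size = 7 * 7 * 32 = 1568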
self.output_layer = nn.Linear(linear_input_size, n_actions)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.output_layer(x.view(x.size(0), -1))
def eps_greedy_policy(self, state, eps):
if np.random.sample() > eps:
with torch.no_grad():
return self.greedy_policy(state)
else:
return torch.tensor([[np.random.choice(np.arange(self.n_actions))]], device=device, dtype=torch.long)
def greedy_policy(self, state):
return self(state).argmax(1)
class Model:
def __init__(self, env, hps):
self.hps = hps
# self.summary_writter = SummaryWriter(
# log_dir='runs/{}'.format(self.hps['envname']))
self.policy_net = DQN(
*self.hps['image_size'], env.action_space.n, self.hps['frame_length'])
self.target_net = DQN(
*self.hps['image_size'], env.action_space.n, self.hps['frame_length'])
self.target_net.load_state_dict(self.policy_net.state_dict())
self.loss = torch.empty(1)
self.optimizer = optim.RMSprop(self.policy_net.parameters())
self.target_net.eval()
self.policy_net.train()
return
def optimize_model(self, batch):
state_batch = torch.stack(batch.state)
action_batch = torch.stack(batch.action)
reward_batch = torch.stack(batch.reward)
next_state_batch = torch.stack(batch.next_state)
done_batch = torch.tensor(batch.done, device=device)
current_q = self.policy_net(state_batch).gather(
1, action_batch.unsqueeze(1)).squeeze()
next_q = self.target_net(next_state_batch)
TD_target_batch = reward_batch + \
(~done_batch).type(torch.float32) * \
self.hps['gamma'] * next_q.max(dim=1).values
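        # TD_target above is r + gamma * max_a Q_target(s', a), with the
        # bootstrap term zeroed for terminal transitions via (~done_batch).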
self.loss = F.smooth_l1_loss(current_q, TD_target_batch)
self.optimizer.zero_grad()
self.loss.backward()
self.optimizer.step()
return self.loss
def update_target(self):
self.target_net.load_state_dict(self.policy_net.state_dict())
return
def load_checkpoint(self):
model_ckpt_path, latest_episode_num = get_latest_model_path(self.hps)
checkpoint = torch.load(model_ckpt_path)
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.policy_net.load_state_dict(checkpoint['policynet'])
self.update_target()
# self.target_net.load_state_dict(checkpoint['target_net'])
return latest_episode_num
```
#### File: src/dql/runner.py
```python
from pathlib import Path
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from dql.environment import Environment
from dql.logger import Logger
from dql.model import Model
from dql.trainer import Trainer
from dql.evaluator import Evaluator
def parse_argument():
    parser = ArgumentParser(description='My implementation of deep Q-network',
                            formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--envname', type=str, nargs='?', default='CartPole-v0',
help='Need to be a valid gym atari environment')
parser.add_argument('--play', action='store_true', help='flag the eval mode')
parser.add_argument('-b', '--batch_size', type=int, nargs='?', default=32,
help='How many samples to gather from replay memory for each update')
    parser.add_argument('-n', '--num_episode', type=int, nargs='?', default=100,
                        help='number of episodes to train on; this does not include the episodes generated while populating replay memory')
    parser.add_argument('--frame_length', type=int, nargs='?', default=4,
                        help='this many frames will be stacked together to create a state')
    parser.add_argument('--replay_mem_cap', type=int, nargs='?', default=100,
                        help='capacity of the replay memory; must be strictly greater than the batch size')
parser.add_argument('--gamma', type=float, nargs='?',
default=0.99, help='discount factor')
parser.add_argument('--target_update', type=int, nargs='?', default=5,
help='This argument specify how frequent target net will be updated')
parser.add_argument('--log_interval', type=int, nargs='?', default=1,
help='log will be written every <interval> episodes')
    parser.add_argument('--checkpoint_interval',
                        type=int, nargs='?', default=5, help='a model checkpoint will be written every <interval> episodes; this also defines how frequently the model is evaluated during training')
parser.add_argument('--log_dir', type=Path, nargs='?', default=Path('./runs'),
help='tensorboard events and saved model will be placed in <log_dir>/<envname> directory ')
    parser.add_argument('--eps_schedule', type=float, nargs='*', default=[
                        0.9, 0.05, 200], help='consumes 3 values defining the epsilon decay schedule (start, end, steps)')
parser.add_argument('--image_size', type=int, nargs='*',
default=[84, 84], help='Size of cropped image')
    parser.add_argument('--eval_episode', type=int, nargs='?', default=10, help='how many episodes to run while evaluating the model; the reward is calculated as the average over those episodes')
    parser.add_argument('--resume', type=bool, nargs='?', default=True, help='resume from the checkpoints in <log_dir>/<envname>/checkpoints')
args = parser.parse_args()
if args.batch_size >= args.replay_mem_cap:
raise ValueError(
'Capacity of replay memory must be strictly greater than batch size. Otherwise how would you sample from it?')
if len(args.eps_schedule) != 3:
        raise ValueError(
            'epsilon schedule requires exactly 3 values (start, end, steps)')
assert len(args.image_size) == 2
return args
def main():
args = parse_argument()
hps = vars(args)
env = Environment(hps)
model = Model(env, hps)
logger = Logger(hps)
evaluator = Evaluator(env, model, hps, logger)
trainer = Trainer(env, model, hps, logger, evaluator)
checkpoint_path = (args.log_dir / args.envname / 'checkpoints').resolve()
# print(checkpoint_dir)
if args.resume and len(list(checkpoint_path.rglob('saved_model.ckpt-*'))) != 0:
latest_episode_num = model.load_checkpoint()
print('load checkpoint from {0}/{1}'.format(checkpoint_path,
'saved_model.ckpt-'+str(latest_episode_num)))
logger.set_episode_offset(latest_episode_num)
# if args.resume:
# model_ckpt_path = get_latest_model_path(hps)
# model.l
if args.play:
print('Start playing')
evaluator.play()
else:
print('Start training')
trainer.train()
env.close()
return
if __name__ == "__main__":
main()
``` |
{
"source": "0luhancheng0/hpccm-containers",
"score": 2
} |
#### File: hpccm_containers/ccp-em/ccp-em.py
```python
from hpccm import config, Stage
from hpccm import primitives
from hpccm.building_blocks import packages
from hpccm.primitives import label, baseimage, comment, shell, environment
from fire import Fire
from hpccm_containers.utils import from_prefix, add_binary
def build(container_format='singularity', os_release='ubuntu', os_version='20.04', cuda_version='11.0'):
config.set_container_format(container_format)
image = f'nvcr.io/nvidia/cuda:{cuda_version}-devel-{os_release}{os_version}'
stage0 = Stage(name='stage0')
stage0 += baseimage(image=image, _bootstrap='docker')
stage0 += environment(variables={
'LC_ALL': 'en_AU.UTF-8',
'LANGUAGE': 'en_AU.UTF-8',
})
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += shell(commands=[
'rm -f /bin/sh && ln -s /bin/bash /bin/sh',
'rm -f /usr/bin/sh && ln -s /usr/bin/bash /usr/bin/sh',
'/bin/bash',
])
stage0 += environment(variables=from_prefix('/usr/local/cuda'))
stage0 += packages(apt=['wget', 'git', 'software-properties-common', 'build-essential', 'locales', 'zlib1g-dev'])
stage0 += shell(commands=['locale-gen en_AU.UTF-8'])
stage0 += comment('Installing vglrun and TurboVNC')
stage0 += packages(apt=['ubuntu-desktop', 'vim', 'mesa-utils', 'python3-pip', 'python3-pyqt5', 'pyqt5-dev', 'python3-tk'])
stage0 += shell(commands=[
'wget https://swift.rc.nectar.org.au/v1/AUTH_810/CVL-Singularity-External-Files/turbovnc_2.2.5_amd64.deb && dpkg -i turbovnc_2.2.5_amd64.deb && rm turbovnc_2.2.5_amd64.deb',
'wget https://swift.rc.nectar.org.au/v1/AUTH_810/CVL-Singularity-External-Files/virtualgl_2.6.4_amd64.deb && dpkg -i virtualgl_2.6.4_amd64.deb && rm virtualgl_2.6.4_amd64.deb',
'apt update',
'apt -y upgrade'
])
stage0 += comment('Installing pre-requisites')
stage0 += primitives.copy(src="./ccp4-7.1.014-shelx-arpwarp-linux64.tar.gz", dest="/opt/ccp4-7.1.014-shelx-arpwarp-linux64.tar.gz")
stage0 += shell(commands=[
'cd /opt && tar -xf ccp4-7.1.014-shelx-arpwarp-linux64.tar.gz && rm ccp4-7.1.014-shelx-arpwarp-linux64.tar.gz',
'touch $HOME/.agree2ccp4v6',
'cd ccp4-7.1',
'./BINARY.setup',
])
stage0 += environment(variables=add_binary('/opt/ccp4-7.1/bin'))
stage0 += comment('Installing CCP-EM')
stage0 += primitives.copy(src="./ccpem-1.5.0-linux-x86_64.tar.gz", dest="/opt/ccpem-1.5.0-linux-x86_64.tar.gz")
stage0 += primitives.copy(src="./input.txt", dest="/opt/input.txt")
stage0 += shell(commands=[
'touch $HOME/.agree2ccpemv1',
'cd /opt && tar -xf ccpem-1.5.0-linux-x86_64.tar.gz && rm ccpem-1.5.0-linux-x86_64.tar.gz',
'cd ccpem-1.5.0',
'./install_ccpem.sh',
'cat /opt/input.txt | bash install_modeller.sh'
])
return stage0
if __name__ == '__main__':
Fire(build)
```
#### File: hpccm_containers/cfdem/cfdem.py
```python
from os import environ
from hpccm import config, Stage
from hpccm.building_blocks import gnu, openmpi, packages, boost, python, generic_build
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.primitives import label, baseimage, comment
from fire import Fire
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
from hpccm_containers.utils import from_library, from_prefix, shell_with_log, add_flags, add_library_path, add_include_path
def build(container_format='singularity', openmpi_version='2.0.4', gnu_version='10', cfdem_prefix='/usr/local/cfdem',
cfdem_version='3.8.0', liggghts_prefix='/usr/local/ligghts', lpp_prefix='/usr/local/lpp', image='ubuntu:20.04',
mlton_version='on-20210117-release', gmp_version='6.2.1'):
config.set_container_format(container_format)
stage0 = Stage(name='stage0')
stage0 += baseimage(image=image, _bootstrap='docker')
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += shell(commands=['rm /usr/bin/sh', 'ln -s /usr/bin/bash /usr/bin/sh', '/usr/bin/bash'])
stage0 += packages(apt=['locales', 'wget', 'software-properties-common', 'git', 'build-essential', 'flex',
'bison', 'cmake', 'zlib1g-dev', 'gnuplot', 'libreadline-dev', 'libncurses-dev',
'libxt-dev', 'libscotch-dev', 'libptscotch-dev', 'libvtk6-dev', 'python-numpy',
'python-dev', 'qt5-default', 'git-core', 'libboost-system-dev', 'libboost-thread-dev',
'libqt5x11extras5-dev', 'qttools5-dev', 'curl', 'libgl1-mesa-dev', 'libosmesa6-dev', 'libssh2-1',
'libtool'])
compilers = gnu(version=gnu_version)
stage0 += compilers
openmpi_building_block = openmpi(version=openmpi_version, toolchain=compilers.toolchain, cuda=False)
stage0 += openmpi_building_block
stage0 += generic_autotools(
url=f'https://gmplib.org/download/gmp/gmp-{gmp_version}.tar.xz',
prefix='/usr/local/gmp',
directory=f'gmp-{gmp_version}/',
)
stage0 += environment(variables=from_library('/usr/local/gmp'))
stage0 += generic_build(
repository='https://github.com/MLton/mlton.git',
branch=mlton_version,
build=['make -j'],
install=['make PREFIX=/usr/local/mlton']
)
if cfdem_version == '3.8.0':
OF_release = '5.x'
OF_commitHashtag = '538044ac05c4672b37c7df607dca1116fa88df88'
else:
raise Exception('Check https://github.com/CFDEMproject/CFDEMcoupling-PUBLIC/blob/master/src/lagrangian/cfdemParticle/cfdTools/versionInfo.H')
stage0 += comment('Obtain CFDEM source')
stage0 += shell(commands=[
f'mkdir -p {cfdem_prefix} {liggghts_prefix} {lpp_prefix}',
f'git clone --branch {cfdem_version} https://github.com/CFDEMproject/CFDEMcoupling-PUBLIC.git {cfdem_prefix}',
f'git clone --branch {cfdem_version} https://github.com/CFDEMproject/LIGGGHTS-PUBLIC.git {liggghts_prefix}',
f'git clone https://github.com/CFDEMproject/LPP.git {lpp_prefix}'
])
stage0 += comment('Install OpenFoam')
openfoam_prefix = f'/usr/local/OpenFOAM-{OF_release}'
thirdparty_prefix = f'/usr/local/ThirdParty-{OF_release}'
stage0 += shell(commands=[
f'mkdir -p {openfoam_prefix} {thirdparty_prefix}',
f'git clone https://github.com/OpenFOAM/OpenFOAM-{OF_release}.git {openfoam_prefix} && cd {openfoam_prefix} && git checkout {OF_commitHashtag}',
f'git clone https://github.com/OpenFOAM/ThirdParty-{OF_release}.git {thirdparty_prefix}',
])
stage0 += shell(commands=[
f'echo "source {openfoam_prefix}/etc/bashrc" >> ~/.bashrc',
])
# DLIB_PATH = '/usr/lib/x86_64-linux-gnu'
# INCLUDE_PATH = '/usr/include'
stage0 += shell_with_log(commands=[
f'{thirdparty_prefix}/Allwmake -j', # this breaks with openmpi >= 3, error: static assertion failed: "MPI_Type_extent was removed in MPI-3.0. Use MPI_Type_get_extent instead."
# f'{thirdparty_prefix}/makeParaView -mpi -mesa -mesa-lib {DLIB_PATH}/libOSMesa.so -mesa-include {INCLUDE_PATH}/GL -verbose',
        f'{thirdparty_prefix}/makeParaView -mpi',
        'wmRefresh',
])
stage0 += shell(commands=[
f'{openfoam_prefix}/Allwmake -j',
])
# /usr/bin/g++ -fPIC -O3 -DNDEBUG -Wl,--no-undefined -lc -shared -Wl,-soname,libvtkCommonSystem-pv5.4.so.1 -o ../../../lib/libvtkCommonSystem-pv5.4.so.1 CMakeFiles/vtkCommonSystem.dir/vtkClientSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkDirectory.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkServerSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocket.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkSocketCollection.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkThreadMessager.cxx.o CMakeFiles/vtkCommonSystem.dir/vtkTimerLog.cxx.o -Wl,-rpath,/usr/local/ThirdParty-5.x/build/linux64Gcc/ParaView-5.4.0/lib: ../../../lib/libvtkCommonCore-pv5.4.so.1 ../../../lib/libvtksys-pv5.4.so.1 -lpthread -ldl
return stage0
if __name__ == '__main__':
Fire(build)
```
#### File: hpccm_containers/cp2k/cp2k.py
```python
from os import environ
from hpccm import config, Stage
from hpccm.building_blocks import gnu, openmpi, generic_build, mkl, python
from hpccm.building_blocks.packages import packages
from hpccm.primitives import label, baseimage, workdir, shell, environment, runscript, comment
from fire import Fire
from hpccm_containers.utils import from_prefix
# ==================== generating arch files ====================
# arch files can be found in the /opt/cp2k-toolchain/install/arch subdirectory
# Wrote /opt/cp2k-toolchain/install/arch/local.ssmp
# Wrote /opt/cp2k-toolchain/install/arch/local.sdbg
# Wrote /opt/cp2k-toolchain/install/arch/local.psmp
# Wrote /opt/cp2k-toolchain/install/arch/local.pdbg
# Wrote /opt/cp2k-toolchain/install/arch/local_warn.psmp
# Wrote /opt/cp2k-toolchain/install/arch/local_coverage.pdbg
# ========================== usage =========================
# Done!
# Now copy:
# cp /opt/cp2k-toolchain/install/arch/* to the cp2k/arch/ directory
# To use the installed tools and libraries and cp2k version
# compiled with it you will first need to execute at the prompt:
# source /opt/cp2k-toolchain/install/setup
# To build CP2K you should change directory:
# cd cp2k/
# make -j 1 ARCH=local VERSION="ssmp sdbg psmp pdbg"
# arch files for GPU enabled CUDA versions are named "local_cuda.*"
# arch files for valgrind versions are named "local_valgrind.*"
# arch files for coverage versions are named "local_coverage.*"
# Note that these pre-built arch files are for the GNU compiler, users have to adapt them for other compilers.
# It is possible to use the provided CP2K arch files as guidance.
def build(container_format='singularity', os='ubuntu20.04', cuda_version='11.0', gpu_version='V100', mkl_version='2020.0-088', version='psmp'):
image = f'nvcr.io/nvidia/cuda:{cuda_version}-devel-{os}'
config.set_container_format(container_format)
stage0 = Stage(name='stage0')
stage0 += baseimage(image=image, _bootstrap='docker')
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += comment('Toolchain installation translated from https://github.com/cp2k/cp2k/blob/master/tools/toolchain/Dockerfile.cuda_mkl')
stage0 += packages(apt=['git', 'gfortran', 'mpich', 'libmpich-dev', 'software-properties-common', 'python-dev'])
stage0 += python()
stage0 += shell(commands=[
'git clone --depth 1 --branch v8.1.0 https://github.com/cp2k/cp2k.git /cp2k',
'cd /cp2k',
'git submodule update --init --recursive'
])
stage0 += shell(commands=['/cp2k/tools/toolchain/install_requirements_ubuntu.sh'])
stage0 += environment(variables={
'CUDA_PATH': '/usr/local/cuda',
'LD_LIBRARY_PATH': '/usr/local/cuda/:${LD_LIBRARY_PATH}',
'MKLROOT': '/opt/intel/compilers_and_libraries/linux/mkl',
**from_prefix('/usr/local/cuda')
})
stage0 += mkl(
eula=True,
version=mkl_version
)
stage0 += workdir(directory='/opt/cp2k-toolchain')
stage0 += shell(commands=[
'cd /opt/cp2k-toolchain',
'mkdir scripts && cp -r /cp2k/tools/toolchain/scripts/* ./scripts/',
'cp /cp2k/tools/toolchain/install_cp2k_toolchain.sh .',
])
stage0 += shell(commands=[
'cd /opt/cp2k-toolchain',
f'./install_cp2k_toolchain.sh --mpi-mode=mpich --math-mode=mkl --with-reflapack=no --with-scalapack=no --with-elpa=no --gpu-ver={gpu_version}',
'rm -rf ./build'
])
stage0 += shell(commands=[
'cp /opt/cp2k-toolchain/install/arch/* /cp2k/arch/',
'cd /cp2k',
"sed 's/source /. /g' /opt/cp2k-toolchain/install/setup > /opt/cp2k-toolchain/install/setup ",
'. /opt/cp2k-toolchain/install/setup',
f'make ARCH=local VERSION="{version}" | tee /var/tmp/log.txt'
])
stage0 += environment(variables={
'PATH': '/cp2k/exe/local:$PATH'
})
stage0 += shell(commands=[
'ln -s /usr/lib/x86_64-linux-gnu/libncursesw.so.6 /usr/lib/x86_64-linux-gnu/libncursesw.so.5',
'ln -s /usr/lib/x86_64-linux-gnu/libtinfo.so.6 /usr/lib/x86_64-linux-gnu/libtinfo.so.5'
])
stage0 += environment(variables={'LD_LIBRARY_PATH': '/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH'})
return stage0
if __name__ == '__main__':
Fire(build)
```
#### File: hpccm_containers/mashtree/mashtree.py
```python
from hpccm.building_blocks import packages, boost, generic_autotools, generic_build
from hpccm.primitives import baseimage, shell, label, environment
from hpccm_containers.utils import from_prefix
import hpccm
from fire import Fire
def build(image="Characterisation-Virtual-Laboratory/CharacterisationVL-Software:2004", _bootstrap='shub', mash_version='v2.2.2', capnp_version='0.8.0', quicktree_version='v2.5'):
stage0 = hpccm.Stage()
hpccm.config.set_container_format("singularity")
stage0 += baseimage(image=image, _bootstrap=_bootstrap, _distro='ubuntu20')
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += packages(ospackages=['cpanminus', 'libexpat1-dev', 'sqlite3', 'libsqlite3-dev', 'autoconf'])
stage0 += generic_build(
repository='https://github.com/khowe/quicktree',
branch=quicktree_version,
build=['make'],
install=[
'mv quicktree /usr/local/bin',
'mv include/* /usr/local/include/'
],
)
stage0 += boost()
stage0 += generic_autotools(
url=f'https://capnproto.org/capnproto-c++-{capnp_version}.tar.gz'
)
stage0 += shell(commands=['cpanm -l /usr/local/perl5 --notest BioPerl Bio::Sketch::Mash DBD::SQLite DBI'])
stage0 += generic_autotools(
repository=f'https://github.com/marbl/Mash',
preconfigure=['./bootstrap.sh'],
branch=mash_version,
with_capnp='/usr/local/',
with_boost='/usr/local/boost/',
)
stage0 += environment(variables={'PERL5LIB': '$PERL5LIB:/usr/local', **from_prefix('/usr/local/mashtree')})
stage0 += shell(commands=['cpanm -f -l /usr/local/mashtree Mashtree'])
return stage0
if __name__ == "__main__":
Fire(build)
```
#### File: hpccm_containers/perfsonar/perfsonar.py
```python
from hpccm import config, Stage
from hpccm.building_blocks import gnu, openmpi, packages, nvhpc, generic_build
from hpccm.primitives import label, baseimage
from fire import Fire
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.runscript import runscript
from hpccm.primitives.shell import shell
def build(container_format='singularity', os='ubuntu:18.04'):
image = os
config.set_container_format(container_format)
stage0 = Stage(name='stage0')
stage0 += baseimage(image=image, _bootstrap='docker')
stage0 += environment(variables={
'LC_ALL': 'en_AU.UTF-8',
'LANGUAGE': 'en_AU.UTF-8',
})
stage0 += label(metadata={'maintainer': '<NAME>', 'email': '<EMAIL>'})
stage0 += shell(commands=['rm /usr/bin/sh', 'ln -s /usr/bin/bash /usr/bin/sh', '/usr/bin/bash'])
stage0 += packages(apt=['wget', 'git', 'software-properties-common', 'build-essential', 'locales'])
stage0 += shell(commands=['locale-gen en_AU.UTF-8'])
stage0 += shell(commands=[
'cd /etc/apt/sources.list.d/',
'wget http://downloads.perfsonar.net/debian/perfsonar-release.list',
'wget -qO - http://downloads.perfsonar.net/debian/perfsonar-official.gpg.key | apt-key add -',
'add-apt-repository -y universe',
'apt-get update'
])
stage0 += packages(apt=['perfsonar-tools', 'perfsonar-testpoint', 'perfsonar-core', 'perfsonar-centralmanagement', 'perfsonar-toolkit'])
stage0 += shell(commands=['/usr/lib/perfsonar/scripts/install-optional-packages.py'])
return stage0
if __name__ == '__main__':
Fire(build)
``` |
{
"source": "0luhancheng0/ray",
"score": 2
} |
#### File: experimental/client/__init__.py
```python
from ray.experimental.client.api import ClientAPI
from ray.experimental.client.api import APIImpl
from typing import Optional, List, Tuple
from contextlib import contextmanager
import logging
import os
logger = logging.getLogger(__name__)
# About these global variables: Ray 1.0 uses exported module functions to
# provide its API, and we need to match that. However, we want different
# behaviors depending on where, exactly, in the client stack this is running.
#
# The reason for these differences depends on what's being pickled and passed
# to functions, or functions inside functions. So there are three cases to care
# about
#
# (Python Client)-->(Python ClientServer)-->(Internal Raylet Process)
#
# * _client_api should be set if we're inside the client
# * _server_api should be set if we're inside the clientserver
# * Both will be set if we're running both (as in a test)
# * Neither should be set if we're inside the raylet (but we still need to shim
# from the client API surface to the Ray API)
#
# The job of RayAPIStub (below) delegates to the appropriate one of these
# depending on what's set or not. Then, all users importing the ray object
# from this package get the stub which routes them to the appropriate APIImpl.
_client_api: Optional[APIImpl] = None
_server_api: Optional[APIImpl] = None
# The reason for _is_server is a hack around the above comment while running
# tests. If we have both a client and a server trying to control these static
# variables then we need a way to decide which to use. In this case, both
# _client_api and _server_api are set.
# This boolean flips between the two
_is_server: bool = False
@contextmanager
def stash_api_for_tests(in_test: bool):
global _is_server
is_server = _is_server
if in_test:
_is_server = True
try:
yield _server_api
finally:
if in_test:
_is_server = is_server
def _set_client_api(val: Optional[APIImpl]):
global _client_api
global _is_server
if _client_api is not None:
raise Exception("Trying to set more than one client API")
_client_api = val
_is_server = False
def _set_server_api(val: Optional[APIImpl]):
global _server_api
global _is_server
if _server_api is not None:
raise Exception("Trying to set more than one server API")
_server_api = val
_is_server = True
def reset_api():
global _client_api
global _server_api
global _is_server
_client_api = None
_server_api = None
_is_server = False
def _get_client_api() -> APIImpl:
global _client_api
return _client_api
def _get_server_instance():
"""Used inside tests to inspect the running server.
"""
global _server_api
if _server_api is not None:
return _server_api.server
class RayAPIStub:
def connect(self,
conn_str: str,
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
stub=None) -> None:
from ray.experimental.client.worker import Worker
_client_worker = Worker(conn_str, secure=secure, metadata=metadata)
_set_client_api(ClientAPI(_client_worker))
def disconnect(self):
global _client_api
if _client_api is not None:
_client_api.close()
_client_api = None
def __getattr__(self, key: str):
global _get_client_api
api = _get_client_api()
return getattr(api, key)
def is_connected(self) -> bool:
global _client_api
return _client_api is not None
def init(self, *args, **kwargs):
if _is_client_test_env():
global _test_server
import ray.experimental.client.server.server as ray_client_server
_test_server, address_info = ray_client_server.init_and_serve(
"localhost:50051", test_mode=True, *args, **kwargs)
self.connect("localhost:50051")
return address_info
else:
raise NotImplementedError(
"Please call ray.connect() in client mode")
ray = RayAPIStub()
_test_server = None
def _stop_test_server(*args):
global _test_server
_test_server.stop(*args)
def _is_client_test_env() -> bool:
return os.environ.get("RAY_TEST_CLIENT_MODE") == "1"
# Someday we might add methods in this module so that someone who
# tries to `import ray_client as ray` -- as a module, instead of
# `from ray_client import ray` -- as the API stub
# still gets expected functionality. This is the way the ray package
# worked in the past.
#
# This really calls for PEP 562: https://www.python.org/dev/peps/pep-0562/
# But until Python 3.6 is EOL, here we are.
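#
# With PEP 562 the module could delegate attribute access itself, roughly
# (sketch only, not active code):
#
#     def __getattr__(name):
#         api = _get_client_api()
#         return getattr(api, name)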
``` |
{
"source": "0lvin-cfy/cloudify-kubernetes-provider",
"score": 2
} |
#### File: cloudify_node_drop/cloudify_drop/workflow.py
```python
from cloudify.decorators import workflow
from cloudify.plugins import lifecycle
@workflow
def delete(ctx,
scalable_entity_name,
delta,
scale_compute,
removed_ids_exclude_hint,
removed_ids_include_hint,
ignore_failure=False,
**kwargs):
"""Scales in/out the subgraph of node_or_group_name.
If a node name is passed, and `scale_compute` is set to false, the
subgraph will consist of all the nodes that are contained in the node and
the node itself.
If a node name is passed, and `scale_compute` is set to true, the subgraph
will consist of all nodes that are contained in the compute node that
contains the node and the compute node itself.
If a group name or a node that is not contained in a compute
node, is passed, this property is ignored.
`delta` is used to specify the scale factor.
For `delta > 0`: If current number of instances is `N`, scale out to
`N + delta`.
For `delta < 0`: If current number of instances is `N`, scale in to
`N - |delta|`.
:param ctx: cloudify context
:param scalable_entity_name: the node or group name to scale
:param delta: scale in/out factor
:param scale_compute: should scale apply on compute node containing
the specified node
:param ignore_failure: ignore operations failures in uninstall workflow
"""
if isinstance(delta, basestring):
try:
delta = int(delta)
except ValueError:
raise ValueError('The delta parameter must be a number. Got: {0}'
.format(delta))
ctx.logger.info(('Delete with scalable_entity_name: {}, delta: {}, ' +
'removed_ids_exclude_hint: {}, ' +
'removed_ids_include_hint: {}').format(
repr(scalable_entity_name),
repr(delta),
repr(removed_ids_exclude_hint),
repr(removed_ids_include_hint)
))
if delta == 0:
ctx.logger.info('delta parameter is 0, so no scaling will take place.')
return
scaling_group = ctx.deployment.scaling_groups.get(scalable_entity_name)
if scaling_group:
curr_num_instances = scaling_group['properties']['current_instances']
planned_num_instances = curr_num_instances + delta
scale_id = scalable_entity_name
else:
node = ctx.get_node(scalable_entity_name)
if not node:
raise ValueError("No scalable entity named {0} was found".format(
scalable_entity_name))
host_node = node.host_node
scaled_node = host_node if (scale_compute and host_node) else node
curr_num_instances = scaled_node.number_of_instances
planned_num_instances = curr_num_instances + delta
scale_id = scaled_node.id
if planned_num_instances < 0:
raise ValueError('Provided delta: {0} is illegal. current number of '
'instances of entity {1} is {2}'
.format(delta,
scalable_entity_name,
curr_num_instances))
modification = ctx.deployment.start_modification({
scale_id: {
'instances': planned_num_instances,
# These following parameters are not exposed at the moment,
# but should be used to control which node instances get scaled in
# (when scaling in).
# They are mentioned here, because currently, the modification API
# is not very documented.
# Special care should be taken because if `scale_compute == True`
# (which is the default), then these ids should be the compute node
# instance ids which are not necessarily instances of the node
# specified by `scalable_entity_name`.
'removed_ids_exclude_hint': removed_ids_exclude_hint,
# Node instances denoted by these instance ids should be *kept* if
# possible.
# 'removed_ids_exclude_hint': [],
'removed_ids_include_hint': removed_ids_include_hint,
# Node instances denoted by these instance ids should be *removed*
# if possible.
# 'removed_ids_include_hint': []
}
})
graph = ctx.graph_mode()
try:
ctx.logger.info('Deployment modification started. '
'[modification_id={0}]'.format(modification.id))
if delta > 0:
added_and_related = set(modification.added.node_instances)
added = set(i for i in added_and_related
if i.modification == 'added')
related = added_and_related - added
try:
lifecycle.install_node_instances(
graph=graph,
node_instances=added,
related_nodes=related)
except Exception as ex:
ctx.logger.error('Scale out failed, scaling back in. {}'
.format(repr(ex)))
for task in graph.tasks_iter():
graph.remove_task(task)
lifecycle.uninstall_node_instances(
graph=graph,
node_instances=added,
ignore_failure=ignore_failure,
related_nodes=related)
raise ex
else:
removed_and_related = set(modification.removed.node_instances)
removed = set(i for i in removed_and_related
if i.modification == 'removed')
related = removed_and_related - removed
lifecycle.uninstall_node_instances(
graph=graph,
node_instances=removed,
ignore_failure=ignore_failure,
related_nodes=related)
except Exception as ex:
        ctx.logger.warn('Rolling back deployment modification. '
                        '[modification_id={0}]: {1}'
                        .format(modification.id, repr(ex)))
        try:
            modification.rollback()
        except Exception as rollback_ex:
            ctx.logger.warn('Deployment modification rollback failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. [modification_id={0}]: {1}'
                            .format(modification.id, repr(rollback_ex)))
            raise rollback_ex
        raise ex
else:
try:
modification.finish()
except Exception as ex:
            ctx.logger.warn('Deployment modification finish failed. The '
                            'deployment model is most likely in some corrupted'
                            ' state. [modification_id={0}]: {1}'
                            .format(modification.id, repr(ex)))
raise ex
```
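The delta handling above is easy to trip over when the parameter arrives as a string from the CLI. Below is a minimal, self-contained sketch of just those rules (Python 3 `str` in place of `basestring`; the `normalize_delta` name is hypothetical, not part of the workflow):

```python
def normalize_delta(delta, current_instances):
    # Coerce CLI-style string deltas to int, mirroring the workflow above.
    if isinstance(delta, str):
        try:
            delta = int(delta)
        except ValueError:
            raise ValueError(
                'The delta parameter must be a number. Got: {0}'.format(delta))
    planned = current_instances + delta
    if planned < 0:
        raise ValueError(
            'Provided delta: {0} is illegal. Current number of '
            'instances is {1}'.format(delta, current_instances))
    return delta, planned

assert normalize_delta('2', 3) == (2, 5)    # scale out: N + delta
assert normalize_delta(-1, 3) == (-1, 2)    # scale in:  N - |delta|
```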
#### File: scripts/kubernetes_master/create.py
```python
import subprocess
import socket
import time
from cloudify import ctx
from cloudify.exceptions import OperationRetry
def check_command(command):
try:
process = subprocess.Popen(
command.split()
)
except OSError:
return False
output, error = process.communicate()
ctx.logger.debug('command: {0} '.format(command))
ctx.logger.debug('output: {0} '.format(output))
ctx.logger.debug('error: {0} '.format(error))
ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
if process.returncode:
ctx.logger.error('Running `{0}` returns error.'.format(command))
return False
return True
def execute_command(command):
ctx.logger.debug('command {0}.'.format(repr(command)))
subprocess_args = {
'args': command,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE
}
ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
process = subprocess.Popen(**subprocess_args)
output, error = process.communicate()
ctx.logger.debug('error: {0} '.format(error))
ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
if process.returncode:
ctx.logger.error('Running `{0}` returns error.'.format(repr(command)))
return False
return output
if __name__ == '__main__':
# Check if Docker PS works
docker = check_command('sudo docker ps')
if not docker:
raise OperationRetry(
'Docker is not present on the system.')
ctx.logger.info('Docker is present on the system.')
# Next check if Cloud Init is running.
    ps = execute_command(['ps', '-ef'])
    if ps is False:
        raise OperationRetry('Failed to list processes; retrying.')
    for line in ps.split('\n'):
if '/usr/bin/python /usr/bin/cloud-init modules' in line:
raise OperationRetry(
'You provided a Cloud-init Cloud Config to configure '
'instances. Waiting for Cloud-init to complete.')
ctx.logger.info('Cloud-init finished.')
execute_command(["sudo", "sed", "-i", "s|cgroup-driver=systemd|"
"cgroup-driver=systemd --provider-id='{}'|g"
.format(socket.gethostname()),
"/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"])
ctx.logger.info("Reload kubeadm")
status = execute_command(["sudo", "systemctl", "daemon-reload"])
if status is False:
raise OperationRetry('Failed daemon-reload')
restart_service = execute_command(["sudo", "systemctl", "stop", "kubelet"])
if restart_service is False:
raise OperationRetry('Failed to stop kubelet')
time.sleep(5)
restart_service = execute_command(
["sudo", "systemctl", "start", "kubelet"])
if restart_service is False:
raise OperationRetry('Failed to start kubelet')
time.sleep(5)
``` |
{
"source": "0lvin-cfy/cloudify-libvirt-plugin",
"score": 2
} |
#### File: cloudify-libvirt-plugin/cloudify_libvirt/iso9660_tasks.py
```python
import libvirt
import os
from cloudify import ctx
from cloudify.decorators import operation
from cloudify import exceptions as cfy_exc
import cloudify_common_sdk.iso9660 as iso9660
import cloudify_libvirt.common as common
@operation
def create(**kwargs):
ctx.logger.info("Creating new iso image.")
libvirt_auth, template_params = common.get_libvirt_params(**kwargs)
conn = libvirt.open(libvirt_auth)
if conn is None:
raise cfy_exc.NonRecoverableError(
'Failed to open connection to the hypervisor'
)
try:
        # look up the configured pool and volume by name
try:
pool = conn.storagePoolLookupByName(template_params["pool"])
volume = pool.storageVolLookupByName(template_params["volume"])
except libvirt.libvirtError as e:
raise cfy_exc.NonRecoverableError(
'Failed to find the volume: {}'.format(repr(e))
)
outiso = iso9660.create_iso(
vol_ident=template_params.get('vol_ident', 'cidata'),
sys_ident=template_params.get('sys_ident', ""),
get_resource=ctx.get_resource,
files=template_params.get('files', {}),
files_raw=template_params.get('files_raw', {}))
outiso.seek(0, os.SEEK_END)
iso_size = outiso.tell()
outiso.seek(0, os.SEEK_SET)
ctx.logger.info("ISO size: {}".format(repr(iso_size)))
stream = conn.newStream(0)
volume.upload(stream, 0, iso_size, 0)
outiso.seek(0, os.SEEK_SET)
read_size = iso_size
while read_size > 0:
buffer = outiso.read(read_size)
read_size -= len(buffer)
stream.send(buffer)
stream.finish()
finally:
conn.close()
```
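The upload loop in `create()` depends on `read()` being allowed to return fewer bytes than requested. A standalone sketch of the same chunking pattern against a plain in-memory stream (the `send_all` helper and its short-read guard are illustrative additions, not plugin API):

```python
import io

def send_all(src, total_size, send):
    # Push exactly total_size bytes from src through send(), tolerating
    # short reads the same way the volume upload loop above does.
    remaining = total_size
    while remaining > 0:
        chunk = src.read(remaining)
        if not chunk:  # extra guard: a truncated source would loop forever otherwise
            raise IOError('source exhausted with {0} bytes left'.format(remaining))
        remaining -= len(chunk)
        send(chunk)

sent = []
send_all(io.BytesIO(b'x' * 1024), 1024, sent.append)
assert sum(len(c) for c in sent) == 1024
```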
#### File: cloudify_libvirt/tests/test_network.py
```python
import mock
import unittest
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from cloudify.exceptions import NonRecoverableError, RecoverableError
from cloudify_common_sdk._compat import builtins_open
from cloudify_libvirt.tests.test_common_base import LibVirtCommonTest
import cloudify_libvirt.network_tasks as network_tasks
class TestNetworkTasks(LibVirtCommonTest):
def test_unlink(self):
_target = MockCloudifyContext(
'target',
properties={},
runtime_properties={
'resource_id': 'target_id',
'name': 'target',
'ip': '1.2.3.4'
}
)
_source = MockCloudifyContext(
'source',
properties={},
runtime_properties={
'resource_id': 'source_id',
'name': 'source'
}
)
_ctx = MockCloudifyContext(
target=_target,
source=_source
)
current_ctx.set(_ctx)
network_tasks.unlink(ctx=_ctx)
self.assertEqual(_source.instance.runtime_properties, {
'resource_id': 'source_id',
'name': 'source'})
self.assertEqual(_target.instance.runtime_properties, {
'ip': None,
'resource_id': 'target_id',
'name': 'target'})
def test_link(self):
_target = MockCloudifyContext(
'target',
properties={},
runtime_properties={
'resource_id': 'target_id',
'name': 'target',
'ip': '1.2.3.4'
}
)
_source = MockCloudifyContext(
'source',
properties={},
runtime_properties={
'resource_id': 'source_id',
'name': 'source',
'params': {
"networks": [{
'mac': "ab:cd:ef"
}]
}
}
)
_ctx = MockCloudifyContext(
target=_target,
source=_source
)
current_ctx.set(_ctx)
# check correct handle exception with empty connection
self._check_correct_connect(
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.link, [], {'ctx': _ctx})
# check correct handle exception with unexisted object
self._check_no_such_object_network(
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.link, [], {'ctx': _ctx}, 'target_id')
# no leases
network = mock.Mock()
network.destroy = mock.Mock(return_value=-1)
network.DHCPLeases = mock.Mock(return_value=[])
network.name = mock.Mock(return_value="network_name")
connect = self._create_fake_connection()
connect.networkLookupByName = mock.Mock(return_value=network)
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"time.sleep",
mock.Mock(return_value=None)
):
with self.assertRaisesRegexp(
RecoverableError,
'No ip for now, try later'
):
network_tasks.link(ctx=_ctx)
# lease
network.DHCPLeases = mock.Mock(return_value=[{
'mac': "ab:cd:ef",
'ipaddr': "1.2.3.4"
}])
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"time.sleep",
mock.Mock(return_value=None)
):
network_tasks.link(ctx=_ctx)
self.assertEqual(
_ctx.source.instance.runtime_properties['ip'],
"1.2.3.4"
)
def _test_empty_connection(self, func):
# check correct handle exception with empty connection
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
self._check_correct_connect(
"cloudify_libvirt.network_tasks.libvirt.open",
func, [], {'ctx': _ctx})
def _test_empty_connection_backup(self, func):
# check correct handle exception with empty connection
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
self._check_correct_connect(
"cloudify_libvirt.network_tasks.libvirt.open",
func, [], {'ctx': _ctx, "snapshot_name": "backup"})
def _test_empty_network(self, func):
# check correct handle exception with empty network
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
self._check_no_such_object_network(
"cloudify_libvirt.network_tasks.libvirt.open",
func, [], {'ctx': _ctx}, 'resource')
def test_reuse_network_create_not_exist(self):
# check correct handle exception with empty network
_ctx = self._create_ctx()
self._check_no_such_object_network(
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.create, [], {
'ctx': _ctx,
"resource_id": 'resource',
"use_external_resource": True,
}, 'resource')
def test_reuse_network_create_exist(self):
# check that we can use network
_ctx = self._create_ctx()
network = mock.Mock()
network.name = mock.Mock(return_value="resource")
connect = self._create_fake_connection()
connect.networkLookupByName = mock.Mock(return_value=network)
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.create(ctx=_ctx,
resource_id='resource',
use_external_resource=True)
connect.networkLookupByName.assert_called_with('resource')
self.assertEqual(
_ctx.instance.runtime_properties['resource_id'], 'resource'
)
self.assertTrue(
_ctx.instance.runtime_properties['use_external_resource']
)
def _test_empty_network_backup(self, func):
# check correct handle exception with empty network
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
self._check_no_such_object_network(
"cloudify_libvirt.network_tasks.libvirt.open",
func, [], {'ctx': _ctx, "snapshot_name": "backup"}, 'resource')
def _create_fake_network_backup(self):
network = mock.Mock()
network.XMLDesc = mock.Mock(return_value="<network/>")
network.isActive = mock.Mock(return_value=1)
network.name = mock.Mock(return_value="network_name")
connect = self._create_fake_connection()
connect.networkLookupByName = mock.Mock(return_value=network)
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
_ctx.instance.runtime_properties["backups"] = {
"node_name-backup": "<xml/>"}
return _ctx, connect, network
def test_snapshot_apply(self):
self._test_no_resource_id(network_tasks.snapshot_apply,
"No network for restore")
self._test_no_snapshot_name(
self._create_ctx(),
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.snapshot_apply)
self._test_empty_connection_backup(network_tasks.snapshot_apply)
self._test_empty_network_backup(network_tasks.snapshot_apply)
# no such snapshot
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No snapshots found with name: node_name-backup!."
):
network_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=True)
# we have such snapshot
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
# no such backup
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=False)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No backups found with name: node_name-backup!."
):
network_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
# have backup
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=True)
):
fake_file = mock.mock_open()
fake_file().read.return_value = "<network/>"
with mock.patch(
builtins_open, fake_file
):
network_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
fake_file.assert_called_with('./backup!/resource.xml', 'r')
def test_snapshot_create(self):
self._test_no_resource_id(network_tasks.snapshot_create,
"No network for backup")
self._test_no_snapshot_name(
self._create_ctx(),
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.snapshot_create)
self._test_empty_connection_backup(network_tasks.snapshot_create)
self._test_empty_network_backup(network_tasks.snapshot_create)
# check create snapshot with error, already exists
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"Snapshot node_name-backup already exists."
):
network_tasks.snapshot_create(ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
connect.networkLookupByName.assert_called_with('resource')
# no such snapshots
_ctx.instance.runtime_properties["backups"] = {}
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.snapshot_create(ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
self.assertEqual(
_ctx.instance.runtime_properties["backups"],
{"node_name-backup": "<network/>"})
# check create snapshot
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isdir",
mock.Mock(return_value=True)
):
fake_file = mock.mock_open()
fake_file().read.return_value = "!!!!"
with mock.patch(
builtins_open, fake_file
):
# with error, already exists
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=True)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"Backup node_name-backup already exists."
):
network_tasks.snapshot_create(
ctx=_ctx, snapshot_name="backup",
snapshot_incremental=False)
# without error
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=False)
):
network_tasks.snapshot_create(
ctx=_ctx, snapshot_name="backup",
snapshot_incremental=False)
fake_file().write.assert_called_with("<network/>")
def test_snapshot_delete(self):
self._test_no_resource_id(network_tasks.snapshot_delete,
"No network for backup delete")
self._test_no_snapshot_name(
self._create_ctx(),
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.snapshot_delete)
# no such snapshots
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No snapshots found with name: node_name-backup!."
):
network_tasks.snapshot_delete(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=True)
self.assertEqual(
_ctx.instance.runtime_properties["backups"],
{'node_name-backup': "<xml/>"})
# remove snapshot
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.snapshot_delete(ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
self.assertEqual(_ctx.instance.runtime_properties["backups"], {})
# no such backup
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=False)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No backups found with name: node_name-backup!."
):
network_tasks.snapshot_delete(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
# remove backup
_ctx, connect, network = self._create_fake_network_backup()
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=True)
):
fake_file = mock.mock_open()
fake_file().read.return_value = "!!!!"
remove_mock = mock.Mock()
with mock.patch(
"os.remove",
remove_mock
):
with mock.patch(
builtins_open, fake_file
):
network_tasks.snapshot_delete(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
fake_file.assert_called_with('./backup!/resource.xml', 'r')
remove_mock.assert_called_with('./backup!/resource.xml')
def test_delete(self):
self._test_no_resource_id(network_tasks.delete)
self._test_empty_connection(network_tasks.delete)
self._test_empty_network(network_tasks.delete)
self._test_reused_object(
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.delete)
# delete with error
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
_ctx.instance.runtime_properties["backups"] = {
"node_name-backup": "<xml/>"}
network = mock.Mock()
network.destroy = mock.Mock(return_value=-1)
network.name = mock.Mock(return_value="network_name")
connect = self._create_fake_connection()
connect.networkLookupByName = mock.Mock(return_value=network)
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
'Can not undefine network.'
):
network_tasks.delete(ctx=_ctx)
connect.networkLookupByName.assert_called_with('resource')
# delete without error
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
network = mock.Mock()
network.destroy = mock.Mock(return_value=0)
network.name = mock.Mock(return_value="network_name")
connect = self._create_fake_connection()
connect.networkLookupByName = mock.Mock(return_value=network)
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.delete(ctx=_ctx)
self.assertFalse(_ctx.instance.runtime_properties.get('resource_id'))
self.assertFalse(_ctx.instance.runtime_properties.get("backup"))
def test_create(self):
# check correct handle exception with empty connection
self._check_correct_connect(
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.create, [], {'ctx': self._create_ctx()})
# check error with create network
self._check_create_object(
'Failed to create a virtual network',
"cloudify_libvirt.network_tasks.libvirt.open",
network_tasks.create, [], {'ctx': self._create_ctx()})
# create network
network = mock.Mock()
network.isActive = mock.Mock(return_value=1)
network.name = mock.Mock(return_value="network_name")
connect = self._create_fake_connection()
connect.networkCreateXML = mock.Mock(return_value=network)
_ctx = self._create_ctx()
_ctx.get_resource = mock.Mock(return_value='<somexml/>')
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.create(ctx=_ctx,
template_resource="template_resource")
connect.networkCreateXML.assert_called_with('<somexml/>')
self.assertEqual(
_ctx.instance.runtime_properties['resource_id'], "network_name"
)
self.assertFalse(
_ctx.instance.runtime_properties['use_external_resource']
)
        # inactive
network.isActive = mock.Mock(return_value=0)
connect.networkCreateXML = mock.Mock(return_value=network)
_ctx = self._create_ctx()
_ctx.get_resource = mock.Mock(return_value='<somexml/>')
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
network_tasks.create(ctx=_ctx,
template_resource="template_resource")
connect.networkCreateXML.assert_called_with('<somexml/>')
self.assertEqual(
_ctx.instance.runtime_properties['resource_id'], "network_name"
)
# rerun on created
connect.networkLookupByName = mock.Mock(
side_effect=network_tasks.libvirt.libvirtError("e"))
with mock.patch(
"cloudify_libvirt.network_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
'Failed to find the network.'
):
network_tasks.create(ctx=_ctx,
template_resource="template_resource")
if __name__ == '__main__':
unittest.main()
```
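The tests above lean on one idiom throughout: patch `open` with `mock.mock_open`, preload `read()`, and assert on the path that was opened. A stripped-down, self-contained version of that pattern (using the stdlib `unittest.mock` instead of the `cloudify_common_sdk._compat` shim):

```python
import unittest
from unittest import mock

class MockOpenPattern(unittest.TestCase):
    def test_fake_backup_file(self):
        fake_file = mock.mock_open()
        fake_file().read.return_value = "<network/>"
        with mock.patch("builtins.open", fake_file):
            with open("./backup/resource.xml", "r") as f:
                self.assertEqual(f.read(), "<network/>")
        # The most recent call to open() carries the backup path.
        fake_file.assert_called_with("./backup/resource.xml", "r")

if __name__ == '__main__':
    unittest.main()
```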
#### File: cloudify_libvirt/tests/test_volume.py
```python
import mock
import unittest
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from cloudify.exceptions import NonRecoverableError
from cloudify_common_sdk._compat import builtins_open
from cloudify_libvirt.tests.test_common_base import LibVirtCommonTest
import cloudify_libvirt.volume_tasks as volume_tasks
class TestVolumeTasks(LibVirtCommonTest):
def _create_ctx(self):
_ctx = MockCloudifyContext(
'node_name',
properties={
'libvirt_auth': {'a': 'c'},
'params': {'pool': 'pool_name'},
},
runtime_properties={
'libvirt_auth': {'a': 'd'}
}
)
current_ctx.set(_ctx)
return _ctx
def _test_empty_connection_backup(self, func):
# check correct handle exception with empty connection
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
self._check_correct_connect(
"cloudify_libvirt.volume_tasks.libvirt.open",
func, [], {'ctx': _ctx, "snapshot_name": "backup"})
def _test_empty_volume_backup(self, func):
# check correct handle exception with empty volume
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
self._check_no_such_object_volume(
"cloudify_libvirt.volume_tasks.libvirt.open",
func, [], {'ctx': _ctx, "snapshot_name": "backup"}, 'resource')
def _test_empty_volume(self, func):
# check correct handle exception with empty volume
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
self._check_no_such_object_volume(
"cloudify_libvirt.volume_tasks.libvirt.open",
func, [], {'ctx': _ctx}, 'resource')
def _create_fake_volume_backup(self):
volume = mock.Mock()
volume.XMLDesc = mock.Mock(return_value="<volume/>")
volume.isActive = mock.Mock(return_value=1)
volume.name = mock.Mock(return_value="volume_name")
pool = mock.Mock()
pool.XMLDesc = mock.Mock(return_value="<pool/>")
pool.isActive = mock.Mock(return_value=1)
pool.name = mock.Mock(return_value="pool_name")
pool.storageVolLookupByName = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'resource'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
_ctx.node.properties['params'] = {}
_ctx.instance.runtime_properties["backups"] = {
"node_name-backup": "<xml/>"}
return _ctx, connect, pool, volume
def test_snapshot_apply(self):
self._test_no_resource_id(volume_tasks.snapshot_apply,
"No volume for restore")
self._test_no_snapshot_name(
self._create_ctx(),
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.snapshot_apply)
self._test_empty_connection_backup(volume_tasks.snapshot_apply)
self._test_empty_volume_backup(volume_tasks.snapshot_apply)
# no such snapshot
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No snapshots found with name: node_name-backup!."
):
volume_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=True)
# we have such snapshot
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
# no such backup
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=False)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No backups found with name: node_name-backup!."
):
volume_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
# have backup
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=True)
):
fake_file = mock.mock_open()
fake_file().read.return_value = "<volume/>"
with mock.patch(
builtins_open, fake_file
):
volume_tasks.snapshot_apply(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
fake_file.assert_called_with('./backup!/resource.xml', 'r')
def test_snapshot_create(self):
self._test_no_resource_id(volume_tasks.snapshot_create,
"No volume for backup")
self._test_no_snapshot_name(
self._create_ctx(),
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.snapshot_create)
self._test_empty_connection_backup(volume_tasks.snapshot_create)
self._test_empty_volume_backup(volume_tasks.snapshot_create)
# check create snapshot with error, already exists
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"Snapshot node_name-backup already exists."
):
volume_tasks.snapshot_create(ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
connect.storagePoolLookupByName.assert_called_with('pool_name')
pool.storageVolLookupByName.assert_called_with('resource')
# no such snapshots
_ctx.instance.runtime_properties["backups"] = {}
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.snapshot_create(ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
self.assertEqual(
_ctx.instance.runtime_properties["backups"],
{"node_name-backup": "<volume/>"})
# check create snapshot
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isdir",
mock.Mock(return_value=True)
):
fake_file = mock.mock_open()
fake_file().read.return_value = "!!!!"
with mock.patch(
builtins_open, fake_file
):
# with error, already exists
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=True)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"Backup node_name-backup already exists."
):
volume_tasks.snapshot_create(
ctx=_ctx, snapshot_name="backup",
snapshot_incremental=False)
# without error
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=False)
):
volume_tasks.snapshot_create(
ctx=_ctx, snapshot_name="backup",
snapshot_incremental=False)
fake_file().write.assert_called_with("<volume/>")
def test_snapshot_delete(self):
self._test_no_resource_id(volume_tasks.snapshot_delete,
"No volume for backup delete")
self._test_no_snapshot_name(
self._create_ctx(),
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.snapshot_delete)
# no such snapshots
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No snapshots found with name: node_name-backup!."
):
volume_tasks.snapshot_delete(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=True)
self.assertEqual(
_ctx.instance.runtime_properties["backups"],
{'node_name-backup': "<xml/>"})
# remove snapshot
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.snapshot_delete(ctx=_ctx, snapshot_name="backup",
snapshot_incremental=True)
self.assertEqual(_ctx.instance.runtime_properties["backups"], {})
# no such backup
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=False)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"No backups found with name: node_name-backup!."
):
volume_tasks.snapshot_delete(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
# remove backup
_ctx, connect, pool, volume = self._create_fake_volume_backup()
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"os.path.isfile",
mock.Mock(return_value=True)
):
fake_file = mock.mock_open()
fake_file().read.return_value = "!!!!"
remove_mock = mock.Mock()
with mock.patch(
"os.remove",
remove_mock
):
with mock.patch(
builtins_open, fake_file
):
volume_tasks.snapshot_delete(
ctx=_ctx, snapshot_name="backup!",
snapshot_incremental=False)
fake_file.assert_called_with('./backup!/resource.xml', 'r')
remove_mock.assert_called_with('./backup!/resource.xml')
def test_create(self):
# check correct handle exception with empty connection
self._check_correct_connect(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.create, [], {'ctx': self._create_ctx()})
# check error with create volume image
self._check_create_object(
'Failed to find the pool',
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.create, [], {'ctx': self._create_ctx(),
'params': {'pool': 'empty'}})
# successful create
_ctx = self._create_ctx()
_ctx.get_resource = mock.Mock(return_value='<somexml/>')
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume_name")
pool = mock.Mock()
pool.createXML = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
# without params
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.create(ctx=_ctx,
template_resource="template_resource",
params={'pool': 'empty'})
pool.createXML.assert_called_with('<somexml/>')
self.assertEqual(
_ctx.instance.runtime_properties['resource_id'], "volume_name"
)
        # failed download size check
_ctx.instance.runtime_properties['resource_id'] = None
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
# empty
head_response = mock.Mock()
head_response.headers = {'Content-Length': 0}
with mock.patch(
"cloudify_libvirt.volume_tasks.requests.head",
mock.Mock(return_value=head_response)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"Failed to download volume."
):
volume_tasks.create(
ctx=_ctx,
template_resource="template_resource",
params={
'pool': 'empty',
'url': "https://fake.org/centos.iso"})
        # successful download size check
_ctx.instance.runtime_properties['resource_id'] = None
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
head_response = mock.Mock()
head_response.headers = {'Content-Length': 512,
'Accept-Ranges': 'bytes'}
with mock.patch(
"cloudify_libvirt.volume_tasks.requests.head",
mock.Mock(return_value=head_response)
):
volume_tasks.create(
ctx=_ctx,
template_resource="template_resource",
params={
'pool': 'empty',
'url': "https://fake.org/centos.iso"})
# failed on create
_ctx.instance.runtime_properties['resource_id'] = None
_ctx.instance.runtime_properties['params'] = {}
_ctx.node.properties['params'] = {}
pool.createXML = mock.Mock(return_value=None)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
'Failed to create a virtual volume'
):
volume_tasks.create(ctx=_ctx,
template_resource="template_resource",
params={'pool': 'empty'})
def test_reuse_volume_create_not_exist(self):
# check correct handle exception with empty network
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
self._check_no_such_object_volume(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.create, [], {
'ctx': _ctx,
"resource_id": 'resource',
"use_external_resource": True,
}, 'resource')
def test_reuse_volume_create_exist(self):
# check that we can use network
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume")
pool = mock.Mock()
pool.name = mock.Mock(return_value="pool")
pool.storageVolLookupByName = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.create(ctx=_ctx,
resource_id='resource',
use_external_resource=True)
connect.storagePoolLookupByName.assert_called_with('pool_name')
pool.storageVolLookupByName.assert_called_with('resource')
self.assertEqual(
_ctx.instance.runtime_properties['resource_id'], 'volume'
)
self.assertTrue(
_ctx.instance.runtime_properties['use_external_resource']
)
def test_start(self):
# check correct handle exception with empty connection
self._test_check_correct_connect_action(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.start)
self._test_empty_volume(volume_tasks.start)
self._test_reused_object(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.start)
self._test_no_resource_id(volume_tasks.start)
def test_start_wipe(self):
# zero wipe
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'volume'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume")
volume.upload = mock.Mock()
pool = mock.Mock()
pool.name = mock.Mock(return_value="pool")
pool.storageVolLookupByName = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.start(ctx=_ctx,
params={
'zero_wipe': True,
'allocation': 1
})
def test_start_download(self):
# download
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'volume'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume")
volume.upload = mock.Mock()
pool = mock.Mock()
pool.name = mock.Mock(return_value="pool")
pool.storageVolLookupByName = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
# empty
head_response = mock.Mock()
head_response.headers = {'Content-Length': 0}
with mock.patch(
"cloudify_libvirt.volume_tasks.requests.head",
mock.Mock(return_value=head_response)
):
with self.assertRaisesRegexp(
NonRecoverableError,
"Failed to download volume."
):
volume_tasks.start(
ctx=_ctx,
params={
'url': "https://fake.org/centos.iso"})
# 512 for download
head_response = mock.Mock()
head_response.headers = {'Content-Length': 512,
'Accept-Ranges': 'bytes'}
head_response.iter_content = mock.Mock(return_value=["\0" * 256])
with mock.patch(
"cloudify_libvirt.volume_tasks.requests.head",
mock.Mock(return_value=head_response)
):
with mock.patch(
"cloudify_libvirt.volume_tasks.requests.get",
mock.Mock(return_value=head_response)
):
volume_tasks.start(
ctx=_ctx,
params={
'url': "https://fake.org/centos.iso"})
def test_stop(self):
# check correct handle exception with empty connection
self._test_check_correct_connect_action(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.stop)
self._test_empty_volume(volume_tasks.stop)
self._test_reused_object(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.stop)
self._test_no_resource_id(volume_tasks.stop)
def test_stop_wipe(self):
# failed to wipe/error ignored
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'volume'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume")
volume.wipe = mock.Mock(
side_effect=volume_tasks.libvirt.libvirtError("e"))
pool = mock.Mock()
pool.name = mock.Mock(return_value="pool")
pool.storageVolLookupByName = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.stop(ctx=_ctx)
# failed to wipe/wrong response
volume.wipe = mock.Mock(return_value=-1)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with mock.patch(
"cloudify_libvirt.volume_tasks.time.sleep",
mock.Mock(return_value=mock.Mock())
):
volume_tasks.stop(ctx=_ctx)
# correctly wiped
volume.wipe = mock.Mock(return_value=0)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.stop(ctx=_ctx)
def test_delete(self):
# check correct handle exception with empty connection
self._test_check_correct_connect_action(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.delete)
self._test_empty_volume(volume_tasks.delete)
self._test_reused_object(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.delete)
self._test_no_resource_id(volume_tasks.delete)
# failed to remove
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'volume'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume")
volume.delete = mock.Mock(return_value=-1)
pool = mock.Mock()
pool.name = mock.Mock(return_value="pool")
pool.storageVolLookupByName = mock.Mock(return_value=volume)
connect = self._create_fake_connection()
connect.storagePoolLookupByName = mock.Mock(return_value=pool)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
with self.assertRaisesRegexp(
NonRecoverableError,
'Can not undefine volume.'
):
volume_tasks.delete(ctx=_ctx)
        # successful remove
volume.delete = mock.Mock(return_value=0)
with mock.patch(
"cloudify_libvirt.volume_tasks.libvirt.open",
mock.Mock(return_value=connect)
):
volume_tasks.delete(ctx=_ctx)
self.assertEqual(
_ctx.instance.runtime_properties,
{
'backups': {},
'libvirt_auth': {'a': 'd'},
'params': {},
'resource_id': None
}
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "0M1N0U5/SteganoAnki",
"score": 2
} |
#### File: Anki/lib/ankiwrapper.py
```python
from ankipandas import Collection
from ankipandas import AnkiDataFrame
import ankipandas
import lib.UtilesAnki as UtilesAnki
import pathlib
import pandas as pd
import sqlite3
import time
class AnkiWrapper:
__instance = None
@staticmethod
def getInstance():
        if AnkiWrapper.__instance is None:
AnkiWrapper()
return AnkiWrapper.__instance
def __init__(self):
        if AnkiWrapper.__instance is not None:
            raise Exception("ERROR. Use getInstance() instead of creating another instance.")
else:
AnkiWrapper.__instance = self
self.col = Collection()
self.cards = self.col.cards
self.notes = self.col.notes
self.cardsRaw = ankipandas.raw.get_table(self.col.db, "cards")
self.notesRaw = ankipandas.raw.get_table(self.col.db, "notes")
self.rutaBase = self.obtenerRutaBase()
pd.options.mode.chained_assignment = None
ankipandas.util.log.set_log_level('critical')
def __del__(self):
self.col.db.close()
def getDecks(self):
return self.cards.list_decks()
def deckExiste(self,nombreDeck):
return self.getDecks().count(nombreDeck) > 0
def getCardsFromDeck(self, nombreMazo):
mazoSelecionado = self.cards[self.cards['cdeck'] == nombreMazo]
mazoSelecionadoRaw = self.cardsRaw[self.cardsRaw.id.isin(mazoSelecionado.cid)]
return mazoSelecionadoRaw
def getNotesFromDeck(self, nombreMazo):
mazoSelecionado = self.cards[self.cards['cdeck'] == nombreMazo]
idNotas = mazoSelecionado.nid
notasSelecionadas = self.notesRaw[self.notes.id.isin(idNotas)]
return notasSelecionadas
def getFlagsDeck(self, nombreMazo):
mazo = self.getCardsFromDeck(nombreMazo)
flags = mazo['flags']
return flags.to_list()
def updateCards(self):
return ankipandas.raw.set_table(self.col.db,self.cardsRaw,"cards","update")
def updateNotes(self):
return ankipandas.raw.set_table(self.col.db,self.notesRaw,"notes","update")
def updateColCards(self, nombreCol, mazoActualizar):
self.cardsRaw[nombreCol] = mazoActualizar[nombreCol].combine_first(self.cardsRaw[nombreCol]).astype(type(self.cardsRaw[nombreCol][0]))
return self.updateCards()
def updateRowsNotes(self, lista):
for i in lista:
            #Update the notes
antiguoFlds = self.notesRaw.loc[i['index']].flds
nuevaRespuestaCadena = "."+antiguoFlds.replace(i['name'], i['newName'])
nuevaRespuestaLista = UtilesAnki.decodificarFlds(nuevaRespuestaCadena)
self.notesRaw.at[i['index'],"usn"] = -1
self.notesRaw.at[i['index'],"flds"] = nuevaRespuestaCadena
self.notesRaw.at[i['index'], "sfld"] = nuevaRespuestaLista[0]
self.notesRaw.at[i['index'], "csum"] = UtilesAnki.calcularCsum(nuevaRespuestaLista[0])
self.updateCards()
self.updateNotes()
return self.forzarActualizacion()
def obtenerRutaBase(self):
rutaDB = ankipandas.paths.db_path_input()
rutaPartes = list(rutaDB.parts)
rutaPartes[-1] = 'collection.media'
return str(pathlib.Path(*rutaPartes))+"/"
def forzarActualizacion(self):
tablaCol = self.getTableCol(self.col.db)
tablaCol.at[0, 'mod'] = int(time.time()*1000)
self.setTable(self.col.db, tablaCol, 'update')
    def guardarFlagsMazo(self, nombreMazo, nuevaFlags):  #TODO: review
mazoSelecionado = self.getCardsFromDeck(nombreMazo)
mazo = self.cardsRaw[self.cardsRaw.id.isin(mazoSelecionado.id)]
for i in range(0, len(nuevaFlags)):
index = mazo.iloc[i].name
self.cardsRaw.at[index,'mod'] = time.time()
self.cardsRaw.at[index,'usn'] = -1
self.cardsRaw.at[index,'flags'] = nuevaFlags[i]
self.updateCards()
return self.forzarActualizacion()
    #Functions adapted from:
#https://github.com/klieret/AnkiPandas/blob/aaa7583c38d9dadf2ff6c4ef13bceef50bbfc99d/ankipandas/raw.py
def getTableCol(self, db) -> pd.DataFrame:
df = pd.read_sql_query(
"SELECT * FROM col", db
)
return df
def consolidateTables(self, df: pd.DataFrame, df_old: pd.DataFrame, mode: str, id_column="id"):
if not list(df.columns) == list(df_old.columns):
raise ValueError(
"Columns do not match: Old: {}, New: {}".format(
", ".join(df_old.columns), ", ".join(df.columns)
)
)
old_indices = set(df_old[id_column])
new_indices = set(df[id_column])
# Get indices
# -----------
if mode == "update":
indices = set(old_indices)
elif mode == "replace":
indices = set(new_indices)
else:
raise ValueError(f"Unknown mode '{mode}'.")
df = df[df[id_column].isin(indices)]
# Apply
# -----
if mode == "update":
df_new = df_old.copy()
df_new.update(df)
elif mode == "replace":
df_new = df.copy()
else:
raise ValueError(f"Unknown mode '{mode}'.")
return df_new
def setTable(self, db: sqlite3.Connection, df: pd.DataFrame, mode: str, id_column="id",) -> None:
df_old = self.getTableCol(db)
df_new = self.consolidateTables(
df=df, df_old=df_old, mode=mode, id_column=id_column
)
df_new.to_sql("col", db, if_exists="replace", index=False)
```
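A hypothetical usage sketch for the wrapper above. It needs a local Anki collection that `ankipandas` can locate, so treat it as illustrative rather than something to run against a live profile:

```python
from lib.ankiwrapper import AnkiWrapper

wrapper = AnkiWrapper.getInstance()
decks = wrapper.getDecks()
if decks and wrapper.deckExiste(decks[0]):
    # Card flags are where the steganographic payload lives (see stegoFlagsV1).
    flags = wrapper.getFlagsDeck(decks[0])
    print(decks[0], '->', flags[:5])
```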
#### File: Anki/lib/stegoFlagsV1.py
```python
import random
import lib.utils as utils
MAX_CAPACITY = 1 #hex values
def calculateRandomCombination(number):
possibleCombinations=[]
    #This limitation is necessary so that each digit has two digits that add up to it
size = 0
for i in range(1, 10, 1):
for l in range(1, 10, 1):
if (i+l)%16==number:
#print(str(i)+str(l), "->", (i+l)%16)
possibleCombinations.append(str(i)+str(l))
size += 1
if size==1:
cont=0
else:
cont=random.randint(0, size-1)
    #We choose a combination at random
return possibleCombinations[cont]
#Main function to encode the secret in a flag
def encode(flag, data, password):
#If there is something strange about the flag
if flag > 10:
return False
    #Time to hide the secret! We generate a random valid combination
solutionArray=[]
for x in data:
solutionArray += utils.splitIntoChars(calculateRandomCombination(int(x,16)))
#We modify the order according to its password in a pseudorandom way
utils.randomArray(solutionArray,password)
    #Now we add two digits of padding so the value can be adjusted to match the given color (the cover)
    paddingNumber = random.randint(10, 92)
    secretFinal = int(''.join(solutionArray)+str(paddingNumber))
    #We bump the value until its modulo 8 matches the Anki flag
while secretFinal % 8 != flag:
secretFinal += 1
return secretFinal
#Main function to decode the flag and discover its secret
def decode(flagHidden,password):
#If there is something strange about the flag
    if flagHidden < 10 or len(str(flagHidden)) % 2 != 0:
        return False
    #We take out the padding (the last two digits)
    secretComputed=str(flagHidden//100)
    #Convert the string into a list
    listSecret= utils.splitIntoChars(secretComputed)
    #Now we invert the pseudorandom shuffle
    realSecretArray=utils.inverseRandomArray(listSecret,password)
    #We add up each pair of digits in linear order
secretHex=""
for index, i in enumerate(realSecretArray):
if index % 2 == 1:
value = (int(i)+int(realSecretArray[index-1])) % 16
secretHex += f'{value:0>1x}'
return secretHex
def estimate():
global MAX_CAPACITY
return MAX_CAPACITY
#for i in range(16):
# calculateRandomCombination(i)
```
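A minimal round-trip sketch for the encoder above (run from the repo root so `lib.utils` and its scipy/cryptography dependencies resolve; the flag value 3 and the secret digit 'a' are arbitrary). The visible Anki flag survives modulo 8 while the hex digit rides in the higher digits:

```python
from lib import stegoFlagsV1 as stego

hidden = stego.encode(3, 'a', 'hunter2')
assert hidden % 8 == 3                         # the cover flag still reads as 3
assert stego.decode(hidden, 'hunter2') == 'a'  # the secret hex digit comes back
```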
#### File: Anki/lib/utils.py
```python
import hashlib
from random import randrange
import random
import statistics
import secrets
from base64 import urlsafe_b64encode as b64e, urlsafe_b64decode as b64d
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import re
import collections
backend = default_backend()
iterations = 100_000
COLOR_SIZE = 3
MIN_STD = 14
MAX_STD = 100
def _derive_key(password: bytes, salt: bytes, iterations: int = iterations) -> bytes:
"""Derive a secret key from a given password and salt"""
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(), length=32, salt=salt,
iterations=iterations, backend=backend)
return b64e(kdf.derive(password))
def password_encrypt(message: bytes, password: str, iterations: int = iterations) -> bytes:
salt = secrets.token_bytes(16)
key = _derive_key(password.encode(), salt, iterations)
return b64e(
b'%b%b%b' % (
salt,
iterations.to_bytes(4, 'big'),
b64d(Fernet(key).encrypt(message)),
)
)
def password_decrypt(token: bytes, password: str) -> bytes:
decoded = b64d(token)
salt, iter, token = decoded[:16], decoded[16:20], b64e(decoded[20:])
iterations = int.from_bytes(iter, 'big')
key = _derive_key(password.encode(), salt, iterations)
return Fernet(key).decrypt(token)
def splitIntoChars(word):
return [char for char in word]
def stringToBin(text):
return ''.join(format(ord(char), '08b') for char in text)
def stringToHex(text):
return text.encode('utf-8').hex()
def hexToString(s):
return bytes.fromhex(s).decode('utf-8')
def intToBin(x):
return '{0:b}'.format(x)
def binToInt(x):
return int(x,2)
supportedExtensionsMap = {
"defaultExt" : "png",
"defaultType" : "PNG",
"png" : "PNG"
#"jpg" : "JPEG"
}
def isSupported(extension):
if extension.lower() in supportedExtensionsMap.keys():
return True
else:
print("Extension |", extension, "| not supported. PNG will be used.")
return False
def manageExtension(extension, outputFile):
if extension is None or extension == "" or not isSupported(extension):
outputFile["ext"] = supportedExtensionsMap["defaultExt"]
outputFile["type"] = supportedExtensionsMap["defaultType"]
return False
outputFile["ext"] = extension
outputFile["type"] = supportedExtensionsMap[extension]
return True
def manageOutputFileName(outputFileName):
outputFile = {}
if outputFileName is None or type(outputFileName) != str or outputFileName == "":
outputFile["name"] = "secret"
outputFile["ext"] = supportedExtensionsMap["defaultExt"]
outputFile["type"] = supportedExtensionsMap["defaultType"]
return outputFile
if "." in outputFileName:
splited = outputFileName.split(".")
extension = splited[-1]
if manageExtension(extension, outputFile):
outputFile["name"] = '.'.join(splited[:-1])
else:
outputFile["name"] = outputFileName
else:
outputFile["name"] = outputFileName
outputFile["ext"] = supportedExtensionsMap["defaultExt"]
outputFile["type"] = supportedExtensionsMap["defaultType"]
if outputFile["name"].endswith("."):
outputFile["name"] = outputFile["name"] + outputFile["ext"]
else:
outputFile["name"] = outputFile["name"] + "." + outputFile["ext"]
return outputFile
def sha256(data):
return hashlib.sha256(data.encode()).hexdigest()
def sha256Iterations(password, iterations):
seed = sha256(password)
for i in range(iterations):
seed = sha256(password + seed)
return seed
def calculateSha256File(filename):
sha256_hash = hashlib.sha256()
with open(filename,"rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096),b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def ofuscate(data):
#print("Ofuscate:", data)
return data
def deofuscate(data):
#print("Deofuscate:", data)
return data
def randomSplit(toSplit, size):
splited = {}
for i in range(size):
splited[i] = 0
while toSplit > 0:
pos = randrange(size)
splited[pos] = splited[pos]+1
toSplit = toSplit-1
return splited
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
def getVectorValue(vector):
return sum([x % 10 for x in vector])
vectorsMap = {
"0" : [], "1" : [], "2" : [], "3" : [], "4" : [], "5" : [], "6" : [], "7" : [], "8" : [], "9" : [], "10" : [], "11" : [], "12" : [], "13" : [], "14" : [], "15" : []
}
def getVectorsList(target):
vectors = vectorsMap[str(target)]
if not vectors:
for i in range(9):
for l in range(9):
for z in range(9):
v = [i, l, z]
vectorValue = getVectorValue(v)
if vectorValue < 16:
vectorsMap[str(vectorValue)].append(v)
vectors = vectorsMap[str(target)]
return vectors
kDTreeMap = {
"0" : None, "1" : None, "2" : None, "3" : None, "4" : None, "5" : None, "6" : None, "7" : None, "8" : None, "9" : None, "10" : None, "11" : None, "12" : None, "13" : None, "14" : None, "15" : None
}
from scipy import spatial
def getVectorFromKdTree(target, vector):
global COLOR_SIZE
kdtree = kDTreeMap[str(target)]
vectors = getVectorsList(target)
if kdtree is None:
kdtree = spatial.KDTree(vectors)
kDTreeMap[str(target)] = kdtree
tmpVector = []
for i in range(COLOR_SIZE):
tmpVector.append(vector[i])
return vectors[kdtree.query(tmpVector)[1]]
def getBestVector(initVector, targetValue):
global COLOR_SIZE
global MAX_STD
v = getVectorFromKdTree(targetValue, initVector)
finalVector = initVector.copy()
for i in range(COLOR_SIZE):
finalVector[i] = finalVector[i] - finalVector[i] % 10 + v[i]
diffPosition = abs(finalVector[i] - initVector[i])
if diffPosition > 5:
if(finalVector[i] > initVector[i]):
finalVector[i] -= 10
else:
finalVector[i] += 10
while finalVector[i] > 255:
finalVector[i] -= 10
while finalVector[i] < 0:
finalVector[i] += 10
if not isValidPixel(finalVector):
max_pos = 0
min_pos = 0
last_max = -1
last_min = 256
for i in range(COLOR_SIZE):
if finalVector[i] >= last_max:
last_max = finalVector[i]
max_pos = i
if finalVector[i] < last_min:
last_min = finalVector[i]
min_pos = i
action = 1
printe = False
prints = 0
fails = 0
while not isValidPixel(finalVector):
fails += 1
lastaction = action
std = getSTDev(finalVector)
            #Spread the channel values apart
            maxOperator = 10
            minOperator = -10
            if std > MAX_STD:
                #Pull the channel values together
maxOperator = -10
minOperator = 10
if action == 1:
newMaxValue = finalVector[max_pos] + maxOperator
if newMaxValue > -1 and newMaxValue < 256:
finalVector[max_pos] = newMaxValue
action = -1
else:
newMinValue = finalVector[min_pos] + minOperator
if newMinValue > -1 and newMinValue < 256:
finalVector[min_pos] = newMinValue
action = 1
if fails >= 100:
print("Bloqueo con pixel", initVector, "->", finalVector)
if(fails >= 109):
exit(0)
#if printe and prints < 10:
# print(finalVector)
# prints += 1
return finalVector
def randomArray(array, password):
seed = sha256(password)
for i in range(10000):
seed = sha256(seed + password)
random.seed(seed)
random.shuffle(array)
return array
def inverseRandomArray(array,password):
count=0
indices=[]
while count!=len(array):
indices.append(count)
count=count+1
mixedIndices = randomArray(indices.copy(), password)
originalVector = {}
for i in range(len(mixedIndices)):
originalVector[mixedIndices[i]] = array[i]
originalVector = collections.OrderedDict(sorted(originalVector.items()))
return list(originalVector.values())
def randomPositions(rows, columns, password=None):
pos = { "x": 0, "y": 0}
positions = []
for x in range(0, rows):
for y in range(0, columns):
#for x in range(0, width):
# for y in range(0, height):
pos["x"] = x
pos["y"] = y
positions.append(pos.copy())
if password is None:
return positions
else:
return randomArray(positions, password)
def getSTDev(vector):
tmpVector = list(map(int, vector[0:COLOR_SIZE]))
return statistics.stdev(tmpVector)
def isValidPixel(vector):
global MIN_STD
global MAX_STD
std = getSTDev(vector)
result = std > MIN_STD and std < MAX_STD
#print("vector", vector, "tmpVector", tmpVector, result, "std", std)
return result
def calculatePreHeader(password):
preHeader = sha256Iterations(password, 5000)
return preHeader[-2:] + preHeader[:4]
def processCardText(text):
images = re.findall("<img src=\"([^\"]+)\">", text)
sounds = re.findall("\[sound:([^\]]+)\]", text)
groups = re.findall("([^\[^\<]+)*(<img src=\"([^\"]+)\">)*(\[sound:([^\]]+)\])*", text)
texts = []
ignore = 0
for f in groups:
for t in f:
if t != '':
if ignore:
ignore = False
else:
if t.startswith("<img"):
ignore = True
elif t.startswith("[sound"):
ignore = True
else:
texts.append(t)
fields = { "texts" : texts, "images" : images, "sounds": sounds }
return fields
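#Hypothetical smoke test, not part of the original module: exercises the
#password round-trip and the shuffle/unshuffle pair defined above. Requires
#the module's cryptography/scipy dependencies to be installed.
if __name__ == '__main__':
    token = password_encrypt(b'attack at dawn', 'correct horse')
    assert password_decrypt(token, 'correct horse') == b'attack at dawn'
    shuffled = randomArray(list('abcdef'), 'pw')
    assert inverseRandomArray(shuffled, 'pw') == list('abcdef')
    print('utils self-test OK')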
``` |
{
"source": "0mars/ektebly-api",
"score": 2
} |
#### File: infrastructure/di/domain.py
```python
from buslane.events import EventBus
from injector import singleton, provider, Module
from meerkat.data_providers.database.mongo import PostMongoRepository
from meerkat.domain.post.use_cases import AddNewPostUseCase, PublishPostUseCase
class UseCasesConfigurator(Module):
@singleton
@provider
def add_new(self) -> AddNewPostUseCase:
return AddNewPostUseCase(self.__injector__.get(PostMongoRepository), self.__injector__.get(EventBus))
@singleton
@provider
def publish(self) -> PublishPostUseCase:
return PublishPostUseCase(self.__injector__.get(PostMongoRepository), self.__injector__.get(EventBus))
```
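A hypothetical composition-root sketch showing how these providers are consumed. The import path is inferred from the file layout, and it assumes the Mongo repository and event-bus bindings resolve at injection time:

```python
from injector import Injector

# Assumed path; adjust to wherever UseCasesConfigurator actually lives.
from meerkat.configurations.infrastructure.di.domain import UseCasesConfigurator
from meerkat.domain.post.use_cases import AddNewPostUseCase

injector = Injector([UseCasesConfigurator()])
use_case = injector.get(AddNewPostUseCase)  # resolved through the add_new() provider
```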
#### File: rest/health/registry.py
```python
from meerkat.configurations.app import settings
from meerkat.configurations.infrastructure.rest.health import HealthCheck
from meerkat.configurations.infrastructure.rest.health.definitions import HealthConfigurator
from registry.services import BootableService, Container
class HealthService(BootableService):
def boot(self, container: Container):
provider = container.get(settings.Props.DI_PROVIDER)
provider.add_configurator(HealthConfigurator())
def post_boot(self, container):
falcon = container.get(settings.Props.FALCON)
provider = container.get(settings.Props.DI_PROVIDER)
injector = provider.get_injector()
health_check = injector.get(HealthCheck)
falcon.add_route("/api", health_check)
falcon.add_route("/api/health", health_check)
```
#### File: database/mongo/repositories.py
```python
from injector import inject
from meerkat.domain.post.data_providers import PostDataProvider
from meerkat.domain.post.entities import Post
from meerkat.domain.post.value_objects import Id
from meerkat.domain.post.data_providers.exceptions import EntityNotFoundException
from meerkat.data_providers.database.mongo.transformers import PostDocumentTransformer
from meerkat.data_providers.database.mongo.documents import PostDocument
class PostMongoRepository(PostDataProvider):
@inject
def __init__(self, transformer: PostDocumentTransformer):
self.transformer = transformer
def save(self, post: Post):
post_document = self.transformer.transform_to_document(post)
post_document.save()
def get(self, id: Id) -> Post:
posts = PostDocument.objects(id=id.value)
if posts.count() < 1:
raise EntityNotFoundException('Cannot find document with id #{}'.format(str(id)))
return self.transformer.transform_to_domain_object(next(posts))
```
#### File: post/use_cases/publish_post.py
```python
from buslane.events import EventBus
from dataclasses import dataclass
from meerkat.domain.post.data_providers import PostDataProvider
from meerkat.domain.post.events import PostPublished
from meerkat.domain.post.value_objects import Id
@dataclass(frozen=True)
class PublishPostCommand:
id: Id
class PublishPostUseCase:
def __init__(self, data_provider: PostDataProvider, event_bus: EventBus):
self.data_provider = data_provider
self.event_bus = event_bus
def exec(self, command: PublishPostCommand) -> None:
post = self.data_provider.get(command.id)
post.publish()
self.data_provider.save(post)
self.event_bus.publish(PostPublished(post))
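# Illustrative usage sketch (wiring names are assumptions — the use case is
# normally resolved from the injector configured in UseCasesConfigurator):
#   use_case = injector.get(PublishPostUseCase)
#   use_case.exec(PublishPostCommand(id=some_id))   # some_id is hypothetical
# which loads the post, publishes it, persists it, and emits PostPublished.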
```
#### File: post/value_objects/body.py
```python
from dataclasses import dataclass
@dataclass(frozen=True)
class Body:
value: str
def is_valid(self):
return len(self.value) > 0
def __str__(self):
return self.value
```
#### File: rest/post/schemas.py
```python
from marshmallow import fields, Schema
from meerkat.domain.post.entities import Post
class PostSchema(Schema):
class Meta:
ordered = True
id: fields.Str = fields.Str()
title: fields.Str = fields.Str(required=True)
body: fields.Str = fields.Str(required=True)
@classmethod
def from_domain_object(cls, post: Post):
        schema = cls()
        return schema.load({
"id": str(post.id),
"title": str(post.title),
"body": str(post.body)
})
class AddNewPostSchema(Schema):
class Meta:
ordered = True
title: fields.Str = fields.Str(required=True)
body: fields.Str = fields.Str(required=True)
``` |
{
"source": "0mars/esengine",
"score": 2
} |
#### File: utils/payload/filters.py
```python
from esengine.utils.payload.meta import BaseFilterQuery, MetaFilterQuery
from esengine.utils.payload.exception import NoFilter
from six import with_metaclass
FILTERS = {
'and_': ['_filter'],
'bool': {
'kwargs': ({('must', 'must_not', 'should'): ['_filter']},)
},
'exists': {
'args': ('field',)
},
'geo_bounding_box': {
'field': True,
'kwargs': ('top_left', 'bottom_right')
},
'geo_distance': {
'field': True,
'kwargs': ('lat', 'lon')
},
'geo_distance_range': {
'field': True,
'kwargs': ('lat', 'lon')
},
'geo_polygon': {
'field': True,
'args': ({'points': []},)
},
'geo_shape': {
'field': True,
'kwargs': ('type', {'coordinates': []}),
'field_process': lambda q: {'shape': q}
},
'geohash_shell': {
'field': True,
'kwargs': ('lat', 'lon',)
},
'has_child': {
'args': ('type',),
'kwargs': ({'query': '_query', 'filter': '_filter'},)
},
'has_parent': {
'args': ('parent_type',),
'kwargs': ({'query': '_query', 'filter': '_filter'},)
},
'ids': {
'args': ({'values': []},),
'kwargs': ('type',)
},
'indices': {
'args': ({'indices': []},),
'kwargs': ({('filter', 'no_match_filter'): '_filter'},)
},
'limit': {
'args': ('value',)
},
'match_all': {},
'missing': {
'args': ('field',)
},
'nested': {
'args': ('path', {'filter': '_filter'}),
},
'not_': {
'kwargs': ({'query': '_query', 'filter': '_filter'},)
},
'or_': ['_filter'],
'prefix': {
'field': True,
'args': ('value',)
},
'range': {
'field': True,
'kwargs': ('gte', 'gt', 'lte', 'lt')
},
'regexp': {
'field': True,
'args': ('value',),
'kwargs': ('flags', 'max_determinized_states')
},
'script': {
'args': ('script',)
},
'term': {
'field': True,
'args': ('value',)
},
'terms': {
'field': True,
'value_only': True,
'args': ({'value': []},)
},
'type': {
'args': ('value',)
}
}
class Filter(with_metaclass(MetaFilterQuery, BaseFilterQuery)):
_ee_type = 'filter'
_definitions = FILTERS
_exception = NoFilter
@classmethod
def query(cls, query, cache=False):
if cache:
return cls('fquery', {
'query': query,
'_cache': True
})
else:
return cls('query', query)
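# Note on the trailing-underscore keys: 'and_', 'or_' and 'not_' exist because
# 'and', 'or' and 'not' are Python keywords; BaseFilterQuery.as_dict() (see
# meta.py) strips the underscore again before emitting the DSL key.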
```
#### File: utils/payload/meta.py
```python
from esengine.utils.payload.meta_util import (
make_struct, unroll_definitions, unroll_struct
)
class MetaFilterQuery(type):
def __init__(self, name, bases, d):
super(MetaFilterQuery, self).__init__(name, bases, d)
unroll_definitions(self._definitions)
def __getattr__(self, key):
if key == '__test__':
return None
self._validate_key(key)
return lambda *args, **kwargs: self(
key,
make_struct(self._definitions[key], *args, **kwargs)
)
def _validate_key(self, key):
if key != "__slots__" and key not in self._definitions:
raise self._exception(key)
class MetaAggregate(MetaFilterQuery):
def __getattr__(self, key):
if key == '__test__':
return None
self._validate_key(key)
return lambda *args, **kwargs: self(
key,
args[0],
make_struct(self._definitions[key], *args[1:], **kwargs)
)
class MetaSuggester(MetaFilterQuery):
def __getattr__(self, key):
if key == '__test__':
return None
self._validate_key(key)
return lambda *args, **kwargs: self(
key,
args[0],
args[1],
make_struct(self._definitions[key], *args[2:], **kwargs)
)
class BaseFilterQuery(object):
_struct = None
_dsl_type = None
def __init__(self, dsl_type, struct):
self._dsl_type = dsl_type
self._struct = struct
@property
def dict(self):
return self.as_dict()
def as_dict(self):
        # Strip the trailing underscore used to dodge Python keywords (and_, or_, not_)
dsl_type = (
self._dsl_type[:-1]
if self._dsl_type.endswith('_')
else self._dsl_type
)
return {dsl_type: unroll_struct(self._struct)}
class BaseAggregate(BaseFilterQuery):
_name = None
def __init__(self, dsl_type, name, struct):
self._dsl_type = dsl_type
self._struct = struct
self._name = name
self._aggs = []
def as_dict(self):
struct = {
self._name: {
self._dsl_type: unroll_struct(self._struct)
}
}
if self._aggs:
aggregates = {}
for agg in self._aggs:
aggregates.update(agg.as_dict())
struct[self._name]['aggregations'] = aggregates
return struct
def aggregate(self, *aggregates):
self._aggs.extend(aggregates)
return self
class BaseSuggester(BaseFilterQuery):
_name = None
def __init__(self, dsl_type, name, text, struct):
self._dsl_type = dsl_type
self._struct = struct
self._name = name
self._text = text
self._suggs = []
def as_dict(self):
struct = {
self._name: {
"text": self._text,
self._dsl_type: unroll_struct(self._struct)
}
}
if self._suggs:
for sugg in self._suggs:
struct.update(sugg.as_dict())
return struct
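# Illustrative only — how the metaclass dispatch is meant to be used by a
# subclass such as Filter (filters.py); argument values are hypothetical:
#   Filter.exists('name')
# hits MetaFilterQuery.__getattr__, validates 'exists' against _definitions,
# and returns Filter('exists', make_struct(FILTERS['exists'], 'name')).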
```
#### File: esengine/tests/test_base_document.py
```python
import pytest
from esengine.bases.py3 import * # noqa
from esengine.bases.document import BaseDocument
from esengine.bases.field import BaseField
from esengine.fields import KeywordField, IntegerField
from esengine.exceptions import FieldTypeMismatch
def test_raise_when_doc_has_no_doc_type():
with pytest.raises(ValueError):
BaseDocument()
def test_raise_when_doc_has_no_index():
    class WithoutIndex(BaseDocument):
        _doctype = 'test'

    class WithIndex(BaseDocument):
        _doctype = 'test'
        _index = 'test'
        _fields = {}

    with pytest.raises(ValueError) as ex:
        WithoutIndex()
    assert str(ex.value) == '{} have no _index attribute'.format(
        WithoutIndex.__name__
    )
    WithIndex()
def test_raise_if_doc_has_no_fields():
    class WithoutFields(BaseDocument):
        _doctype = 'test'
        _index = 'test'

    class WithFields(BaseDocument):
        _doctype = 'test'
        _index = 'test'
        _fields = {}

    with pytest.raises(AttributeError) as ex:
        WithoutFields()
    assert str(ex.value) == "type object '{}' has no attribute '{}'".format(
        WithoutFields.__name__,
        '_fields'
    )
    WithFields()
def test_doc_set_kwargs():
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_fields = {}
def __setattr__(self, key, value):
if key not in self._fields:
if isinstance(value, basestring):
self._fields[key] = KeywordField()
elif isinstance(value, int):
self._fields[key] = IntegerField()
else:
self._fields[key] = KeywordField(_multi=True)
super(Doc, self).__setattr__(key, value)
x = Doc(asdf='0', x=10, value=['a', 'b'], _value='aaa')
assert x.asdf == '0'
assert x.x == 10
assert x.value == ['a', 'b']
assert x._value == 'aaa'
def test_raise_if_attr_not_in_fields():
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_fields = {}
with pytest.raises(KeyError) as ex:
Doc(asdf='0')
assert str(ex.value) == "'`{}` is an invalid field'".format('asdf')
def test_doc_setattr_():
def pass_func(self, ignore=None):
pass
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_fields = {"asdf": 1}
Doc._initialize_defaults_fields = pass_func
doc = Doc()
with pytest.raises(AttributeError) as ex:
doc.asdf = "0"
    assert str(ex.value) == "'int' object has no attribute 'from_dict'"
doc.__setattr__('_test', 10)
assert doc._test == 10
def test_doc_initialize_multi_fields():
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_fields = {
'multiple': BaseField(field_type=int, multi=True),
'simple': BaseField(field_type=int)
}
doc = Doc()
assert doc.multiple == []
assert doc.simple is None
def test_doc_to_dict():
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_fields = {
'multiple': BaseField(field_type=int, multi=True),
'simple': BaseField(field_type=int)
}
doc = Doc(multiple=[1, 2], simple=10)
assert doc.to_dict() == {'multiple': [1, 2], 'simple': 10}
def test_doc_to_dict_call_validate():
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_strict = True
_fields = {
'multiple': BaseField(field_type=int, multi=True,
field_name='multiple'),
'simple': BaseField(field_type=int, field_name='simple')
}
doc = Doc(multiple=[1, 2], simple="10")
with pytest.raises(FieldTypeMismatch) as ex:
doc.to_dict()
assert str(ex.value) == (
"`simple` expected `" + str(int) + "`, actual `" + str(str) + "`"
)
def test_doc_from_dict():
class Doc(BaseDocument):
_doctype = 'test'
_index = 'test'
_fields = {
'multiple': BaseField(field_type=int, multi=True),
'simple': BaseField(field_type=int)
}
dict_doc = {'multiple': [1, 2], 'simple': 10}
doc = Doc.from_dict(dict_doc)
assert doc.multiple == [1, 2]
assert doc.simple == 10
```
#### File: esengine/tests/test_results.py
```python
import pytest
from esengine.bases.result import ResultSet
from esengine.bases.result import HITS
def test_resultset_has_values(MockES, INDEX, DOC_TYPE, Doc):
resp = MockES().search(index=INDEX, doc_type=DOC_TYPE, size=2)
results = ResultSet(
resp=resp,
model=Doc
)
assert results._values == [obj for obj in resp['hits']['hits']]
for result in results:
assert result.id in MockES().test_ids
def test_get_item_by_index(DocWithDefaultClient, MockES, QUERY):
results = DocWithDefaultClient.search(QUERY)
assert results[0].id == MockES().test_ids[0]
def test_get_item_by_negative_index(DocWithDefaultClient, MockES, QUERY):
results = DocWithDefaultClient.search(QUERY)
assert results[-1].id == MockES().test_ids[-1]
def test_assert_hits():
assert HITS == 'hits'
def test_resultset_extract_meta(Doc):
resultset = ResultSet({}, Doc)
resp = {
HITS: {
HITS: '',
'c': 'd'
},
'a': 'a',
'b': 'c'
}
meta = resultset._extract_meta(resp)
assert meta == {
'a': 'a',
'b': 'c',
HITS: {'c': 'd'}
}
``` |
{
"source": "0mars/graphx",
"score": 2
} |
#### File: rest/swagger/__init__.py
```python
import json
import falcon
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from falcon import Request
from falcon.response import Response
from falcon_apispec import FalconPlugin
from graphx.core.rest.resources import NodeCollection, EdgeCollection
from graphx.core.rest.schemas import Node, Edge
class SwaggerResource:
def __init__(self):
from graphx.configurations.app.settings import Props
from graphx.configurations.app.main import app
from graphx.configurations.app.main import container
# todo: should be moved to env vars
self.spec = APISpec(title='graphx',
version='1.0.0',
openapi_version='2.0',
plugins=[
FalconPlugin(app),
MarshmallowPlugin(),
])
injector = container.get(Props.DI_PROVIDER).get_injector()
self.spec.components.schema('Node', schema=injector.get(Node))
self.spec.path(resource=injector.get(NodeCollection))
self.spec.components.schema('Edge', schema=injector.get(Edge))
self.spec.path(resource=injector.get(EdgeCollection))
def on_get(self, req: Request, resp: Response):
resp.status = falcon.HTTP_200
resp.body = json.dumps(self.spec.to_dict(), ensure_ascii=False)
```
#### File: rest/swagger/registry.py
```python
import os
from registry.services import BootableService, Container
from falcon_swagger_ui import register_swaggerui_app
class SwaggerService(BootableService):
def boot(self, container: Container):
from graphx.configurations.app import settings
from graphx.configurations.infrastructure.rest.swagger import SwaggerResource
falcon = container.get(settings.Props.FALCON)
swagger_resource = SwaggerResource()
falcon.add_route('/v1/swagger.json', swagger_resource)
page_title = 'Swagger UI'
favicon_url = 'https://falconframework.org/favicon-32x32.png'
swagger_ui_url = '/v1/docs' # without trailing slash
schema_url = '{}/v1/swagger.json'.format(container.get(settings.Props.APP_URL))
register_swaggerui_app(
falcon, swagger_ui_url, schema_url,
page_title=page_title,
favicon_url=favicon_url,
config={'supportedSubmitMethods': ['get', 'post', 'put'], }
)
```
#### File: core/rest/assemblers.py
```python
from typing import List
from graphx.core.entities import Node, Edge
from graphx.core.rest.schemas import Node as NodeResource
from graphx.core.rest.schemas import Edge as EdgeResource
class NodeAssembler(object):
@staticmethod
def assemble_collection(nodes: List[Node]) -> List[NodeResource]:
return [NodeResource.from_domain_object(node) for node in nodes]
class EdgeAssembler(object):
@staticmethod
def assemble_collection(edges: List[Edge]) -> List[EdgeResource]:
return [EdgeResource.from_domain_object(edge) for edge in edges]
```
#### File: core/use_cases/find_all_nodes.py
```python
from dataclasses import dataclass
from typing import List
from injector import inject
from graphx.core.data_providers.memory import Node, MemoryNodeRepository
@inject
@dataclass
class FindAllNodes:
repository: MemoryNodeRepository
def execute(self) -> List[Node]:
return self.repository.find_all_nodes()
```
#### File: monomanage/docs/graphs.py
```python
import os
import textwrap
from glob import glob
from ..draw.utilities import remove_postfix
ROOT = os.getcwd()
def write_graphs_rst(save_directory):
search_string = os.path.join(save_directory, "*.svg")
svg_files = [
os.path.basename(filepath)
for filepath in sorted(glob(search_string), key=os.path.splitext)
]
modules = [remove_postfix(filepath, '.svg') for filepath in svg_files]
images_paths = ["../graphs/{}.svg".format(module) for module in modules]
sections = ".. This is automatically generated. DO NOT DIRECTLY EDIT.\n\n"
for module, images_path in zip(modules, images_paths):
header_border = '*' * len(module)
sections += textwrap.dedent("""\
{0}
{1}
{0}
`Back to pymedphys <#pymedphys>`_
.. raw:: html
:file: {2}
""".format(header_border, module, images_path))
save_file = os.path.join(save_directory, 'graphs.rst')
with open(save_file, 'w') as file:
file.write(sections)
```
#### File: monomanage/draw/directories.py
```python
import os
from copy import copy
from ..tree import PackageTree
from .utilities import (
save_dot_file, remove_prefix, get_levels, create_labels, create_href)
ROOT = os.getcwd()
def draw_directory_modules(save_directory):
package_tree = PackageTree(os.path.join(ROOT, 'packages'))
internal_packages = copy(package_tree.roots)
internal_packages.remove('pymedphys')
module_paths = [
item
for package in internal_packages
for item in package_tree.digraph.neighbors(package)
]
modules = {
item: os.path.splitext(item)[0].replace(os.sep, '.')
for item in module_paths
}
dependencies = {
module.replace(os.sep, '.'): {
'.'.join(item.split('.')[0:2])
for item in
package_tree.descendants_dependencies(module)['internal_module'] +
package_tree.descendants_dependencies(module)['internal_package']
# package_tree.descendants_dependencies(module)['internal_file'] +
# list(package_tree.imports[module]['internal_module']) +
# list(package_tree.imports[module]['internal_package']) +
# list(package_tree.imports[module]['internal_file'])
}
for module in modules.keys()
}
dependents = { # type: ignore
key: set() for key in dependencies.keys()
}
try:
for key, values in dependencies.items():
for item in values:
dependents[item].add(key) # type: ignore
except KeyError:
print("\n{}".format(dependents.keys()))
print("\n{}".format(dependencies))
raise
for package in internal_packages:
build_graph_for_a_module(
package, package_tree, dependencies, dependents, save_directory)
def build_graph_for_a_module(graphed_package, package_tree, dependencies,
dependents, save_directory):
print(graphed_package)
current_modules = sorted([
item.replace(os.sep, '.')
for item in package_tree.digraph.neighbors(graphed_package)
])
outfilepath = os.path.join(
save_directory, "{}.svg".format(graphed_package.replace(os.sep, '.')))
if not current_modules:
dot_file_contents = """
strict digraph {{
subgraph cluster_0 {{
"";
label = "{}";
style = dashed;
}}
}}
""".format(graphed_package)
save_dot_file(dot_file_contents, outfilepath)
return
module_internal_relationships = {
module.replace(os.sep, '.'): [
'.'.join(item.split('.')[0:2])
for item in
package_tree.descendants_dependencies(module)['internal_module']
]
for module in sorted(list(package_tree.digraph.neighbors(graphed_package)))
}
levels = get_levels(module_internal_relationships)
internal_nodes = sorted(list(set(module_internal_relationships.keys())))
external_nodes = set()
for module in current_modules:
external_nodes |= dependencies[module]
external_nodes |= dependents[module]
external_nodes = sorted(list(external_nodes))
all_nodes = internal_nodes + external_nodes
def simplify(text):
text = remove_prefix(text, "{}.".format(graphed_package))
text = remove_prefix(text, 'pymedphys_')
return text
label_map = {
node: simplify(node)
for node in all_nodes
}
nodes = ""
for level in range(max(levels.keys()) + 1):
if levels[level]:
grouped_packages = '"; "'.join(sorted(list(levels[level])))
nodes += """
{{ rank = same; "{}"; }}
""".format(grouped_packages)
edges = ""
current_packages = ""
current_dependents = set()
current_dependencies = set()
for module in current_modules:
current_packages += '"{}";\n'.format(module)
for dependency in sorted(list(dependencies[module])):
edges += '"{}" -> "{}";\n'.format(module, dependency)
            if dependency not in current_modules:
current_dependencies.add(dependency)
for dependent in sorted(list(dependents[module])):
edges += '"{}" -> "{}";\n'.format(dependent, module)
            if dependent not in current_modules:
current_dependents.add(dependent)
external_ranks = ""
if current_dependents:
grouped_dependents = '"; "'.join(sorted(list(current_dependents)))
external_ranks += '{{ rank = same; "{}"; }}\n'.format(
grouped_dependents)
if current_dependencies:
grouped_dependencies = '"; "'.join(sorted(list(current_dependencies)))
external_ranks += '{{ rank = same; "{}"; }}\n'.format(
grouped_dependencies)
external_labels = create_labels(label_map)
dot_file_contents = """
strict digraph {{
rankdir = LR;
subgraph cluster_0 {{
{}
label = "{}";
URL = "{}";
style = dashed;
{}
}}
{}
{}
{}
}}
""".format(
current_packages, graphed_package, create_href(graphed_package),
nodes, external_labels, external_ranks, edges)
save_dot_file(dot_file_contents, outfilepath)
```
#### File: monomanage/draw/packages.py
```python
import os
import networkx as nx
from ..tree.build import PackageTree
from .utilities import save_dot_file, create_link
ROOT = os.getcwd()
def draw_packages(save_directory):
print('pymedphys')
tree = PackageTree('packages').package_dependencies_dict
tree.pop('pymedphys')
internal_packages = tuple(tree.keys())
keys = list(tree.keys())
keys.sort(reverse=True)
dag = nx.DiGraph()
for key in keys:
values = tree[key]
dag.add_node(key)
dag.add_nodes_from(values['internal'])
edge_tuples = [
(key, value) for value in values['internal']
]
dag.add_edges_from(edge_tuples)
levels = get_levels(dag, internal_packages)
dot_contents = build_dot_contents(dag, levels)
save_dot_file(dot_contents, os.path.join(save_directory, 'pymedphys.svg'))
def get_levels(dag, internal_packages):
topological = list(nx.topological_sort(dag))
level_map = {}
for package in topological[::-1]:
if package not in internal_packages:
level_map[package] = 0
else:
            dependencies = nx.descendants(dag, package)
            levels = {0}
            for dependency in dependencies:
if dependency in internal_packages:
try:
levels.add(level_map[dependency])
except KeyError:
pass
max_level = max(levels)
level_map[package] = max_level + 1
levels = {
level: []
for level in range(max(level_map.values()) + 1)
}
for package, level in level_map.items():
levels[level].append(package)
return levels
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
else:
raise ValueError("Prefix not found.")
def build_dot_contents(dag, levels):
nodes = ""
for level in range(max(levels.keys()) + 1):
if levels[level]:
trimmed_nodes = [
'"{}" {}'.format(
remove_prefix(node, 'pymedphys_'), create_link(node))
for node in levels[level]
]
grouped_packages = '; '.join(trimmed_nodes)
nodes += """
{{ rank = same; {}; }}
""".format(grouped_packages)
edges = ""
for edge in dag.edges():
trimmed_edge = [
remove_prefix(node, 'pymedphys_') for node in edge
]
edges += "{} -> {};\n".format(*trimmed_edge)
dot_file_contents = """
strict digraph {{
rankdir = LR;
{}\n{}
}}
""".format(nodes, edges)
return dot_file_contents
```
#### File: monomanage/parse/imports.py
```python
import os
import ast
from stdlib_list import stdlib_list
STDLIB = set(stdlib_list())
IMPORT_TYPES = {
type(ast.parse('import george').body[0]), # type: ignore
type(ast.parse('import george as macdonald').body[0])} # type: ignore
IMPORT_FROM_TYPES = {
type(ast.parse('from george import macdonald').body[0]) # type: ignore
}
ALL_IMPORT_TYPES = IMPORT_TYPES.union(IMPORT_FROM_TYPES)
CONVERSIONS = {
'attr': 'attrs',
'PIL': 'Pillow',
'Image': 'Pillow',
'mpl_toolkits': 'matplotlib',
'dateutil': 'python_dateutil'
}
def get_imports(filepath, relative_filepath, internal_packages, depth):
with open(filepath, 'r') as file:
data = file.read()
parsed = ast.parse(data)
imports = [
node for node in ast.walk(parsed) if type(node) in ALL_IMPORT_TYPES]
stdlib_imports = set()
external_imports = set()
internal_package_imports = set()
internal_module_imports = set()
internal_file_imports = set()
def get_base_converted_module(name):
name = name.split('.')[0]
try:
name = CONVERSIONS[name]
except KeyError:
pass
return name
def add_level_0(name):
base_converted = get_base_converted_module(name)
if base_converted in STDLIB:
stdlib_imports.add(base_converted)
elif base_converted in internal_packages:
internal_package_imports.add(name)
else:
external_imports.add(base_converted)
for an_import in imports:
if type(an_import) in IMPORT_TYPES:
for alias in an_import.names:
add_level_0(alias.name)
elif type(an_import) in IMPORT_FROM_TYPES:
if an_import.level == 0:
add_level_0(an_import.module)
elif an_import.level == 1 and depth == 2:
module_path = (
relative_filepath.split(os.sep)[0:2] + [an_import.module])
internal_file_imports.add('.'.join(module_path))
elif (
(an_import.level == 1 and depth == 1) or
(an_import.level == 2 and depth == 2)):
module_path = (
relative_filepath.split(os.sep)[0:1] + [an_import.module])
internal_module_imports.add('.'.join(module_path))
else:
raise ValueError(
"Unexpected depth and import level of relative "
"import")
else:
raise TypeError("Unexpected import type")
return {
'stdlib': stdlib_imports,
'external': external_imports,
'internal_package': internal_package_imports,
'internal_module': internal_module_imports,
'internal_file': internal_file_imports
}
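# Illustrative only — for a depth-1 file containing "import os" and
# "from .utils import helper" (names hypothetical), the result places 'os'
# under 'stdlib' and '<package>.utils' under 'internal_module'.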
```
#### File: monomanage/propagate/dependencies.py
```python
import os
import textwrap
import json
from glob import glob
ROOT = os.getcwd()
def main():
with open(os.path.join(ROOT, 'dependencies.json'), 'r') as file:
dependencies_data = json.load(file)
tree = dependencies_data['tree']
pypi_pins = dependencies_data['pins']['pypi']
npm_pins = dependencies_data['pins']['npm']
internal_packages = [
os.path.basename(filepath)
for filepath in glob(os.path.join(ROOT, 'packages', '*'))
]
try:
assert set(internal_packages) == set(tree.keys())
except AssertionError:
print("Internal packages not in tree: {}".format(
set(internal_packages).difference(set(tree.keys()))))
print("Tree packages not in internal: {}".format(
set(tree.keys()).difference(set(internal_packages))))
raise
try:
assert set(internal_packages) == set(pypi_pins['internal'].keys())
except AssertionError:
internal = set(internal_packages)
pypi = set(pypi_pins['internal'].keys())
print("Internal packages not in pinned: {}".format(
internal.difference(pypi)))
print("Pinned packages not in internal: {}".format(
pypi.difference(internal)))
raise
assert set(internal_packages) == set(npm_pins['internal'].keys())
for package, dependency_store in tree.items():
install_requires = []
keys_to_keep = {'internal', 'external'}
for where, dependencies in dependency_store.items():
if where in keys_to_keep:
for dependency in dependencies:
try:
pin = " " + pypi_pins[where][dependency]
except KeyError:
pin = ""
requirement_string = dependency + pin
install_requires.append(requirement_string)
install_requires.sort()
install_requires_filepath = os.path.join(
ROOT, "packages", package, "src", package, "_install_requires.py")
install_requires_contents = textwrap.dedent("""\
install_requires = {}
""").format(json.dumps(install_requires, indent=4))
with open(install_requires_filepath, 'w') as file:
file.write(install_requires_contents)
for package, dependency_store in tree.items():
internal_dependencies = {
dependency: npm_pins['internal'][dependency]
for dependency in dependency_store['internal']
}
package_json_filepath = os.path.join(
ROOT, "packages", package, "package.json")
with open(package_json_filepath, 'r') as file:
data = json.load(file)
try:
external_dependencies = {
package: pin
for package, pin in data['dependencies'].items()
if package not in internal_packages
}
except KeyError:
external_dependencies = {}
data['dependencies'] = {
**internal_dependencies,
**external_dependencies
}
with open(package_json_filepath, 'w') as file:
json.dump(data, file, indent=2, sort_keys=True)
if __name__ == "__main__":
main()
```
#### File: monomanage/propagate/versions.py
```python
import os
import json
from glob import glob
import textwrap
import semver
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd())))
def main():
version_filepath = glob(os.path.join(
"src", "*", "_version.py"))[0]
package_name = os.path.split(os.path.dirname(version_filepath))[-1]
with open('package.json', 'r') as file:
data = json.load(file)
semver_string = data['version']
loaded_version_info = semver_string.replace(
'.', ' ').replace('-', ' ').split(' ')
version_info = [
int(number)
for number in loaded_version_info[0:3]
] + [''.join(loaded_version_info[3::])] # type: ignore
__version__ = '.'.join(
map(str, version_info[:3])) + ''.join(version_info[3:]) # type: ignore
version_file_contents = textwrap.dedent("""\
version_info = {}
__version__ = "{}"
""".format(version_info, __version__))
with open(version_filepath, 'w') as file:
file.write(version_file_contents)
semver_parsed = semver.parse(semver_string)
if semver_parsed['major'] == 0:
upper_limit = semver.bump_minor(semver_string)
npm_version_prepend = "~"
else:
upper_limit = semver.bump_major(semver_string)
npm_version_prepend = "^"
dependencies_filepath = os.path.join(ROOT, "dependencies.json")
with open(dependencies_filepath, 'r') as file:
dependencies_data = json.load(file)
dependencies_data['pins']['pypi']['internal'][package_name] = (
">= {}, < {}".format(__version__, upper_limit))
dependencies_data['pins']['npm']['internal'][package_name] = (
"{}{}".format(npm_version_prepend, semver_string))
with open(dependencies_filepath, 'w') as file:
json.dump(dependencies_data, file, indent=2, sort_keys=True)
if __name__ == "__main__":
main()
``` |
{
"source": "0mars/mailgun-sdk",
"score": 3
} |
#### File: mailgun_sdk/domain/messages.py
```python
import json
from mailgun_sdk.base import ApiDomainResource
class Messages(ApiDomainResource):
"""
    Messages resource for sending email through the Mailgun API.
"""
api_endpoint = "messages"
require_tls = False
"""
data example
data={
"from": "Excited User <YOU@YOUR_DOMAIN_NAME>",
"to": ["<EMAIL>"],
"subject": "Hello",
"template": "template.test",
"h:X-Mailgun-Variables": json.dumps(
{"title": "API documentation", "body": "Sending messages with templates"}
)
},
"""
def send_via_template(self, from_name: str, from_email: str, to: str, subject: str, template: str, variables: dict):
payload = {
"from": "{} <{}>".format(from_name, from_email),
"to": [to],
"subject": subject,
"template": template,
"h:X-Mailgun-Variables": json.dumps(variables)
}
if self.require_tls:
payload['o:require-tls'] = 'True'
return self.request(
"POST",
data=payload,
)
```
#### File: mailgun-sdk/tests/test_init.py
```python
import unittest
import mailgun_sdk
from mailgun_sdk.api import MailgunApi
class MailgunInitTestCase(unittest.TestCase):
def test_api(self):
self.assertIsInstance(mailgun_sdk.api, MailgunApi)
def test_initialize(self):
mailgun_sdk.initialize("api-key-xxx")
self.assertEqual(mailgun_sdk.api.api_key, "api-key-xxx")
``` |
{
"source": "0mco/shadowsocksR",
"score": 2
} |
#### File: shadowsocks/plugins/subscribe.py
```python
if __name__ == '__main__':
import os, sys
file_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(file_path, '../../'))
sys.path.insert(0, os.path.join(file_path, '../'))
from shadowsocks.lib import ssrlink
from urllib import request
def fetch_ssr(url):
"""Retrive ssr links form url, return a list."""
# TODO: sometimes we need to try to get via a proxy.
# base64_ssr = requests.get(url).text
headers = {
'User-Agent': 'Mozilla/5.0',
}
req = request.Request(url, headers=headers)
base64_ssr = request.urlopen(req).read().decode('utf-8')
ssrlinks = ssrlink.base64_decode(base64_ssr).split('\n')
# The last one should be empty string.
# Of course we can handle more correctly.
# But as for now, it just works.
return [link.strip() for link in ssrlinks if ssrlink.is_valide_ssrlink(link)]
if __name__ == "__main__":
import sys
if len(sys.argv) == 2:
print(fetch_ssr(sys.argv[1]))
```
#### File: shadowsocksR/tests/test_config.py
```python
from utils import *
add_path_if_main()
from shadowsocks.lib.config import *
def test_load():
config = ClientConfigManager().load('test_config.json')
# config.create('servers', {})
print('after instancing:', config.config)
servers = config.get('servers')
config.add('servers', 'ssr://www.baidu.com/')
servers = config.get('servers')
# def test_clear():
# print('before clear')
# config = ClientConfigManager().load('test_config.json')
# servers = config.get('servers')
# print('servers:', servers)
# config.clear()
# print('after clear')
# servers = config.get('servers')
# print('servers:', servers)
def test_recursive_key():
config = ClientConfigManager().load('test_config.json')
servers = config.get('servers')
config.add('servers', 'ssr://www.google.com/')
print('creating ks')
config.create('ks', {})
print('creating ks/ww')
config.create('ks/ww', {})
print('*' * 40)
print(config.config)
print('removing ks/ww')
config.remove('ks/ww')
print(config.config)
print('removing ks')
config.remove('ks')
print(config.config)
@output_formatter
def test_subscription():
config = ClientConfigManager('test_config.pickle')
singleton_test = ClientConfigManager('test_config.pickle')
assert config is singleton_test
print('before:')
servers = config.get_server()
sub = config.get_subscription()
print('server:', servers)
print('subscription:', sub)
# config.clear()
config.add_subscription(['sub1', 'sub2'])
config.add_server(['server1', 'server2'])
servers = config.get_server()
sub = config.get_subscription()
print('server:', servers)
print('subscription:', sub)
config.add_subscription('sub3')
config.add_server('server3')
servers = config.get_server()
sub = config.get_subscription()
print('server:', servers)
print('subscription:', sub)
print(config.config)
if __name__ == "__main__":
# config = ClientConfigManager('test_config.json')
# config.clear()
# test_recursive_key()
test_subscription()
``` |
{
"source": "0merjavaid/Microscopic-segmentation",
"score": 3
} |
#### File: Microscopic-segmentation/models/deeplab.py
```python
import torch
import torchvision
import torch.nn as nn
def get_deeplab(backbone="resnet101", classes=2, pretrained=True):
if backbone == "resnet50":
model = torchvision.models.segmentation.deeplabv3_resnet50(
pretrained=pretrained)
elif backbone == "resnet101":
model = torchvision.models.segmentation.deeplabv3_resnet101(
pretrained=pretrained)
model.classifier[4] = nn.Conv2d(
in_channels=256,
out_channels=classes,
kernel_size=1,
stride=1
)
    # Freeze the first 250 parameter tensors (roughly the backbone) so that
    # only the later layers and the new classifier head are fine-tuned.
    for i, param in enumerate(model.parameters()):
        if i < 250:
            param.requires_grad = False
return model
```
#### File: Microscopic-segmentation/models/model_factory.py
```python
import os
import torch
import torch.nn as nn
from .maskrcnn import *
from .unet import Unet34
from .deeplab import *
import torchvision.models as models
def get_model(name, weights, classes=4, max_instances=250, maskrcnn_backbone="resnet101"):
chosen_model = None
if weights is not None:
assert os.path.exists(weights)
if name.lower() == "maskrcnn":
assert classes >= 2
chosen_model = get_mask_rcnn(classes, max_instances, maskrcnn_backbone)
elif name.lower() == "unet":
m_base = nn.Sequential(
*(list(models.resnet34(pretrained=True).children())[:8]))
chosen_model = Unet34(m_base)
elif name.lower() == "deeplab":
chosen_model = get_deeplab(classes=classes)
    else:
        raise ValueError(
            "{} is currently not available, try MaskRCNN, UNET or Deeplab".format(name)
        )
if weights is not None:
assert os.path.exists(weights)
chosen_model.load_state_dict(torch.load(weights))
return chosen_model
```
#### File: Microscopic-segmentation/utils/utils.py
```python
import os
from torchvision import transforms
import mask_utils.transforms as T
def parse_config(path):
class_to_ids = dict()
assert os.path.exists(path)
with open(path, "r") as f:
lines = f.readlines()
for id, line in enumerate(lines):
line = line.strip().lower()
class_to_ids[line] = id+1
assert len(class_to_ids) >= 1
return class_to_ids
def collate_fn(batch):
return tuple(zip(*batch))
def get_transform(model, train):
transform = []
if model.lower() == "maskrcnn":
transform.append(T.ToTensor())
if train:
transform.append(T.RandomHorizontalFlip(0.5))
else:
preprocess = transforms.Compose([
transforms.Resize((1024, 1024)),
transforms.ToTensor(),
])
return preprocess
return T.Compose(transform)
``` |
{
"source": "0Miquel/LIIF-temporal",
"score": 3
} |
#### File: LIIF-temporal/models/mlp.py
```python
import torch.nn as nn
from models import register
@register('mlp')
class MLP(nn.Module):
def __init__(self, in_dim, out_dim, hidden_list):
super().__init__()
layers = []
lastv = in_dim
for hidden in hidden_list:
layers.append(nn.Linear(lastv, hidden))
layers.append(nn.ReLU())
lastv = hidden
layers.append(nn.Linear(lastv, out_dim))
self.layers = nn.Sequential(*layers)
def forward(self, x):
shape = x.shape[:-1]
x = self.layers(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
@register('res-mlp')
class ResMLP(nn.Module):
def __init__(self, in_dim, out_dim, hidden_list):
super().__init__()
self.block1 = nn.Linear(in_dim, hidden_list[0])
self.block2 = ResBlockMLP(hidden_list[:3])
self.block3 = ResBlockMLP(hidden_list[1:])
block4 = []
block4.append(nn.ReLU())
block4.append(nn.Linear(hidden_list[-1], out_dim))
self.block4 = nn.Sequential(*block4)
def forward(self, x):
shape = x.shape[:-1]
x = self.block1(x.view(-1, x.shape[-1]))
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
return x.view(*shape, -1)
class ResBlockMLP(nn.Module):
def __init__(self, dims):
super(ResBlockMLP, self).__init__()
m = []
m.append(nn.ReLU())
m.append(nn.Linear(dims[0], dims[1]))
m.append(nn.ReLU())
m.append(nn.Linear(dims[1], dims[2]))
self.body = nn.Sequential(*m)
def forward(self, x):
res = self.body(x)
res += x
return res
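# Note: the residual addition above requires the block's input and output
# widths to match, i.e. dims[0] == dims[2]; ResMLP therefore assumes a
# hidden_list chosen so each overlapping triple starts and ends at the
# same width.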
``` |
{
"source": "0MNIP0TENT/example-goku-website",
"score": 2
} |
#### File: example-goku-website/videos/models.py
```python
from django.db import models
# Create your models here.
from django.contrib.auth import get_user_model
from django.urls import reverse
class Videos(models.Model):
name = models.CharField(max_length=255)
videofile = models.FileField(upload_to='videos', null=True,verbose_name="")
date = models.DateTimeField(auto_now_add=True)
    author = models.ForeignKey(
        get_user_model(),
        on_delete=models.CASCADE,
    )

    class Meta:
        verbose_name = 'videos'
def __str__(self):
return self.name + ': ' + str(self.videofile)
class Comment(models.Model):
video = models.ForeignKey(
Videos,
on_delete = models.CASCADE,
related_name = 'Comments',
)
comment = models.TextField()
author = models.ForeignKey(
get_user_model(),
on_delete = models.CASCADE,
)
def __str__(self):
return self.comment
    def get_absolute_url(self):
        return reverse('videos')
``` |
{
"source": "0mok/grpc",
"score": 2
} |
#### File: grpcio/grpc/_common.py
```python
import logging
import six
import grpc
from grpc._cython import cygrpc
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
cygrpc.ConnectivityState.idle:
grpc.ChannelConnectivity.IDLE,
cygrpc.ConnectivityState.connecting:
grpc.ChannelConnectivity.CONNECTING,
cygrpc.ConnectivityState.ready:
grpc.ChannelConnectivity.READY,
cygrpc.ConnectivityState.transient_failure:
grpc.ChannelConnectivity.TRANSIENT_FAILURE,
cygrpc.ConnectivityState.shutdown:
grpc.ChannelConnectivity.SHUTDOWN,
}
CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
cygrpc.StatusCode.ok: grpc.StatusCode.OK,
cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED,
cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN,
cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT,
cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED,
cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND,
cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS,
cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED,
cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED,
cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED,
cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION,
cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED,
cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE,
cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED,
cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL,
cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE,
cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS,
}
STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
grpc_code: cygrpc_code
for cygrpc_code, grpc_code in six.iteritems(
CYGRPC_STATUS_CODE_TO_STATUS_CODE)
}
def encode(s):
if isinstance(s, bytes):
return s
else:
return s.encode('ascii')
def decode(b):
if isinstance(b, str):
return b
else:
try:
return b.decode('utf8')
except UnicodeDecodeError:
logging.exception('Invalid encoding on %s', b)
return b.decode('latin1')
def _transform(message, transformer, exception_message):
if transformer is None:
return message
else:
try:
return transformer(message)
except Exception: # pylint: disable=broad-except
logging.exception(exception_message)
return None
def serialize(message, serializer):
return _transform(message, serializer, 'Exception serializing message!')
def deserialize(serialized_message, deserializer):
return _transform(serialized_message, deserializer,
'Exception deserializing message!')
def fully_qualified_method(group, method):
return '/{}/{}'.format(group, method)
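# e.g. fully_qualified_method('helloworld.Greeter', 'SayHello')
#      returns '/helloworld.Greeter/SayHello'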
``` |
{
"source": "0mp/archinfo",
"score": 2
} |
#### File: archinfo/archinfo/arch_s390x.py
```python
try:
import capstone as _capstone
except ImportError:
_capstone = None
try:
import keystone as _keystone
except ImportError:
_keystone = None
try:
import pyvex as _pyvex
except ImportError:
_pyvex = None
from .arch import Arch, register_arch, Endness, Register
from .archerror import ArchError
from .tls import TLSArchInfo
class ArchS390X(Arch):
def __init__(self, endness=Endness.BE):
super(ArchS390X, self).__init__(endness)
if endness != Endness.BE:
raise ArchError('Arch s390x must be big endian')
self.argument_register_positions = {
self.registers['r2'][0]: 0,
self.registers['r3'][0]: 1,
self.registers['r4'][0]: 2,
self.registers['r5'][0]: 3,
self.registers['r6'][0]: 4,
# fp registers
self.registers['f0'][0]: 0,
self.registers['f2'][0]: 1,
self.registers['f4'][0]: 2,
self.registers['f6'][0]: 3,
} if _pyvex is not None else None
bits = 64
vex_arch = 'VexArchS390X' # enum VexArch
name = 'S390X'
qemu_name = 's390x' # target/s390x
triplet = 's390x-linux-gnu'
linux_name = 's390' # arch/s390
max_inst_bytes = 6
ret_offset = 584 # offsetof(VexGuestS390XState, guest_r2)
syscall_num_offset = 576 # offsetof(VexGuestS390XState, guest_r1)
call_pushes_ret = False
stack_change = -8
initial_sp = 0x40000000000
sizeof = {'short': 16, 'int': 32, 'long': 64, 'long long': 64}
if _capstone:
cs_arch = _capstone.CS_ARCH_SYSZ
cs_mode = _capstone.CS_MODE_BIG_ENDIAN
if _keystone:
ks_arch = _keystone.KS_ARCH_SYSTEMZ
ks_mode = _keystone.KS_MODE_BIG_ENDIAN
ret_instruction = b'\x07\xf4' # br %r14
nop_instruction = b'\x07\x07' # nopr %r7
instruction_alignment = 2
register_list = [
Register(name='ia', size=8, alias_names=('ip', 'pc')),
Register(name='r0', size=8,
general_purpose=True),
Register(name='r1', size=8,
general_purpose=True, subregisters=[('r1_32', 4, 4)]),
Register(name='r2', size=8,
general_purpose=True, argument=True,
subregisters=[('r2_32', 4, 4)]),
Register(name='r3', size=8,
general_purpose=True, argument=True,
linux_entry_value='argc',
                 subregisters=[('r3_32', 4, 4)]),
Register(name='r4', size=8,
general_purpose=True, argument=True,
linux_entry_value='argv',
subregisters=[('r4_32', 4, 4)]),
Register(name='r5', size=8,
general_purpose=True, argument=True,
linux_entry_value='envp',
subregisters=[('r5_32', 4, 4)]),
Register(name='r6', size=8,
general_purpose=True, argument=True, persistent=True,
subregisters=[('r6_32', 4, 4)]),
Register(name='r7', size=8,
general_purpose=True, persistent=True,
subregisters=[('r7_32', 4, 4)]),
Register(name='r8', size=8,
general_purpose=True, persistent=True,
subregisters=[('r8_32', 4, 4)]),
Register(name='r9', size=8,
general_purpose=True, persistent=True,
subregisters=[('r9_32', 4, 4)]),
Register(name='r10', size=8,
general_purpose=True, persistent=True,
subregisters=[('r10_32', 4, 4)]),
Register(name='r11', size=8, alias_names=('bp',),
general_purpose=True, persistent=True,
subregisters=[('r11_32', 4, 4)]),
Register(name='r12', size=8,
general_purpose=True, persistent=True,
subregisters=[('r12_32', 4, 4)]),
Register(name='r13', size=8,
general_purpose=True, persistent=True,
subregisters=[('r13_32', 4, 4)]),
# Strictly speaking, there is no fixed link register on s390x.
# However, %r14 is almost always used for that, so mark it as such.
# Situations when that's not the case (e.g. brasl %r0,X)
# can still be handled explicitly.
Register(name='r14', size=8,
general_purpose=True, alias_names=('lr',)),
Register(name='r15', size=8, alias_names=('sp',),
general_purpose=True, persistent=True,
default_value=(initial_sp, True, 'global')),
Register(name='v0', size=16, subregisters=[('f0', 0, 8)],
floating_point=True),
Register(name='v1', size=16, subregisters=[('f1', 0, 8)],
floating_point=True),
Register(name='v2', size=16, subregisters=[('f2', 0, 8)],
floating_point=True),
Register(name='v3', size=16, subregisters=[('f3', 0, 8)],
floating_point=True),
Register(name='v4', size=16, subregisters=[('f4', 0, 8)],
floating_point=True),
Register(name='v5', size=16, subregisters=[('f5', 0, 8)],
floating_point=True),
Register(name='v6', size=16, subregisters=[('f6', 0, 8)],
floating_point=True),
Register(name='v7', size=16, subregisters=[('f7', 0, 8)],
floating_point=True),
Register(name='v8', size=16, subregisters=[('f8', 0, 8)],
floating_point=True),
Register(name='v9', size=16, subregisters=[('f9', 0, 8)],
floating_point=True),
Register(name='v10', size=16, subregisters=[('f10', 0, 8)],
floating_point=True),
Register(name='v11', size=16, subregisters=[('f11', 0, 8)],
floating_point=True),
Register(name='v12', size=16, subregisters=[('f12', 0, 8)],
floating_point=True),
Register(name='v13', size=16, subregisters=[('f13', 0, 8)],
floating_point=True),
Register(name='v14', size=16, subregisters=[('f14', 0, 8)],
floating_point=True),
Register(name='v15', size=16, subregisters=[('f15', 0, 8)],
floating_point=True),
Register(name='v16', size=16, vector=True),
Register(name='v17', size=16, vector=True),
Register(name='v18', size=16, vector=True),
Register(name='v19', size=16, vector=True),
Register(name='v20', size=16, vector=True),
Register(name='v21', size=16, vector=True),
Register(name='v22', size=16, vector=True),
Register(name='v23', size=16, vector=True),
Register(name='v24', size=16, vector=True),
Register(name='v25', size=16, vector=True),
Register(name='v26', size=16, vector=True),
Register(name='v27', size=16, vector=True),
Register(name='v28', size=16, vector=True),
Register(name='v29', size=16, vector=True),
Register(name='v30', size=16, vector=True),
Register(name='v31', size=16, vector=True),
Register(name='a0', size=4),
Register(name='a1', size=4),
Register(name='a2', size=4),
Register(name='a3', size=4),
Register(name='a4', size=4),
Register(name='a5', size=4),
Register(name='a6', size=4),
Register(name='a7', size=4),
Register(name='a8', size=4),
Register(name='a9', size=4),
Register(name='a10', size=4),
Register(name='a11', size=4),
Register(name='a12', size=4),
Register(name='a13', size=4),
Register(name='a14', size=4),
Register(name='a15', size=4),
Register(name='nraddr', size=8),
Register(name='cmstart', size=8),
Register(name='cmlen', size=8),
Register(name='ip_at_syscall', size=8, artificial=True),
Register(name='emnote', size=4),
]
function_prologs = {
br'\xeb.[\xf0-\xff]..\x24', # stmg %r1,%r3,d2(%r15)
}
function_epilogs = {
br'\x07\xf4', # br %r14
}
got_section_name = '.got'
ld_linux_name = 'ld64.so.1'
elf_tls = TLSArchInfo(
variant=2, # 3.4.7 @ https://www.uclibc.org/docs/tls.pdf
tcbhead_size=64, # sizeof(tcbhead_t)
head_offsets=[0], # offsetof(tcbhead_t, tcb)
dtv_offsets=[8], # offsetof(tcbhead_t, dtv)
pthread_offsets=[16], # offsetof(tcbhead_t, self)
tp_offset=0,
dtv_entry_offset=0)
register_arch(['s390'], 64, Endness.BE, ArchS390X)
``` |
{
"source": "0mp/cbeams",
"score": 3
} |
#### File: cbeams/tests/test_shape.py
```python
from math import sqrt
import pytest
from .. import shape
from ..shape import Strip
EPSILON = 0.01
def test_subtract_row_no_subtracts():
assert shape._subtract_row([Strip(0, 1, 5)], []) == [Strip(0, 1, 5)]
def test_subtract_row_cut_in_two():
# shape 12345
# hole 23
# result 1 45
assert \
shape._subtract_row([Strip(0, 1, 5)], [Strip(0, 2, 2)]) == \
[Strip(0, 1, 1), Strip(0, 4, 2)]
def test_disc_radius_lt_0_should_raise():
with pytest.raises(ValueError):
shape.disc(1, 1, -EPSILON)
def test_disc_radius_0():
# r==0 is a special case, blank
assert shape.disc(3, 3, 0) == []
def test_disc_radius_lt_1():
# 0 < r < 1: #
# (Irregular compared to all the cases below, in that the first '<'
# is not a '<=', due to the special case of r==0)
EXPECTED = [
(3, 3, 1) #
]
assert shape.disc(3, 3, 0 + EPSILON) == EXPECTED
assert shape.disc(3, 3, 1 - EPSILON) == EXPECTED
def test_disc_radius_1():
# 1 <= r < sqrt(2)
EXPECTED = [
(2, 3, 1), #
(3, 2, 3), ###
(4, 3, 1), #
]
assert shape.disc(3, 3, 1) == EXPECTED
assert shape.disc(3, 3, sqrt(2) - EPSILON) == EXPECTED
def test_disc_radius_sqrt_2():
# sqrt(2) <= r < 2
EXPECTED = [
(2, 2, 3), ###
(3, 2, 3), ###
(4, 2, 3), ###
]
assert shape.disc(3, 3, sqrt(2)) == EXPECTED
assert shape.disc(3, 3, 2 - EPSILON) == EXPECTED
def test_disc_radius_2():
# 2 <= r < sqrt(5)
EXPECTED = [
(1, 3, 1), #
(2, 2, 3), ###
(3, 1, 5), #####
(4, 2, 3), ###
(5, 3, 1), #
]
assert shape.disc(3, 3, 2) == EXPECTED
assert shape.disc(3, 3, sqrt(5) - EPSILON) == EXPECTED
def test_disc_radius_sqrt_5():
# sqrt(5) <= r < sqrt(8)
EXPECTED = [
Strip(*s) for s in [
(1, 2, 3), ###
(2, 1, 5), #####
(3, 1, 5), #####
(4, 1, 5), #####
(5, 2, 3), ###
]
]
assert shape.disc(3, 3, sqrt(5)) == EXPECTED
assert shape.disc(3, 3, sqrt(8) - EPSILON) == EXPECTED
def test_annulus_radius_2_1():
EXPECTED = [
Strip(*s) for s in [
(1, 3, 1), #
(2, 2, 1), (2, 4, 1), # #
(3, 1, 1), (3, 5, 1), # #
(4, 2, 1), (4, 4, 1), # #
(5, 3, 1), #
]
]
assert list(shape.annulus(3, 3, 2, 1)) == EXPECTED
``` |
{
"source": "0mp/python-dtrace",
"score": 3
} |
#### File: python-dtrace/examples/cli.py
```python
from __future__ import print_function
import sys
import dtrace
def print_lquantize(values):
"""
Print a lquantize.
"""
# find max
maxi = 0
for item in values:
if item[1] > maxi:
maxi = item[1]
for item in values:
if item[0][0] > 0:
print('%10s ' % item[0][0], end=' ')
for _ in range(0, ((40 * int(item[1])) / maxi)):
sys.stdout.write('*')
for _ in range(((40 * int(item[1])) / maxi), 40):
sys.stdout.write(' ')
print(' %5s' % item[1])
def pretty_print(_iden, action, keys, values):
"""
Pretty print aggregation walker.
"""
if action in [1799]:
print(keys, values)
elif action == 1800:
# lquantize
print('\n ', keys[0], '\n')
print('{0:>10s} {1:-^40} {2}'.format('value', ' Distribution ',
'count'))
print_lquantize(values)
else:
pass
def brendan():
"""
DTrace fans will understand this :-D
"""
print('Tracing... Hit Ctrl-C to end')
def run_dtrace(script):
"""
Run DTrace till Ctrl+C is pressed...
"""
thr = dtrace.DTraceConsumerThread(script, False, walk_func=pretty_print)
thr.start()
brendan()
try:
while True:
pass
except (KeyboardInterrupt, SystemExit):
thr.stop()
thr.join()
if __name__ == '__main__':
# run_dtrace('dtrace:::BEGIN {trace("Hello World"); exit(0);}')
# run_dtrace('syscall:::entry { @num[pid,execname] = count(); }')
TMP = 'syscall::read:entry { @dist[execname] = lquantize(arg0, 0, 12, 2);}'
run_dtrace(TMP)
# run_dtrace('sysinfo:::readch { @dist[execname] = quantize(arg0); }')
```
#### File: python-dtrace/examples/read_bytes.py
```python
from __future__ import print_function
import dtrace
SCRIPT = 'sysinfo:::readch { @bytes[execname] = sum(arg0); }'
def main():
"""
Run DTrace...
"""
print('Hint: if you don\'t get any output try running it with pfexec...')
consumer = dtrace.DTraceConsumer()
consumer.run(SCRIPT, 4)
if __name__ == '__main__':
main()
```
#### File: python-dtrace/examples/syscall_count.py
```python
import dtrace
SCRIPT = 'syscall:::entry { @num[pid,execname] = count(); }'
def main():
"""
Run DTrace...
"""
consumer = dtrace.DTraceConsumer()
consumer.run(SCRIPT, 2)
if __name__ == '__main__':
main()
```
#### File: python-dtrace/tests/dtrace_ctypes_test.py
```python
__author__ = 'tmetsch'
import unittest
from ctypes import c_char_p
from dtrace_ctypes import consumer
SCRIPT = 'dtrace:::BEGIN {trace("Hello World");}'
class TestDTraceConsumer(unittest.TestCase):
"""
Tests ctypes based DTrace consumer.
"""
def setUp(self):
self.out = b''
self.consumer = consumer.DTraceConsumer(out_func=self._get_output)
def test_run_for_success(self):
"""
Test for success.
"""
self.consumer.run(SCRIPT)
def test_run_for_sanity(self):
"""
Test for sanity.
"""
self.consumer.run(SCRIPT)
self.assertEqual(self.out, b'Hello World')
def _get_output(self, data, _arg):
tmp = c_char_p(data.contents.dtbda_buffered).value.strip()
self.out += tmp
return 0
``` |
{
"source": "0mri/GStore",
"score": 2
} |
#### File: comment/api/serializers.py
```python
from rest_framework import serializers
from backend.api.comment.models import Comment
from django.contrib.humanize.templatetags.humanize import naturaltime
from backend.account.api.serializers import PublicUserSerilizer
# class ReplySerializer(serializers.ModelSerializer):
# user = serializers.SlugRelatedField('username', read_only=True)
# created_at = serializers.SerializerMethodField()
# class Meta:
# model = Comment
# fields = '__all__'
# # read_only_fields = ['replies']
# def get_created_at(self, obj):
# return naturaltime(obj.created_at)
class CommentSerializer(serializers.ModelSerializer):
user = PublicUserSerilizer(read_only=True)
created_at = serializers.SerializerMethodField()
replies = serializers.SerializerMethodField(read_only=True)
class Meta:
model = Comment
fields = ['id', 'user', 'product', 'created_at',
'content', 'replies', 'parent']
    def create(self, validated_data):
        user = self.context.get('request').user
        return Comment.objects.create(user=user, **validated_data)
def get_created_at(self, obj):
return naturaltime(obj.created_at)
def get_replies(self, obj):
""" self referral field """
serializer = CommentSerializer(
instance=obj.replies.all_replies(),
many=True
)
return serializer.data
``` |
{
"source": "0mza987/azureml-examples",
"score": 3
} |
#### File: basics/src/hello-iris.py
```python
import os
import argparse
import pandas as pd
# define functions
def main(args):
# read in data
df = pd.read_csv(args.iris_csv)
# print first 5 lines
print(df.head())
# ensure outputs directory exists
os.makedirs("outputs", exist_ok=True)
# save data to outputs
df.to_csv("outputs/iris.csv", index=False)
def parse_args():
# setup arg parser
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument("--iris-csv", type=str)
# parse args
args = parser.parse_args()
# return args
return args
# run script
if __name__ == "__main__":
# parse args
args = parse_args()
# run main function
main(args)
```
#### File: src/train-model/main.py
```python
import os
import mlflow
import argparse
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# TODO - add mlflow logging
# define network architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 3)
self.conv3 = nn.Conv2d(64, 128, 3)
self.fc1 = nn.Linear(128 * 6 * 6, 120)
self.dropout = nn.Dropout(p=0.2)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, 128 * 6 * 6)
x = self.dropout(F.relu(self.fc1(x)))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# define functions
def train(train_loader, model, criterion, optimizer, epoch, device, print_freq, rank):
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data[0].to(device), data[1].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % print_freq == 0: # print every print_freq mini-batches
print(
"Rank %d: [%d, %5d] loss: %.3f"
% (rank, epoch + 1, i + 1, running_loss / print_freq)
)
running_loss = 0.0
def main(args):
# get PyTorch environment variables
world_size = int(os.environ["WORLD_SIZE"])
rank = int(os.environ["RANK"])
local_rank = int(os.environ["LOCAL_RANK"])
distributed = world_size > 1
# set device
if distributed:
device = torch.device("cuda", local_rank)
else:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# initialize distributed process group using default env:// method
if distributed:
torch.distributed.init_process_group(backend="nccl")
# define train and dataset DataLoaders
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
train_set = torchvision.datasets.CIFAR10(
root=args.data_dir, train=True, download=False, transform=transform
)
if distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
sampler=train_sampler,
)
model = Net().to(device)
# wrap model with DDP
if distributed:
model = nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank], output_device=local_rank
)
# define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(
model.parameters(), lr=args.learning_rate, momentum=args.momentum
)
# train the model
for epoch in range(args.epochs):
print("Rank %d: Starting epoch %d" % (rank, epoch))
if distributed:
train_sampler.set_epoch(epoch)
model.train()
train(
train_loader,
model,
criterion,
optimizer,
epoch,
device,
args.print_freq,
rank,
)
print("Rank %d: Finished Training" % (rank))
if not distributed or rank == 0:
# log model
mlflow.pytorch.save_model(model, f"{args.model_dir}/model")
def parse_args():
# setup argparse
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument(
"--data-dir", type=str, help="directory containing CIFAR-10 dataset"
)
parser.add_argument(
"--model-dir", type=str, default="./", help="output directory for model"
)
parser.add_argument("--epochs", default=10, type=int, help="number of epochs")
parser.add_argument(
"--batch-size",
default=16,
type=int,
help="mini batch size for each gpu/process",
)
parser.add_argument(
"--workers",
default=2,
type=int,
help="number of data loading workers for each gpu/process",
)
parser.add_argument(
"--learning-rate", default=0.001, type=float, help="learning rate"
)
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument(
"--print-freq",
default=200,
type=int,
help="frequency of printing training statistics",
)
# parse args
args = parser.parse_args()
# return args
return args
# run script
if __name__ == "__main__":
# parse args
args = parse_args()
# call main function
main(args)
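    # A hypothetical launch command (assuming torchrun is available); torchrun
    # sets the WORLD_SIZE, RANK, and LOCAL_RANK environment variables that
    # main() reads:
    #   torchrun --nproc_per_node=2 train.py --data-dir ./data --epochs 10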
```
#### File: automl-with-azureml/classification-text-dnn/helper.py
```python
import pandas as pd
from azureml.core import Environment
from azureml.train.estimator import Estimator
from azureml.core.run import Run
def run_inference(
test_experiment,
compute_target,
script_folder,
train_run,
test_dataset,
target_column_name,
model_name,
):
inference_env = train_run.get_environment()
est = Estimator(
source_directory=script_folder,
entry_script="infer.py",
script_params={
"--target_column_name": target_column_name,
"--model_name": model_name,
},
inputs=[test_dataset.as_named_input("test_data")],
compute_target=compute_target,
environment_definition=inference_env,
)
run = test_experiment.submit(
est,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
def get_result_df(remote_run):
children = list(remote_run.get_children(recursive=True))
summary_df = pd.DataFrame(
index=["run_id", "run_algorithm", "primary_metric", "Score"]
)
goal_minimize = False
for run in children:
if "run_algorithm" in run.properties and "score" in run.properties:
summary_df[run.id] = [
run.id,
run.properties["run_algorithm"],
run.properties["primary_metric"],
float(run.properties["score"]),
]
if "goal" in run.properties:
goal_minimize = run.properties["goal"].split("_")[-1] == "min"
summary_df = summary_df.T.sort_values(
"Score", ascending=goal_minimize
).drop_duplicates(["run_algorithm"])
summary_df = summary_df.set_index("run_algorithm")
return summary_df
```
#### File: automl-with-azureml/forecasting-bike-share/run_forecast.py
```python
from azureml.core import ScriptRunConfig
def run_rolling_forecast(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
inference_env = train_run.get_environment()
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
```
#### File: mnist-mlproject/src/train.py
```python
import sys
import argparse
import mlflow.fastai
import fastai.vision as vis
def main():
# Parse command-line arguments
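    # Expected invocation (both arguments optional): python train.py <epochs> <lr>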
    epochs = int(sys.argv[1]) if len(sys.argv) > 1 else 5
    lr = float(sys.argv[2]) if len(sys.argv) > 2 else 0.01
# Download and untar the MNIST data set
path = vis.untar_data(vis.URLs.MNIST_TINY)
# Prepare, transform, and normalize the data
data = vis.ImageDataBunch.from_folder(
path, ds_tfms=(vis.rand_pad(2, 28), []), bs=64
)
data.normalize(vis.imagenet_stats)
# Train and fit the Learner model
learn = vis.cnn_learner(data, vis.models.resnet18, metrics=vis.accuracy)
# Enable auto logging
mlflow.fastai.autolog()
# Train and fit with default or supplied command line arguments
learn.fit(epochs, lr)
if __name__ == "__main__":
main()
```
#### File: pets/src/train.py
```python
import mlflow.fastai
from fastai.vision.all import *
# enable auto logging
# mlflow.fastai.autolog() # broken
path = untar_data(URLs.PETS)
path.ls()
files = get_image_files(path / "images")
len(files)
# (Path('/home/ashwin/.fastai/data/oxford-iiit-pet/images/yorkshire_terrier_102.jpg'),Path('/home/ashwin/.fastai/data/oxford-iiit-pet/images/great_pyrenees_102.jpg'))
def label_func(f):
return f[0].isupper()
# To get our data ready for a model, we need to put it in a DataLoaders object. Here we have a function that labels using the file names, so we will use ImageDataLoaders.from_name_func. There are other factory methods of ImageDataLoaders that could be more suitable for your problem, so make sure to check them all in vision.data.
dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224))
# We have passed to this function the directory we're working in, the files we grabbed, our label_func and one last piece as item_tfms: this is a Transform applied on all items of our dataset that will resize each image to 224 by 224, by using a random crop on the largest dimension to make it a square, then resizing to 224 by 224. If we didn't pass this, we would get an error later as it would be impossible to batch the items together.
dls.show_batch()
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)
mlflow.fastai.log_model(learn, "model")
``` |
{
"source": "0n1cOn3/YT_View_Bot",
"score": 3
} |
#### File: 0n1cOn3/YT_View_Bot/viewbot.py
```python
import requests as R
import random
class test:
def __init__(self, proxy, url):
self.proxy = proxy
self.url = url
def test(self):
a = R.get(self.proxy)
b = a.content.decode('utf-8')
p = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
'Mozilla/5.0 (X11; CrOS x86_64 11895.118.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.159 Safari/537.36',
'Mozilla/5.0 (Linux; U; Android 2.2) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; Android 6.0; CAM-L21 Build/HUAWEICAM-L21; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/62.0.3202.84 Mobile Safari/537.36',
'Mozilla/5.0 (Linux; U; Android 4.2.2; de-de; Lenovo A7600-F Build/JDQ39) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'
]
for i in b.split(','):
try:
while True:
for o in range(1, 10):
U = random.choice(p)
h = { "User-Agent":U }
print ("--------")
try:
rr = R.get(self.url, headers=h)
if rr.status_code == 200:
print ("Ok")
except R.exceptions.ConnectionError:
print ("Retrying...")
except R.exceptions.Timeout:
print ('')
except R.exceptions.ChunkedEncodingError:
print ('connection error!')
except KeyboardInterrupt:
print ('Quitting..')
q = input('\n Enter video url : ')
q1 = test("https://api.proxyscrape.com/?request=getproxies&proxytype=http&timeout=5000&country=all&ssl=yes&anonymity=elite", q)
q1.test()
``` |
{
"source": "0ne0rZer0/Mon-T-Python",
"score": 3
} |
#### File: 0ne0rZer0/Mon-T-Python/Main.py
```python
import os, time, sys, hashlib
# Python Recreation of MonitorSauraus Rex.
# Originally Developed by <NAME>, <NAME>, <NAME>, <NAME>.
# Aims:
# - Detect Rapid File Changes
# - Cut Wifi Connections
# - Create Logs for running processes at time of trigger, find source infection file.
# - Create "Nest" Safe folder , with encryption and new file types. ".egg" type?
# - Create Notification for a user/admin? Connect to a database?
# - kill running processes in aim to kill attack.
# Getting MD5 Hash of a string:
# print (hashlib.md5("Your String".encode('utf-8')).hexdigest())
origHashList = []
# Getting MD5 Hash of a file:
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
# Shows Correct Hash Changes Upon File Alteration.
def getOrigMd5():
fileMd5 = md5("/home/barlowl3/test/test.txt")
origHashList.append(fileMd5)
time.sleep(3) # For Testing
fileMd5 = md5("/home/barlowl3/test/test.txt")
origHashList.append(fileMd5)
updateOrigHashText(origHashList)
# Prints The Collected Hashes.
def updateOrigHashText(origList):
ohl = open("/home/barlowl3/test/test.txt", "a")
for hash in origList:
ohl.write(hash)
ohl.write('\n')
    ohl.close()  # close() must be called, not just referenced
# Main Method
def main():
getOrigMd5()
main()
#Use checksumdir python package available for calculating checksum/hash of directory. It's available at https://pypi.python.org/pypi/checksumdir/1.0.5
#Usage :
#import checksumdir
#hash = checksumdir.dirhash("c:\\temp")
#print hash
``` |
{
"source": "0neblock/esp-idf",
"score": 2
} |
#### File: tools/tiny-test-fw/example.py
```python
import re
import os
import sys
try:
import TinyFW
except ImportError:
# if we want to run test case outside `tiny-test-fw` folder,
# we need to insert tiny-test-fw path into sys path
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import TinyFW
import IDF
from IDF.IDFDUT import ESP32DUT
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_https_request(env, extra_data):
"""
steps: |
1. join AP
2. connect to www.howsmyssl.com:443
3. send http request
"""
dut1 = env.get_dut("https_request", "examples/protocols/https_request", dut_class=ESP32DUT)
dut1.start_app()
dut1.expect(re.compile(r"Connecting to www.howsmyssl.com:443"), timeout=30)
dut1.expect("Performing the SSL/TLS handshake")
dut1.expect("Certificate verified.", timeout=15)
dut1.expect_all(re.compile(r"Cipher suite is TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256"),
"Reading HTTP response",
timeout=20)
dut1.expect(re.compile(r"Completed (\d) requests"))
if __name__ == '__main__':
TinyFW.set_default_config(env_config_file="EnvConfigTemplate.yml", dut=IDF.IDFDUT)
test_examples_protocol_https_request()
``` |
{
"source": "0neDividedbyZer0/QtiePie",
"score": 3
} |
#### File: 0neDividedbyZer0/QtiePie/brownian.py
```python
from math import exp, sin
import random as r
import matplotlib.pyplot as plt
class Brownian:
def __init__(self, delta_t=1, mu = 0, sigma = 1, n=1000):
self.delta_t = delta_t
self.mu = mu
self.sigma = sigma
self.n = n
self.range = [0]
for i in range(n):
            # gauss() takes a standard deviation, so the N(mu*dt, sigma^2*dt)
            # increment needs std = sigma * sqrt(delta_t), not sigma^2 * delta_t
            delta_B = r.gauss(mu * delta_t, sigma * (self.delta_t ** 0.5))
            self.range.append(self.range[i] + delta_B)
class GeoBrownian:
def __init__(self, delta_t = 1, mu = 0, sigma = 1, n = 1000, S_0=1):
self.delta_t = delta_t
self.mu = mu
self.sigma = sigma
self.n = n
self.S_0 = S_0
B = Brownian(delta_t=self.delta_t, mu=self.mu - (self.sigma**2) / 2, sigma=self.sigma, n=self.n)
self.range = [self.S_0 * exp(b) for b in B.range]
# I don't know right now, it's weird
class SineBrownian:
def __init__(self, delta_t = 1, mu = 0, sigma = 1, n = 1000, S_0 = 1):
self.delta_t = delta_t
self.mu = mu
self.sigma = sigma
self.n = n
self.S_0 = S_0
self.range = []
B = Brownian(delta_t=self.delta_t, mu=self.mu, sigma=self.sigma, n=self.n)
for i in range(len(B.range)):
self.range.append(self.S_0 * exp(sigma**2 / 2 * delta_t * i) * sin(B.range[i]))
B = SineBrownian(delta_t=1, mu=1, sigma=1, n=1000)
plt.plot(B.range)
plt.show()
``` |
{
"source": "0neDividedbyZer0/UCB-MIT-Robotics",
"score": 3
} |
#### File: 0neDividedbyZer0/UCB-MIT-Robotics/controls.py
```python
__author__ = '<NAME> (<EMAIL>)'
__author__ = '<NAME> (<EMAIL>)'
__author__ = '<NAME> (<EMAIL>)'
import numpy
import scipy
import scipy.linalg
import scipy.signal
def c2d(A, B, dt, Q = None, R = None):
"""Converts from continuous time state space representation to discrete time.
    Returns (A, B), or (A, B, Q_d, R_d) when noise covariances Q and R are given; C and D are unchanged.
This code is copied from: scipy.signal.cont2discrete method zoh
"""
_validate_system(A,B,None,None,Q,R)
if Q is not None and R is not None:
Q = numpy.asmatrix(Q)
R = numpy.asmatrix(R)
n = numpy.asmatrix(A).shape[0]
F = numpy.zeros((2 * n,2 * n))
F[:n,:n] = -A
F[n:2*n,n:2*n]= A.T
F[:n, n:2*n]=Q
H=scipy.linalg.expm(F*dt)
Q_d = numpy.asmatrix(H[n:2*n,n:2*n].T*H[:n, n:2*n])
R_d = numpy.asmatrix(R/dt)
a, b = numpy.array(A), numpy.array(B)
# Build an exponential matrix
em_upper = numpy.hstack((a, b))
# Need to stack zeros under the a and b matrices
em_lower = numpy.hstack((numpy.zeros((b.shape[1], a.shape[0])),
numpy.zeros((b.shape[1], b.shape[1]))))
em = numpy.vstack((em_upper, em_lower))
ms = scipy.linalg.expm(dt * em)
# Dispose of the lower rows
ms = ms[:a.shape[0], :]
ad = ms[:, 0:a.shape[1]]
bd = ms[:, a.shape[1]:]
if Q is not None and R is not None:
return numpy.matrix(ad), numpy.matrix(bd), 0.5*(Q_d+Q_d.T), 0.5*(R_d+R_d.T)
return numpy.matrix(ad), numpy.matrix(bd)
def controllability(A, B):
"""
Calculate the controllability matrix of the system defined by A and B.
Works on both discrete-time and continuous-time systems.
In a fully controllable system, rank(controllability(A, B)) == n
Args:
A: n*n system dynamics matrix
B: n*m control signal matrix
Returns:
E: n*nm controllability matrix
"""
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
_validate_system(A, B, None, None, None, None)
n = A.shape[0]
m = B.shape[1]
E = numpy.asmatrix(numpy.zeros((n, n*m)))
x = B
for i in range(0, n):
j = i * m
E[:n, j:j+m] = x
x = A * x
return E
def observability(A, C):
"""
Calculate the observability matrix of the system defined by A and C.
Works on both discrete-time and continuous-time systems.
In a fully observable system, rank(controllability(A, C)) == n
Observability is the dual of controllability, meaning that
observability(A, C) = controllability(A.T, C.T).T
Args:
A: n*n system dynamics matrix
C: n*q measurement signal matrix
Returns:
O: nq*n observability matrix
"""
A = numpy.asmatrix(A)
C = numpy.asmatrix(C)
_validate_system(A, None, C, None, None, None)
n = A.shape[0]
q = C.shape[0]
O = numpy.asmatrix(numpy.zeros((n*q, n)))
y = C
for i in range(0, n):
j = i * q
O[j:j+q, :n] = y
y = y * A
return O
def _validate_system(A, B, C, D, Q, R):
if A is not None:
A = numpy.asmatrix(A)
if B is not None:
B = numpy.asmatrix(B)
if C is not None:
C = numpy.asmatrix(C)
if D is not None:
D = numpy.asmatrix(D)
if Q is not None:
Q = numpy.asmatrix(Q)
if R is not None:
R = numpy.asmatrix(R)
if A is None:
raise ValueError("A must not be None")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if B is not None and B.shape[0] != A.shape[0]:
raise ValueError("B must be compatible with A")
if C is not None and C.shape[1] != A.shape[0]:
raise ValueError("C must be compatible with A")
if B is not None and C is not None and D is not None:
if D.shape[0] != C.shape[0]:
raise ValueError("D must be compatible with C")
if D.shape[1] != B.shape[1]:
raise ValueError("D must be compatible with B")
if Q is not None:
if Q.shape[0] != Q.shape[1]:
raise ValueError("Q must be square")
if Q.shape[0] != A.shape[0]:
raise ValueError("Q must be compatible with A")
if R is not None:
if R.shape[0] != R.shape[1]:
raise ValueError("R must be square!")
if B is not None:
if R.shape[0] != B.shape[1]:
raise ValueError("R must be compatible with B if B is defined")
elif C is not None:
if R.shape[0] != C.shape[0]:
raise ValueError("R must be compatible with C if C is defined")
else:
raise ValueError("R must not be defined if neither B or C is defined")
def place(A, B, poles):
"""
Find the m*n matrix K such that the poles (eigenvalues) of A-BK are at the
desired locations. Works on both discrete-time and continuous-time systems.
Note: If you are using continuous-time matrices, poles should be negative
to acheive stability while with discrete-time matrices they should just be
less than 1
Args:
A: n*n system dynamics matrix
B: n*m control signal matrix
poles: complex array of desired pole locations
For every complex pole a+bi, its conjugate a-bi must also be a pole
Returns:
K: m*n gains matrix such that u = -Kx
"""
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
_validate_system(A, B, None, None, None, None)
if len(poles) != A.shape[0]:
raise ValueError("Must be the same number of poles and states")
if numpy.linalg.matrix_rank(controllability(A, B)) != A.shape[0]:
raise ValueError("System must be completely controllable to perform pole placement")
result = scipy.signal.place_poles(A, B, poles)
for req, res in zip(result.requested_poles, result.computed_poles):
if abs(req - res) > 1e-3:
print("Warning: Pole %s could not be assigned as given and was instead assigned as %s" % (req, res))
return result.gain_matrix
def daugment(A,B,C):
"""
Augment the discrete matrices A, B, C for integral gain.
Args:
A: n*n system dynamics matrix
B: n*m control signal matrix
C: k*n output matrix
Returns:
A_a: n+1*n+1 augmented systems dynamics matrix
B_a: n+1*m augmented control signal matrix
"""
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
C = numpy.asmatrix(C)
_validate_system(A, B, C, None, None, None)
zero = numpy.zeros((A.shape[0], C.shape[0]))
identity = numpy.identity(C.shape[0])
upper = numpy.concatenate((numpy.asarray(A),numpy.asarray(zero)),axis=1)
lower = numpy.concatenate((numpy.asarray(C),numpy.asarray(identity)),axis=1)
zero2 = numpy.asarray(numpy.zeros((1,B.shape[1])))
A_a = numpy.asmatrix(numpy.concatenate((upper,lower)))
B_a = numpy.asmatrix(numpy.concatenate((numpy.asmatrix(B),zero2)))
return (A_a, B_a)
def caugment(A,B,C):
"""
Augment the continuous matrices A, B, C for integral gain.
Args:
A: n*n system dynamics matrix
B: n*m control signal matrix
C: k*n output matrix
Returns:
A_a: n+1*n+1 augmented systems dynamics matrix
B_a: n+1*m augmented control signal matrix
C_a: k*n+1 augmented output matrix
"""
A = numpy.asmatrix(A)
B = numpy.asmatrix(B)
C = numpy.asmatrix(C)
_validate_system(A, B, C, None, None, None)
zero = numpy.zeros((A.shape[0], C.shape[0]))
zero2 = numpy.zeros((C.shape[0], C.shape[0]))
upper = numpy.concatenate((numpy.asarray(A),numpy.asarray(zero)),axis=1)
lower = numpy.concatenate((numpy.asarray(C),numpy.asarray(zero2)),axis=1)
zero3 = numpy.asarray(numpy.zeros((1,B.shape[1])))
zero4 = numpy.asarray(numpy.zeros((C.shape[0],1)))
A_a = numpy.asmatrix(numpy.concatenate((upper,lower)))
B_a = numpy.asmatrix(numpy.concatenate((numpy.asmatrix(B),zero3)))
C_a = numpy.asmatrix(numpy.concatenate((numpy.asmatrix(C),zero4),axis=1))
return A_a, B_a, C_a
def dlqr(A,B,Q,R):
"""
Note: one can use lqr to find the poles of a system with respect to the noise
from Q and R and then tune around those poles. Simply use eig(A-BK)
to find the location of the poles
Calculate the discrete-time steady-state LQR gain matrix.
Minimize sum{0, inf}(x'Qx + u'Ru) for the system x(n+1) = Ax(n) + Bu(n).
Args:
A: n*n discrete-time system dynamics matrix
B: n*m discrete-time control signal matrix
Q: n*n quadratic state error weighting factor
R: m*m quadratic control signal weighting factor
Returns:
K: m*n gains matrix such that u = -Kx
"""
"""_validate_system(A,B, None, None, Q, R)"""
assert numpy.linalg.matrix_rank(controllability(A, B)) == A.shape[0], "System must be completely controllable to do LQR."
Q_eig = numpy.linalg.eigvalsh(Q)
assert numpy.all(Q_eig > -1E-8), "Q must be positive semi-definite"
R_eig = numpy.linalg.eigvalsh(R)
assert numpy.all(R_eig > 0), "R must be positive definite"
P = numpy.asmatrix(scipy.linalg.solve_discrete_are(A,B,Q,R))
return numpy.linalg.inv(R + B.T * P * B) * B.T * P * A
def clqr(A,B,Q,R):
"""
Note: one can use lqr to find the poles of a system with respect to the noise
from Q and R and then tune around those poles. Simply use eig(A-BK)
to find the location of the poles
    Calculate the continuous-time steady-state LQR gain matrix.
    Minimize integral{0, inf}(x'Qx + u'Ru) for the system dx/dt = Ax + Bu.
    Args:
        A: n*n continuous-time system dynamics matrix
        B: n*m continuous-time control signal matrix
Q: n*n quadratic state error weighting factor
R: m*m quadratic control signal weighting factor
Returns:
K: m*n gains matrix such that u = -Kx
"""
_validate_system(A,B, None, None, Q, R)
assert numpy.linalg.matrix_rank(controllability(A, B)) == A.shape[0], "System must be completely controllable to do LQR."
Q_eig = numpy.linalg.eigvalsh(Q)
assert numpy.all(Q_eig > -1E-8), "Q must be positive semi-definite"
R_eig = numpy.linalg.eigvalsh(R)
assert numpy.all(R_eig > 0), "R must be positive definite"
P = numpy.asmatrix(scipy.linalg.solve_continuous_are(A,B,Q,R))
return numpy.asmatrix(numpy.linalg.inv(R) * B.T * P)
def dkalman(A,C,Q,R):
"""
Note: one can use lqr to find the poles of a system with respect to the noise
from Q and R and then tune around those poles. Simply use eig(A-BK)
to find the location of the poles
    Calculate the discrete-time steady-state Kalman gain matrix for the system
    x(n+1) = Ax(n) + w, y(n) = Cx(n) + v, where w and v are zero-mean noise
    terms with covariances Q and R respectively.
    Args:
        A: n*n discrete-time system dynamics matrix
        C: q*n discrete-time measurement matrix
        Q: n*n process noise covariance
        R: q*q measurement noise covariance
    Returns:
        K: n*q steady-state Kalman gain matrix
"""
_validate_system(A,None, C, None, Q, R)
    assert numpy.linalg.matrix_rank(observability(A, C)) == A.shape[0], "System must be completely observable to design a Kalman filter."
Q_eig = numpy.linalg.eigvalsh(Q)
#assert numpy.all(Q_eig > -1E-8), "Q must be positive semi-definite"
R_eig = numpy.linalg.eigvalsh(R)
assert numpy.all(R_eig > 0), "R must be positive definite"
#1678 implementation
P = numpy.asmatrix(scipy.linalg.solve_discrete_are(A.T,C.T,Q,R))
return P*C.T*numpy.asmatrix(numpy.linalg.inv(R))
#P_prior = numpy.asmatrix(scipy.linalg.solve_discrete_are(A.T,C.T,Q,R))
#S = C * P_prior * C.T + R
#return P_prior * C.T * scipy.linalg.inv(S)
def ckalman(A,C,Q,R):
"""
Note: one can use lqr to find the poles of a system with respect to the noise
from Q and R and then tune around those poles. Simply use eig(A-BK)
to find the location of the poles
    Calculate the continuous-time steady-state Kalman gain matrix for the
    system dx/dt = Ax + w, y = Cx + v, where w and v are zero-mean noise
    terms with covariances Q and R respectively.
    Args:
        A: n*n continuous-time system dynamics matrix
        C: q*n continuous-time measurement matrix
        Q: n*n process noise covariance
        R: q*q measurement noise covariance
    Returns:
        K: n*q steady-state Kalman gain matrix
"""
_validate_system(A,None, C, None, Q, R)
    assert numpy.linalg.matrix_rank(observability(A, C)) == A.shape[0], "System must be completely observable to design a Kalman filter."
Q_eig = numpy.linalg.eigvalsh(Q)
assert numpy.all(Q_eig > -1E-8), "Q must be positive semi-definite"
R_eig = numpy.linalg.eigvalsh(R)
assert numpy.all(R_eig > 0), "R must be positive definite"
P = numpy.asmatrix(scipy.linalg.solve_continuous_are(A.T,C.T,Q,R))
return P*C.T*numpy.asmatrix(numpy.linalg.inv(R))
def eig(A):
return numpy.linalg.eig(A)[0]
def feedforwards(A, B, Q=None):
B = numpy.asmatrix(B)
if Q is not None:
Q = numpy.asmatrix(Q)
_validate_system(A,B,None,None,Q,None)
if Q is not None:
return numpy.linalg.inv(B.T*Q*B)*B.T*Q
return numpy.linalg.pinv(B)
#for tup in getData(r"C:\Users\Maverick1\eclipse-workspace\Libraries\test.csv"):
# print(tup)
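# A small self-check sketch appended here for illustration (the double-integrator
# plant and the weights are assumptions, not part of the original file):
if __name__ == "__main__":
    dt = 0.01
    A = numpy.matrix([[0.0, 1.0], [0.0, 0.0]])
    B = numpy.matrix([[0.0], [1.0]])
    A_d, B_d = c2d(A, B, dt)
    K = dlqr(A_d, B_d, numpy.matrix(numpy.eye(2)), numpy.matrix([[1.0]]))
    # stable discrete-time poles should lie inside the unit circle
    print("closed-loop poles:", eig(A_d - B_d * K))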
```
#### File: 0neDividedbyZer0/UCB-MIT-Robotics/pid_controller.py
```python
class PIDController():
def __init__(self, kp, ki, kd, dt, setpoint):
self.kp = kp
self.ki = ki
self.kd = kd
self.dt = dt
self.integral = 0
self.error = 0
self.prev_error = 0
self.setpoint = setpoint
def reset(self):
self.integral = 0
self.error = 0
self.prev_error = 0
def setSetpoint(self, setpoint):
self.setpoint = setpoint
def Update(self, curr):
self.prev_error = self.error
self.error = self.setpoint - curr
self.integral += self.error * self.dt
return self.kp * self.error + self.ki * self.integral + self.kd * (self.error - self.prev_error) / self.dt
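
# A minimal usage sketch (the integrating plant below is an assumption for
# demonstration only, not part of the original controller):
if __name__ == "__main__":
    pid = PIDController(kp=2.0, ki=0.5, kd=0.1, dt=0.02, setpoint=1.0)
    position = 0.0
    for _ in range(100):
        u = pid.Update(position)
        position += 0.02 * u  # pretend the plant integrates the control signal
    print("final position:", position)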
```
#### File: UCB-MIT-Robotics/test subsystems/motor_model.py
```python
"""
This class represents a model of a DC electric motor.
Explanation of Physics/Math:
A DC motor can be understood as an open circuit with
a resistor due to the resistance in the copper windings
and a voltage source generated from the spinning armature
(a back emf is created due to a spinning magnetic field).
Thus, the equation for the circuit of a DC motor can be
modelled as
u = V + IR
where u is the voltage you have sent to the motor leads,
V is the back emf, and IR is the voltage from the resistor
(copper windings). In motor usage, you change the value of
u and that create different effects in the spinning of the
motor. To model a DC motor properly, you need to specify
a few more variables which most electric motor manufacturers
will publish in a motor curve. The explanation for the model
will be below. (This model is not perfect, little nonlinear
effects will show up because the resistor and generator are
not created directly after each other, but this equation
will work extremely well).
"""
class motor_model:
"""
Please put all variables in metric. Efficiency is given as
a percentage. We can take the original equation
u = V + IR
and modify it to be in terms of torques and angular velocity:
mu * u = k_v * omega + k_t * tau * R
where mu is efficiency, k_v is known as the voltage constant,
k_t is known as the torque constant, and tau is torque.
Further modification of the equation to include gear_ratios
and different numbers of motors, the equation becomes
mu * u = k_v * G * omega + (k_t * tau * R) / (n * G)
where G is the gear ratio and n is the number of motors.
(We use the convention where a gear ratio intended to
increase output torque is considered > 1)
"""
def __init__(self, stall_torque, stall_current, free_speed,
max_voltage, free_current = 0.0, efficiency = 1.0,
num_motors = 1, gear_ratio = 1.0):
self.tau_stall = stall_torque
self.I_stall = stall_current
self.omega_free = free_speed
self.V_max = max_voltage
self.I_free = free_current
self.mu = efficiency
self.n = num_motors
self.G = gear_ratio
#resistance
self.R = self.V_max / self.I_stall
#voltage constant
self.k_v = (self.V_max - self.I_free * self.R) / self.omega_free
#torque constant
        self.k_t = self.I_stall / self.tau_stall  # the attribute is tau_stall, not stall_torque
def torque(self, omega, u):
u = min(max(u, -self.V_max), self.V_max)
        return (self.n * self.G * self.mu * u) / (self.k_t * self.R) - (self.n * self.G * self.G * self.k_v * omega) / (self.k_t * self.R)
def omega(self, tau, u):
u = min(max(u, -self.V_max), self.V_max)
return (self.mu * u) / (self.k_v * self.G) - (self.k_t * tau * self.R) / (self.n * self.G * self.G * self.k_v)
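
# A hypothetical usage sketch; the constants below are roughly CIM-motor-like
# and chosen purely for illustration:
if __name__ == "__main__":
    motor = motor_model(stall_torque=2.41, stall_current=131.0,
                        free_speed=558.0, max_voltage=12.0,
                        free_current=2.7, num_motors=2, gear_ratio=10.0)
    print("output-shaft free speed at 12 V:", motor.omega(0.0, 12.0), "rad/s")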
``` |
{
"source": "0Nera/GoogleSearch",
"score": 2
} |
#### File: GoogleSearch/gs_modules/checks.py
```python
from colorama import init
from colorama import Fore, Back, Style
from gs_modules.url import get_url
import os
init(autoreset=True)
def integrity_check():
try:
file = open('url.txt', 'r')
file.close()
except:
        print(Fore.RED + '!!!ERROR!!!\nWarning: url.txt is missing. Trying to create it...')
try:
file = open('url.txt', 'w')
file.close()
            print(Fore.GREEN + 'File created successfully! Add the links you need to url.txt and simply restart the program')
except:
            print(Fore.RED + 'Warning: failed to create url.txt!\nYou may be missing the required permissions')
return False
try:
file = open('search_files/test', 'w')
file.close()
os.remove('search_files/test')
except:
        print(Fore.RED + '!!!ERROR!!!\nWarning: the search_files directory is missing. Trying to create it...')
try:
os.mkdir('search_files')
            print(Fore.GREEN + 'Directory created successfully! Simply restart the program')
except:
            print(Fore.RED + 'Warning: failed to create the directory\nYou may be missing the required permissions')
return False
try:
get_url()
except:
        print(Fore.RED + 'Warning: error while reading url.txt\nYou may be missing permissions, or the file is empty')
return False
return True
def check_to_empty_folder(name_folder):
files = os.listdir(name_folder)
if not files:
return True
else:
return False
```
#### File: GoogleSearch/gs_modules/docmunets.py
```python
from gs_modules.google import *
import requests
from colorama import init
from colorama import Fore, Back, Style
init(autoreset=True)
google = google_module()
class document_module():
def search_documents_site(self, site, extensions):
return_arr = []
for i in range(len(extensions)):
arr = google.search(f'site:{site} filetype:{extensions[i]}')
for g in range(len(arr)):
return_arr.append(arr[g])
print(Fore.LIGHTYELLOW_EX + f'[#] search {extensions[i]} done...')
return return_arr
def get_files(self, linksList):
for i in range(len(linksList)):
link = linksList[i]
name_file = link[link.rfind('/') + 1:]
try:
file_download = requests.get(link)
if file_download.status_code == 200:
print(Fore.LIGHTYELLOW_EX + f'[#] get {name_file} file...')
file = open(f'search_files/{name_file}', 'wb')
file.write(file_download.content)
file.close()
except:
                print(Fore.RED + f'[-] error while downloading file {name_file}')
# Copyright (c) 2021 videxerion
``` |
{
"source": "0ni0nrings/learning_lambda",
"score": 3
} |
#### File: 0ni0nrings/learning_lambda/aurorasls_integration.py
```python
import boto3
rds_client = boto3.client('rds-data')
database_name = 'information_schema'
db_cluster_arn = '<AURORA-SERVERLESS-DB-ARN>'
db_credentials_secret = '<SECRETS-MANAGER-SECRET-ARN>'
def lambda_handler(event,context):
def execute_statement(sql):
response = rds_client.execute_statement(
database = database_name,
resourceArn = db_cluster_arn,
secretArn = db_credentials_secret,
sql = sql
)
return response
response = execute_statement(f'create database if not exists moosa')
    #print(response['records']) Un-comment this if you're running a select query and expect rows
print(response)
```
#### File: 0ni0nrings/learning_lambda/connect_mysql.py
```python
import pymysql.cursors
def lambda_handler(event, context):
# Connect to the database
connection = pymysql.connect(host='<IP-OR-DB-CLUSTER-URL>',
user='<username>',
password='<<PASSWORD>>',
db='information_schema',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
# Read a single record
sql = "select * from information_schema.tables LIMIT 1"
cursor.execute(sql)
result = cursor.fetchall()
print(result)
finally:
connection.close()
```
#### File: 0ni0nrings/learning_lambda/s3bucket_trigger.py
```python
import boto3
def lambda_handler(event, context):
client = boto3.client('sns')
try:
size = event['Records'][0]['s3']['object']['size']
except KeyError:
pass
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key']
if event['Records'][0]['eventName'] == "ObjectRemoved:Delete":
payload_str = "An object with name: " + key + " was deleted from bucket: " + bucket
else:
payload_str = "An object with name " + key + " and size "+ str(size) + "B was uploaded to bucket: " + bucket
response = client.publish(
TopicArn='<SNS-TOPIC-ARN>',
Message= payload_str,
Subject='My Lambda S3 event')
``` |
{
"source": "0Nikola0/Casted-Away",
"score": 3
} |
#### File: Casted-Away/src/scene.py
```python
import pygame
from random import randint
from pymunk.vec2d import Vec2d
from src.game_objects.actors import Actor
from src.game_objects.background import Background
from src.game_objects.floor import TestFloor
from src.game_objects.gui import GUI
from src.game_objects.level_borders import LevelBorders
from src.game_objects.selection_box import SelectionBox
from src.scenes.game_over import GameOver
from src.game_objects.gui import console_print_event
from src.game_objects.obstacles import Obstacle
from src.game_objects.tasks import Task
from src.game_objects.pymunk_bodies import ObstacleBody
import src.settings as s
from src.events import COMMAND, SWITCH_SCENE
from src.map import TiledMap
from src.state import State
FEED = pygame.event.Event(COMMAND, {'value': 'feed'})
MENU = pygame.event.Event(SWITCH_SCENE, {'scene': 'menu'})
GAME = pygame.event.Event(SWITCH_SCENE, {'scene': 'game'})
TEST = pygame.event.Event(SWITCH_SCENE, {'scene': 'test'})
# Layers
SELECTION_L = 5
class Scene:
"""Handle creating, managing, and cleaning up sprites."""
def __init__(self, main_loop):
self.main_loop = main_loop
self.GUI = None
self.all = pygame.sprite.LayeredUpdates()
self.selected_actor = pygame.sprite.GroupSingle()
self.selection_box = SelectionBox((100,100,255))
self.all.add(self.selection_box, layer=SELECTION_L)
self.shortcuts = s.SHORTCUTS
self.tasks = None
self.obstacles = None
self.main_loop.mouse_handlers.append(self.handle_mouse_event)
for key in self.shortcuts.values():
self.main_loop.add_up_down_key_handlers(self, key)
def load(self):
pass
def feed_selected_actor(self):
if self.selected_actor.sprite is not None:
if s.FOOD_SUPPLY > 30:
self.selected_actor.sprite.eat(30)
s.FOOD_SUPPLY -= 30
else:
console_print_event("Need more food!")
def water_selected_actor(self):
if self.selected_actor.sprite is not None:
if s.WATER_SUPPLY > 30:
self.selected_actor.sprite.drink(30)
s.WATER_SUPPLY -= 30
else:
console_print_event("Need more water!")
def do_task_selected_actor(self):
actor = self.selected_actor.sprite
task_found = False
if actor is not None:
for task in self.tasks:
if actor.rect.colliderect(task.rect):
task_found = True
if actor.energy < 50:
console_print_event(actor.name + " is too tired to do that!")
else:
task_text = task.do_task()
actor.energy -= 50
if task_text:
console_print_event(actor.name + task_text)
if not task_found:
console_print_event(actor.name + " can not do anything here.")
def handle_key_down(self, key):
pass
def handle_key_up(self, key):
if key == self.shortcuts["FEED_SELECTED_ACTOR"]:
self.feed_selected_actor()
def handle_mouse_event(self, ev, pos):
if ev == pygame.MOUSEMOTION:
self.handle_mouse_move(pos)
elif ev == pygame.MOUSEBUTTONDOWN:
self.handle_mouse_down(pos)
        # TODO: this might not be the best way to do it
if self.selected_actor.sprite is not None:
if pos[0] < s.PLAY_AREA[0] and pos[1] < s.PLAY_AREA[1]:
self.selected_actor.sprite.move_by_mouse(pos)
elif ev == pygame.MOUSEBUTTONUP:
self.handle_mouse_up(pos)
def handle_mouse_move(self, pos):
for task in self.tasks:
if task.rect.collidepoint(pos):
task.show_border()
else:
task.hide_border()
def handle_mouse_down(self, pos):
pos = s.flip_y(pos)
for shape in self.main_loop.space.shapes:
dist, info = shape.point_query(pos)
if dist < 0:
clicked_object = self.main_loop.take_object_by_id(shape.collision_type)
if isinstance(clicked_object, Actor):
actor = clicked_object
# Select
if len(self.selected_actor) == 0:
print(f"Actor with ID[{actor.id}] was selected")
actor.switch_selection()
self.selected_actor.add(actor)
self.GUI.select_actor(actor)
self.selection_box.bind_to(actor)
# Unselect
elif self.selected_actor.sprite == actor:
print(f"Actor with ID[{actor.id}] was unselected")
self.selected_actor.sprite.switch_selection() # unselect actor
self.selected_actor.remove(self.selected_actor.sprite)
self.GUI.select_actor(None)
self.selection_box.reset()
# Unselect and Select
else:
self.selected_actor.sprite.switch_selection()
self.selected_actor.remove(self.selected_actor.sprite)
self.selected_actor.add(actor)
self.GUI.select_actor(actor)
self.selection_box.bind_to(actor)
def handle_mouse_up(self, pos):
pass
class MenuScene(Scene):
"""The main Game Scene."""
def __init__(self, *args):
super().__init__(*args)
self.GUI = GUI()
self.main_loop.add_event_handler(self.GUI)
self.GUI.create_command_button(
"Game Scene", lambda: pygame.event.post(GAME))
self.GUI.create_command_button(
"Test Scene", lambda: pygame.event.post(TEST))
self.GUI.create_command_button(
"Quit Program", lambda: pygame.event.post(pygame.event.Event(pygame.QUIT)))
self.all.add(Background(s.SCREEN_SIZE, s.GRAY), layer=0)
self.all.add(self.GUI, layer=6)
self.main_loop.drawing_layers[1].add(self.all)
class GameScene(Scene):
"""The main Game Scene."""
def __init__(self, *args):
super().__init__(*args)
self.state = State()
self.GUI = GUI()
self.GUI.create_command_button(
"Eat Food", lambda: self.feed_selected_actor())
self.GUI.create_command_button(
"Drink Water", lambda: self.water_selected_actor())
self.GUI.create_command_button(
"Gather Resources", lambda: self.do_task_selected_actor())
# self.GUI.create_command_button(
# "Rest", lambda: self.do_task_selected_actor())
# self.GUI.create_command_button(
# "Quit", lambda: pygame.event.post(pygame.event.Event(pygame.QUIT)))
self.main_loop.add_event_handler(self.GUI)
self.main_loop.add_event_handler(self)
self.main_loop.add_update_hook(self)
self.map = TiledMap(s.MAP)
# Music
pygame.mixer.music.load(s.SOUNDTRACKS[randint(0, 2)])
pygame.mixer.music.set_volume(0.3)
pygame.mixer.music.play(loops=-1)
# Actors
self.a_names = [
["Ernest", "Clyde", "Marvin", "Leroy", "Melvin", "Herbert"], # Man
["Agnes", "Elsie", "Kathryn", "Sylvia", "Helen", "Leona"], # Woman
["Ralph", "Earl", "Elmer", "Harold", "George", "Thomas"], # Boy
["Mary", "Evelyn", "Irene", "Betty", "Ruby", "Ethel"], # Girl
["Makin", "kelham", "Francis", "Hartshorn", "Owen", "Gladdle"] # Family name
]
family_name = self.a_names[4][randint(0, 5)]
self.actors = pygame.sprite.Group()
self.actors.add(self.create_actor((300, 350), s.MAN_SPRITE_SHEETS,
name=f"{self.a_names[0][randint(0, 5)]} {family_name}", speed="Adult"))
self.actors.add(self.create_actor((330, 350), s.WOMAN_SPRITE_SHEETS,
name=f"{self.a_names[1][randint(0, 5)]} {family_name}", speed="Adult"))
self.actors.add(self.create_actor((300, 400), s.BOY_SPRITE_SHEETS,
name=f"{self.a_names[2][randint(0, 5)]} {family_name}", speed="Kid"))
self.actors.add(self.create_actor((330, 400), s.GIRL_SPRITE_SHEETS,
name=f"{self.a_names[3][randint(0, 5)]} {family_name}", speed="Kid"))
# Tasks
self.tasks = [
Task(task_id=0, increasement=40.0, pos=(73, 135), size=(140, 140)), # Harvest
Task(1, 40.0, (535, 240), (110, 20)), # Get water (1)
Task(1, 40.0, (625, 475), (33, 45)), # Get water(
Task(1, 40.0, (590, 510), (33, 45)),
Task(1, 40.0, (560, 547), (33, 45)), # 2)
Task(2, 40.0, (40, 325), (100, 55)), # Resting place by the crops
Task(2, 40.0, (420, 497), (125, 80)) # Resting place by the river
]
for task in self.tasks:
self.all.add(task, layer=6)
self.obstacle_bodies = []
# Obstacles
Obstacle.space = self.main_loop.space
self.obstacles = [ # House
((255, 96),(228, 222)),
# Garden fence
((20, 116), (30, 190)),
((20, 106), (235, 20)),
((45, 300), (160, 20)),
((235, 126), (15, 50)),
# Pond next to house
((483, 160), (257, 94)),
# River
((730, 160), (30, 240)),
((705, 380), (30, 55)),
((680, 420), (30, 60)),
((645, 480), (33, 45)),
((610, 515), (33, 45)),
((580, 552), (33, 45)),
# Bottom of map
((0, 565), (590, 20)),
# Right
((0, 0), (20, 570)),
# Left trees
((20, 390), (155, 178)),
# Middle bottom trees
((325, 480), (80, 90)),
# Right trees
((485, 387), (180, 100)),
((610, 352), (65, 40)),
((650, 250), (50, 60))
]
for obstacle in self.obstacles:
self.obstacle_bodies.append(Obstacle(obstacle[0], obstacle[1], self.main_loop.space))
self.all.add(self.state, layer=0) # add state so that it gets updates
self.all.add(self.map, layer=0)
self.all.add(self.actors.sprites(), layer=3)
self.all.add(self.GUI, layer=6)
# TODO This is a hack; remove old layer code
self.main_loop.drawing_layers[0].add(self.all)
def create_actor(self, position, sh, name=None, sound=s.OLD_MAN_SOUNDS, speed="Adult") -> Actor:
actor = Actor(position, sh, sound, self.main_loop.space, name, speed=speed)
return actor
def create_map(self) -> pygame.sprite.Sprite:
"""Create the map and return the floor Sprite"""
topleft = 50, 50
bottomright = 500, 300
f = TestFloor(topleft, bottomright, s.BROWN)
p0 = Vec2d(topleft)
p1 = p0 + Vec2d(bottomright)
self.level_borders_ids.update(
LevelBorders(s.flip_y(p0), s.flip_y(p1),
space=self.main_loop.space,
d=s.LEVEL_BORDERS_THICKNESS).get_ids
)
return f
def handle_event(self, event):
if event.type == COMMAND:
if event.value == 'feed':
if self.selected_actor.sprite:
self.selected_actor.sprite.eat(25)
def update(self, delta_time, *args):
if len(self.actors) == 0:
print("Game Over")
self.main_loop.drawing_layers[0].add(GameOver())
self.main_loop.del_update_hook(self)
class TestScene(GameScene):
"""This scene is used for testing code. Put your hacks and test here."""
def __init__(self, *args):
super().__init__(*args)
self.test = pygame.sprite.LayeredUpdates()
for actor in self.actors:
actor.food = 5 # testing death
actor.health = 5
```
#### File: Casted-Away/src/state.py
```python
import src.settings as s
from src.game_objects.gui import console_print_event
from src.game_objects.empty_sprite import EmptySprite
class State(EmptySprite):
"""Manage the game state"""
def __init__(self):
super().__init__()
self.seconds_per_month = 60
self.month = 0
self.seconds_passed = 0.0
self.frame = 0
self.log_current_month()
def update(self, *args):
self.frame += 1
if self.frame == s.FPS:
self.frame = 0
self.seconds_passed += 1.0
if self.seconds_passed > self.seconds_per_month:
self.month += 1
self.seconds_passed = 0.0
self.log_current_month()
def log_current_month(self):
console_print_event("It is month " + str(self.month))
``` |
{
"source": "0nlysn0w/uranus-invaders",
"score": 3
} |
#### File: uranus-invaders/UranusInvaders/BaseRenderer.py
```python
import sys
class BaseRenderer():
def __init__(self, pyg, screen):
print("base")
self.clock = pyg.time.Clock()
self.pyg = pyg
self.screen = screen
def run(self, fileName, className):
running = True
self.pyg.key.set_repeat(1, 10)
model = __import__(fileName)
classToCall = getattr(model, className)
classCalled = classToCall(self.pyg, self.screen);
while running:
try:
                background_fn = getattr(classCalled, "background", None)
                if callable(background_fn):
                    # clear the screen to black before the scene draws its frame
                    self.screen.fill((0, 0, 0))
m = classCalled.background()
if m != None:
state,value = m.split("=")
if state == "return":
return value
except :
pass
for i in self.pyg.event.get():
                state = classCalled.run(i)
if type(state) == str:
state,value = state.split("=")
if state == "return":
return value
if i.type == self.pyg.QUIT or (hasattr(i, "key") and self.pyg.KEYDOWN == i.type and getattr(i, "key") == 27):
try:
classCalled.quit()
except :
pass
self.pyg.key.set_repeat(0, 10)
running = False
return "quit";
#sets the fps and updates the game screen.
self.pyg.display.flip()
self.clock.tick(30)
```
#### File: uranus-invaders/UranusInvaders/SpaceRace.py
```python
import pygame, sys, math, time
from utils import MenuItemIndex, utils, TimerObject
from BaseRenderer import BaseRenderer
class SpaceRace():
def __init__(self, pyg, screen):
# General settings
print("init SpaceRace")
self.pyg = pyg
self.myfont = self.pyg.font.SysFont("monospace", 30)
self.screen = screen
self.width = pyg.display.Info().current_w
self.height = pyg.display.Info().current_h
self.state = "menu"
# Loading the items on screen and do some calculations
self.spaceship = pyg.image.load("Assets/spaceship-basic.png")
self.track = pyg.image.load("Assets/track-2.png")
self.track_mask = pyg.image.load("Assets/track-2.png")
self.startfinish_checker = pyg.image.load("Assets/startfinish.png")
self.can_lap_checker = pyg.image.load("Assets/startfinish.png")
self.startfinish = pyg.image.load("Assets/chequered.png")
self.spaceshipWidth = self.spaceship.get_rect().size[0]
self.spaceshipHeight = self.spaceship.get_rect().size[1]
self.trackWidth = self.track.get_rect().size[0]
self.trackHeight = self.track.get_rect().size[1]
# Space ship start location
self.spaceshipX = -142
self.spaceshipY = -487
# Space ship starting variables
self.rotation = 0
self.speed = 0
self.max_speed = 20
self.acceleration = 0.3
self.keys = [False, False, False, False]
# Things with timers and laps
self.start_time = 0
self.laptime = TimerObject()
self.bestlaptime = TimerObject("00:00:000", 0)
self.laps = 0
self.can_lap = False
# Menu items
self.option_items = []
options = ("Continue", "Exit")
actions = ("game", "quit")
for index, option in enumerate(options):
option_item = MenuItemIndex(str(option), actions[index], index, None, 80)
t_h = len(options) * option_item.height
pos_x = (self.width / 2) - (option_item.width / 2)
pos_y = (self.height / 2) - (t_h / 2) + ((index * 2) + index * option_item.height)
option_item.set_position(pos_x, pos_y)
self.option_items.append(option_item)
# 30FPS masterrace
def background(self):
if self.state == "menu":
self.menu()
elif self.state == "quit":
return "return=main"
elif self.state == "game":
# Well, check every frame
self.speed_controll()
# React to button presses of the arrow keys and do something with it
if self.keys[0] == True: #Left
if self.can_move(self.spaceshipX + self.speed, self.spaceshipY):
self.spaceshipX += self.speed
self.rotation = 90
if self.keys[1] == True: #Right
if self.can_move(self.spaceshipX - self.speed, self.spaceshipY):
self.spaceshipX -= self.speed
self.rotation = 270
if self.keys[2] == True: #Up
if self.can_move(self.spaceshipX, self.spaceshipY + self.speed):
self.spaceshipY += self.speed
self.rotation = 0
if self.keys[3] == True: #Down
if self.can_move(self.spaceshipX, self.spaceshipY - self.speed):
self.spaceshipY -= self.speed
self.rotation = 180
if self.keys[2] and self.keys[0] == True: #Up Left
self.rotation = 45
if self.keys[2] and self.keys[1] == True: #Up Right
self.rotation = 315
if self.keys[3] and self.keys[0] == True: #Down Left
self.rotation = 135
if self.keys[3] and self.keys[1] == True: #Down Right
self.rotation = 225
# Draw track
self.rotatedimg = self.pyg.transform.rotate(self.spaceship, self.rotation)
self.screen.blit(self.track, (self.width/2 + self.spaceshipX, self.height/2 + self.spaceshipY))
# Track markers
startfinishX = 454 + self.spaceshipX
startfinishY = 787 + self.spaceshipY
can_lap_checkerX = 2221 + self.spaceshipX
can_lap_checkerY = 787 + self.spaceshipY
# Draw track markers
# The checkers are invisible
self.screen.blit(self.startfinish_checker, (startfinishX, startfinishY))
self.screen.blit(self.can_lap_checker, (can_lap_checkerX, can_lap_checkerY))
# This one is chequered xD
self.screen.blit(self.startfinish, (startfinishX, startfinishY))
# Draw rotated space ship on top of everything
self.screen.blit(self.rotatedimg, ((self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2)))
# Check if markers have been hit
startfinish_hit = utils.collisionDetect(self.startfinish_checker, startfinishX, startfinishY, self.rotatedimg, (self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2), self.speed)
can_lap_checker_hit = utils.collisionDetect(self.can_lap_checker, can_lap_checkerX, can_lap_checkerY, self.rotatedimg, (self.width / 2) - (self.spaceshipWidth/2), (self.height / 2) - (self.spaceshipHeight/2), self.speed)
            # Check if the space ship passed the lap marker halfway around the lap
if can_lap_checker_hit == True:
self.can_lap = True
# Calculate the lap time
self.laptime = utils.get_elapsed_time(self.start_time)
            # Check if the space ship passed start/finish; if so, update the best lap, reset the lap timer, and add one lap to the counter
if startfinish_hit == True and self.can_lap == True:
if self.laptime.millis < self.bestlaptime.millis or self.bestlaptime.millis == 0:
self.bestlaptime.millis = self.laptime.millis
self.bestlaptime.disp_time = self.laptime.disp_time
self.start_time = 0
self.laps += 1
self.can_lap = False
# Draw lap information
self.disp_laptime = self.myfont.render("Time: " + self.laptime.disp_time, 1, (255, 255, 0))
self.disp_bestlaptime = self.myfont.render("Highscore: " + self.bestlaptime.disp_time, 1, (225, 225, 0))
self.disp_laps = self.myfont.render("Laps: " + str(self.laps), 1, (225, 225, 0))
self.screen.blit(self.disp_laptime, (20, 20))
self.screen.blit(self.disp_bestlaptime, (20, 60))
self.screen.blit(self.disp_laps, (20, 100))
def run(self, event):
if self.state == "menu":
s = self.menu()
return s
elif self.state == "game":
i = event
# Detect if and which button(s) is/are pressed
if i.type == self.pyg.KEYDOWN:
if self.start_time == 0:
# Start the timer when you start moving
self.start_time = utils.start_timer()
if i.key == self.pyg.K_LEFT:
self.keys[0] = True
if i.key == self.pyg.K_RIGHT:
self.keys[1] = True
if i.key == self.pyg.K_UP:
self.keys[2] = True
if i.key == self.pyg.K_DOWN:
self.keys[3] = True
if i.type == self.pyg.KEYUP:
if i.key == self.pyg.K_LEFT:
self.keys[0] = False
if i.key == self.pyg.K_RIGHT:
self.keys[1] = False
if i.key == self.pyg.K_UP:
self.keys[2] = False
if i.key == self.pyg.K_DOWN:
self.keys[3] = False
# Manage the speed of the space ship
def speed_controll(self):
drag = 0.5
# Prevents the speed from dipping below 0
if self.speed < 0:
self.speed = 1
# Prevents the speed from exceeding the speed limit
if self.speed > self.max_speed:
self.speed = self.max_speed
# If there is movement in any direction
if any(k == True for k in self.keys):
self.speed += self.acceleration
# If there is no movement at all
if all(k == False for k in self.keys) and self.speed > 0:
self.speed -= drag
if self.speed > 1:
if self.keys[2] and self.keys[0] == True: #Up Left
self.speed -= drag
if self.keys[2] and self.keys[1] == True: #Up Right
self.speed -= drag
if self.keys[3] and self.keys[0] == True: #Down Left
self.speed -= drag
if self.keys[3] and self.keys[1] == True: #Down Right
self.speed -= drag
# Manage the menu clicks
def menu(self):
for option in self.option_items:
mouseProperties = self.pyg.mouse.get_pos()
if option.is_mouse_selection(mouseProperties[0], mouseProperties[1]):
option.set_selected(True)
if self.pyg.mouse.get_pressed()[0]:
self.state = option.redir
else:
option.set_selected(False)
self.screen.blit(option.label, option.position)
# Decides if the character is allowed to move
def can_move(self, min_x, min_y):
x = math.floor(0 - min_x)
y = math.floor(0 - min_y)
#x and y not outside track.width and height
if (x < 0 or x > self.trackWidth - 1 - self.speed):
return False
if (y < 0 or y > self.trackHeight - 1 - self.speed):
return False
# Don't move if transparent
if (self.color_code(x, y).a) > 0:
return True
else:
self.speed -= 10
return False
# Return the RGBA value of a pixel at a given location
def color_code(self, x, y):
if str(x)[0] == "-":
x = math.floor(0 - x)
y = math.floor(0 - y)
color_code = self.track_mask.get_at((x,y))
return color_code
``` |
{
"source": "0noketa/calcifier",
"score": 3
} |
#### File: 0noketa/calcifier/calcifier.py
```python
import sys
import io
class Calcifier:
def __init__(self):
self.root = "root"
self.lang = "calcifier_calc"
self.rules = {}
self.rules_by_priority = {}
self.node_type = ""
def priorities(self):
return self.rules_by_priority.keys()
@staticmethod
def crange(_from: str, _to: str) -> str:
return set(map(chr, range(ord(_from), ord(_to) + 1)))
def load(self, file: io.TextIOWrapper) -> bool:
self.__init__()
if not file.readable():
return False
for src in file.read().splitlines():
if src == None:
break
src = list(filter(len, map(str.strip, src.split(" "))))
if len(src) == 0 or src[0] == '#':
continue
if len(src) == 1 and src[0] == "end":
break
if self.load_lang(src):
continue
if self.load_type(src):
continue
if self.load_root(src):
continue
if self.load_rule(src):
continue
print(f'#error "{src}"')
return False
if self.node_type == "":
self.node_type = self.lang + "_node_t"
return True
def load_root(self, src: list) -> bool:
if len(src) == 2 and src[0] == "root":
self.root = src[1]
return True
return False
def load_lang(self, src: list) -> bool:
if len(src) == 2 and src[0] == "lang":
self.lang = src[1]
return True
return False
def load_type(self, src: list) -> bool:
if len(src) == 2 and src[0] == "type":
self.node_type = src[1]
return True
return False
    # note: this just chunks a list into fixed-size groups; the pattern probably has a well-known name
@staticmethod
def zigzag(lst: list, unit: int) -> list:
r = []
u = []
for i in lst:
u += [i]
if len(u) >= unit:
r += [u]
u = []
return r
def load_rule(self, src: list) -> bool:
header = 7
if not (len(src) >= header + 2
and src[0] == "rule"
and (len(src) - header) % 2 == 0):
return False
base_rule = {
'priority': src[1],
'order': src[2],
'argc': int(src[3]),
'left': src[4],
'right': src[5],
'next': src[6]
}
if base_rule['priority'] not in self.priorities():
self.rules_by_priority[base_rule['priority']] = []
for c, f in self.zigzag(src[header:], 2):
rule = base_rule.copy()
rule.update({
'key': c,
'function': f
})
self.rules[rule['key']] = rule
self.rules_by_priority[rule['priority']] += [rule]
return True
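    # A hypothetical rule line this loader accepts (names invented for
    # illustration): "rule sum l 2 term sum expr + add - sub" registers "+"
    # and "-" as 2-argument operators at priority "sum", parsing "term" on the
    # left, "sum" on the right, and falling through to "expr" otherwise.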
def dump(self, file: io.TextIOWrapper):
file.writelines([
f'lang {self.lang}\n',
f'type {self.node_type}\n',
f'root {self.root}\n'
])
for key in self.priorities():
rules = self.rules_by_priority[key]
r = rules[0]
file.write(f'rule {key}' +
f' {r["order"]} {r["argc"]}' +
f' {r["left"]} {r["right"]} {r["next"]}')
for rule in rules:
file.write(f' {rule["key"]} {rule["function"]}')
file.write("\n")
def is_then(self, rule):
return rule['function'] in [ "if", "then", "pyif" ]
def is_do(self, rule):
return rule["function"] == "do"
def is_while(self, rule):
return rule["function"] == "while"
def generate(self, file: io.TextIOWrapper) -> bool:
if not file.writable():
return False
if self.root not in self.priorities():
return False
self.generate_header(file)
for key in self.priorities():
self.generate_func(file, key)
return True
def generate_header(self, file: io.TextIOWrapper):
file.write(f"""
#include "calcifier_rtl.h"
#include "{self.lang}.h"
typedef {self.node_type} node_t;
""")
for key in self.priorities():
rules = self.rules_by_priority[key]
file.write("static " + self.func_header(rules[0]) + ";\n")
file.write(f"""
bool {self.lang}_tryParse(char *s, ptrdiff_t len, node_t *out_value) {{
return tryParse_{self.root}(s, len, out_value);
}}
""")
file.write(f"""
static bool tryParse_n(char *s, ptrdiff_t len, node_t *out_value) {{
if (s == NULL || !*s || len == 0
|| !tryTrim(s, len, &s, &len)
|| len == 0)
return false;
return {self.lang}_tryParseValue(s, len, out_value);
}}
""")
def func_name(self, rule) -> str:
return f'tryParse_{rule["priority"]}'
def func_header(self, rule) -> str:
head = ""
if self.is_then(rule):
t = "Elseable"
return (f'bool {self.func_name(rule)}' +
f'(char *s, ptrdiff_t len, node_t *out_value)')
def generate_nodefuncs(self, file: io.TextIOWrapper):
        for r in self.rules.values():  # self.rules maps operator keys to rule dicts
file.write(f'extern node_t {self.lang}_{r["key"]}_{r["argc"]}(')
for i in range(r["argc"]):
if i > 0:
file.write(", ")
file.write("node_t")
file.write(");\n")
@staticmethod
def escape_char(c):
if ord(c) in range(33, 127) and c not in "\\\"\'":
return c
else:
return "\\x{:02x}".format(ord(c))
def escape_str(self, s):
return "".join(list(map(self.escape_char, s)))
def rules_to_keys(self, rules: list) -> str:
def f(rule):
return self.escape_char(rule["key"])
return "".join(list(map(f, rules)))
def generate_func(self, file: io.TextIOWrapper, priority: str) -> bool:
rules = self.rules_by_priority[priority]
rule = rules[0]
skip = f'{self.lang}_skip{rule["order"]}'
if rule["next"] == priority:
return False
if rule["left"] == priority and rule["right"] == priority:
return False
if rule["left"] == priority and rule["order"] == "l":
return False
if rule["right"] == priority and rule["order"] == "r":
return False
if rule["argc"] not in range(1, 3):
return False
file.write(f"""
{self.func_header(rule)} {{
char *s2;
ptrdiff_t len2;
if (s == NULL || !*s || len == 0
|| !tryTrim(s, len, &s2, &len2)
|| len2 == 0)
return false;
""")
if rule["argc"] == 1:
if rule["left"] != "-" and rule["right"] != "-":
return False
if rule["order"] == "l":
idx = "0"
next_idx = "s2 + 1"
else:
idx = "len2 - 1"
next_idx = "s2"
if rule["left"] != "-":
child = "left"
else:
child = "right"
file.write(f"""
if (strchr("{self.rules_to_keys(rules)}", s2[{idx}])
&& tryParse_{rule[child]}({next_idx}, len2 - 1, out_value))
{{
if (out_value) {{
""")
for r in rules:
file.write(f"""if (s2[{idx}] == '{r["key"]}')
*out_value = {self.lang}_newnode_{r["function"]}_1(*out_value, {self.lang}_nilnode());
else """)
file.write(f"""{{
{self.lang}_delnode(*out_value);
*out_value = {self.lang}_nilnode();
}}
}}
return true;
}}
return """)
if rule["next"] == "-":
file.write("false")
else:
file.write(f"""tryParse_{rule["next"]}(s2, len2, out_value)""")
file.write(""";
}""")
return True
if rule["argc"] != 2:
return False
if rule["left"] == "-" or rule["right"] == "-":
return False
# any binary operators
file.write(f"""
node_t left = {self.lang}_nilnode(), right = {self.lang}_nilnode();
""")
if rule["order"] == "l":
file.write(f"""
for (ptrdiff_t opr = 0; opr < len2; opr = {skip}(s2, len2, opr)) {{""")
else:
file.write(f"""
for (ptrdiff_t opr = len2; (opr = {skip}(s2, len2, opr)) > 0;) {{""")
file.write(f"""
left = {self.lang}_delnode(left);
right = {self.lang}_delnode(right);
if (!strchr("{self.rules_to_keys(rules)}", s2[opr])) continue;
char *right_p = s2 + opr + 1;
ptrdiff_t right_len = len2 - opr - 1;
if (tryParse_{rule["left"]}(s2, opr, &left)
&& tryParse_{rule["right"]}(right_p, right_len, &right))
{{
if (out_value) {{
""")
for r in rules:
file.write(f"""if (s2[opr] == '{r["key"]}')
*out_value = {self.lang}_newnode_{r["function"]}_2(left, right);
else """)
file.write(f"""{{
*out_value = left;
{self.lang}_delnode(right);
}}
}}
return true;
}}
}}
return """)
if rule["next"] == "-":
file.write('false')
else:
file.write(f'tryParse_{rule["next"]}(s2, len2, out_value)')
file.write(""";
}
""")
return True
calcifier = Calcifier()
calcifier.load(sys.stdin)
print("/*")
calcifier.dump(sys.stdout)
print("*/")
calcifier.generate(sys.stdout)
``` |
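A minimal usage sketch for the escaping helpers above (an illustrative addition, not part of the original file): escape_char is a @staticmethod, so it can be exercised without constructing a Calcifier or loading a grammar.
```python
# Hedged sketch: printable, non-quote ASCII passes through unchanged; everything
# else becomes a \xNN escape suitable for embedding in generated C string literals.
print(Calcifier.escape_char("+"))   # prints: +
print(Calcifier.escape_char("\\"))  # prints: \x5c (backslash and quotes are always escaped)
print(Calcifier.escape_char("\n"))  # prints: \x0a (non-printable -> hex escape)
```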
{
"source": "0noketa/tobf",
"score": 3
} |
#### File: tobf/tobf/arrowfuck.py
```python
import io
class ArrowFuck:
@classmethod
    def compile(cls, src: io.TextIOWrapper, dst: io.TextIOWrapper, width: int = 0x100, height: int = 0x100) -> int:
s = "".join([c for c in src.read() if c in "><v^+-,.[]"])
while len(s):
up = s.find("^")
down = s.find("v")
if up != -1 and (down == -1 or up < down):
dst.write(s[:up] + ("<" * width))
s = s[up + 1:]
elif down != -1:
dst.write(s[:down] + (">" * width))
s = s[down + 1:]
else:
dst.write(s)
s = ""
return width * height
def __init__(self, src: io.TextIOWrapper = None, dst: io.TextIOWrapper = None, width: int = 0x100, height: int = 0x100) -> None:
self.src = src
self.dst = dst
self.width = width
self.height = height
def compile_file(self, src: io.TextIOWrapper = None, dst: io.TextIOWrapper = None, width: int = None, height: int = None) -> int:
if src == None and self.src != None:
src = self.src
if dst == None and self.dst != None:
dst = self.dst
if width == None:
width = self.width
if height == None:
height = self.height
        if src is None or dst is None:
            raise Exception("input or output stream was None")
return ArrowFuck.compile(src, dst, width, height)
if __name__ == "__main__":
import sys
min_width = 4
width = 256
for i in sys.argv[1:]:
if i.startswith("-w"):
width = max(min_width, int(i[2:]))
if i == "-?":
sys.stderr.write("ArrawFuck to Brainfuck compiler\n")
sys.stderr.write(f"python {sys.argv[0]} [-wWIDTH] < src > dst\n")
sys.exit(0)
af = ArrowFuck(sys.stdin, sys.stdout, width)
af.compile_file()
```
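A quick sanity check for the compiler above, using in-memory streams; it assumes only the ArrowFuck class defined in this file. With width=4, `v` expands to four `>` moves (one row down) and `^` to four `<` moves:
```python
import io

src = io.StringIO("+>v+.")  # ArrowFuck source: increment, right, down, increment, output
dst = io.StringIO()
cells = ArrowFuck.compile(src, dst, width=4, height=4)
print(dst.getvalue())  # +>>>>>+.   ('v' became four '>')
print(cells)           # 16 (width * height cells needed to run safely)
```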
#### File: tobf/tobf/bfa.py
```python
import sys
import io
class Bfa:
def __init__(self, src) -> None:
self.src = src
self.varss = []
self.base_addr = 0
self.vars = []
self.block_stack = []
self.block_it = 0
self.current_addr = 0
self.max_mem = 0
def get_address(self, name):
if name in self.vars:
return self.base_addr + self.vars.index(name)
stack = self.varss.copy()
current = self.vars
base_addr2 = self.base_addr
for i in range(len(stack)):
current = stack.pop()
base_addr2 = base_addr2 - len(current)
if name in current:
return base_addr2 + current.index(name)
raise Exception(f"{name} is not defined")
def select_addr(self, addr, out=sys.stdout):
addr2 = addr - self.current_addr
if addr2 < 0:
out.write("<" * -addr2)
else:
out.write(">" * addr2)
self.current_addr = addr
def compile(self, out=sys.stdout) -> int:
"""out: file-like\n
returns memory size to run safe
"""
while True:
self.src = self.src.strip()
if len(self.src) == 0:
self.select_addr(0, out)
break
if self.src[0] == "'":
j = self.src[1:].index("'")
bf = self.src[1:j + 1]
self.src = self.src[j + 2:]
out.write(bf)
continue
if self.src[0] == "(":
j = self.src.index(")")
name = self.src[1:j]
self.src = self.src[j + 1:]
if name in self.vars:
raise Exception(f"error: {name} was defined twice")
self.vars.append(name)
addr = self.get_address(name)
if addr + 1 > self.max_mem:
self.max_mem = addr + 1
continue
if self.src[0] == "<":
j = self.src.index(">")
name = self.src[1:j]
self.src = self.src[j + 1:]
addr = self.get_address(name)
self.select_addr(addr, out)
continue
if self.src[0] == "[":
self.src = self.src[1:]
self.block_stack.append(self.block_it)
self.block_it = self.current_addr
out.write("[\n")
continue
if self.src[0] == "]":
self.src = self.src[1:]
if self.block_it == -1:
raise Exception(f"[ does not exists")
self.select_addr(self.block_it, out)
self.block_it = self.block_stack.pop()
out.write("]\n")
continue
if self.src[0] == "{":
self.src = self.src[1:]
self.base_addr += len(self.vars)
self.varss.append(self.vars)
self.vars = []
continue
if self.src[0] == "}":
self.src = self.src[1:]
addr0 = self.current_addr
for v in self.vars:
addr = self.get_address(v)
self.select_addr(addr, out)
out.write("[-]\n")
self.select_addr(addr0, out)
self.vars = self.varss.pop()
self.base_addr -= len(self.vars)
continue
if self.src[0] in ["#", ";"]:
try:
j = self.src[1:].index("\n")
self.src = self.src[j + 2:]
except Exception:
self.src = ""
continue
if self.src[0] in ["+", "-", ",", "."]:
out.write(self.src[0])
self.src = self.src[1:]
continue
return self.max_mem
@classmethod
    def from_filename(cls, filename: str):
with io.open(filename, "r") as f:
bfa = Bfa.from_file(f)
return bfa
@classmethod
    def from_file(cls, file):
src = file.read()
return Bfa(src)
def main(argv):
if len(argv) < 2:
print("yet another implementation of Clifford Wolf's BFA")
print(f" python {argv[0]} src.bfa")
return 0
try:
        bfa = Bfa.from_filename(argv[1])
with io.open(argv[1] + ".bf", "w") as f:
bfa.compile(f)
except Exception as e:
        sys.stderr.write(str(e) + "\n")
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
``` |
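A small end-to-end sketch of the compiler above (an illustrative addition): declare two cells, then drain x into y with a loop. Only the Bfa class from this file is assumed.
```python
import io

bfa = Bfa("(x)(y)<x>+++[<y>+<x>-]")  # set x to 3, then move it into y
out = io.StringIO()
mem = bfa.compile(out)
print(out.getvalue())  # +++[\n>+<-]\n
print(mem)             # 2 -- two cells suffice for this program
```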
{
"source": "0Nom4D/mdCreator",
"score": 3
} |
#### File: mdCreator/sources/Creator.py
```python
import configparser
from sources.Prerequisites import cPlusPlusPrerequisites, pythonPrerequisites, cPrerequisites, haskellPrerequisites, \
noPrerequisites
from sources.CodingStyle import cStyle, haskellStyle, noStyle
from sources.ApiLoader.ApiLoader import ApiLoader
from typing import Union
import json
import os
def find_config(name, path) -> str:
"""
Find recursively a file in a directory.
Parameters
-------
name : str
Name of the file you're looking for
path : str
Starting directory to find
Returns
-------
Returns the path to the file you're looking for
"""
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
return ""
class RangeError(Exception):
"""
    Exception raised when the range option from the configuration file is invalid or missing.
Attributes
----------
message : str
Exception explanation
"""
def __init__(self, message):
"""
Constructs an actual Range Error Exception class.
Parameters
----------
message : str
            Message explaining the Range Error
"""
self.message = message
def __str__(self):
"""
Returns the actual error message.
Returns
-------
Actual Range Error message.
"""
return f'RangeError: {self.message}'
class ConfigError(Exception):
"""
    Exception raised when configuration files are missing.
Attributes
----------
message : str
Exception explanation
"""
def __init__(self, message):
"""
Constructs an actual ConfigError Exception class.
Parameters
----------
message : str
            Message explaining the ConfigError
"""
self.message = message
def __str__(self):
"""
Returns the actual error message.
Returns
-------
Actual ConfigError message.
"""
return f'ConfigError: {self.message}'
class mdCreator:
"""
Main project class having the main computing loop.
Attributes
----------
project : str
Project's name
gifAttr : str
String defining the gifs you're looking for
language : str
Project's main language
fileDesc : TextIOWrapper | None
        File descriptor of the README file being generated
    array : bool
        Boolean telling whether the user wants an array in the README file
student : bool
Boolean telling if the user is a student
apiLoader : ApiLoader
Class making every Tenor's Api calls
"""
def __init__(self, projName, usedLang, gifAttr, arrOpt):
self.project = projName
if gifAttr is None:
self.gifAttr = ""
else:
self.gifAttr = gifAttr
self.language = usedLang
self.fileDesc = None
self.array = arrOpt
self.student = False
self.apiLoader = ApiLoader(url="https://g.tenor.com/v1/search?", search=gifAttr, limit=2)
# Main Loop
def launchCreator(self) -> None:
"""
Main loop function creating the README file.
Returns
-------
None
"""
index = 0
self.checkExisting()
self.loadConfig()
if self.apiLoader.isUrlBuild():
self.fileDesc.write("## Asked GIFS\n\n")
gifList = self.apiLoader.searchGifs()
while index < len(gifList):
                self.fileDesc.write(f'![gif]({gifList[index]})\n')
index += 1
self.fileDesc.write("\n")
if self.array is True:
self.printArray()
self.fileDesc.write(
"\nThis README file has been created with mdCreator. [Please check the project by clicking this link.]("
"https://github.com/0Nom4D/mdCreator/)")
self.fileDesc.close()
print("\nREADME.md created.")
print("Don't forget to edit your README.md file if something's wrong with the existing file.")
print("if any error occurs, please create an issue or contact Nom4D- | NMS#0811 on Discord.")
def printArray(self) -> None:
"""
Writing an array in the user README file.
Returns
-------
None
"""
self.fileDesc.write("## Asked Array Template:\n\n\
| Index1 | Index2 |\n\
| ---------- |:-------------:|\n\
| Key 1 | Opt1 |\n\
| Key 2 | Opt2 |\n\
| Key 3 | Opt3 |\n\
| Key 4 | Opt4 |\n\
| Key 5 | Opt5 |\n\
| Key 6 | Opt6 |\n\
| Key 7 | Opt7 |\n")
def checkExisting(self) -> None:
"""
Checks if a README file already exists into the directory.
Returns
-------
None
"""
if os.path.isfile("README.md"):
while 1:
try:
value = input("README.md already exists. Do you want to create a new README.md file? [y/n] ")
if value == 'y':
self.fileDesc = open("README.md", "w")
self.fileDesc.truncate()
break
elif value == 'n':
exit(1)
else:
continue
except EOFError:
print("mdCreator Stopped - creator.py: l.203")
exit(1)
else:
self.fileDesc = open("README.md", "w")
# README.md Sections
def loadConfig(self) -> None:
"""
Load configuration file.
Returns
-------
None
"""
configMode = None
value = None
try:
# Loading mdCreatorrc config file
rcFile = find_config("mdCreatorrc", os.getenv('HOME'))
if rcFile == "":
raise ConfigError('mdCreatorrc file is missing.')
cfgParser = configparser.ConfigParser()
cfgParser.read(rcFile)
if cfgParser['CONFIG']['configtype'] == "ToBeAsked":
                while value is None:
                    try:
                        value = input("For your next use, would you like to use the Student Configuration? [y/n] ")
                        if value == 'y':
                            cfgParser.set('CONFIG', 'configType', 'student')
                        elif value == 'n':
                            cfgParser.set('CONFIG', 'configType', 'pro')
                        else:
                            value = None  # any other answer: reset and re-prompt
                            continue
with open('mdCreatorrc', 'w') as rcFileFD:
cfgParser.write(rcFileFD)
rcFileFD.close()
except EOFError:
print("mdCreator Stopped - creator.py: l.241")
exit(1)
configMode = cfgParser['CONFIG']['configtype']
# Loading mdCreator.json config file
configFile = find_config("mdCreator.json", os.getenv('HOME'))
config = open(configFile, "r")
cfg = json.load(config)
for lib in cfg[configMode]:
self.writeSection(cfg[configMode], lib)
except KeyError as err:
(x,) = err.args
print(f'KeyError: {x}')
exit(1)
except ConfigError as err:
print(err)
exit(1)
def writeSection(self, cfg, section) -> Union[int, None]:
"""
        Write each section and check the configuration.
Parameters
-------
cfg : list
List of every sections present in the configuration file
section : list
List of every parameters of a section
Returns
-------
None
"""
secRange = 0
try:
secRange = cfg[section]["range"]
except KeyError:
secRange = None
if secRange is None:
if section == "gifs":
self.apiLoader.setLimit(int(cfg[section]["nbGifs"]))
self.apiLoader.buildUrl()
return 0
raise RangeError(f'Range is not set for {str(section)} section.')
return self.redirectSections(secRange, cfg, section)
def redirectSections(self, secRange, cfg, section) -> Union[int, None]:
"""
Write the different README sections.
Parameters
-------
secRange : int
Section range
cfg : list
List of every sections present in the configuration file
section : list
List of every parameters of a section
Returns
-------
None
"""
if secRange < 1:
raise RangeError(f'Range must be higher than 0 for {str(section)} section.')
while secRange != 0:
self.fileDesc.write("#")
secRange -= 1
if section == "header":
self.fileDesc.write(f' {self.project}\n\n')
elif cfg[section]["title"] is not None:
self.fileDesc.write(f'{cfg[section]["title"]}\n\n')
if section == "style":
self.printCodingStyle()
elif section == "prerequisites":
self.printPrerequisites()
elif cfg[section]["description"][0] == ' ':
self.fileDesc.write(f'{self.project}{cfg[section]["description"]}\n\n')
else:
self.fileDesc.write(f'{cfg[section]["description"]}\n\n')
return 0
def printPrerequisites(self) -> int:
"""
Write prerequisites in the created README file.
"""
return ({
"c++": cPlusPlusPrerequisites,
"c": cPrerequisites,
"python": pythonPrerequisites,
"haskell": haskellPrerequisites
}.get(self.language.lower(), noPrerequisites)(self.fileDesc))
def printCodingStyle(self) -> int:
"""
Write coding style in the created README file.
"""
return ({
"c": cStyle,
"haskell": haskellStyle
}.get(self.language.lower(), noStyle)(self.fileDesc, self.language, self.project))
``` |
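find_config is the only piece of this module that runs without user interaction; a hedged sketch of how loadConfig relies on it (the file names match the ones used above, everything else is illustrative):
```python
import os

# An empty string from find_config means the rc file is missing,
# in which case loadConfig() raises ConfigError.
rc_path = find_config("mdCreatorrc", os.getenv('HOME'))
if rc_path == "":
    print("mdCreatorrc not found; loadConfig() would raise ConfigError")
else:
    print(f"configuration found at {rc_path}")
```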
{
"source": "0nse/mopidy-scrobbler",
"score": 2
} |
#### File: mopidy-scrobbler/mopidy_scrobbler/frontend.py
```python
from __future__ import unicode_literals
import logging
import os
import time
from mopidy.core import CoreListener
import pykka
import pylast
logger = logging.getLogger(__name__)
LASTFM_API_KEY = '2236babefa8ebb3d93ea467560d00d04'
LASTFM_API_SECRET = '94d9a09c0cd5be955c4afaeaffcaefcd'
LIBREFM_SESSION_KEY_FILE = os.path.join(os.path.expanduser('~'),
'.librefm_session_key')
class ScrobblerFrontend(pykka.ThreadingActor, CoreListener):
def __init__(self, config, core):
super(ScrobblerFrontend, self).__init__()
self.config = config
self.lastfm = None
self.librefm = None
self.networks = {}
self.last_start_time = None
def on_start(self):
if not (self.connect_to_lastfm() and self.connect_to_librefm()):
logger.warning("Couldn't connect to any scrobbling services. "
"Mopidy Scrobbler will stop.")
self.stop()
def connect_to_lastfm(self):
''' Connect to Last.fm and return True on success. '''
lastfm_username = self.config['scrobbler']['lastfm_username']
lastfm_password = self.config['scrobbler']['lastfm_password']
try:
if lastfm_username and lastfm_password:
self.lastfm = pylast.LastFMNetwork(
api_key=LASTFM_API_KEY,
api_secret=LASTFM_API_SECRET,
username=lastfm_username,
password_hash=pylast.md5(lastfm_password))
logger.info('Scrobbler connected to Last.fm')
self.networks['Last.fm'] = self.lastfm
return True
except (pylast.NetworkError, pylast.MalformedResponseError,
pylast.WSError) as e:
logger.error('Error while connecting to Last.fm: %s', e)
return False
def connect_to_librefm(self):
''' Connect to Libre.fm and return True on success. '''
librefm_username = self.config['scrobbler']['librefm_username']
librefm_password = self.config['scrobbler']['librefm_password']
try:
if librefm_username and librefm_password:
self.librefm = pylast.LibreFMNetwork(
username=librefm_username,
password_hash=pylast.md5(librefm_password))
if self.retrieve_librefm_session():
self.networks['Libre.fm'] = self.librefm
logger.info('Scrobbler connected to Libre.fm')
return True
else:
return False
except (pylast.NetworkError, pylast.MalformedResponseError,
pylast.WSError) as e:
logger.error('Error while connecting to Libre.fm: %s', e)
return False
def retrieve_librefm_session(self):
''' Opens a Web browser to create a session key file if none
exists yet. Else, it is loaded from disk. Returns True on
success. '''
if not os.path.exists(LIBREFM_SESSION_KEY_FILE):
import webbrowser
logger.warning('The Libre.fm session key does not exist. A Web '
'browser will open an authentication URL. Confirm '
'access using your username and password. This '
'has to be done only once.')
session_keygen = pylast.SessionKeyGenerator(self.librefm)
auth_url = session_keygen.get_web_auth_url()
webbrowser.open(auth_url)
logger.info('A Web browser may not be opened if you run Mopidy '
'as a different user. In this case, you will have '
'to manually open the link "{url}".'
.format(url=auth_url))
remainingTime = 30 # approximately 30 seconds before timeout
while remainingTime:
try:
session_key = session_keygen \
.get_web_auth_session_key(auth_url)
# if the file was created in the meantime, it will
# be blindly overwritten:
with open(LIBREFM_SESSION_KEY_FILE, 'w') as f:
f.write(session_key)
logger.debug('Libre.fm session key retrieved and written '
'to disk.')
break
except pylast.WSError:
remainingTime -= 1
time.sleep(1)
except IOError:
logger.error('Cannot write to session key file "{path}"'
.format(path=LIBREFM_SESSION_KEY_FILE))
return False
if not remainingTime:
logger.error('Authenticating to Libre.fm timed out. Did you '
'allow access in your Web browser?')
return False
else:
session_key = open(LIBREFM_SESSION_KEY_FILE).read()
self.librefm.session_key = session_key
return True
def get_duration(self, track):
return track.length and track.length // 1000 or 0
def get_artists(self, track):
''' Return a tuple consisting of the first artist and a merged
string of artists. The first artist is considered to be the
primary artist of the track. The artists are joined by using
slashes as recommended in ID3v2.3. Prefer the album artist if
any is given. '''
if not len(track.artists):
logger.error('The track does not have any artists.')
raise ValueError
artists = [a.name for a in track.artists]
if track.album and track.album.artists:
artists = [a.name for a in track.album.artists]
metaArtists = ['compilation', 'split', 'various artists']
if artists[0].lower() in metaArtists:
artists = [a.name for a in track.artists]
primaryArtist = artists[0]
artists = '/'.join(artists)
return (primaryArtist, artists)
def track_playback_started(self, tl_track):
track = tl_track.track
(artist, artists) = self.get_artists(track)
duration = self.get_duration(track)
self.last_start_time = int(time.time())
logger.debug('Now playing track: %s - %s', artists, track.name)
for network in self.networks.items():
try:
network[1].update_now_playing(
artist=artist,
title=(track.name or ''),
album=(track.album and track.album.name or ''),
duration=str(duration),
track_number=str(track.track_no or 0),
mbid=(track.musicbrainz_id or ''))
except (pylast.ScrobblingError, pylast.NetworkError,
pylast.MalformedResponseError, pylast.WSError) as e:
logger.warning('Error submitting playing track to {network}: '
'{error}'.format(network=network[0], error=e))
def track_playback_ended(self, tl_track, time_position):
''' Scrobble the current track but only submit the primary
artist instead of a combined string which could wrongfully
create new Last.FM artist pages. '''
track = tl_track.track
(artist, artists) = self.get_artists(track)
duration = self.get_duration(track)
time_position = time_position // 1000
if duration < 30:
logger.debug('Track too short to scrobble. (30s)')
return
if time_position < duration // 2 and time_position < 240:
logger.debug(
'Track not played long enough to scrobble. (50% or 240s)')
return
if self.last_start_time is None:
self.last_start_time = int(time.time()) - duration
logger.debug('Scrobbling track: %s - %s', artists, track.name)
for network in self.networks.items():
try:
network[1].scrobble(
artist=artist,
title=(track.name or ''),
timestamp=str(self.last_start_time),
album=(track.album and track.album.name or ''),
track_number=str(track.track_no or 0),
duration=str(duration),
mbid=(track.musicbrainz_id or ''))
except (pylast.ScrobblingError, pylast.NetworkError,
pylast.MalformedResponseError, pylast.WSError) as e:
logger.warning('Error submitting played track to {network}: '
'{error}'.format(network=network[0], error=e))
``` |
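track_playback_ended applies the usual Audioscrobbler gate: tracks under 30 seconds are never scrobbled, and longer tracks need half their length or 240 seconds of playback. A standalone sketch of that rule (not part of the original frontend):
```python
def should_scrobble(duration_s, played_s):
    # Tracks shorter than 30 s are never scrobbled.
    if duration_s < 30:
        return False
    # Otherwise require half the track or 240 s, whichever comes first.
    return played_s >= duration_s // 2 or played_s >= 240

print(should_scrobble(25, 25))    # False: track too short
print(should_scrobble(200, 99))   # False: under 50% and under 240 s
print(should_scrobble(600, 240))  # True: the 240 s cap applies
```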
{
"source": "0one2/CVAHMR",
"score": 3
} |
#### File: discriminator/preprocess/hip_normalized.py
```python
import torch
import time
def normalize_hip(kps):
'''
Arguments:
kps (batch x num_kps x 2): Input 2D normalized joint position (-1 ~ 1)
Returns:
        normalized_kps (batch x num_kps*2) : keypoints scaled by the mean distance from the center (hip) joint, then re-centered on the hip.
related paper link: https://arxiv.org/pdf/1803.08244.pdf
'''
# 1. find center hip coord
batch = kps.size(0)
num_kps = kps.size(1)
kps_flat = kps.reshape(batch, -1) # kps_flat : batch x num_kps*2
# 2. calculate distance of all joints from center hip coord
l_hip = kps[:, 11, :].clone() # batch x 2
r_hip = kps[:, 12, :].clone() # batch x 2
cen_hip = ((r_hip + l_hip) / 2).unsqueeze(1).repeat(1, num_kps, 1) # batch x num_kps x 2
mean_distance = torch.norm(kps - cen_hip, dim = -1).mean(dim = 1).view(-1, 1) # batch 224 resolution
# 3. normalize kps. code reference: https://github.com/DwangoMediaVillage/3dpose_gan
idx = torch.where(mean_distance == 0)
mean_distance[idx] = 1e-6
normalized_kps = kps_flat / mean_distance
normalized_hip_x = (normalized_kps[:, 11 * 2] + normalized_kps[:, 12 * 2]) / 2
normalized_hip_y = (normalized_kps[:, 11 * 2 + 1] + normalized_kps[:, 12 * 2 + 1]) / 2
normalized_kps[:, 0::2] -= normalized_hip_x.view(-1, 1).repeat(1, num_kps)
normalized_kps[:, 1::2] -= normalized_hip_y.view(-1, 1).repeat(1, num_kps)
return normalized_kps # batch x 17*2
if __name__ == '__main__':
num_kps = 17
kps = torch.randn(1, num_kps, 2)
start = time.time()
normalized_distance = normalize_hip(kps)
print(normalized_distance)
print(f'time {time.time() - start}')
```
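A quick sanity check for normalize_hip above: after normalization, the midpoint of the two hip keypoints (indices 11 and 12) sits at the origin. Only torch and the function in this file are assumed:
```python
import torch

kps = torch.randn(8, 17, 2)               # a batch of COCO-style 2D keypoints
flat = normalize_hip(kps)                 # 8 x 34
hip_x = (flat[:, 11 * 2] + flat[:, 12 * 2]) / 2
hip_y = (flat[:, 11 * 2 + 1] + flat[:, 12 * 2 + 1]) / 2
print(torch.allclose(hip_x, torch.zeros(8), atol=1e-5))  # True: hip re-centered
print(torch.allclose(hip_y, torch.zeros(8), atol=1e-5))  # True
```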
#### File: CVAHMR/train/pre_trainer.py
```python
import torch
import torch.nn as nn
import numpy as np
from torchgeometry import angle_axis_to_rotation_matrix, rotation_matrix_to_angle_axis
import cv2
import pdb
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from datasets import MixedDataset
from models import hmr, SMPL
from models.loss import GradientPaneltyLoss, Vanilla, Wgan, WGANGP, calculate_accuracy, calculate_rot_err
from utils.geometry import perspective_projection, estimate_translation, rot6d_to_rotmat
from utils.renderer import Renderer
from utils import BaseTrainer
from utils.imutils import plot_kps
import config
import constants
class Pre_Trainer(BaseTrainer):
def init_fn(self):
self.discriminator = self.discriminator.to(self.device)
self.train_ds = MixedDataset(self.options, ignore_3d=self.options.ignore_3d, is_train=True)
self.model = hmr(config.SMPL_MEAN_PARAMS, pretrained=True).to(self.device)
# self.optimizer = torch.optim.Adam(params=self.model.parameters(),
# lr=self.options.lr,
# weight_decay=0)
self.optimizer_d = torch.optim.Adam(params=self.discriminator.parameters(),
lr=self.options.lr*self.options.lr_rate,
weight_decay=0)
self.smpl = SMPL(config.SMPL_MODEL_DIR,
batch_size=self.options.batch_size,
create_transl=False).to(self.device)
self.mean_params = np.load(config.SMPL_MEAN_PARAMS)
self.init_pose = torch.from_numpy(self.mean_params['pose'][:]).repeat(self.options.batch_size).view(self.options.batch_size,-1).to(self.device)
self.init_betas = torch.from_numpy(self.mean_params['shape'][:].astype('float32')).repeat(self.options.batch_size).view(self.options.batch_size,-1).to(self.device)
self.init_rotmat = rot6d_to_rotmat(self.init_pose).view(self.options.batch_size, 24, 3, 3).to(self.device)
# Per-vertex loss on the shape
self.criterion_shape = nn.L1Loss().to(self.device)
# Keypoint (2D and 3D) loss
# No reduction because confidence weighting needs to be applied
self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
# Loss for SMPL parameter regression
self.criterion_regr = nn.MSELoss().to(self.device)
# Loss for GAN BCE LOSS
        self.criterion_BCELogitsLoss = nn.BCEWithLogitsLoss()  # combines a sigmoid layer + BCELoss in a single class; more stable than a plain sigmoid followed by BCELoss
self.GP_func = WGANGP().cuda()
self.models_dict = {'discriminator' : self.discriminator}
self.optimizers_dict = {'optimizer_d' : self.optimizer_d}
print(f'models saved {self.models_dict.keys()}')
print(f'optimizer saved {self.optimizers_dict.keys()}')
self.focal_length = constants.FOCAL_LENGTH
if self.options.pretrained_checkpoint is not None:
self.load_pretrained(checkpoint_file=self.options.pretrained_checkpoint)
if self.options.gan_loss == 'vanilla':
self.gan_loss = Vanilla().cuda()
elif self.options.gan_loss == 'wgan':
self.gan_loss = Wgan().cuda()
else:
            raise NameError(f'{self.options.gan_loss} not implemented yet!')
# Create renderer
self.renderer = Renderer(focal_length=self.focal_length, img_res=self.options.img_res, faces=self.smpl.faces)
def train_step(self, input_batch):
        self.model.train()  # affects certain modules, like Dropout, BatchNorm, etc.
# Get data from the batch
images = input_batch['img']# input image
gt_keypoints_2d = input_batch['keypoints'] # 2D keypoints
is_flipped = input_batch['is_flipped'] # flag that indicates whether image was flipped during data augmentation
rot_angle = input_batch['rot_angle'] # rotation angle used for data augmentation
dataset_name = input_batch['dataset_name'] # name of the dataset the image comes from
indices = input_batch['sample_index'] # index of example inside its dataset
img_name = input_batch['imgname']
gt_rotmat = input_batch['pose']
batch_size = images.shape[0]
mean_betas = self.init_betas
mean_rotmat = self.init_rotmat
# Feed images in the network to predict camera and SMPL parameters
pred_rotmat, pred_betas, pred_camera = self.model(images)
pred_output = self.smpl(betas=mean_betas, body_pose=mean_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
pred_vertices = pred_output.vertices
pred_joints = pred_output.joints
# Convert Weak Perspective Camera [s, tx, ty] to camera translation [tx, ty, tz] in 3D given the bounding box size
# This camera translation can be used in a full perspective projection
pred_cam_t = torch.stack([pred_camera[:,1],
pred_camera[:,2],
2*self.focal_length/(self.options.img_res * pred_camera[:,0] +1e-9)],dim=-1)
camera_center = torch.zeros(batch_size, 2, device=self.device)
pred_keypoints_2d = perspective_projection(pred_joints,
rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(batch_size, -1, -1),
translation=pred_cam_t,
focal_length=self.focal_length,
camera_center=camera_center)
# Normalize keypoints to [-1,1]
pred_keypoints_2d = pred_keypoints_2d / (self.options.img_res / 2.)
# idx_COCO = [i for i in range(25, 37)] + [j for j in range(44, 49)] # follow the SMPL index order
# get kps of COCO indexes (batch, 49, 3) -> (batch, 17, 3)
# gt_kps = gt_keypoints_2d[:, idx_COCO].clone()
# pred_kps = pred_keypoints_2d[:, idx_COCO].clone()
gt_kps = gt_keypoints_2d[:, 25:].clone()
pred_kps = pred_keypoints_2d[:, 25:].clone()
# set kps value to zero if visibility of certain kps is 0
vis_idx = torch.where(gt_kps[:, :, 2] == 0)
vis_batch, vis_kps = vis_idx
gt_kps[vis_batch, vis_kps] = 0
gt_kps = gt_kps[:, :, :2]
pred_kps[vis_batch, vis_kps] = 0
# Discriminator Update
if self.options.train == 'Base':
pass
else:
if self.options.train == 'Base_GAN':
loss_D, loss_D_real, loss_D_fake, real_acc, fake_acc = self.BaseGAN_loss(gt_kps, pred_kps, vis_idx)
elif self.options.train == 'RandomCam':
# Calculate RandomCam
std = torch.ones(1).cuda() * self.options.rand_std
rot_angle = torch.normal(0,std).cuda()
y_rotation = torch.FloatTensor([[torch.cos(rot_angle),0,torch.sin(rot_angle)],
[0,1,0],
[-torch.sin(rot_angle),0,torch.cos(rot_angle)]]).cuda()
# Random y rotation
if self.options.random_type == 'Shift':
rand_rotmat = torch.matmul(y_rotation, pred_rotmat)
elif self.options.random_type == 'Direct':
rand_rotmat = y_rotation
loss_D, loss_D_real, loss_D_fake, real_acc, fake_acc = self.DoubleCam_loss(pred_rotmat, rand_rotmat, pred_betas, pred_cam_t, gt_kps, pred_kps, vis_idx)
elif self.options.train == 'SwapCam':
swap_rotmat = torch.flip(pred_rotmat, dims = [0])
loss_D, loss_D_real, loss_D_fake, real_acc, fake_acc = self.DoubleCam_loss(pred_rotmat, swap_rotmat, pred_betas, pred_cam_t, gt_kps, pred_kps, vis_idx)
else:
                raise NameError(f'{self.options.train} not implemented yet!')
# Discriminator Update
if (self.step_count+1) % self.options.update_freq == 0 :
self.optimizer_d.zero_grad() # set the grads of the previous epoch to be zero, to prevent the accumulation of grads of previous step | equal to model.zero_grad() if optimizer contains params of model
loss_D.backward(retain_graph=True) # calculate the gradient at each layer | retain computational graph for the update of generator(if not, calculating grads and then disappear computation graph)
self.optimizer_d.step() # update the layer weight of related params, here 'discriminator parameters'
## For Generator Update
# Compute 2D reprojection loss for the keypoints
loss_keypoints = self.camera_fitting_loss(pred_keypoints_2d, gt_keypoints_2d,
self.options.openpose_train_weight,
self.options.gt_train_weight)
# if dataset_name == 'mpi-inf-3dhp':
rot_err = calculate_rot_err(pred_rotmat, gt_rotmat)
if self.options.train == 'Base':
loss_G = self.options.loss_kps_weight * loss_keypoints
g_real_acc, g_fake_acc = 0.5, 0.5
else:
loss_G = self.options.loss_kps_weight * loss_keypoints
if self.options.train == 'Base_GAN':
                loss_gan_generator, g_real_acc, g_fake_acc = self.BaseGAN_loss(gt_kps, pred_kps, vis_idx, 'generator')
loss_G += self.options.loss_G_weight * loss_gan_generator
elif self.options.train == 'RandomCam':
loss_gan_generator, g_real_acc, g_fake_acc = self.DoubleCam_loss(pred_rotmat, rand_rotmat, pred_betas, pred_cam_t, gt_kps, pred_kps, vis_idx, mode = 'generator')
loss_G += self.options.loss_G_weight * loss_gan_generator
elif self.options.train == 'SwapCam':
swap_rotmat = torch.flip(pred_rotmat, dims = [0])
loss_gan_generator, g_real_acc, g_fake_acc = self.DoubleCam_loss(pred_rotmat, swap_rotmat, pred_betas, pred_cam_t, gt_kps, pred_kps, vis_idx, mode = 'generator')
loss_G += self.options.loss_G_weight * loss_gan_generator
else:
                raise NameError(f'{self.options.train} not implemented yet!')
# self.optimizer.zero_grad() # same as self.discriminator.zero_grad()
# loss_G.backward() # calculate gradient
# self.optimizer.step()
# Pack output arguments for tensorboard logging
if self.options.train == 'Base':
output = {'pred_vertices': pred_vertices.detach(),
'pred_cam_t': pred_cam_t.detach(),
'pred_kps': pred_kps.detach(),
'gt_kps': gt_kps.detach()}
losses = {'loss': loss_G.detach().item(),
'rot_err': sum(rot_err) / self.options.batch_size,
'loss_keypoints': loss_keypoints.detach().item()}
else:
output = {'pred_vertices': pred_vertices.detach(),
'pred_cam_t': pred_cam_t.detach(),
'pred_kps': pred_kps.detach(),
'gt_kps': gt_kps.detach()}
losses = {'loss': loss_G.detach().item(),
'loss_keypoints': loss_keypoints.detach().item(),
'loss_discriminator': loss_D.detach().item(),
'loss_generator': loss_gan_generator.detach().item(),
'fake_acc': fake_acc/self.options.batch_size,
'real_acc': real_acc/self.options.batch_size,
'fake_loss': loss_D_fake.detach().item(),
'real_loss': loss_D_real.detach().item(),
'rot_err': sum(rot_err) / self.options.batch_size,
'g_fake_acc' : 1-(g_fake_acc/self.options.batch_size)}
return output, losses
def camera_fitting_loss(self, pred_keypoints_2d, gt_keypoints_2d, openpose_weight, gt_weight):
# """ Compute 2D reprojection loss for selected keypoints.
# The loss is weighted by the confidence.
# The available keypoints are different for each dataset."""
# Extremities
op_joints = ['OP RHip', 'OP LHip', 'OP RShoulder', 'OP LShoulder','OP RAnkle','OP LAnkle','OP RWrist','OP LWrist']
op_joints_ind = [constants.JOINT_IDS[joint] for joint in op_joints]
gt_joints = ['Right Hip', 'Left Hip', 'Right Shoulder', 'Left Shoulder','Right Ankle','Left Ankle','Right Wrist','Left Wrist']
gt_joints_ind = [constants.JOINT_IDS[joint] for joint in gt_joints]
# Elbows and Knees
# op_joints = ['OP RHip', 'OP LHip', 'OP RShoulder', 'OP LShoulder','OP RKnee','OP LKnee','OP RElbow','OP LElbow']
# op_joints_ind = [constants.JOINT_IDS[joint] for joint in op_joints]
# gt_joints = ['Right Hip', 'Left Hip', 'Right Shoulder', 'Left Shoulder','Right Knee','Left Knee','Right Elbow','Left Elbow']
# gt_joints_ind = [constants.JOINT_IDS[joint] for joint in gt_joints]
conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()
conf[:, :25] *= openpose_weight
conf[:, 25:] *= gt_weight
loss = (conf[:,gt_joints_ind] * self.criterion_keypoints(pred_keypoints_2d[:,gt_joints_ind], gt_keypoints_2d[:, gt_joints_ind, :-1])).mean()
return loss
def BaseGAN_loss(self, gt_kps, pred_kps, vis_idx, mode = 'discriminator'):
# loss_discriminator when Base_GAN
probability_real = self.discriminator(gt_kps, vis_idx)
if mode == 'discriminator':
probability_pred = self.discriminator(pred_kps.detach(), vis_idx).cuda()
loss, loss_D_real, loss_D_fake = self.gan_loss(probability_real, probability_pred, None, numCam = 'single', mode='discriminator')
if self.options.gan_loss == 'wgan':
loss_D_GP = self.GP_func(self.discriminator, gt_kps, pred_kps, None, vis_idx, numCam = 'single')
loss += loss_D_GP
correct_real, correct_fake = calculate_accuracy(probability_pred, probability_real, None, numCam = 'single')
return loss, loss_D_real, loss_D_fake, correct_real, correct_fake
# loss_generator when Base_GAN
else :
probability_pred = self.discriminator(pred_kps, vis_idx).cuda()
loss = self.gan_loss(None, probability_pred, None, numCam= 'single', mode='generator')
correct_real, correct_fake = calculate_accuracy(probability_pred, probability_real, None, numCam = 'single')
return loss, correct_real, correct_fake
def DoubleCam_loss(self, pred_rotmat, new_rotmat, pred_betas, pred_cam_t, gt_kps, pred_kps, vis_idx, mode = 'discriminator'):
new_output = self.smpl(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=new_rotmat[:,0].unsqueeze(1), pose2rot=False)
new_vertices = new_output.vertices
new_joints = new_output.joints
camera_center = torch.zeros(self.options.batch_size, 2, device=self.device)
new_keypoints_2d = perspective_projection(new_joints,
rotation=torch.eye(3, device=self.device).unsqueeze(0).expand(self.options.batch_size, -1, -1),
translation=pred_cam_t,
focal_length=self.focal_length,
camera_center=camera_center)
new_keypoints_2d = new_keypoints_2d / (self.options.img_res / 2.)
new_kps = new_keypoints_2d[:, 25:]
# loss_discriminator when Base_GAN
probability_real = self.discriminator(gt_kps, vis_idx)
if mode == 'discriminator':
probability_pred = self.discriminator(pred_kps.detach(), vis_idx)
probability_new = self.discriminator(new_kps.detach(), vis_idx)
loss, loss_D_real, loss_D_fake = self.gan_loss(probability_real, probability_pred, probability_new, numCam = 'double', mode='discriminator')
if self.options.gan_loss == 'wgan':
loss_D_GP = self.GP_func(self.discriminator, gt_kps, pred_kps, new_kps, vis_idx, numCam = 'double')
loss += loss_D_GP
correct_real, correct_fake = calculate_accuracy(probability_pred, probability_real, probability_new, numCam = 'double')
return loss, loss_D_real, loss_D_fake, correct_real, correct_fake
# loss_generator when Base_GAN
else:
probability_pred = self.discriminator(pred_kps, vis_idx)
probability_new = self.discriminator(new_kps, vis_idx)
loss = self.gan_loss(None, probability_pred, probability_new, numCam = 'double', mode='generator')
correct_real, correct_fake = calculate_accuracy(probability_pred, probability_real, probability_new, numCam = 'double')
return loss, correct_real, correct_fake
def train_summaries(self, mode, input_batch, output, losses):
images = input_batch['img']
images = images * torch.tensor([0.229, 0.224, 0.225], device=images.device).reshape(1,3,1,1)
images = images + torch.tensor([0.485, 0.456, 0.406], device=images.device).reshape(1,3,1,1)
pred_vertices = output['pred_vertices']
pred_cam_t = output['pred_cam_t']
pred_kps = output['pred_kps']
gt_kps = output['gt_kps']
images_pred = self.renderer.visualize_tb(pred_vertices, pred_cam_t, images)
figure = plot_kps(images,gt_kps,pred_kps)
if mode == 'train':
self.summary_writer.add_image('pred_shape', images_pred, self.step_count)
self.summary_writer.add_mesh('pred_mesh', vertices = pred_vertices, global_step=self.step_count)
self.summary_writer.add_figure('pred_kps', figure, self.step_count)
for loss_name, val in losses.items():
self.summary_writer.add_scalar(loss_name, val, self.step_count)
# to see the result of the most-challenging-results of samples
else:
self.summary_writer_sample.add_image('pred_shape', images_pred, self.step_count)
self.summary_writer_sample.add_mesh('pred_mesh', vertices = pred_vertices, global_step=self.step_count)
for loss_name, val in losses.items():
self.summary_writer_sample.add_scalar(loss_name, val, self.step_count)
``` |
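The weak-perspective to full-perspective camera conversion used twice in the trainer ([s, tx, ty] -> [tx, ty, 2f / (res * s)]) can be isolated as below; this is a clarifying sketch, not part of the original class:
```python
import torch

def weak_to_full_translation(cam, focal_length, img_res):
    # cam: (B, 3) weak-perspective parameters [s, tx, ty]
    # returns: (B, 3) camera translation [tx, ty, tz], with tz = 2f / (res * s)
    return torch.stack([cam[:, 1],
                        cam[:, 2],
                        2 * focal_length / (img_res * cam[:, 0] + 1e-9)], dim=-1)

cam = torch.tensor([[1.0, 0.1, -0.2]])
print(weak_to_full_translation(cam, 5000.0, 224))  # tz = 2*5000/224 ~ 44.64
```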
{
"source": "0orja/Pointnet_Pointnet2_pytorch",
"score": 2
} |
#### File: Pointnet_Pointnet2_pytorch/data_utils/S3DISDataLoader.py
```python
import os
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset
class S3DISDataset(Dataset):
def __init__(self, split='train', data_root='trainval_fullarea', num_point=4096, test_area=5, block_size=1.0, sample_rate=1.0, transform=None):
super().__init__()
self.num_point = num_point
self.block_size = block_size
self.transform = transform
rooms = sorted(os.listdir(data_root))
rooms = [room for room in rooms if 'Area_' in room]
if split == 'train':
rooms_split = [room for room in rooms if not 'Area_{}'.format(test_area) in room]
else:
rooms_split = [room for room in rooms if 'Area_{}'.format(test_area) in room]
self.room_points, self.room_labels = [], []
self.room_coord_min, self.room_coord_max = [], []
num_point_all = []
labelweights = np.zeros(13)
for room_name in tqdm(rooms_split, total=len(rooms_split)):
room_path = os.path.join(data_root, room_name)
room_data = np.load(room_path) # xyzrgbl, N*7
points, labels = room_data[:, 0:6], room_data[:, 6] # xyzrgb, N*6; l, N
tmp, _ = np.histogram(labels, range(14))
labelweights += tmp
coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
self.room_points.append(points), self.room_labels.append(labels)
self.room_coord_min.append(coord_min), self.room_coord_max.append(coord_max)
num_point_all.append(labels.size)
labelweights = labelweights.astype(np.float32)
labelweights = labelweights / np.sum(labelweights)
self.labelweights = np.power(np.amax(labelweights) / labelweights, 1 / 3.0)
print(self.labelweights)
sample_prob = num_point_all / np.sum(num_point_all)
num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
room_idxs = []
for index in range(len(rooms_split)):
room_idxs.extend([index] * int(round(sample_prob[index] * num_iter)))
self.room_idxs = np.array(room_idxs)
print("Totally {} samples in {} set.".format(len(self.room_idxs), split))
def __getitem__(self, idx):
room_idx = self.room_idxs[idx]
points = self.room_points[room_idx] # N * 6
labels = self.room_labels[room_idx] # N
N_points = points.shape[0]
while (True):
center = points[np.random.choice(N_points)][:3]
block_min = center - [self.block_size / 2.0, self.block_size / 2.0, 0]
block_max = center + [self.block_size / 2.0, self.block_size / 2.0, 0]
point_idxs = np.where((points[:, 0] >= block_min[0]) & (points[:, 0] <= block_max[0]) & (points[:, 1] >= block_min[1]) & (points[:, 1] <= block_max[1]))[0]
if point_idxs.size > 1024:
break
if point_idxs.size >= self.num_point:
selected_point_idxs = np.random.choice(point_idxs, self.num_point, replace=False)
else:
selected_point_idxs = np.random.choice(point_idxs, self.num_point, replace=True)
# normalize
selected_points = points[selected_point_idxs, :] # num_point * 6
current_points = np.zeros((self.num_point, 9)) # num_point * 9
current_points[:, 6] = selected_points[:, 0] / self.room_coord_max[room_idx][0]
current_points[:, 7] = selected_points[:, 1] / self.room_coord_max[room_idx][1]
current_points[:, 8] = selected_points[:, 2] / self.room_coord_max[room_idx][2]
selected_points[:, 0] = selected_points[:, 0] - center[0]
selected_points[:, 1] = selected_points[:, 1] - center[1]
selected_points[:, 3:6] /= 255.0
current_points[:, 0:6] = selected_points
current_labels = labels[selected_point_idxs]
if self.transform is not None:
current_points, current_labels = self.transform(current_points, current_labels)
return current_points, current_labels
def __len__(self):
return len(self.room_idxs)
class ScannetDatasetWholeScene():
# prepare to give prediction on each points
def __init__(self, root, block_points=4096, split='test', test_area=5, stride=0.5, block_size=1.0, padding=0.001):
self.block_points = block_points
self.block_size = block_size
self.padding = padding
self.root = root
self.split = split
self.stride = stride
self.scene_points_num = []
assert split in ['train', 'test']
if self.split == 'train':
            self.file_list = [d for d in os.listdir(root) if d.find('Area_%d' % test_area) == -1]
else:
            self.file_list = [d for d in os.listdir(root) if d.find('Area_%d' % test_area) != -1]
self.scene_points_list = []
self.semantic_labels_list = []
self.room_coord_min, self.room_coord_max = [], []
for file in self.file_list:
data = np.load(root + file)
points = data[:, :3]
self.scene_points_list.append(data[:, :6])
self.semantic_labels_list.append(data[:, 6])
coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
self.room_coord_min.append(coord_min), self.room_coord_max.append(coord_max)
assert len(self.scene_points_list) == len(self.semantic_labels_list)
labelweights = np.zeros(13)
for seg in self.semantic_labels_list:
tmp, _ = np.histogram(seg, range(14))
self.scene_points_num.append(seg.shape[0])
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights / np.sum(labelweights)
self.labelweights = np.power(np.amax(labelweights) / labelweights, 1 / 3.0)
def __getitem__(self, index):
point_set_ini = self.scene_points_list[index]
points = point_set_ini[:,:6]
labels = self.semantic_labels_list[index]
coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
grid_x = int(np.ceil(float(coord_max[0] - coord_min[0] - self.block_size) / self.stride) + 1)
grid_y = int(np.ceil(float(coord_max[1] - coord_min[1] - self.block_size) / self.stride) + 1)
data_room, label_room, sample_weight, index_room = np.array([]), np.array([]), np.array([]), np.array([])
for index_y in range(0, grid_y):
for index_x in range(0, grid_x):
s_x = coord_min[0] + index_x * self.stride
e_x = min(s_x + self.block_size, coord_max[0])
s_x = e_x - self.block_size
s_y = coord_min[1] + index_y * self.stride
e_y = min(s_y + self.block_size, coord_max[1])
s_y = e_y - self.block_size
point_idxs = np.where(
(points[:, 0] >= s_x - self.padding) & (points[:, 0] <= e_x + self.padding) & (points[:, 1] >= s_y - self.padding) & (
points[:, 1] <= e_y + self.padding))[0]
if point_idxs.size == 0:
continue
num_batch = int(np.ceil(point_idxs.size / self.block_points))
point_size = int(num_batch * self.block_points)
replace = False if (point_size - point_idxs.size <= point_idxs.size) else True
point_idxs_repeat = np.random.choice(point_idxs, point_size - point_idxs.size, replace=replace)
point_idxs = np.concatenate((point_idxs, point_idxs_repeat))
np.random.shuffle(point_idxs)
data_batch = points[point_idxs, :]
                normalized_xyz = np.zeros((point_size, 3))
                normalized_xyz[:, 0] = data_batch[:, 0] / coord_max[0]
                normalized_xyz[:, 1] = data_batch[:, 1] / coord_max[1]
                normalized_xyz[:, 2] = data_batch[:, 2] / coord_max[2]
                data_batch[:, 0] = data_batch[:, 0] - (s_x + self.block_size / 2.0)
                data_batch[:, 1] = data_batch[:, 1] - (s_y + self.block_size / 2.0)
                data_batch[:, 3:6] /= 255.0
                data_batch = np.concatenate((data_batch, normalized_xyz), axis=1)
label_batch = labels[point_idxs].astype(int)
batch_weight = self.labelweights[label_batch]
data_room = np.vstack([data_room, data_batch]) if data_room.size else data_batch
label_room = np.hstack([label_room, label_batch]) if label_room.size else label_batch
                sample_weight = np.hstack([sample_weight, batch_weight]) if sample_weight.size else batch_weight
index_room = np.hstack([index_room, point_idxs]) if index_room.size else point_idxs
data_room = data_room.reshape((-1, self.block_points, data_room.shape[1]))
label_room = label_room.reshape((-1, self.block_points))
sample_weight = sample_weight.reshape((-1, self.block_points))
index_room = index_room.reshape((-1, self.block_points))
return data_room, label_room, sample_weight, index_room
def __len__(self):
return len(self.scene_points_list)
if __name__ == '__main__':
data_root = '/data/yxu/PointNonLocal/data/stanford_indoor3d/'
num_point, test_area, block_size, sample_rate = 4096, 5, 1.0, 0.01
point_data = S3DISDataset(split='train', data_root=data_root, num_point=num_point, test_area=test_area, block_size=block_size, sample_rate=sample_rate, transform=None)
print('point data size:', point_data.__len__())
print('point data 0 shape:', point_data.__getitem__(0)[0].shape)
print('point label 0 shape:', point_data.__getitem__(0)[1].shape)
import torch, time, random
manual_seed = 123
random.seed(manual_seed)
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
def worker_init_fn(worker_id):
random.seed(manual_seed + worker_id)
train_loader = torch.utils.data.DataLoader(point_data, batch_size=16, shuffle=True, num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
for idx in range(4):
end = time.time()
for i, (input, target) in enumerate(train_loader):
print('time: {}/{}--{}'.format(i+1, len(train_loader), time.time() - end))
end = time.time()
``` |
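Both loaders above derive class weights with the same cube-root rule, w_c = (max(p) / p_c)^(1/3), where p_c is a class's share of all points. A tiny sketch with made-up counts:
```python
import numpy as np

counts = np.array([9_000_000.0, 900_000.0, 90_000.0])  # hypothetical per-class point counts
freq = counts / counts.sum()
weights = np.power(freq.max() / freq, 1 / 3.0)
print(weights)  # ~[1.0, 2.15, 4.64]: rarer classes get larger, sub-linear weights
```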
{
"source": "0ortmann/broker-application-templates",
"score": 3
} |
#### File: app-to-app/multi-processing-python/receiver.py
```python
import time
import broker
import select
import multiprocessing
def broker_receiver(event_queue):
## demo receiver that is subscribed to the topic "demo/multi-processing-python"
ep = broker.Endpoint()
subscriber = ep.make_subscriber("demo/multi-processing-python")
ep.listen("127.0.0.1", 9999)
print("endpoint listening...")
while True:
fd_sets = select.select([subscriber.fd()], [], [])
        if not fd_sets[0]:
            print("boom. this is the end.")
            break  # nothing readable; stop instead of calling get() on a dead subscriber
        (_, data) = subscriber.get()
event = broker.bro.Event(data)
event_queue.put([event.name(), event.args()])
## we use a queue to let the broker_receiver process communicate back to the main loop
event_queue = multiprocessing.Queue()
## spawn receiver process such event processing and receiving are decoupled (as far as possible in python)
process = multiprocessing.Process(target=broker_receiver, args=[event_queue])
process.start()
## we will use a bunch of workers to handle incoming messages in batches. a worker handles a batch, reports the result and then goes away.
result_queue = multiprocessing.Queue()
## work results will be handled by another process
def handle_work_results(result_queue):
while True:
result = result_queue.get()
print("have result: {}".format(result))
process = multiprocessing.Process(target=handle_work_results, args=[result_queue])
process.start()
def process_batch(batch, result_queue):
my_sum = 0
for event in batch:
my_sum += event[1][0]
result_queue.put(my_sum)
## the main loop
PROCESS_BATCH_SIZE = 5000
worker_num = 0
while True:
## read a batch from the received messages.
batch = []
for i in range(PROCESS_BATCH_SIZE):
batch += [event_queue.get()]
print("started worker_num", worker_num)
worker_num += 1
worker = multiprocessing.Process(target=process_batch, args=[batch, result_queue])
worker.start()
``` |
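process_batch can be exercised without a broker peer, which is handy for testing the worker logic in isolation; the fake events below mimic the [name, args] shape the receiver process puts on the queue:
```python
from multiprocessing import Queue

result_q = Queue()
fake_batch = [["demo_event", [i]] for i in range(5)]  # same shape as queued events
process_batch(fake_batch, result_q)                   # run inline, no worker process
print(result_q.get())  # 10: the sum of each event's first argument
```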
{
"source": "0ortmann/graph_alert_correlation",
"score": 3
} |
#### File: graph_alert_correlation/gac/gac.py
```python
import numpy as np
import networkx as nx
def alert_similarity_graph(alerts, similarity_threshold=0.25):
'''
Builds the undirected alert similarity graph. Node labels are the alert UIDs
`alerts` should be a numpy.ndarray where each row has at least 5 fields: (uid, src_ip, src_prt, dst_ip, dst_prt)
'''
    assert alerts.shape[1] >= 5, 'Each alert row needs at least 5 fields: (uid, src_ip, src_prt, dst_ip, dst_prt).'
G = nx.Graph()
for alert_1 in alerts:
for alert_2 in alerts:
uid_1, uid_2 = alert_1[0], alert_2[0]
if uid_1 == uid_2: continue
if uid_1 not in G: G.add_node(uid_1)
if uid_2 not in G: G.add_node(uid_2)
sim = 0
for attr_index in range(1, 5):
if alert_1[attr_index] == alert_2[attr_index]: sim += 0.25
if sim > similarity_threshold: G.add_edge(uid_1, uid_2)
return G
def netflow_graph(alerts):
'''
Builds a directed graph. IPs are the nodes, direction indicates src and dst in an alert.
'''
G = nx.DiGraph()
for alert in alerts:
src, dst = alert[1], alert[3]
if src == dst: continue
if src not in G: G.add_node(src)
if dst not in G: G.add_node(dst)
G.add_edge(src, dst)
return G
def cluster_cliques(G, k=15):
'''
Convenience wrapper around nx.algorithms.community.k_clique_communities
'''
return nx.algorithms.community.k_clique_communities(G, k)
def get_alerts_by_uid(alerts, uid_list):
lookup = dict(zip(alerts[:,0], alerts))
result = list()
for uid in uid_list:
result.append(lookup[uid])
return result
def infer_label(directed_graph):
'''
Infers label (one-to-one, one-to-many, many-to-one, many-to-many) for the directed graph.
The graph should be a netflow_graph.
The formulae for the match certainty are directly taken from the GAC paper, page 5 section 3.3
Returns a tuple (match_certainty, label)
'''
attackers = list()
victims = list()
for node in directed_graph.nodes:
if directed_graph.in_degree(node) >= 1: victims.append(node)
if directed_graph.out_degree(node) >= 1: attackers.append(node)
V = len(directed_graph.nodes)
A = len(attackers)
T = len(victims)
oto = 1/3 * ( (V-A)/(V-1) + (V-T)/(V-1) + (V-abs(A-T))/V ) if V > 1 else 0
otm = 1/3 * ( (V-A)/(V-1) + T/(V-1) + abs(A-T)/(V-2) ) if V > 2 else 0
mto = 1/3 * ( A/(V-1) + (V-T)/(V-1) + abs(A-T)/(V-2) ) if V > 2 else 0
mtm = 1/3 * ( A/V + T/V + (V-abs(A-T))/V ) if V > 0 else 0
certainty, pattern_name = max((oto, 'one-to-one'), (otm, 'one-to-many'), (mto, 'many-to-one'), (mtm, 'many-to-many'), key=lambda v: v[0])
return (certainty, pattern_name, attackers, victims)
def gac_cluster(alerts, similarity_threshold=0.25, clique_size=15):
'''
`alerts` should be a numpy.ndarray where each row has at least 5 fields: (uid, src_ip, src_prt, dst_ip, dst_prt). Other fields are not used, but also not discarded.
Wrapper function to combine building the alert_similarity graph with the label inference from the flow graph.
    Returns labeled clusters of alerts, a list of tuples: (match_certainty, pattern_name, alerts_in_clique, attackers, victims). alerts_in_clique is a list of alert rows.
'''
g_attr = alert_similarity_graph(alerts, similarity_threshold)
cliques = cluster_cliques(g_attr, clique_size)
all_clique_uids = list()
for clique in list(cliques):
all_clique_uids.append(list(clique))
labeled_clusters = list()
for clique_uids in all_clique_uids:
alerts_in_clique = get_alerts_by_uid(alerts, clique_uids)
g_flow = netflow_graph(alerts_in_clique)
certainty, pattern_name, attackers, victims = infer_label(g_flow)
labeled_clusters.append((certainty, pattern_name, alerts_in_clique, attackers, victims))
return labeled_clusters
def gac_connect(alerts, similarity_threshold=0.25, clique_size=15):
pass
# TODO
``` |
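A hedged end-to-end sketch of gac_cluster on synthetic alerts (all addresses and ports made up): twenty sources hitting one destination share dst_ip and dst_port, so the similarity graph is complete and the flow graph labels the clique many-to-one:
```python
import numpy as np

alerts = np.array([[i, f"10.0.0.{i}", 1234, "10.0.1.1", 80] for i in range(20)],
                  dtype=object)  # rows: (uid, src_ip, src_prt, dst_ip, dst_prt)
for certainty, pattern, members, attackers, victims in gac_cluster(alerts, clique_size=5):
    print(pattern, round(certainty, 2), len(members), len(attackers), len(victims))
# expected: many-to-one 1.0 20 20 1
```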
{
"source": "0oshowero0/COVID19-urban-mobility-model",
"score": 3
} |
#### File: COVID19-urban-mobility-model/brazil/simulate_ode_fit_rio_BO.py
```python
import numpy as np
from multiprocessing import Pool
from datetime import datetime
from argparse import ArgumentParser
from COVID_Model import City
from pathlib import Path
import json
from bayes_opt import BayesianOptimization
import pandas as pd
import setproctitle
setproctitle.setproctitle('Rio_SEIR@hanzhenyu')
MULTI_PROCESSING = 20
def load_pop(data_dir):
pop = np.load(data_dir).reshape(-1, 1).astype('float32')
std = pop[pop>100].std()
mean = pop[pop>100].mean()
upper_bond = mean + 3*std
pop = np.where(pop>upper_bond, upper_bond, pop)
return pop
def load_cases(data_dir, name):
data = pd.read_csv(data_dir)
#cases = data[data['Município']==name].iloc[:,1:].to_numpy().reshape(-1, 1).astype('float32')
cases = data[data['Município']==name]
return cases
def setup_args(parser=None):
""" Set up arguments
return:
python dictionary
"""
if parser is None:
parser = ArgumentParser()
# Default Params
parser.add_argument('--city_name', default='Rio de Janeiro', help='City Name')
parser.add_argument('--save_dir', default='./simulated_results_RioDeJaneiro_2500/', help='Result Loc')
parser.add_argument('--population_data', default='./population/rioDeJaneiro_pop.npy', help='Loc of pop data')
parser.add_argument('--cases_data', default='./cases/Brazil_epidemic_district_timeline.csv', help='Loc of cases data')
parser.add_argument('--units', default=2500, help='Unit Num', type=int)
parser.add_argument('--unit_distance', default=1,help='Unit distance between blocks(km)', type=int)
parser.add_argument('--start_date', default='2021-01-01', help='Start Time')
parser.add_argument('--change_point', default='2021-04-26', help='Interval of cases')
parser.add_argument('--final_date', default='2021-07-31', help='End Time')
parser.add_argument('--sample_rate', default=4, help='Sample Rate of cases curve')
parser.add_argument('--Pi', default=5*2.7913484249081293e-05, help='transmission rate of I (to fit)', type=float)
parser.add_argument('--Pe', default=5*2.7913484249081293e-05, help='transmission rate of E (the same with Pi)', type=float)
parser.add_argument('--PE', default=0.3, help='probability of a health people to be E when get infected', type=float)
parser.add_argument('--e_to_i', default=1 / 5.2, help='probability of the E turn to I', type=float)
parser.add_argument('--i_to_r', default=1 / 14, help='recover rate of I', type=float)
parser.add_argument('--mobility', default=0.4, help='Mobility Param (to fit)', type=float)
parser.add_argument('--early_detect', default=0, help='early detect rate (to fit; to accelerate I to R)', type=float)
parser.add_argument('--self_quarantine', default=0, help='Self Quarantine of S (deprecated, not actually use)', type=float)
parser.add_argument('--ki_disc', default=1, help='mobility discount of I when moving (deprecated, not actually use)', type=float)
parser.add_argument('--ke_disc', default=1, help='mobility discount of E when moving (deprecated, not actually use)', type=float)
parser.add_argument('--Pi_disc', default=1, help='discount of transmission rate of I (deprecated, not actually use)', type=float)
parser.add_argument('--Pe_disc', default=1, help='discount of transmission rate of E (deprecated, not actually use)', type=float)
return parser
def multi_process_fit(process_i,fit_epoch):
parser = setup_args()
opt = vars(parser.parse_args())
output_dir = Path(opt['save_dir'])
output_dir.mkdir(exist_ok=True,parents=True)
################################################################################
# Load Data
pop_data = load_pop(opt['population_data'])
cases_data = load_cases(opt['cases_data'],opt['city_name'])
start_index = np.where(cases_data.columns == opt['start_date'])[0]
change_index = np.where(cases_data.columns == opt['change_point'])[0]
final_index = np.where(cases_data.columns == opt['final_date'])[0]
# Sampling epidemic curve
origin_x = np.linspace(0, cases_data.shape[1]-1, num=cases_data.shape[1]-1, endpoint=False)
num_new_points = int((cases_data.shape[1]-1)/opt['sample_rate'])
resample_x = np.linspace(0, cases_data.shape[1]-1, num=num_new_points, endpoint=False)
cases_resample = np.interp(x=resample_x, xp=origin_x, fp=cases_data.iloc[:,1:].to_numpy().reshape(-1))
new_start_index = int(start_index / opt['sample_rate'])
new_change_index = int(change_index / opt['sample_rate'])
new_final_index = int(final_index / opt['sample_rate'])
cases_data_processed = []
cases_data_processed.append(cases_resample[new_start_index:new_change_index])
cases_data_processed.append(cases_resample[new_change_index:new_final_index])
# Set bias of cases number
cases_bias = cases_resample[new_start_index]
# Set active cases
init_cases_num = np.diff(cases_data.iloc[:,(int(start_index)-3):int(start_index)]).sum()
opt['cases_bias'] = cases_bias
opt['init_cases_num'] = int(init_cases_num)
optimizers = []
# Fit first part
city = City(opt)
city.setPopCases(pop_data, cases_data_processed[0])
city.init_blocks(pop_data, manual_init_case=True)
pbounds = {'pi': (0, 0.0006), 'early_detect': (0, 1), 'mobility': (0, 0.0003)}
optimizer = BayesianOptimization(
f=city.fit,
pbounds=pbounds,
)
optimizer.maximize(
init_points=20,
n_iter=fit_epoch,
)
optimizers.append(optimizer)
# Fit second part
opt['Pi'] = optimizers[0].max['params']['pi']
opt['Pe'] = optimizers[0].max['params']['pi']
opt['early_detect'] = optimizers[0].max['params']['early_detect']
opt['mobility'] = optimizers[0].max['params']['mobility']
city = City(opt)
city.setPopCases(pop_data, cases_data_processed[0])
city.init_blocks(pop_data,manual_init_case=True)
S_number, E_number, I_number, R_number, new_spread = city.begin_simulate(len(cases_data_processed[0]),fit=True)
new_pop = city.get_blk_pop()
city.setPopCases(new_pop, cases_data_processed[1])
city.make_check_point(float(new_spread.cumsum()[-1]))
pbounds = {'pi': (0, 0.0006),'early_detect': (0, 1), 'mobility': (0, optimizer.max['params']['mobility'])}
optimizer = BayesianOptimization(
f=city.fit_second,
pbounds=pbounds
)
optimizer.maximize(
init_points=20,
n_iter=fit_epoch,
)
optimizers.append(optimizer)
# Forward
city = City(opt)
city.setPopCases(pop_data, cases_data_processed[0])
city.init_blocks(pop_data, manual_init_case=True)
opts = []
for optimizer in optimizers:
opt = {'Pi': optimizer.max['params']['pi'], 'early_detect': optimizer.max['params']['early_detect'],
'mobility': optimizer.max['params']['mobility']}
opts.append(opt)
new_spread = city.begin_simulate_multi_parted(opts, cases_data_processed,output_dir.joinpath('result_' + str(process_i).zfill(2) + '.png'),fit=False)
total_opt = {}
for i, opt in enumerate(opts):
total_opt['opt' + str(i)] = opt
with open(output_dir.joinpath('opt_params_' + str(process_i).zfill(2) + '.json'), 'w') as f:
json.dump(total_opt, f)
np.save(output_dir.joinpath('result_curve_' + str(process_i).zfill(2) + '.npy'), new_spread.reshape(-1))
if __name__ == "__main__":
fit_num = 40
p = Pool(MULTI_PROCESSING)
result = [p.apply_async(multi_process_fit, args=(i,200)) for i in range(fit_num)]
for i in result:
i.get()
print(datetime.now())
``` |
{
"source": "0/pathintmatmult",
"score": 3
} |
#### File: pathintmatmult/pathintmatmult/nmm.py
```python
from itertools import product
import numpy as np
from .constants import HBAR
from .tools import cached
class PIMM:
"""
Path Integrals via Matrix Multiplication
Base class for various kinds of path integral implementations.
"""
def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
beta: 'mol/kJ', num_links: '1'):
"""
Note:
When pot_f receives an N-dimensional array as input, it needs to map
over it, returning an (N-1)-dimensional array.
Note:
The "particles" are actually any Cartesian degrees of freedom. One
might have the same configuration (masses and grids) for a
3-dimensional 1-particle system as for a 1-dimensional 3-particle
system. Of course, the coordinate arrays must be interpreted
appropriately in each case (whether by the potential function or by
the user of the output density).
Parameters:
masses: Masses of the particles.
grid_ranges: Where the grids are truncated. Each grid is symmetric
about the origin.
grid_lens: How many points are on the grids.
beta: Propagation length of the entire path.
num_links: Number of links in the entire path.
pot_f: Potential experienced by the particles in some spatial
configuration.
"""
assert len(masses) == len(grid_ranges) == len(grid_lens), \
'Numbers of configuration items must match.'
assert all(m > 0 for m in masses), 'Masses must be positive.'
assert all(gr > 0 for gr in grid_ranges), 'Grids must have positive lengths.'
assert all(gl >= 2 for gl in grid_lens), 'Grids must have at least two points.'
assert beta > 0, 'Beta must be positive.'
assert num_links >= 2, 'Must have at least two links.'
self._masses = np.array(masses)
self._grid_ranges = np.array(grid_ranges)
self._grid_lens = np.array(grid_lens)
self._pot_f = pot_f
self._beta = beta
self._num_links = num_links
# For cached decorator.
self._cached = {}
@property
def masses(self) -> '[g/mol]':
return self._masses
@property
def grid_ranges(self) -> '[nm]':
return self._grid_ranges
@property
def grid_lens(self) -> '[1]':
return self._grid_lens
@property
def pot_f(self) -> '[nm] -> kJ/mol':
return self._pot_f
@property
def beta(self) -> 'mol/kJ':
return self._beta
@property
def num_links(self) -> '1':
return self._num_links
@property
@cached
def tau(self) -> 'mol/kJ':
"""
High-temperature propagator length.
"""
return self.beta / self.num_links
@property
@cached
def num_points(self) -> '1':
"""
Number of points in the coordinate vector.
"""
return np.prod(self.grid_lens)
@property
@cached
def grid(self) -> '[[nm]]':
"""
Vector of the positions corresponding to the grid points.
This is not a vector in the sense of a 1-dimensional array, because
each element is itself a vector of coordinates for each particle.
However, it can be thought of as the tensor product of the
1-dimensional position vectors.
"""
grids = [np.linspace(-gr, gr, gl) for (gr, gl) in zip(self.grid_ranges, self.grid_lens)]
result = np.array(list(product(*grids)))
assert result.shape == (self.num_points, len(self.masses))
return result
@property
@cached
def volume_element(self) -> 'nm^N':
"""
Effective volume taken up by each grid point.
"""
return np.prod(2 * self.grid_ranges / (self.grid_lens - 1))
@property
@cached
def pot_f_grid(self) -> '[kJ/mol]':
"""
Potential function evaluated on the grid.
"""
return self.pot_f(self.grid)
@property
@cached
def rho_tau(self) -> '[[1/nm^N]]':
"""
Matrix for the high-temperature propagator.
"""
prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau) # [1/nm^2]
prefactor_V = self.tau / 2 # mol/kJ
prefactor_front = np.sqrt(np.prod(prefactors_K) / np.pi) # 1/nm^N
K = np.empty((self.num_points, self.num_points)) # [[nm^2]]
V = np.empty_like(K) # [[kJ/mol]]
for i, q_i in enumerate(self.grid):
for j, q_j in enumerate(self.grid):
K[i, j] = np.sum(prefactors_K * (q_i - q_j) ** 2)
V[i, j] = self.pot_f_grid[i] + self.pot_f_grid[j]
return prefactor_front * np.exp(-K - prefactor_V * V)
@property
def density_diagonal(self):
raise NotImplementedError()
def expectation_value(self, property_f: '[nm] -> X') -> 'X':
"""
Expectation value of property_f.
Note:
This is only implemented for properties that are diagonal in the
position representation.
Note:
When property_f receives an N-dimensional array as input, it should
behave in the same manner as pot_f.
"""
return np.dot(self.density_diagonal, property_f(self.grid))
class PIFTMM(PIMM):
"""
Path Integral at Finite Temperature via Matrix Multiplication
Calculate the approximate thermal density matrix of a system comprised of
one or more particles in an arbitrary potential on a discretized and
truncated grid. The density matrix is determined via numerical matrix
multiplication of high-temperature matrices.
"""
@property
@cached
def rho_beta(self) -> '[[1/nm^N]]':
"""
Matrix for the full path propagator.
"""
power = self.num_links - 1
eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
return result / self.volume_element
@property
@cached
def density(self) -> '[[1]]':
"""
Normalized thermal density matrix.
"""
density = self.rho_beta
# Explicitly normalize.
density /= density.diagonal().sum()
return density
@property
@cached
def density_diagonal(self) -> '[1]':
"""
Normalized thermal diagonal density.
"""
return self.density.diagonal()
class PIGSMM(PIMM):
"""
Path Integral Ground State via Matrix Multiplication
Calculate the approximate ground state wavefunction of a system comprised
of one or more particles in an arbitrary potential on a discretized and
truncated grid. The wavefunction is determined via imaginary time
propagation from a trial function using numerical matrix multiplication.
"""
def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
beta: 'mol/kJ', num_links: '1', *,
trial_f: '[nm] -> 1' = None,
trial_f_diffs: '[[nm] -> 1/nm^2]' = None):
"""
See PIMM.__init__ for more details.
Note:
The convention used is that beta represents the entire path, so the
propagation length from the trial function to the middle of the path
is beta/2.
Note:
When trial_f receives an N-dimensional array as input, it should
behave in the same manner as pot_f.
Parameters:
trial_f: Approximation to the ground state wavefunction. If none is
provided, a uniform trial function is used.
trial_f_diffs: Second derivatives of trial_f. One function must be
specified for each particle.
"""
super().__init__(masses, grid_ranges, grid_lens, pot_f, beta, num_links)
assert num_links % 2 == 0, 'Number of links must be even.'
if trial_f is not None:
assert trial_f_diffs is not None, 'Derivatives must be provided.'
assert len(trial_f_diffs) == len(masses), 'Number of derivatives must match.'
self._trial_f = trial_f
self._trial_f_diffs = trial_f_diffs
@property
def trial_f(self) -> '[nm] -> 1':
return self._trial_f
@property
def trial_f_diffs(self) -> '[[nm] -> 1/nm^2]':
return self._trial_f_diffs
@property
@cached
def uniform_trial_f_grid(self) -> '[1]':
"""
Unnormalized uniform trial function evaluated on the grid.
"""
return np.ones(self.num_points)
@property
@cached
def trial_f_grid(self) -> '[1]':
"""
Unnormalized trial function evaluated on the grid.
"""
if self.trial_f is None:
# Default to a uniform trial function.
return self.uniform_trial_f_grid
return self.trial_f(self.grid)
@property
@cached
def uniform_trial_f_diffs_grid(self) -> '[[1/nm^2]]':
"""
Unnormalized uniform trial function derivatives evaluated on the grid.
"""
return np.zeros(self.grid.T.shape)
@property
@cached
def trial_f_diffs_grid(self) -> '[[1/nm^2]]':
"""
Unnormalized trial function derivatives evaluated on the grid.
"""
if self.trial_f is None:
# Default to a uniform trial function.
return self.uniform_trial_f_diffs_grid
result = np.empty(self.grid.T.shape)
for i, f in enumerate(self.trial_f_diffs):
result[i] = f(self.grid)
return result
@property
@cached
def rho_beta_half(self) -> '[[1/nm^N]]':
"""
Matrix for the half path propagator.
"""
power = self.num_links // 2
eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
return result / self.volume_element
@property
@cached
def rho_beta(self) -> '[[1/nm^N]]':
"""
Matrix for the full path propagator.
"""
return self.volume_element * np.dot(self.rho_beta_half, self.rho_beta_half)
@property
@cached
def ground_wf(self) -> '[1]':
"""
Normalized ground state wavefunction.
"""
ground_wf = np.dot(self.rho_beta_half, self.trial_f_grid)
# Explicitly normalize.
ground_wf /= np.sqrt(np.sum(ground_wf ** 2))
return ground_wf
@property
@cached
def density(self) -> '[[1]]':
"""
Normalized ground state density matrix.
"""
return np.outer(self.ground_wf, self.ground_wf)
@property
@cached
def density_diagonal(self) -> '[1]':
"""
Normalized ground state diagonal density.
"""
return self.ground_wf ** 2
@property
@cached
def energy_mixed(self) -> 'kJ/mol':
"""
Ground state energy calculated using the mixed estimator.
"""
ground_wf_full = np.dot(self.rho_beta, self.trial_f_grid) # [1/nm^N]
trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0) # [mol/g nm^2]
energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid) # kJ/mol nm^N
energy_K = np.dot(ground_wf_full, trial_f_diffs) # mol/g nm^(N+2)
normalization = np.dot(ground_wf_full, self.trial_f_grid) # 1/nm^N
return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
@property
@cached
def density_reduced(self) -> '[[1]]':
"""
Density matrix for the first particle, with the other traced out.
Only implemented for two-particle systems.
"""
assert len(self.masses) == 2
new_len = self.grid_lens[0]
other_len = self.grid_lens[1]
density_new = np.zeros((new_len, new_len))
for i in range(new_len):
for j in range(new_len):
for t in range(other_len):
# Avoid computing self.density here.
density_new[i, j] += self.ground_wf[other_len * i + t] * self.ground_wf[other_len * j + t]
return density_new
@property
@cached
def trace_renyi2(self) -> '1':
"""
Trace of the square of the reduced density matrix.
The 2nd Rényi entropy is the negative logarithm of this quantity.
"""
return np.linalg.matrix_power(self.density_reduced, 2).trace()
class PIGSIMM(PIGSMM):
"""
Path Integral Ground State via Implicit Matrix Multiplication
Calculate the approximate ground state wavefunction of a system comprised
of one or more particles in an arbitrary potential on a discretized and
truncated grid. The wavefunction is determined via imaginary time
propagation from a trial function using implicit numerical matrix-vector
multiplication, where the full density matrix is never constructed.
"""
@property
def rho_tau(self):
# We don't build any (full) matrices!
raise NotImplementedError()
@property
def rho_beta_half(self):
raise NotImplementedError()
@property
def rho_beta(self):
raise NotImplementedError()
def _propagate_trial(self, start_grid: '[1]', power: '1') -> '[1]':
"""
Multiply start_grid by (rho_tau ** power).
"""
prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau) # [1/nm^2]
pot_exp = np.exp(-0.5 * self.tau * self.pot_f_grid) # [1]
temp_wf1 = start_grid.copy() # [1]
temp_wf2 = np.zeros_like(temp_wf1) # [1]
for _ in range(power):
temp_wf1 *= pot_exp
for q, wf in zip(self.grid, temp_wf1):
# The temporary array here is the same shape as self.grid.
temp_wf2 += np.exp(-np.sum(prefactors_K * (self.grid - q) ** 2, axis=1)) * wf
temp_wf2 *= pot_exp
# Explicitly normalize at each step for stability.
temp_wf1 = temp_wf2 / np.sqrt(np.sum(temp_wf2 ** 2))
temp_wf2 = np.zeros_like(temp_wf1)
return temp_wf1
@property
@cached
def ground_wf(self) -> '[1]':
"""
Normalized ground state wavefunction.
"""
return self._propagate_trial(self.trial_f_grid, self.num_links // 2)
@property
def density(self):
raise NotImplementedError()
@property
@cached
def energy_mixed(self) -> 'kJ/mol':
"""
Ground state energy calculated using the mixed estimator.
"""
ground_wf_full = self._propagate_trial(self.ground_wf, self.num_links // 2) # [1]
trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0) # [mol/g nm^2]
energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid) # kJ/mol
energy_K = np.dot(ground_wf_full, trial_f_diffs) # mol/g nm^2
normalization = np.dot(ground_wf_full, self.trial_f_grid) # 1
return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
```
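A short usage sketch (not part of the repo) tying these classes together: it computes ground-state properties of a single particle in a harmonic well, using the `harmonic_potential` helper from potentials.py below. All parameter values are illustrative assumptions.
```python
from pathintmatmult.nmm import PIGSMM
from pathintmatmult.potentials import harmonic_potential

# Illustrative parameters: one particle, one Cartesian degree of freedom.
pot = harmonic_potential(k=1.0)  # kJ/mol nm^2 (assumed value)
pigs = PIGSMM(
    masses=[1.0],       # g/mol
    grid_ranges=[1.0],  # nm; grid spans [-1, 1]
    grid_lens=[64],
    pot_f=pot,
    beta=10.0,          # mol/kJ, length of the entire path
    num_links=100,      # must be even for PIGS
)

print(pigs.energy_mixed)                    # mixed-estimator ground state energy
print(pigs.density_diagonal.sum())          # ~1.0 after normalization
print(pigs.expectation_value(lambda q: q[..., 0] ** 2))  # <x^2>
```
PIGSIMM accepts the same arguments and trades speed for memory by never materializing the full rho_tau matrix, which suits finer grids.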
#### File: pathintmatmult/pathintmatmult/potentials.py
```python
import numpy as np
def free_particle_potential() -> 'nm -> kJ/mol':
"""
Free particle potential.
"""
def free_particle(q: 'nm') -> 'kJ/mol':
# Remove the inner-most dimension.
return np.zeros(q.shape[:-1])
return free_particle
def harmonic_potential(k: 'kJ/mol nm^2' = None, m: 'g/mol' = None, w: '1/ps' = None) -> 'nm -> kJ/mol':
"""
Harmonic potential relative to the origin.
Note:
Either k or (m and w) must be specified.
Parameters:
k: Spring constant.
m: Mass of particle.
w: Angular frequency of oscillator.
"""
if k is not None:
force_constant = k # kJ/mol nm^2
elif m is not None and w is not None:
force_constant = m * w * w # kJ/mol nm^2
else:
assert False, 'Must provide either k or (m and w).'
def harmonic(q: 'nm') -> 'kJ/mol':
return force_constant * q[..., 0] * q[..., 0] / 2
return harmonic
``` |
{
"source": "0PEIN0/djaein",
"score": 2
} |
#### File: djaein/djaein_core/base_manager.py
```python
from django.db import models
from .exceptions import DatabaseGetException
class BaseManager(models.Manager):
def filter_active(self, *args, **kwargs):
return self.filter(*args,
**kwargs,
is_active=True)
def get_by_uuid(self,
uuid):
try:
model_instance = self.get(uuid=uuid)
except Exception as ex:
raise DatabaseGetException(code='core_model_manager_base_1')
return model_instance
class Meta:
abstract = True
```
#### File: djaein/djaein_core/base_views.py
```python
import sys
from rest_framework.generics import (ListCreateAPIView,
RetrieveUpdateDestroyAPIView)
from .generic_api_handler import GenericApiHandler
class BaseApiView(object):
HANDLER = GenericApiHandler()
OUTPUT_SERIALIZER = None
MODEL_CLASS = None
BULK_CREATE = False
PAYLOAD_SERIALIZER = None
ORDER_BY = 'id'
class Meta:
abstract = True
class BaseListCreateAPIView(BaseApiView, ListCreateAPIView):
def get_queryset(self,
user):
return self.MODEL_CLASS.objects.all().order_by(self.ORDER_BY)
def get(self,
request):
user = request.user
self.HANDLER.check_permission(method_name=sys._getframe().f_code.co_name,
user=user)
data = self.get_queryset(
user=user)
if hasattr(self, 'custom_get'):
data = self.custom_get(data=data)
return self.HANDLER.ok(data=data,
serializer=self.OUTPUT_SERIALIZER,
request=request,
is_listing=True,
many=True)
def post(self,
request):
user = request.user
self.HANDLER.check_permission(method_name=sys._getframe().f_code.co_name,
user=user)
response, data = self.HANDLER.check_serializer(
request=request, serializer_class=self.PAYLOAD_SERIALIZER)
if response:
return response
if hasattr(self, 'custom_post'):
data = self.custom_post(data=data)
if self.BULK_CREATE is True:
response = []
for item in data:
response.append(self.MODEL_CLASS.objects.create(**item))
else:
response = self.MODEL_CLASS.objects.create(**data)
return self.HANDLER.created(data=response,
serializer=self.OUTPUT_SERIALIZER,
request=request,
many=self.BULK_CREATE)
class Meta:
abstract = True
class BaseRetrieveUpdateDestroyAPIView(BaseApiView, RetrieveUpdateDestroyAPIView):
class Meta:
abstract = True
```
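A minimal sketch (not in the repo) of a concrete endpoint built on these base classes. It assumes the ToDo model defined further down and a plain DRF ModelSerializer; the import paths are inferred from the repo layout.
```python
from rest_framework import serializers

from djaein_core.base_views import BaseListCreateAPIView
from personal.models.to_do import ToDo  # assumed import path

class ToDoSerializer(serializers.ModelSerializer):
    class Meta:
        model = ToDo
        fields = ['id', 'task_name', 'is_completed']

class ToDoListCreateAPIView(BaseListCreateAPIView):
    MODEL_CLASS = ToDo
    OUTPUT_SERIALIZER = ToDoSerializer
    PAYLOAD_SERIALIZER = ToDoSerializer
    ORDER_BY = '-id'
```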
#### File: djaein/djaein_core/exceptions.py
```python
from .exception_type import ExceptionType
class BaseException(Exception):
def __init__(self,
code):
super().__init__()
self.error_type = ExceptionType.BASE
self.custom_code = code
class Meta:
abstract = True
class DatabaseGetException(BaseException):
def __init__(self,
code):
super().__init__(code=code)
self.error_type = ExceptionType.DATABASE_GET
```
#### File: personal/models/to_do.py
```python
from django.db import models
from djaein_core import BaseManager, BaseModel
class ToDoManager(BaseManager):
pass
class ToDo(BaseModel):
task_name = models.CharField(
verbose_name='Task Name',
help_text='Detailed task name.',
max_length=128,
)
is_completed = models.BooleanField(
verbose_name='Completed',
default=False,
help_text='Completed or not.'
)
def __str__(self):
return str(self.task_name)
objects = ToDoManager()
class Meta:
verbose_name = 'To Do'
verbose_name_plural = 'To Dos'
db_table = 'tasks_to_dos'
```
#### File: users/models/user.py
```python
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.db import models
from djaein_core import BaseManager, BaseModel
class UserManager(BaseUserManager, BaseManager):
def create_superuser(self,
email,
password):
user = self.model(email=email, is_superuser=True, is_staff=True)
user.set_password(password)
user.save()
user = self.get(email=user.email)
return user
class User(AbstractBaseUser, PermissionsMixin, BaseModel):
email = models.EmailField(
verbose_name='Email',
help_text='Email address of the user.',
unique=True,
)
first_name = models.CharField(
verbose_name='First Name',
max_length=64,
help_text='First name of the user.',
blank=True,
)
last_name = models.CharField(
verbose_name='Last Name',
max_length=64,
help_text='Last name of the user.',
blank=True,
)
is_staff = models.BooleanField(
verbose_name='Staff Status',
default=False,
help_text='Identify whether the user is a staff or not.',
)
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
return self.first_name
def __str__(self):
return str(self.email)
objects = UserManager()
USERNAME_FIELD = 'email'
class Meta:
verbose_name = 'User'
verbose_name_plural = 'Users'
db_table = 'user_users'
``` |
{
"source": "0penth3wind0w/Freddy-the-bot",
"score": 2
} |
#### File: 0penth3wind0w/Freddy-the-bot/app.py
```python
import os
from flask import Flask, request, abort, send_file, redirect
from linebot.exceptions import InvalidSignatureError
from linewrapper.handler import handler
app = Flask(__name__)
# Redirect to Github
@app.route('/')
def index():
return redirect("https://www.github.com/0penth3wind0w", code=302)
# Static file
@app.route('/image/carousel')
def get_carousel_img():
filename = 'image/carousel.jpg'
return send_file(filename, mimetype='image/jpeg')
@app.route('/image/QRcode')
def get_qrcode_img():
filename = 'image/QRcode.png'
return send_file(filename, mimetype='image/png')
@app.route('/image/wallpaper/<image_name>/')
def get_wallpaper(image_name):
filename = 'image/wallpaper/{0}.jpg'.format(image_name)
return send_file(filename, mimetype='image/jpeg')
# Line reply
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
# app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
if __name__ == "__main__":
app.run(host='0.0.0.0',port=os.environ['PORT'])
``` |
{
"source": "0phoff/sphinxcontrib-autodoc_doxygen",
"score": 2
} |
#### File: autodoc_doxygen/autosummary/__init__.py
```python
from __future__ import print_function, absolute_import, division
import re
import operator
import posixpath
import logging
from functools import reduce
from itertools import count, groupby
from docutils import nodes
from docutils.statemachine import StringList, ViewList
from sphinx import addnodes
from sphinx.ext.autosummary import Autosummary, autosummary_table
from sphinx.ext.autodoc.directive import DocumenterBridge, Options
from sphinx.environment.adapters.toctree import TocTree
from sphinx.util.matching import Matcher
from sphinx.locale import __
from .. import get_doxygen_root
from ..autodoc import DoxygenMethodDocumenter, DoxygenClassDocumenter
from ..xmlutils import format_xml_paragraph
logger = logging.getLogger(__name__)
def import_by_name(name, env=None, prefixes=None, i=0):
"""Get xml documentation for a class/method with a given name.
If there are multiple classes or methods with that name, you
can use the `i` kwarg to pick which one.
"""
if prefixes is None:
prefixes = [None]
if env is not None:
parents = env.ref_context.get('cpp:parent_key')
if parents is not None:
parent_symbols = [p[0].get_display_string() for p in parents]
prefixes.append('::'.join(parent_symbols))
tried = []
for prefix in prefixes:
try:
if prefix:
prefixed_name = '::'.join([prefix, name])
else:
prefixed_name = name
return _import_by_name(prefixed_name, i=i)
except ImportError:
tried.append(prefixed_name)
raise ImportError('no module named %s' % ' or '.join(tried))
def _import_by_name(name, i=0):
root = get_doxygen_root()
name = name.replace('.', '::')
if '::' in name:
xpath_query = (
'.//compoundname[text()="%s"]/../'
'sectiondef[@kind="public-func"]/memberdef[@kind="function"]/'
'name[text()="%s"]/..') % tuple(name.rsplit('::', 1))
m = root.xpath(xpath_query)
if len(m) > 0:
obj = m[i]
full_name = '.'.join(name.rsplit('::', 1))
return full_name, obj, full_name, ''
xpath_query = (
'.//compoundname[text()="%s"]/../'
'sectiondef[@kind="public-type"]/memberdef[@kind="enum"]/'
'name[text()="%s"]/..') % tuple(name.rsplit('::', 1))
m = root.xpath(xpath_query)
if len(m) > 0:
obj = m[i]
full_name = '.'.join(name.rsplit('::', 1))
return full_name, obj, full_name, ''
xpath_query = ('.//compoundname[text()="%s"]/..' % name)
m = root.xpath(xpath_query)
if len(m) > 0:
obj = m[i]
return (name, obj, name, '')
raise ImportError()
def get_documenter(obj, full_name):
if obj.tag == 'memberdef' and obj.get('kind') == 'function':
return DoxygenMethodDocumenter
elif obj.tag == 'compounddef':
return DoxygenClassDocumenter
raise NotImplementedError(obj.tag)
class DoxygenAutosummary(Autosummary):
def run(self):
self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
Options(), self.lineno, self.state)
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
tablenodes = self.get_table(items)
if 'toctree' in self.options:
dirname = posixpath.dirname(self.env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
excluded = Matcher(self.config.exclude_patterns)
filename_map = self.config.autosummary_filename_map
for name, sig, summary, real_name in items:
real_name = filename_map.get(real_name, real_name.replace('::', '.'))
docname = posixpath.join(tree_prefix, real_name)
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in self.env.found_docs:
if excluded(self.env.doc2path(docname, None)):
msg = __('autosummary references excluded document %r. Ignored.')
else:
msg = __('autosummary: stub file not found %r. '
'Check your autosummary_generate setting.')
logger.warning(msg, real_name)
continue
docnames.append(docname)
if docnames:
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docn) for docn in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode['caption'] = self.options.get('caption')
tablenodes.append(nodes.comment('', '', tocnode))
if 'toctree' not in self.options and 'caption' in self.options:
logger.warning(__('A captioned autosummary requires :toctree: option. ignored.'),
location=tablenodes[-1])
return tablenodes
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
items = []
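# Pair each duplicate name with its occurrence index so that repeated entries
# resolve to successive overloads via the `i` kwarg of import_by_name.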
names_and_counts = reduce(operator.add,
[tuple(zip(g, count())) for _, g in groupby(names)]) # type: List[(Str, Int)]
for name, i in names_and_counts:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('::')[-1]
try:
real_name, obj, parent, modname = import_by_name(name, env=env, i=i)
except ImportError:
logger.warning('failed to import %s' % name)
items.append((name, '', '', name))
continue
self.bridge.result = StringList() # initialize for each documenter
documenter = get_documenter(obj, parent)(self, real_name, id=obj.get('id'))
if not documenter.parse_name():
logger.warning('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
logger.warning('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if documenter.options.members and not documenter.check_module():
continue
# -- Grab the signature
sig = documenter.format_signature()
# -- Grab the summary
documenter.add_content(None)
brief = documenter.get_brief()
if brief is not None:
doc = list(documenter.process_doc(brief))
while doc and not doc[0].strip():
doc.pop(0)
if doc:
summary = doc[0].strip()
items.append((display_name, sig, summary, real_name))
continue
doc = list(documenter.process_doc([self.bridge.result.data]))
while doc and not doc[0].strip():
doc.pop(0)
# If there's a blank line, then we can assume the first sentence /
# paragraph has ended, so anything after shouldn't be part of the
# summary
for i, piece in enumerate(doc):
if not piece.strip():
doc = doc[:i]
break
# Try to find the "first sentence", which may span multiple lines
m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def get_tablespec(self):
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = 'll'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
return table, table_spec, append_row
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table, table_spec, append_row = self.get_tablespec()
for name, sig, summary, real_name in items:
qualifier = 'cpp:any'
# required for cpp autolink
full_name = real_name.replace('.', '::')
col1 = ':%s:`%s <%s>`' % (qualifier, name, full_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
class DoxygenAutoEnum(DoxygenAutosummary):
def get_items(self, names):
env = self.state.document.settings.env
self.name = names[0]
real_name, obj, parent, modname = import_by_name(self.name, env=env)
names = [n.text for n in obj.findall('./enumvalue/name')]
descriptions = [format_xml_paragraph(d) for d in obj.findall('./enumvalue/detaileddescription')]
return zip(names, descriptions)
def get_table(self, items):
table, table_spec, append_row = self.get_tablespec()
for name, description in items:
col1 = ':strong:`' + name + '`'
while description and not description[0].strip():
description.pop(0)
col2 = ' '.join(description)
append_row(col1, col2)
return [nodes.rubric('', 'Enum: %s' % self.name), table]
``` |
{
"source": "0phoff/streamlit",
"score": 2
} |
#### File: streamlit/statz_sample/statz_sample.py
```python
import streamlit as st
import pandas as pd
import numpy as np
@st.experimental_singleton
def singleton_dataframe(size: int) -> pd.DataFrame:
return pd.DataFrame(np.random.randn(size))
@st.experimental_memo
def memoize_dataframe(size: int) -> pd.DataFrame:
return pd.DataFrame(np.random.randn(size))
st.header("st.memoized dataframes")
memoize_dataframe(100)
memoize_dataframe(1_000)
st.write(memoize_dataframe(10_000))
st.header("st.singleton dataframes")
singleton_dataframe(100)
singleton_dataframe(1000)
st.write(singleton_dataframe(10_000))
st.header("InMemoryFileManager media files")
st.image("streamlit.png")
st.file_uploader("uploader")
``` |
{
"source": "0phoff/topcraft",
"score": 2
} |
#### File: 0phoff/topcraft/setup.py
```python
import setuptools as setup
def get_version_and_cmdclass(pkg_path):
"""
Load version.py module without importing the whole package.
Template code from miniver
"""
import os
from importlib.util import module_from_spec, spec_from_file_location
spec = spec_from_file_location('version', os.path.join(pkg_path, '_version.py'))
module = module_from_spec(spec)
spec.loader.exec_module(module)
return module.__version__, module.get_cmdclass(pkg_path)
def find_packages():
return ['top'] + ['top.'+p for p in setup.find_packages('top')]
version, cmdclass = get_version_and_cmdclass(r'top')
setup.setup(
name='top',
version=version,
cmdclass=cmdclass,
author='0phoff',
description='TOPCraft: Top Notch Python Debugging Tools',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/0phoff/topcraft',
packages=find_packages(),
install_requires=[
'memory_profiler',
],
)
```
#### File: topcraft/top/_memory.py
```python
import gc
import logging
import os
import statistics
from collections import defaultdict
from functools import wraps
from memory_profiler import Pipe, Process, choose_backend, _get_memory
from ._meta import AutoContextType, AutoDecoratorType, AutoIterType, combine_types
__all__ = ['Mem', 'Memit', 'MemTrend']
log = logging.getLogger(__name__)
class MemTimer(Process):
START = 0
SPLIT = 1
STOP = 2
def __init__(self, monitor_pid, interval, pipe, *args, include_children=False, **kw):
super().__init__(*args, **kw)
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.include_children = include_children
try:
self.backend = choose_backend('psutil_uss')
except KeyError:
self.backend = choose_backend()
def run(self):
start_mem = _get_memory(
self.monitor_pid,
self.backend,
include_children=self.include_children,
)
max_mem = 0
stop, split = False, False
self.pipe.send(self.START)
while True:
mem = _get_memory(
self.monitor_pid,
self.backend,
include_children=self.include_children,
)
max_mem = max(max_mem, mem)
if stop:
break
elif split:
split = False
self.pipe.send(max(0, max_mem - start_mem))
start_mem = _get_memory(
self.monitor_pid,
self.backend,
include_children=self.include_children,
)
max_mem = 0
if (self.pipe.poll(self.interval)):
value = self.pipe.recv()
if value == self.STOP:
stop = True
elif value == self.SPLIT:
split = True
self.pipe.send(max(0, max_mem - start_mem))
self.pipe.close()
class Mem(metaclass=combine_types(AutoContextType, AutoDecoratorType)):
"""
This class allows you to measure code memory usage.
You can use it in various different ways:
- start() - split() - stop() methods
- contextmanager
- function decorator
Args:
unit (b, kb, mb, gb):
Memory unit; Default 'Mb'
label (str):
Default label to use when stopping the profiler
verbose (boolean):
Whether to log times
store (dict-like):
Object to store timings instead of logging (should likely not be used by user)
poll_interval (number):
Seconds the monitoring process waits for input between memory measurements
Note:
When benchmarking a piece of code, it is usually a good idea to run it once first,
as it will initialize "global" memory.
The `Memit` class takes care of this automatically.
"""
_units = {
'b': 2 ** 20,
'kb': 2 ** 10,
'mb': 2 ** 0,
'gb': 2 ** -10,
}
def __init__(self, unit='Mb', label='memory', verbose=True, store=None, *, poll_interval=0):
self.label = label
self.verbose = verbose
self.poll_interval = poll_interval
self.unit = unit if unit.lower() in self._units else 'Mb'
self.unit_factor = self._units[self.unit.lower()]
if isinstance(store, dict):
self.store = store
self.stop = self._stop_store
self.split = self._split_store
self._memtimer = None
self._pipe = None
self.reset()
def reset(self):
self.value = None
self._splits = 0
if self._memtimer is not None:
self._pipe.send(MemTimer.STOP)
self._pipe.recv()
self._memtimer.join()
self._memtimer = None
self._pipe = None
def start(self):
self.reset()
pipe, self._pipe = Pipe()
self._memtimer = MemTimer(os.getpid(), self.poll_interval, pipe, include_children=True)
self._memtimer.start()
self._pipe.recv()
def split(self):
gc.collect()
self._pipe.send(MemTimer.SPLIT)
self._splits += 1
value = self._pipe.recv() * self.unit_factor
if self.verbose:
log.info('%s %d: %.3f%s', self.label, self._splits, value, self.unit)
return value
def stop(self):
gc.collect()
self._pipe.send(MemTimer.STOP)
self.value = self._pipe.recv() * self.unit_factor
if self.verbose:
label = self.label if self._splits == 0 else f'{self.label} {self._splits+1}'
log.info('%s: %.3f%s', label, self.value, self.unit)
self._memtimer.join()
self._memtimer = None
self._pipe = None
return self.value
def _split_store(self):
gc.collect()
self._pipe.send(MemTimer.SPLIT)
self._splits += 1
value = self._pipe.recv() * self.unit_factor
self.store[f'{self.label} {self._splits}'] = value
if self.verbose:
log.info('%s %d: %.3f%s', self.label, self._splits, value, self.unit)
def _stop_store(self):
gc.collect()
self._pipe.send(MemTimer.STOP)
self.value = self._pipe.recv() * self.unit_factor
label = self.label if self._splits == 0 else f'{self.label} {self._splits+1}'
self.store[label] = self.value
if self.verbose:
log.info('%s: %.3f%s', label, self.value, self.unit)
self._memtimer.join()
self._memtimer = None
self._pipe = None
def __enter__(self):
self.start()
def __exit__(self, ex_type, ex_value, trace):
self.stop()
return False
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
verbose = self.verbose
label = self.label
self.verbose = True
self.label = fn.__name__
with self:
retval = fn(*args, **kwargs)
self.verbose = verbose
self.label = label
return retval
return inner
def __del__(self):
self.reset()
class Memit(metaclass=AutoIterType):
"""
This class allows you to benchmark the memory of a certain piece of code, by runnning it multiple times.
It will automatically run your code once before starting the benchmark.
Args:
repeat (int):
Number of times to run the code
unit (b, kb, mb, gb):
Memory unit
label (str):
Default label to use when stopping the profiler
verbose (boolean):
Whether to log intermediate loop times
store (dict-like):
Object to store timings instead of logging (should likely not be used by user)
poll_interval (number):
Seconds the monitoring process waits for input between memory measurements
Note:
In order to get consistent results, we manually run the garbage collector after every loop.
We do not disable the automatic garbage collector, as a real python run would have it enabled,
thus allowing to remove potentially unused memory.
Example:
>>> for _ in Memit(10):
... # benchmark code
... pass
>>> for m in Memit(100):
... # setup code
... m.start()
... # first part
... m.split()
... # second part
... m.stop()
>>> for m in Memit(1000):
... # setup code
... with m:
... # benchmark code
... pass
"""
def __init__(self, repeat=1, unit='Mb', label='memory', verbose=False, store=None, *, poll_interval=0):
self.repeat = repeat
self.label = label
self.unit = unit if unit.lower() in Mem._units else 'Mb'
self.verbose = verbose
self.poll_interval = poll_interval
self.values = defaultdict(list)
if store is not None:
self.store = store
self.process_results = self._store_results
else:
self.process_results = self._print_results
def reset(self):
self.values = defaultdict(list)
def __iter__(self):
if len(self.values):
log.warning('self.values is not empty, consider calling reset between benchmarks')
bg = Mem(self.unit, self.label, False, {}, poll_interval=self.poll_interval)
fg = Mem(self.unit, self.label, False, {}, poll_interval=self.poll_interval)
for i in range(self.repeat+1):
with bg:
yield fg
if i == 0:
fg.reset()
fg.store = {}
gc.collect()
continue
if fg.store:
for name, value in fg.store.items():
self.values[name].append(value)
else:
self.values[self.label].append(bg.store[self.label])
if self.verbose:
log.info('Loop %d: %.3f%s', i, bg.store[self.label], self.unit)
fg.reset()
fg.store = {}
gc.collect()
self.process_results(self.values)
def _print_results(self, values):
maxlen = max(len(k) for k in values.keys()) + 1
for name, val in values.items():
name = name + ':'
if self.repeat > 1:
log.info(
f'{name:<{maxlen}} worst {max(val):.3f}{self.unit} '
f'[mean {statistics.fmean(val):.3f} ± {statistics.stdev(val):.3f}{self.unit}]',
)
else:
log.info(f'{name:<{maxlen}} worst {max(val):.3f}{self.unit}')
def _store_results(self, values):
if self.verbose:
self._print_results(values)
for name, val in values.items():
self.store[name] = max(val)
class MemTrend(metaclass=AutoIterType):
"""
This class allows you to benchmark memory trends of a certain piece of code,
by runnning it multiple times with an increasing variable.
It works by yielding from a `Memit` class multiple times, with a different increasing number each time.
Args:
trend_range (int or range):
The increasing number that is returned each loop
repeat (int):
Number of times we run each trend to get statistics
unit (b, kb, mb, gb):
Memory unit
label (str):
Default label to use for the profiler
verbose (boolean):
Whether to log intermediate loop times
store (dict-like):
Object to store timings instead of logging (should likely not be used by user)
poll_interval (number):
Seconds the monitoring process waits for input between memory measurements
Note:
In order to get consistent results, we manually run the garbage collector after every loop.
We do not disable the automatic garbage collector, as a real python run would have it enabled,
thus allowing to remove potentially unused memory.
Example:
>>> # Run some code using the `i` variable, which will range from [10,101) with a stepsize of 10
>>> # Note that the `m` variable is the same as returned by `Memit`
>>> for i, m in MemTrend(range(10,101,10)):
>>> pass
"""
def __init__(self, trend_range=10, repeat=1, unit='Mb', label='memory', verbose=True, *, poll_interval=0):
self.trend_range = trend_range if isinstance(trend_range, range) else range(trend_range)
self.repeat = repeat
self.label = label
self.unit = unit if unit.lower() in Mem._units else 'Mb'
self.verbose = verbose
self.poll_interval = poll_interval
self.values = defaultdict(list)
def reset(self):
self.values = defaultdict(list)
def __iter__(self):
self.reset()
memit = Memit(self.repeat, self.unit, self.label, False, {}, poll_interval=self.poll_interval)
trend_len = len(str(self.trend_range.stop))
for trend in self.trend_range:
for m in memit:
yield trend, m
for name, value in memit.store.items():
self.values[name].append(value)
if self.verbose:
trend_mems = ', '.join(f'{n}: {v:.3f}{self.unit}' for n, v in memit.store.items())
log.info(f'Trend {trend:>{trend_len}}: {trend_mems}')
memit.reset()
```
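A small usage sketch (not in the repo) exercising the three Mem styles described in its docstring. It assumes the `top` package re-exports Mem as `__all__` suggests; the numpy allocations exist only to produce measurable memory.
```python
import logging
import numpy as np
from top import Mem  # assumed re-export; otherwise: from top._memory import Mem

logging.basicConfig(level=logging.INFO)

# 1) Explicit start/split/stop.
m = Mem(unit='Mb', label='alloc')
m.start()
a = np.zeros((1000, 1000))
m.split()   # logs "alloc 1: ...Mb"
b = np.zeros((2000, 2000))
m.stop()    # logs "alloc 2: ...Mb"

# 2) As a context manager (AutoContextType also allows a bare `with Mem:`).
with Mem(label='ctx'):
    c = np.ones((500, 500))

# 3) As a decorator (AutoDecoratorType also allows a bare `@Mem`).
@Mem
def build():
    return np.arange(10_000_000)

build()
```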
#### File: topcraft/top/_meta.py
```python
import logging
from collections import deque
__all__ = ['AutoContextType', 'AutoDecoratorType', 'AutoIterType', 'combine_types']
log = logging.getLogger(__name__)
types = {}
class AutoDecoratorType(type):
"""
This metaclass allows to use class-based decorators without actually instantiating them.
When used, it will automatically create your object (with default args) and wrap your function.
Note:
This metaclass will not work for decorator classes that take a single callable argument during initialization.
Example:
>>> class CustomClass(metaclass=AutoDecoratorType):
... # TODO: implement __init__ and provide default values for each argument!
... # TODO: implement __call__ to create your decorator
... pass
>>> # Use as decorator, without creating a class instance (no braces)
>>> @CustomClass
... def func():
... pass
"""
def __call__(cls, *args, **kwargs):
if len(args) == 1 and callable(args[0]):
obj = cls()
return obj(args[0])
return super().__call__(*args, **kwargs)
class AutoContextType(type):
"""
This metaclass allows to use class-based context managers without actually instantiating them.
When used, it will automatically create your object (with default args) and start the context manager.
Example:
>>> class CustomClass(metaclass=AutoContextType):
... # TODO: implement __init__ and provide default values for each argument!
... # TODO: implement __enter__ and __exit__ to create your contextmanager
... pass
>>> # Use as contextmanager, without creating a class instance (no braces)
>>> with CustomClass:
... pass
"""
def __init__(cls, *args, **kwargs):
cls.__instances = deque()
super().__init__(*args, **kwargs)
def __enter__(cls):
obj = cls()
cls.__instances.append(obj)
return obj.__enter__()
def __exit__(cls, ex_type, ex_value, trace):
obj = cls.__instances.pop()
return obj.__exit__(ex_type, ex_value, trace)
class AutoIterType(type):
"""
This metaclass allows to use class-based iterators without actually instantiating them.
When used, it will automatically create your object (with default args) and return your iterator.
Example:
>>> class CustomClass(metaclass=AutoIterType):
... # TODO: implement __init__ and provide default values for each argument!
... # TODO: implement __iter__to create your iterator
... pass
>>> # Use as iterator, without creating a class instance (no braces)
>>> generator = iter(CustomClass)
>>> for values in CustomClass:
... pass
"""
def __iter__(cls):
obj = cls()
return obj.__iter__()
def combine_types(*args):
"""
This function allows to combine 2 or more metaclasses together.
It will cache this result so fetching the same combined metaclass multiple times
will not result in a different class construction.
Args:
*args (metaclasses): Metaclasses to combine
Note:
We rely on class.__name__ to uniquely identify types.
Beware that if you have a type in another module with the same name, this will break everything.
Example:
>>> # Combine AutoContext and AutoIter
>>> class CustomClass(metaclass=combine_types(AutoContextType, AutoIterType)):
... pass
"""
global types
name = ''.join(a.__name__ for a in args)
if name not in types:
types[name] = type(name, args, {})
return types[name]
``` |
{
"source": "0ptik41/PoolParty",
"score": 3
} |
#### File: PoolParty/poolparty/main.py
```python
from threading import Thread
import server
import utils
import client
import time
import json
import sys
import os
class ControlCenter:
def __init__(self):
# check for peer list
if utils.check_peer_file():
self.nodes = utils.load_peers()
def main():
if '-serve' in sys.argv:
# Start Server to query nodes
backend = server.Server()
# Spin up Local Web server
# Open Browser view
if __name__ == '__main__':
main()
```
#### File: PoolParty/poolparty/node.py
```python
from threading import Thread
import client as cl
import storage
import socket
import utils
import json
import time
import os
class Node:
inbound = 2424
def __init__(self, peers):
self.pool = peers
self.actions = {'AddPeer': self.add_peer,
'HashVal': self.hashdump,
'Uptime': self.uptime}
self.memory = self.check_memory()
self.hostname = os.getlogin()
self.os = os.name
self.uptime = 0.0
self.start = time.time()  # referenced by the Uptime handler
self.running = True
# get file hashes of shares
self.shares = self.setup_shares()
serve = Thread(target=self.run_backend, args=())
serve.setDaemon(True)
serve.start()
def check_memory(self):
free_mem = utils.cmd('free --kilo',False)
mem_labels = free_mem[0]
mem_labels = list(filter(None,mem_labels.split(' ')))
mem_free = list(filter(None,free_mem[1].split(' ')))
mem_free.pop(0)
memory_data = {}
i = 0
for label in mem_labels:
memory_data[label] = mem_free[i]
i += 1
return memory_data
def set_uptime(self,new_dt):
self.uptime = new_dt
def setup_shares(self):
hashes = {}
if not os.path.isdir('.shares/'):
os.mkdir('.shares')
if not os.path.isdir('received'):
os.mkdir('received')
else:
# os.system('mv received/* .shares/')
for f in os.listdir('received/'):
fn = '%s/received/%s' % (os.getcwd(), f)
fhash = utils.cmd('sha256sum %s' % fn,False).pop().split(' ')[0]
hashes[fn] = fhash
for fl in os.listdir('.shares'):
fn = '%s/.shares/%s' % (os.getcwd(), fl)
fhash = utils.cmd('sha256sum %s' % fn,False).pop().split(' ')[0]
hashes[fn] = fhash
return hashes
def update_shares(self):
self.shares = self.setup_shares()
print('[-] %d shared files ' % len(self.shares.keys()))
def run_backend(self):
print('[-] Backend Server Listening on 0.0.0.0:%d'%self.inbound)
s = utils.create_listener(self.inbound)
iteration = 0
try:
while self.running:
try:
c, i = s.accept()
c = self.handler(c,i)
c.close()
# update shares
self.shares = self.setup_shares()
# check if peers have the same shares
except socket.error:
print('[!!] Connection error with a client')
pass
iteration += 1
except KeyboardInterrupt:
self.running = False
pass
def add_peer(self, sock, args):
addr = args[0]
if addr not in self.pool:
self.pool.append(addr)
print('[+] Added peer %s' % addr)
sock.send(b'[+] Peer Added')
else:
sock.send(b'[x] Peer is known')
return sock
def uptime(self, sock, args):
dt = time.time() - self.start
ut = '[-] Uptime: %d seconds' % dt
sock.send(ut.encode('utf-8'))
self.set_uptime(dt)
return sock
def hashdump(self, sock, args):
hdata = json.dumps(self.shares).encode('utf-8')
sock.send(hdata)
return sock
def distribute_shared_files(self):
for peer in self.pool:
# Get files from this peer
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((peer, 2424))
s.send(b'HashVal :::: null')
try:
rmt_files = json.loads(s.recv(2048))
except ValueError:
rmt_files = {}
pass
for rf in rmt_files:
rhash = rmt_files[rf]
if rhash not in self.shares.values():
print('[o] %s has a file I dont [%s]'%(peer,rf))
for file in self.shares.keys():
recipient = storage.distribute(file, self.pool)
cl.send_file(file, recipient, 4242)
s.close()
def handler(self, c, i):
request = c.recv(1024).decode('utf-8')
try:
api_req = request.split(' :::: ')[0]
params = request.split(' :::: ')[1].split(',')
if api_req in self.actions.keys():
c = self.actions[api_req](c, params)
except IndexError:
pass
return c
```
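A hypothetical client for the backend above (not in the repo); requests follow the `Action :::: arg1,arg2` convention parsed by handler(), and the address is an assumption.
```python
import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 2424))   # Node.inbound
s.send(b'Uptime :::: null')      # valid actions: AddPeer, HashVal, Uptime
print(s.recv(1024).decode('utf-8'))
s.close()
```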
#### File: PoolParty/poolparty/storage.py
```python
import numpy as np
import hashlib
import random
import utils
import os
def hashstring(msgin):
h = hashlib.sha256()
h.update(bytes(msgin))
return h.digest()
def hashbins(hashval, nbins):
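# Split the 32-byte SHA-256 digest into nbins chunks and reduce each chunk to
# an integer checksum; distribute() picks the peer whose chunk scores highest.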
binning = []
if nbins > 32: nbins = 32
chunksz = int(32/nbins)
bins = np.array(range(0,len(hashval)+chunksz, chunksz))
bins[1:] = bins[1:]-1
i = 0
for i in range(len(bins)):
if i > 0:
b = hashval[bins[i-1]:bins[i]]
binning.append(checksum(b))
i += 1
return binning
def checksum(hstring):
total = 0
for l in list(hstring):
if type(l)==str:
total += ord(l)
elif type(l)==int:
total += l
return total
def distribute(file,nodes):
if os.path.isfile(file):
hval = hashstring(open(file,'rb').read())
bins = hashbins(hval, len(nodes))
return nodes[np.nonzero(np.array(bins == np.max(bins), dtype=int))[0][0]]
else:
return ''
``` |
{
"source": "0ptik41/Tools",
"score": 3
} |
#### File: Crawling/Crypto/data_collector.py
```python
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from requests import Request, Session
import requests
from threading import Thread
import string
import utils
import time
import json
import sys
import os
btc_api = 'api.coindesk.com/v1/bpi/currentPrice/USD'
ltc_api = 'api.blockchain.com/v3/exchange/tickers/LTC-USD'
xmr_api = 'min-api.cryptocompare.com/data/price?fsym=XMR&tsyms=USD'
big_api = api = 'pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
def get_btc_price():
link = 'https://%s' % btc_api
data = json.loads(requests.get(link).text)
return data['bpi']['USD']['rate_float']
def get_ltc_price():
link = 'https://%s'%ltc_api
return json.loads(requests.get(link).text)
def get_xmr_price():
link = 'https://%s'%xmr_api
return json.loads(requests.get(url=link).text)['USD']
def pull_market():
url = 'https://%s' % big_api
parameters = {'start':'1','limit':'5000','convert':'USD'}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': 'GET_AN_API_KEY_FROM_COINMARKETCAP',
}
session = Session()
session.headers.update(headers)
try:
response = session.get(url, params=parameters)
data = json.loads(response.text)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print('[!] Market request failed: %s' % e)
data = {}
return data
def parse_full_market():
data = pull_market()
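# NOTE: these positional indices assume a fixed market-cap ordering in the
# API response, so they will drift as rankings change.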
market = {'btc': data['data'][0],
'eth': data['data'][1],
'fil': data['data'][3],
'ltc': data['data'][7],
'dsh': data['data'][43],
'bat': data['data'][59],
'doge': data['data'][15],
'mana': data['data'][67]}
return market
def parse_cmc_data(dat):
price = dat['quote']['USD']['price']
return price
def collect_data():
running = True
crypto = {}
collected = 0
try:
while running:
ld, lt = utils.create_timestamp()
mkt = parse_full_market()
print('[*] Market Data Collected [%s - %s]' % (ld,lt))
parsed = {}
for asset in mkt.keys():
value = parse_cmc_data(mkt[asset])
print('- %s is $%f' % (asset, value))
parsed[asset] = value
crypto[ld+' '+lt] = parsed
if collected%10==0:
print('[*] Backing Up Data collected...')
open('crypto_market.json','w').write(json.dumps(crypto))
time.sleep(60)
collected += 1
except KeyboardInterrupt:
running = False
pass
def main():
if not os.path.isdir('Data'):
os.mkdir('Data')
if os.path.isfile('crypto_market.json'):
bkup = utils.create_random_filename('.json')
os.system('cp crypto_market.json Data/%s' % bkup)
collect_data()
if __name__ == '__main__':
main()
```
#### File: Crawling/OSINT/data_loader.py
```python
import base64
import zlib
import json
import time
import sys
import os
def usage():
print('Usage: $ python %s [mode] user@ip:/path/to/file')
def merge_logs(fileA, fileB):
data1 = json.loads(open(fileA,'r').read())
data2 = json.loads(open(fileB,'r').read())
data3 = dict(data1)  # copy so data1 is not mutated
for k in data2.keys(): data3[k] = data2[k]
return data3
if 'add' in sys.argv and len(sys.argv) >= 3:
try:
data_add = sys.argv[-1]
user = data_add.split(':')[0]
fin = data_add.split(':')[1]
except IndexError:
pass
usage()
exit()
if user.split('@')[1] == '127.0.0.1':
print('[*] Loading Local file %s' % fin)
if os.path.isfile(fin):
raw_data = json.loads(open(fin, 'r').read())
print(len(raw_data.keys()))
else:
print('[*] Loading Remote file %s' % fin)
lcopy = fin.split('/')[-1]
if os.path.isfile(lcopy):
print('[!] A local copy of %s exists. Backing it up...' % fin)
os.system('mv %s %s' % (lcopy, 'backup_'+lcopy))
cmd = "sftp %s:%s <<< $'get %s'" % (user, fin, 'data.json')
print(cmd)
os.system('echo "%s" | /bin/bash' % cmd)
raw_data = json.loads(open(lcopy, 'r').read())
print(len(raw_data.keys()))
if 'combine' in sys.argv:
if '*' in sys.argv and len(sys.argv)== 3:
print('[*] Combining all %s files' % sys.argv[2])
exit()
elif len(sys.argv) == 4:
print('[*] Combining %s and %s into one file' % (sys.argv[2],sys.argv[3]))
master = merge_logs(sys.argv[2], sys.argv[3])
open('combined.json', 'w').write(json.dumps(master))
print('[*] %d Total Entries in Combined JSON' % len(master.keys()))
```
#### File: code/Defensive/endpt.py
```python
from threading import Thread
import utils
import json
import time
import sys
import os
def check_connections():
# Check local tcp/udp connections coming from this machine
# based on various TCP/IP connection states
connections = {"ESTABLISHED":{'tcp':[], 'udp':[]},
"TIME_WAIT": {'tcp':[], 'udp':[]},
"CLOSE_WAIT": {'tcp':[], 'udp':[]},
"LISTEN": {'tcp':[], 'udp':[]}}
for line in utils.cmd('netstat -antup',False):
# Find connections by state
for STATE in connections.keys():
if len(line.split(STATE))>1:
items = list(filter(len, line.split(STATE)[0].split(' ')))
if items[0] not in connections[STATE].keys():
print('[!] Unrecognized Protocol %s' % items[0])
else:
connections[STATE][items[0]].append([items[-2],items[-1]])
return connections
class Monitor:
def __init__(self, timeout=10):
self.sdate, self.stime = utils.create_timestamp()
self.log = self.create_log()
self.start = time.time()
self.interval = timeout
self.running = True
def create_log(self):
if not os.path.isdir('.logs'):
os.mkdir('.logs')
header = '[+] System Monitor Started [%s -%s]\n'%(self.sdate,self.stime)
fn = os.getcwd()+'/.logs/'+self.sdate.replace('/','')+'_'+self.stime.replace(':','')+'.log'
open(fn,'w').write(header)
print(header)
return fn
def shutdown(self):
ld, lt = utils.create_timestamp()
msg = '[!] Shutting Down [%s - %s]\n' % (ld, lt)
print(msg); self.running = False
open(self.log,'a').write(msg)
def add_connection_data(self, connections):
ld, lt = utils.create_timestamp()
msg = '='*80+'\n'+'[+] Connection Data [%s - %s]:'%(ld,lt)
open(self.log,'a').write(msg)
open(self.log,'a').write(json.dumps(connections))
open(self.log,'a').write('\n'+'='*80+'\n')
def run(self):
try:
while self.running:
# Log Connections
self.add_connection_data(check_connections())
# Back up file if it gets too large?
# Sleep
time.sleep(self.interval)
except KeyboardInterrupt:
self.shutdown()
pass
if __name__ == '__main__':
logger = Monitor()
logger.run()
```
#### File: code/Images/conv.py
```python
import matplotlib.pyplot as plt
import os, sys, ppm
import numpy as np
import string
def color2bw(imarr):
dims = imarr.shape
imout = np.zeros((dims[0],dims[1]))
print(dims[0],dims[1])
for x in range(dims[0]):
for y in range(dims[1]):
r = imarr[x,y,0]
g = imarr[x,y,1]
b = imarr[x,y,2]
imout[x,y] = ((r & g & b))
return imout
def ascii_art(fname):
ascii = list(string.printable.replace('\n', ''))
ascii.remove('\r')
ascii.remove('\t')
ext = fname.split('.')[-1]
if ext == 'ppm':
arr = ppm.ppm2arr(fname)
else:
arr = np.array(plt.imread(fname))
bw = color2bw(arr)
vals = [ord(n) for n in ascii]
mapping = np.linspace(min(vals), max(vals), len(vals))
# Rescale pixel intensities onto the ordinal range, then pick the nearest
# printable character for each pixel and join the rows into lines of text.
scaled = np.interp(bw, (bw.min(), bw.max()), (min(vals), max(vals)))
idx = np.abs(mapping[None, None, :] - scaled[:, :, None]).argmin(axis=2)
return '\n'.join(''.join(ascii[i] for i in row) for row in idx)
if '-c2k' in sys.argv and len(sys.argv)>1:
ext = sys.argv[2].split('.')[1]
if ext == 'ppm':
arr = ppm.ppm2arr(sys.argv[-1])
else:
arr = np.array(plt.imread(sys.argv[2]))
bw = color2bw(arr)
plt.imshow(bw)
plt.show()
```
#### File: code/Scanning/scanner.py
```python
from threading import Thread
import multiprocessing
import socket
import utils
import time
import sys
import os
def check_args():
options = {'targets': [],
}
if len(sys.argv)<2:
print('Usage: python scanner.py ip/hostname options')
exit()
if 3 > len(sys.argv) >= 2:
# check if user gave an IP Address
if len(sys.argv[1].split('.'))>=4 and int(sys.argv[1].split('.')[0]):
ip = sys.argv[1]
options['targets'].append(ip)
else: # User has given a hostname (ex. google.com)
c = "host %s | grep 'has address' | cut -d ' ' -f 4" % sys.argv[1]
addr = utils.cmd(c, False)
options['targets'].extend(addr)
elif len(sys.argv) > 2 and '-file' in sys.argv:
for addr in utils.swap(sys.argv[-1], False):
options['targets'].append(addr)
# remove any duplicate addresses
options['targets'] = list(set(options['targets']))
return options
def scan_thread(target, port, verbose):
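# Note: the `port` argument is effectively unused; the fixed service map below
# drives the scan.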
results = {}
results['target'] = target
ports = {'ftp': 21, 'ssh': 22, 'smtp': 25,
'dns': 53, 'http': 80, 'http-proxy':8080,
'rdp': 3389, 'socks': 1080} # Add more, just a quick list
TIMEOUT = 5; n_open = 0
for proto in ports.keys():
port = ports[proto]
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(TIMEOUT)
try:
s.connect((target, port))
results[proto] = 'OPEN'
n_open += 1
except socket.error: # closed or filtered
results[proto] = 'CLOSED'
pass
s.close()
except socket.error:
print('[!] Unable to create socket')
return {'target': target, 'ports_open': 0}
results['ports_open'] = n_open
return results
def parse_scan(results):
head = '- %s has %d ports open |' % (results['target'], results['ports_open'])
for i in results.keys():
if results[i] == 'OPEN':
head += '%s|' % i.upper()
print(head)
def main():
# Get the addresses to be scanned
opts = check_args()
# Start Scanning
start_date, start_time = utils.create_timestamp()
banner = '[*] Starting Scan of %d Hosts %s - %s'
print(banner % (len(opts['targets']), start_date, start_time))
verbosity = False
for ip in opts['targets']:
pool = multiprocessing.Pool(5)
scan = pool.apply_async(scan_thread, (ip, 22, False))
try:
ports = scan.get(timeout=3)
parse_scan(ports)
except multiprocessing.TimeoutError:
pass
if __name__ == '__main__':
main()
```
#### File: code/Serving/serve.py
```python
from threading import Thread
import socket
import utils
import time
import sys
import os
class BasicAPI:
inbound = 54123
served = []
start = 0.0
def __init__(self):
self.start = time.time()
self.actions = {'UPTIME': self.uptime}
self.run()
def run(self):
running = True
start_day, start_time = utils.create_timestamp()
print('[*] Starting Server [%s - %s]' % (start_day, start_time))
srv = self.create_listener()
while running:
try:
# Wait for client
c, ci = srv.accept()
# Pass client to handler thread
                Thread(target=self.client_handler, args=(c, ci)).start()  # the bound method already carries self
except KeyboardInterrupt:
running = False
pass
# close the server
end_day, end_time = utils.create_timestamp()
print('[*] Shutting Down Server [%s - %s]' % (end_day, end_time))
srv.close()
def create_listener(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('0.0.0.0', self.inbound))
s.listen(5)
except socket.error:
print('[!] Unable to create socket')
exit()
return s
    def uptime(self, c, i, r):
        c.send(('UPTIME: %s' % str(time.time() - self.start)).encode())  # sockets carry bytes in Python 3
return c
    def client_handler(self, csock, caddr):
        raw_request = csock.recv(2048).decode()  # bytes -> str for the splits below
print('[*] Connection Accepted from %s' % caddr[0])
if len(raw_request.split(' ??? ')) >= 2:
api_req = raw_request.split(' ??? ')[0]
payload = raw_request.split(' ??? ')[1]
if api_req in self.actions.keys():
print(' - making %s request\n' % api_req)
csock = self.actions[api_req](csock, caddr, payload)
else:
try:
                csock.send(b'[!] Invalid API Request.\nUse: "Method ??? Request"')
except socket.error:
pass
# finally close connection to client
csock.close()
def main():
simple_server = BasicAPI()
if __name__ == '__main__':
main()
```
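A small client for the `Method ??? Request` protocol served above; `UPTIME` is the only built-in action and 54123 is the class's default port. A sketch:
```python
import socket

def query_uptime(host='127.0.0.1', port=54123):
    # The server expects 'METHOD ??? payload' in a single message.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        s.send(b'UPTIME ??? now')
        return s.recv(1024).decode()

# print(query_uptime())  # e.g. 'UPTIME: 12.34...'
```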
#### File: code/Serving/utils.py
```python
import random
import time
import sys
import os
lowers = ['a','b','c','d','e','f','g','h','i','j',
'k','l','m','n','o','p','q','r','s','t',
'u','v','w','x','y','z']
uppers = ['A','B','C','D','E','F','G','H','I','J',
'K','L','M','N','O','P','Q','R','S','T',
'U','V','W','X','Y','Z']
alphas = ['0', '1','2','3','4','5','6','7','8','9']
PYVER = int(sys.version.split(' ')[0].split('.')[0])
def swap(filename, destroy):
data = []
    for line in open(filename, 'r').readlines():  # text mode so the str replace below works under Python 3
        data.append(line.replace('\n', ''))
if destroy:
os.remove(filename)
return data
def create_random_filename(ext):
charpool = []
for l in lowers: charpool.append(l)
for u in uppers: charpool.append(u)
for a in alphas: charpool.append(a)
basename = ''.join(random.sample(charpool, 6))
random_file = basename +ext
return random_file
def cmd(command, verbose):
tmp = create_random_filename('.sh')
tmp2 = create_random_filename('.txt')
data = '#!/bin/bash\n%s\n#EOF' % command
open(tmp, 'w').write(data)
os.system('bash %s >> %s' % (tmp,tmp2))
os.remove(tmp)
if verbose:
os.system('cat %s' % tmp2)
return swap(tmp2, True)
def create_timestamp():
date = time.localtime(time.time())
mo = str(date.tm_mon)
day = str(date.tm_mday)
yr = str(date.tm_year)
hr = str(date.tm_hour)
    mins = str(date.tm_min)  # avoid shadowing the built-in min()
    sec = str(date.tm_sec)
    date = mo + '/' + day + '/' + yr
    timestamp = hr + ':' + mins + ':' + sec
return date, timestamp
``` |
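The temp-file trick in `cmd` above can be replaced with `subprocess.run`, which captures output without touching disk. A sketch assuming Python 3.7+ (`capture_output` was added then):
```python
import subprocess

def cmd_subprocess(command, verbose=False):
    # Run through bash, capture stdout as text, no temporary files.
    result = subprocess.run(['bash', '-c', command], capture_output=True, text=True)
    if verbose:
        print(result.stdout, end='')
    return result.stdout.splitlines()

# cmd_subprocess('echo hello', verbose=True)
```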
{
"source": "0ptimus/PyInger",
"score": 3
} |
#### File: 0ptimus/PyInger/pyinger.py
```python
import smtplib
import email
from email.MIMEText import MIMEText
import logging
import datetime
log = logging.getLogger('pyinger')
### Update path below to the directory where you put pyinger.py ###
loghandler = logging.FileHandler('/home/user/pythonscripts/pyinger/pyinger.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
loghandler.setFormatter(formatter)
log.addHandler(loghandler)
log.setLevel(logging.INFO)
### Add hosts that you want to ping here ###
hosts = ['host1.domain.com', 'host2.domain.com']
def ping(host):
"""
Returns True if host responds to a ping request
"""
import os, platform
# Ping parameters as function of OS
ping_str = "-n 1" if platform.system().lower()=="windows" else "-c 1"
# Ping
return os.system("ping " + ping_str + " " + host) == 0
def sendemail():
smtp_host = 'smtp.yourdomain.com'
smtp_port = 25
emailuser = 'someuser'
emailpass = '<PASSWORD>'
server = smtplib.SMTP()
server.connect(smtp_host,smtp_port)
server.ehlo()
server.starttls()
### Uncommment line below if your SMTP server requires authentication ###
# server.login(emailuser,emailpass)
### Change this to whatever you want your your from address to be
fromaddr = '<EMAIL>'
### Add recipients to receive notifications when hosts do not respond to ping ###
tolist = ['someone<EMAIL>', '<EMAIL>']
sub = host + ' is down!'
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = email.Utils.COMMASPACE.join(tolist)
msg['Subject'] = sub
messagebody = 'The following host did not respond to ping and appears to be down:\n%s' % host
    msg.attach(MIMEText(messagebody, 'plain'))  # the body was previously attached twice
    server.sendmail(fromaddr,tolist,msg.as_string())  # use the From address as the envelope sender
def notificationsent(host):
today = datetime.date.today()
global formattedtoday
formattedtoday = today.strftime('%m/%d/%y')
global hstatusline
hstatusline = host + ' - notification sent - ' + formattedtoday
    try:
        already_sent = hstatusline in open('/home/user/pythonscripts/pyinger/pyinger_notification.log').read()
    except IOError:
        already_sent = False  # first run: the notification log does not exist yet
    if already_sent:
print "Notification about %s host being down was already sent today (%s)." % (host, formattedtoday)
return True
else:
print "Notification about %s host being down has NOT been sent today (%s)." % (host, formattedtoday)
print "Sending notification..."
return False
for host in hosts:
if ping(host) == True:
print "OK - %s host responded to ping!" % host
else:
print "DOWN - %s host not responding to ping! Houston, we have problem!" % host
log.warning('==> %s host is down!', host)
if notificationsent(host) == False:
### Update path below to the directory where you put pyinger.py ###
file = open('/home/user/pythonscripts/pyinger/pyinger_notification.log', 'a')
file.write(hstatusline)
file.write("\n")
file.close()
sendemail()
log.info('==> sent NEW notification that %s host is down', host)
else:
log.info('==> already sent notification that %s host is down', host)
``` |
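The `os.system('ping ...')` check above echoes ping output to the console and interpolates the host into a shell string; a subprocess-based variant stays quiet and avoids the shell. A sketch assuming Python 3:
```python
import platform
import subprocess

def ping_quiet(host):
    # One echo request: '-n' on Windows, '-c' elsewhere; output is discarded.
    flag = '-n' if platform.system().lower() == 'windows' else '-c'
    code = subprocess.call(['ping', flag, '1', host],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)
    return code == 0

# ping_quiet('localhost')
```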
{
"source": "0R1-qt/POMODORO",
"score": 3
} |
#### File: 0R1-qt/POMODORO/main.py
```python
from tkinter import *
import math
# CONSTANT
PINK = "#e2979c"  # was "#e29779c", an invalid 7-digit hex value
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
FONT_NAME = "After disaster"
WORK = 25
SHORT_BREAK_MIN = 5
LONG_BREAK_MIN = 20
class Pomodoro:
def __init__(self):
self.window = Tk()
self.Image = PhotoImage(file="./Images/tomato.png")
self.canvas = Canvas(width=292, height=300, bg = YELLOW, highlightthickness=0)
self.canvas.create_image(146, 150, image = self.Image)
self.timer = self.canvas.create_text(146, 240, text="00:00", fill="white", font=(FONT_NAME, 30, "bold"))
self.canvas.grid(row=1, column=1)
self.window.title("Pomodoro")
self.window_configuration()
self.text_configuration()
self.window.mainloop()
def window_configuration(self):
"""Sets the padding horizontally and vertically"""
self.window.config(padx=50, pady=40, bg=YELLOW)
def text_configuration(self):
timer_label = Label(text="Timer", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 35))
timer_label.grid(row=0, column=1)
start_text = Button(text="Start", fg=GREEN, bg=YELLOW, font=("Bahnschrift", 10), command=self.trigger_count_down)
start_text.grid(row=2, column=0)
reset_text = Button(text="Reset", fg=GREEN, bg=YELLOW, font=("Bahnschrift", 10))
reset_text.grid(row=2, column=2)
check_text = Label(text="✔", fg=GREEN, bg=YELLOW)
check_text.grid(row=2, column=1)
def count_down(self, count):
"""This functions is to change the timer tex and modifying text as well"""
counter_minutes = math.floor(count/60)
counter_seconds = count % 60
        self.canvas.itemconfig(self.timer, text=f"{counter_minutes}:{counter_seconds:02d}")  # zero-pad the seconds
if count > 0:
self.window.after(1000, self.count_down, count-1)
else:
print("Time's UP!")
def trigger_count_down(self):
        self.count_down(WORK * 60)  # use the constant defined above
if __name__ == "__main__":
window = Pomodoro()
``` |
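The Reset button above is created without a `command`. One way a reset could work is to cancel the pending `after` callback; the `timer_handle` attribute below is an assumption (the original never stores the id returned by `window.after`). A method sketch for the Pomodoro class:
```python
# Assumes the countdown stores its callback id when scheduling, e.g.:
#     self.timer_handle = self.window.after(1000, self.count_down, count - 1)

def reset_timer(self):
    if getattr(self, 'timer_handle', None) is not None:
        self.window.after_cancel(self.timer_handle)  # stop the pending tick
        self.timer_handle = None
    self.canvas.itemconfig(self.timer, text="00:00")  # restore the idle display
```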
{
"source": "0racul/pythonize",
"score": 2
} |
#### File: pythonize/fixture/contact.py
```python
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
contact_cache = None
def contact_creation(self, contact):
wd = self.app.wd
self.new_contact_init()
self.fill_contact_fields(contact)
self.select_group()
self.submit()
self.return_home()
self.contact_cache = None
def return_home(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
def fill_contact_fields(self, contact):
wd = self.app.wd
self.change_fields_value("firstname", contact.firstname)
self.change_fields_value("middlename", contact.middlename)
self.change_fields_value("lastname", contact.lastname)
self.change_fields_value("nickname", contact.nickname)
self.change_fields_value("title", contact.title)
self.change_fields_value("company", contact.company)
self.change_fields_value("address", contact.address)
self.change_fields_value("home", contact.hometele)
self.change_fields_value("mobile", contact.mobiletele)
self.change_fields_value("work", contact.worktele)
self.change_fields_value("fax", contact.faxtele)
self.change_fields_value("email", contact.email)
self.change_fields_value("email2", contact.email2)
self.change_fields_value("email3", contact.email3)
self.change_fields_value("homepage", contact.homepage)
self.input_dates()
self.change_fields_value("address2", contact.secondaryaddress)
self.change_fields_value("phone2", contact.phone2)
self.change_fields_value("notes", contact.notes)
def submit(self):
wd = self.app.wd
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def input_dates(self):
wd = self.app.wd
self.input_birth_day()
self.input_birth_month()
self.input_birth_year("2010")
self.input_anniversary_day()
self.input_anniversary_month()
self.input_anniversary_year("3010")
def select_group(self):
wd = self.app.wd
if not wd.find_element_by_xpath("//div[@id='content']/form/select[5]//option[10]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[5]//option[10]").click()
def input_anniversary_year(self, anniv_year):
wd = self.app.wd
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(anniv_year)
def input_anniversary_month(self):
wd = self.app.wd
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[6]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[6]").click()
def input_anniversary_day(self):
wd = self.app.wd
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[5]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[5]").click()
def input_birth_year(self, birth_year):
wd = self.app.wd
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys(birth_year)
def input_birth_month(self):
wd = self.app.wd
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[7]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[7]").click()
def input_birth_day(self):
wd = self.app.wd
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[5]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[5]").click()
def change_fields_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def new_contact_init(self):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
def init_edit_contact_by_index(self, index):
wd = self.app.wd
site_index = index + 2
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[" + str(site_index) + "]/td[8]/a/img").click()
def init_view_contact_by_index(self, index):
wd = self.app.wd
site_index = index + 2
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[" + str(site_index) + "]/td[7]/a/img").click()
def submit_updating(self):
wd = self.app.wd
wd.find_element_by_name("update").click()
def count(self):
wd = self.app.wd
self.return_home()
return len(wd.find_elements_by_name("selected[]"))
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.return_home()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
lastname = cells[1].text
firstname = cells[2].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(id=id,
firstname=firstname,
lastname=lastname,
address=address,
all_phones_from_homepage=all_phones,
all_emails_from_homepage=all_emails))
return list(self.contact_cache)
def update_first_contact(self, contact):
wd = self.app.wd
self.update_contact_by_index(0, contact)
def update_contact_by_index(self, index, contact):
wd = self.app.wd
self.init_edit_contact_by_index(index)
self.fill_contact_fields(contact)
self.submit_updating()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.init_edit_contact_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
middlename = wd.find_element_by_name("middlename").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
nickname = wd.find_element_by_name("nickname").get_attribute("value")
title = wd.find_element_by_name("title").get_attribute("value")
company = wd.find_element_by_name("company").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
hometele = wd.find_element_by_name("home").get_attribute("value")
worktele = wd.find_element_by_name("work").get_attribute("value")
faxtele = wd.find_element_by_name("fax").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
homepage = wd.find_element_by_name("homepage").get_attribute("value")
        secondaryaddress = wd.find_element_by_name("address2").get_attribute("value")  # the form field is named address2, as elsewhere in this class
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
notes = wd.find_element_by_name("notes").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
mobiletele = wd.find_element_by_name("mobile").get_attribute("value")
return Contact(firstname = firstname,
middlename = middlename,
lastname = lastname,
nickname = nickname,
title = title,
company = company,
address = address,
hometele = hometele,
mobiletele = mobiletele,
worktele = worktele,
faxtele = faxtele,
email = email,
email2 = email2,
email3 = email3,
homepage = homepage,
secondaryaddress = secondaryaddress,
phone2 = phone2,
notes = notes,
id = id,
all_phones_from_homepage = None,
all_emails_from_homepage = None)
def contact_from_view_page(self, index):
wd = self.app.wd
self.init_view_contact_by_index(index)
text = wd.find_element_by_id("content").text
hometele = re.search("H: (.*)", text).group(1)
worktele = re.search("W: (.*)", text).group(1)
mobiletele = re.search("M: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(hometele=hometele,
worktele=worktele,
mobiletele=mobiletele,
phone2=phone2)
def get_contact_phones(self):
if self.contact_cache is None:
wd = self.app.wd
self.return_home()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
lastname = cells[1].text
firstname = cells[2].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
self.contact_cache.append(Contact(lastname=lastname,
firstname=firstname,
id=id,
all_phones_from_homepage=all_phones))
return list(self.contact_cache)
def get_contact_info_from_homepage(self):
if self.contact_cache is None:
wd = self.app.wd
            self.return_home()
            self.contact_cache = []  # initialize the cache before appending below
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
lastname = cells[1].text
firstname = cells[2].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(id=id,
firstname=firstname,
lastname=lastname,
address=address,
all_phones_from_homepage=all_phones,
all_emails_from_homepage=all_emails))
return list(self.contact_cache)
def get_contact_list_full_info(self):
if self.contact_cache is None:
wd = self.app.wd
self.return_home()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
cells[7].find_element_by_tag_name("a").click()
firstname = wd.find_element_by_name("firstname").get_attribute("value")
middlename = wd.find_element_by_name("middlename").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
nickname = wd.find_element_by_name("nickname").get_attribute("value")
title = wd.find_element_by_name("title").get_attribute("value")
company = wd.find_element_by_name("company").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
hometele = wd.find_element_by_name("home").get_attribute("value")
worktele = wd.find_element_by_name("work").get_attribute("value")
faxtele = wd.find_element_by_name("fax").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
homepage = wd.find_element_by_name("homepage").get_attribute("value")
secondaryaddress = wd.find_element_by_name("address2").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
notes = wd.find_element_by_name("notes").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
mobiletele = wd.find_element_by_name("mobile").get_attribute("value")
self.contact_cache.append(Contact(id=id,
firstname=firstname,
middlename=middlename,
nickname=nickname,
title=title,
company=company,
hometele=hometele,
worktele=worktele,
faxtele=faxtele,
email=email,
email2=email2,
email3=email3,
homepage=homepage,
secondaryaddress=secondaryaddress,
phone2=phone2,
notes=notes,
mobiletele=mobiletele,
lastname=lastname,
address=address))
self.return_home()
return list(self.contact_cache)
def delete_contact_by_id(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_css_selector("input[value = 'Delete']").click()
wd.switch_to_alert().accept()
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value ='%s']" % id).click()
def get_last_contact_id(self):
wd = self.app.wd
id_s = []
        # the checkboxes are named 'selected[]'; their id attribute holds the contact id
        for val in wd.find_elements_by_css_selector("input[name = 'selected[]']"):
            id_s.append(int(val.get_attribute("id")))
        return max(id_s)
```
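Tests that compare `all_phones_from_homepage` against the edit-page fields usually rebuild the newline-joined blob first. A hedged helper sketch; the function names and the stripped separator set are assumptions, not part of the repository:
```python
import re

def clear_phone(s):
    # drop the separators the list page strips from phone numbers
    return re.sub('[() -]', '', s) if s else ''

def merge_phones_like_on_homepage(contact):
    # join non-empty cleaned phone fields in the order the home page shows them
    fields = [contact.hometele, contact.mobiletele, contact.worktele, contact.phone2]
    return '\n'.join(filter(None, map(clear_phone, fields)))
```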
#### File: pythonize/model/contact.py
```python
from sys import maxsize
class Contact:
def __init__(self, firstname = None,
middlename = None,
lastname = None, nickname = None,
title = None, company = None,
address = None, hometele = None,
mobiletele = None,
worktele = None,
faxtele = None,
email = None,
email2 = None,
email3 = None,
homepage = None,
secondaryaddress = None,
phone2 = None,
notes = None,
id = None,
all_phones_from_homepage = None,
all_emails_from_homepage = None):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.hometele = hometele
self.mobiletele = mobiletele
self.worktele = worktele
self.faxtele = faxtele
self.email = email
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.secondaryaddress = secondaryaddress
self.phone2 = phone2
self.notes = notes
self.id = id
self.all_phones_from_homepage = all_phones_from_homepage
self.all_emails_from_homepage = all_emails_from_homepage
def __repr__(self):
return "%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname, self.address, self.all_phones_from_homepage, self.all_emails_from_homepage)
def __eq__(self, other):
return ((self.id is None or other.id is None or self.id == other.id)
and self.lastname == other.lastname
and self.firstname == other.firstname
and self.middlename == other.middlename
and self.nickname == other.nickname
and self.title == other.title
and self.company == other.company
and self.hometele == other.hometele
and self.worktele == other.worktele
and self.faxtele == other.faxtele
and self.email == other.email
and self.email2 == other.email2
and self.email3 == other.email3
and self.homepage == other.homepage
and self.secondaryaddress == other.secondaryaddress
and self.phone2 == other.phone2
and self.notes == other.notes
and self.address == other.address
and self.mobiletele == other.mobiletele
and self.all_phones_from_homepage == other.all_phones_from_homepage
and self.all_emails_from_homepage == other.all_emails_from_homepage)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
```
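A small usage sketch of the model above: `id_or_max` lets lists that mix saved contacts (with ids) and freshly created ones (without) sort deterministically before comparison:
```python
from model.contact import Contact

a = [Contact(id='3', firstname='Ann'), Contact(firstname='Bob')]
b = [Contact(firstname='Bob'), Contact(id='3', firstname='Ann')]
# id-less contacts sort to the end (maxsize), so both orderings converge
assert sorted(a, key=Contact.id_or_max) == sorted(b, key=Contact.id_or_max)
```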
#### File: pythonize/test/test_modify gruppe.py
```python
from model.group import Group
from random import randrange
def test_modify_gruppe(app, db):
app.group.open()
    # ensure at least one group exists before sampling; randrange(0) would raise
    if app.group.count() == 0:
        app.group.create_group(Group("test_group"))
    old_groups = db.get_group_list()
    index = randrange(len(old_groups))
group = Group("[eqgbplfl;buehlf", "[eqgbplfl;buehlf", "[eqgbplfl;buehlf")
group.id = old_groups[index].id
app.group.modify_group_by_id(group.id, group)
assert len(old_groups) == app.group.count()
new_groups = db.get_group_list()
old_groups[index] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
``` |
{
"source": "0rakul0/webDjango",
"score": 2
} |
#### File: web/polls/models.py
```python
import datetime
from django.utils import timezone  # timezone.now() used below lives here, not in datetime
from django.db import models
# Create your models here.
class Question(models.Model):
question_texto = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_texto
def perguntas_recentes(self):
return self.pub_date >= timezone.now()-datetime.timedelta(days=1)
class Mudanca(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
mudanca_text = models.CharField(max_length=200)
votos = models.IntegerField(default=0)
def __str__(self):
return self.mudanca_text
``` |
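With `perguntas_recentes` defined per instance, the same last-24-hours cutoff can also be expressed as a queryset; a standard-ORM sketch:
```python
import datetime
from django.utils import timezone
from polls.models import Question

def recent_questions():
    # questions published within the last day, newest first
    cutoff = timezone.now() - datetime.timedelta(days=1)
    return Question.objects.filter(pub_date__gte=cutoff).order_by('-pub_date')
```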
{
"source": "0rang3max/int2az",
"score": 3
} |
#### File: int2az/int2az/__init__.py
```python
class Int2Az:
rank = [1, 10, 100, 1000, 1000000, 1000000000]
ones = ['', 'bir ', 'iki ', 'üç ', 'dörd ', 'beş ', 'altı ', 'yeddi ', 'səkkiz ', 'doqquz ']
tens = ['', 'on ', 'iyirmi ', 'otuz ', 'qırx ', 'əlli ', 'altmış ', 'yetmiş ', 'səksən ', 'doxsan ']
@classmethod
def _check_num(cls, num):
if type(num) != int:
raise Exception('Input number should be integer type.')
        if num >= cls.rank[-1]:  # match the message below; the string builder only handles up to millions
raise Exception('Input number should be lower than ' + str(cls.rank[-1]))
@classmethod
def _make_forstr(cls, a):
i = 0
forstr = []
check = True
while check != 0:
num = a // cls.rank[i]
if cls.rank[i] == 1:
forstr.append(a % 10)
elif cls.rank[i] == 100:
forstr.append(num % 10)
else:
forstr.append(num % cls.rank[i])
check = num != 0
i += 1
return(forstr)
@classmethod
def _forstr2str(cls, forstr):
strnum = []
for i, item in enumerate(forstr):
item = int(item)
if item != 0:
if i == 0:
strnum.append(cls.ones[item])
elif i == 1:
strnum.append(cls.tens[item])
elif i == 2:
strnum.append(cls.ones[item] + 'yüz ')
elif i == 3:
strnum.append(cls._forstr2str(cls._make_forstr(item)) + 'min ')
elif i == 4:
strnum.append(cls._forstr2str(cls._make_forstr(item)) + 'milyon ')
return ''.join(reversed(strnum))
@classmethod
def convert(cls, num):
cls._check_num(num)
return cls._forstr2str(cls._make_forstr(num)).strip()
``` |
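Usage example; both outputs were traced by hand against the digit tables and grouping logic above:
```python
from int2az import Int2Az

print(Int2Az.convert(42))    # qırx iki
print(Int2Az.convert(2021))  # iki min iyirmi bir
```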
{
"source": "0rang3max/RockPaperScissorsBot",
"score": 3
} |
#### File: RockPaperScissorsBot/rock_paper_scissors_bot/handlers.py
```python
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from rock_paper_scissors_bot.db import Database
EMOJI = {
'Paper': '✋',
'Rock': '✊',
'Scissors': '✌️',
}
def handle_start_game(update, context):
keyboard = [
[
InlineKeyboardButton(emoji_icon, callback_data=name)
for name, emoji_icon in EMOJI.items()
],
]
update.message.reply_text(
'Choose:', reply_markup=InlineKeyboardMarkup(keyboard)
)
def handle_answer(update, context):
query = update.callback_query
data_id = query.message.message_id
data = Database.get_data(data_id)
if not data:
data = {
'user': query.from_user.username,
'answer': query.data,
}
Database.set_data(data_id, data)
query.answer('You answered first')
return
player1 = data['user']
player2 = query.from_user.username
if player1 == player2:
query.answer('Wait for other player!')
return
answer1 = data.get('answer')
answer2 = query.data
response = f'{player1} {EMOJI[answer1]} vs {player2} {EMOJI[answer2]}:'
if answer1 == answer2:
query.answer('Draw')
query.edit_message_text(text=f'{response} Draw')
return
results = {
('Paper', 'Rock'): 'Paper',
('Paper', 'Scissors'): 'Scissors',
('Rock', 'Paper'): 'Paper',
('Rock', 'Scissors'): 'Rock',
('Scissors', 'Paper'): 'Scissors',
('Scissors', 'Rock'): 'Rock'
}
players_answers = {
answer1: player1,
answer2: player2,
}
winning_answer = results[(answer1, answer2)]
query.edit_message_text(
text=f'{response} Winner @{players_answers[winning_answer]} {EMOJI[winning_answer]}'
)
    # the user answering now is player2, so they win when their answer is the winning one
    query.answer('You won!' if winning_answer == answer2 else 'You lost :(')
``` |
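Wiring these handlers into a bot; the sketch assumes the python-telegram-bot v13-style synchronous API (`Updater`/dispatcher), and the `/play` command name and token placeholder are assumptions:
```python
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler

from rock_paper_scissors_bot.handlers import handle_start_game, handle_answer

def run_bot(token):
    updater = Updater(token)
    updater.dispatcher.add_handler(CommandHandler('play', handle_start_game))
    updater.dispatcher.add_handler(CallbackQueryHandler(handle_answer))
    updater.start_polling()
    updater.idle()

# run_bot('BOT_TOKEN')  # supply a real token
```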
{
"source": "0range-j0e/PYTHON-NAVIGATOR",
"score": 3
} |
#### File: 0range-j0e/PYTHON-NAVIGATOR/pynav.py
```python
import os
import colorama
import sys
import readline
''' The goal of this program is to simplify the navigation of the Linux filesystem within Terminal. '''
__author__ = "0range-j0e"
__copyright__ = "0range-j0e"
__version__ = "1.0"
# Initialize color variables from Colorama library for ease of coding.
black, red, white, green, yellow, cyan, magenta = (
    colorama.Fore.BLACK, colorama.Fore.RED, colorama.Fore.WHITE,
    colorama.Fore.GREEN, colorama.Fore.YELLOW, colorama.Fore.CYAN,
    colorama.Fore.MAGENTA,
)
dim, bright, normal, reset = (
    colorama.Style.DIM, colorama.Style.BRIGHT,
    colorama.Style.NORMAL, colorama.Style.RESET_ALL,
)
# Define the maximum amount of directories and files to be listed in each column.
column_max = 20
# If set to False, terminal will not clear with each user command.
auto_clear = False
def macro(directory, column_max):  # the body below uses column_max, so the parameter must match
if auto_clear is True:
os.system('clear')
print("-----------------------")
print(f"Directory:", os.getcwd() + "\n")
# Variables and formulas to send contents of present working directory to columns a, b, and c, defined as ca, cb, and cc below.
ca = 0
cb = (column_max)
cc = (column_max*2)
num_string = str(len(directory)/(column_max*3))
for i in range(len(num_string)):
if '.' in num_string[i]:
integer, decimal = (int(num_string[:i])), (float(num_string[i:]))
if decimal < .34:
sub_sequences = ((integer*3)+1)
elif decimal < .67:
sub_sequences = ((integer*3)+2)
else:
sub_sequences = ((integer*3)+3)
starter_subsequence = sub_sequences
if decimal != .0:
sequences = integer + 1
else:
sequences = integer
while True:
if sub_sequences >= 3:
for i in range(column_max):
try:
sys.stdout.write( white + '{:<5s}'.format('['+str(ca)+']') + white + bright + reset + '{:<45s}'.format(directory[ca]) + white + '{:<5s}'.format('['+str(cb)+']') + white + bright + reset + '{:<45s}'.format(directory[cb]) + white + '{:<5s}'.format('['+str(cc)+']') + white + bright + reset + '{:<45s}'.format(directory[cc]) + '\n')
sys.stdout.flush()
ca += 1
cb += 1
cc += 1
except:
sys.stdout.write( white + '{:<5s}'.format('['+str(ca)+']') + white + bright + reset + '{:<45s}'.format(directory[ca]) + white + '{:<5s}'.format('['+str(cb)+']') + white + bright + reset + '{:<45s}'.format(directory[cb]) + '\n')
ca += 1
cb += 1
sub_sequences -= 3
print("")
ca += (column_max *2)
cb += (column_max *2)
cc += (column_max *2)
else:
break
while True:
if sub_sequences == 2:
if starter_subsequence > 3:
for i in range(column_max):
try:
                        sys.stdout.write( white + '{:<5s}'.format('['+str(cb)+']') + white + bright + reset + '{:<45s}'.format(directory[cb]) + white + '{:<5s}'.format('['+str(cc)+']') + white + bright + reset + '{:<45s}'.format(directory[cc]) + '\n')
                        cb += 1  # advance the two columns actually being printed
                        cc += 1
                    except:
                        sys.stdout.write( white + '{:<5s}'.format('['+str(cb)+']') + white + bright + reset + '{:<45s}'.format(directory[cb]) + '\n')
                        cb += 1
else:
for i in range(column_max):
try:
sys.stdout.write( white + '{:<5s}'.format('['+str(ca)+']') + white + bright + reset + '{:<45s}'.format(directory[ca]) + white + '{:<5s}'.format('['+str(cb)+']') + white + bright + reset + '{:<45s}'.format(directory[cb]) + '\n')
ca += 1
cb += 1
except:
sys.stdout.write( white + '{:<5s}'.format('['+str(ca)+']') + white + bright + reset + '{:<45s}'.format(directory[ca]) + '\n')
ca += 1
sub_sequences -= 2
print("")
ca += (column_max *2)
cb += (column_max *2)
cc += (column_max *2)
elif sub_sequences == 1:
if starter_subsequence > 3:
for i in range(column_max):
try:
sys.stdout.write(f'[{str(cc)}] {directory[cc]}' + '\n')
cc += 1
except Exception as e:
pass
sub_sequences -= 1
else:
for i in range(column_max):
try:
sys.stdout.write(f'[{str(ca)}] {directory[ca]}' + '\n')
ca += 1
except Exception as e:
pass
print("")
break
''' The heart of it all, the Python Navigator. Directory listings are placed in a dictionary and assigned a key.
If the key is linked to a directory, the program will navigate to that directory. If the key is linked to a file, the
program will open the file using the xdg-open utility. A user can also type their own commands just as if they were in
a regular terminal. For example, if a user would like to run a python script, they would simply enter python3 <file>.
'''
def Navigator(column_max):
while True:
directory = {}
for i, j in enumerate(os.listdir()):
directory[i] = j
try:
macro(directory, column_max)
except:
pass
# Take user input
command = input()
# Checks if the user input is a digit. Will attempt to change to directory first. If digit is not assigned to a directory, program will attempt to run file with xdg-open.
if command.isdigit() is True:
for key, content in directory.items():
if int(command) == key:
try:
os.chdir(directory[key])
except:
os.system(f'xdg-open "{directory[key]}"')
finally:
pass
# Check if the user input is a string.
elif type(command) is str:
# If 'cd' is in command, try to run the command as usual, but if there is an error print 'invalid directory'.
            if command.startswith('cd '):  # match the command itself, not any string containing 'cd'
try:
os.chdir(command[3:])
except:
                    print('invalid directory')
# If inputted string matches the name of a file or directory, attempt to open a directory and if that fails, attempt to open file with xdg-open.
elif command in os.listdir():
try:
os.chdir(command)
except:
os.system(f"xdg-open " + "'{command}'")
finally:
pass
# Function to find content within columns. Needs to be fixed.
elif 'find' in command:
aggregator = {}
x = 0
string_to_find = command[5:]
for key, content in directory.items():
if string_to_find in content:
aggregator[x] = content
x += 1
if len(aggregator) > 0:
macro(aggregator, column_max)
# If user input is 'b', program will move back a directory.
elif command.lower() in ['b', 'back']:
os.chdir('..')
# If none of the above parameters are met, program will run with default system command.
else:
os.system(command)
Navigator(column_max)
``` |
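The navigator above distinguishes directories from files by trying `os.chdir` and falling back to `xdg-open`; `os.scandir` can make the split explicit up front. A sketch:
```python
import os

def classify_entries(path='.'):
    # Partition a listing into directories and files without try/except probing.
    dirs, files = [], []
    for entry in os.scandir(path):
        (dirs if entry.is_dir() else files).append(entry.name)
    return sorted(dirs), sorted(files)

# dirs, files = classify_entries()
# print(f'{len(dirs)} directories, {len(files)} files')
```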
{
"source": "0rbianta/anti_afk_cheat",
"score": 3
} |
#### File: 0rbianta/anti_afk_cheat/anti_afk.py
```python
import os
import time
import keyboard
from random import randint
from pynput.mouse import Button, Controller
def main():
check_root()
print("Welcome to anti-afk cheat for games. Have fun. You can stop script anytime by pressing \"q\".")
print("I'm understand that I can stop script by pressing \"q\".")
usr=str(input("USER(yes/no)>> "))
if(usr=="yes" or usr=="y"):
antiafk()
elif(usr=="no" or usr=="n"):
print("Exiting...")
exit()
else:
print("Please write \"yes\" or \"no\" to continue. Exiting...")
exit()
def check_root():
user_profile=os.popen("whoami").read()
if("root" in user_profile):
os.system("clear")
else:
print("Please run as root.")
exit()
def antiafk():
Mouse = Controller()
while keyboard.is_pressed("q")==0:
x=randint(0,1000)
y=randint(0,1000)
time.sleep(0.5)
print("Mouse moved to x:",x,", y:",y,sep="")
Mouse.position = (x, y)
print("\"q\" pressed. Exitting...")
main()
``` |
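The `keyboard` module is the reason the script insists on root on Linux; `pynput`'s listener can watch for the quit key without elevated privileges under X11. A sketch of the alternative:
```python
from pynput import keyboard

stop_requested = False

def on_press(key):
    # flip the flag on 'q'; returning False stops the listener thread
    global stop_requested
    if getattr(key, 'char', None) == 'q':
        stop_requested = True
        return False

listener = keyboard.Listener(on_press=on_press)
listener.start()
# the mouse-moving loop would then poll `stop_requested`
# instead of keyboard.is_pressed("q")
```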