{
"source": "4006G2/AutoMod",
"score": 2
} |
#### File: automod/chat/chat_discord.py
```python
from . import ChatBase
import discord
import asyncio
class ChatDiscord(ChatBase):
def __init__(self, chat_bot, token):
super().__init__(chat_bot)
self.token = token
self.client = discord.Client()
self.client.event(self.on_ready)
self.client.event(self.on_message)
self.guilds = []
self.muted = {}
async def init_guilds(self):
guilds = await self.client.fetch_guilds(limit=10).flatten()
for guild in guilds:
if guild not in self.guilds:
self.guilds.append(guild)
async def find_guild_id(self, guild_name):
for guild in self.guilds:
if guild.name == guild_name:
return guild.id
return None
def find_channel_id(self, ch_name):
for ch in self.client.get_all_channels():
if ch.name == ch_name:
return ch.id
return None
def find_user_id(self, user_name):
for user in self.client.get_all_members():
if user.name == user_name:
return user.id
return None
async def broadcast_message(self, ch_name, message):
ch_id = self.find_channel_id(ch_name)
ch = self.client.get_channel(ch_id)
await ch.send(message)
async def send_message_to(self, user_name, message):
user_id = self.find_user_id(user_name)
user = self.client.get_user(user_id)
await user.send(message)
async def send_message_to_id(self, user_id, message):
user = self.client.get_user(user_id)
await user.send(message)
async def get_last_msg(self, ch_name):
ch_id = self.find_channel_id(ch_name)
ch = self.client.get_channel(ch_id)
async for message in ch.history(limit=1):
return message
async def send_ban_req(self, user_name, guild_name, reason=None):
user_id = self.find_user_id(user_name)
user = self.client.get_user(user_id)
guild_id = await self.find_guild_id(guild_name)
guild = self.client.get_guild(guild_id)
if reason is None:
reason = "You are unworthy."
message = f"{user_name} have been banned for {reason}!"
await guild.ban(user, reason=reason)
await self.broadcast_message('general', message)
async def find_banned_user(self, user_name, guild_name):
guild_id = await self.find_guild_id(guild_name)
guild = self.client.get_guild(guild_id)
banned = await guild.bans()
for (reason, user) in banned:
if user.name == user_name:
return user
return None
async def print_banned(self, guild_name):
        guild_id = await self.find_guild_id(guild_name)
guild = self.client.get_guild(guild_id)
banned = await guild.bans()
for (reason, user) in banned:
print(reason, user)
async def unban(self, user_name, guild_name, reason=None):
user = await self.find_banned_user(user_name, guild_name)
guild_id = await self.find_guild_id(guild_name)
guild = self.client.get_guild(guild_id)
message = f"{user_name} have been unbanned for {reason}!"
await guild.unban(user)
await self.broadcast_message('general', message)
async def send_mute_req(self, user_name, reason=None):
user_id = self.find_user_id(user_name)
user = self.client.get_user(user_id)
if reason is None:
reason = "Cause I can."
message = f"{user_name} have been muted for {reason}!"
for ch in self.client.get_all_channels():
await ch.set_permissions(user, read_messages=True, send_messages=False)
self.muted[user_name] = reason
await self.broadcast_message('general', message)
async def unmute(self, user_name):
user_id = self.find_user_id(user_name)
user = self.client.get_user(user_id)
for ch in self.client.get_all_channels():
await ch.set_permissions(user, overwrite=None)
async def unmute_all(self, reason=None):
if len(self.muted) != 0:
for user in list(self.muted):
await self.unmute(user)
self.muted.pop(user, None)
message = f"{user} have been unmuted for {reason}!"
await self.broadcast_message('general', message)
def report_all(self):
for member in self.client.get_all_members():
username = str(member).split('#')[0]
if username != "ModeratorBot":
user_id = self.find_user_id(username)
self.chat_bot.report_user(user_id)
async def discussion_prompt(self):
last_msg = await self.get_last_msg('general')
prompt = self.chat_bot.raise_discussion(last_msg)
if len(prompt) != 0:
await self.broadcast_message('general', prompt)
async def event_alert(self):
alert = self.chat_bot.event_alert()
if len(alert) != 0:
await self.broadcast_message('general', alert)
# client.event
async def on_ready(self):
await self.init_guilds()
await self.broadcast_message('general', 'ModeratorBot is online!')
# client.event
async def on_message(self, message):
user_id = message.author.id
username = str(message.author).split('#')[0]
        if user_id != self.find_user_id('ModeratorBot'):  # ignore messages sent by the bot itself
if self.chat_bot.is_spam(user_id, message.created_at, message.content): # spam check
await self.send_mute_req(username, reason="Spamming")
else:
action = self.chat_bot.monitor_behaviour(user_id, message.content) # behaviour check
if action == 0:
await self.send_message_to(username, 'Please stop sending toxic messages!')
elif action == 1:
await self.send_mute_req(username, reason="toxic behaviour")
elif action == 2:
await self.send_ban_req(username, str(message.guild), reason="toxic behaviour")
async def tasks(self):
await self.client.wait_until_ready()
self.report_all() # for prototype testing
while not self.client.is_closed(): # main loop
await self.discussion_prompt()
await self.event_alert()
await self.unmute_all(reason='mute expired')
await asyncio.sleep(60)
```
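The class above never starts the client itself; a minimal, hypothetical launcher (not part of the repository) could wire it up the same way the test module below does and then hand control to discord.py:
```python
# Hypothetical launcher: assumes the package layout used in test_discord.py
# below and a keys.json file holding the bot token under the "discord" key.
import json
from automod.chat import ChatDiscord
from automod.chatbot import ChatBot

with open('keys.json') as keys:
    token = json.load(keys)['discord']

bot = ChatDiscord(ChatBot(), token)
bot.client.loop.create_task(bot.tasks())  # schedule the periodic moderation loop
bot.client.run(token)                     # blocks until the client is closed
```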
#### File: automod/chat/test_discord.py
```python
import unittest
from . import ChatDiscord
from automod.chatbot import ChatBot
import json
with open('keys.json') as keys:
key_dict = json.loads(keys.read())
TOKEN = key_dict['discord']
class DiscordTests(unittest.TestCase):
def __init__(self, methodName: str = "runTest") -> None:
super().__init__(methodName)
self.cb = ChatBot()
self.test_server = ChatDiscord(self.cb, TOKEN)
def test(self):
srvr = self.test_server.client
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "4006G2/chatter-box",
"score": 3
} |
#### File: chatter_box/chatbot/chatbot.py
```python
from typing import Union, Match
import re
import random
__author__ = "<NAME>"
__copyright__ = "Copyright 2019"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1p"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Pre-alpha"
from chatter_box.users import UserBase
_SELF_NAME = r"Chat-?bot"
_PATTERN_NAME = re.compile(_SELF_NAME, re.IGNORECASE)
_GREETINGS = [r"Hi", r"Hello", r"Hey"]
_PATTERN_GREETING = re.compile(r'^(' + r'|'.join(_GREETINGS) + r'),? ?(' + _SELF_NAME + r')?', re.IGNORECASE)
class Chatbot:
"""Platform independent chatbot implementation"""
def __init__(self) -> None:
pass # TODO: initialise the chatbot.
def parse(self, user: UserBase, user_input: str) -> Union[None, str]:
"""
:param user: the user who sent the message
:param user_input: the whole text entered by the user
        :return: str: the response to send back (or None if this message was unused)
"""
match = _PATTERN_GREETING.search(user_input)
if match:
return self.process_greeting(match, user)
return None
def process_greeting(self, match: Match, user: UserBase) -> str:
greeting = match.group(1)
name = match.group(2)
if name is not None:
user_name = user.name
return "{0}, {1}.".format(greeting, user_name)
return "{0}!".format(random.choice(_GREETINGS))
```
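For reference, `_PATTERN_GREETING` captures the greeting word and an optional bot name, which is exactly what `process_greeting` reads back out; a quick illustration:
```python
# Illustration only: behaviour of _PATTERN_GREETING as defined above.
m = _PATTERN_GREETING.search("hey chatbot, how are you?")
print(m.group(1), m.group(2))                    # -> hey chatbot
print(_PATTERN_GREETING.search("Good morning"))  # -> None, so parse() returns None
```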
#### File: chatter-box/chatter_box/__main__.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2019"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1p"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Pre-alpha"
from chatter_box.users import UserCmd
from chatter_box.chatbot import Chatbot
def main():
user = UserCmd()
bot = Chatbot()
user.add_message_listener(bot.parse)
user_string = ""
while user_string.lower() != "quit":
user_string = input(">>")
user.on_message(user_string)
if __name__ == '__main__':
main()
```
#### File: chatter_box/users/user.py
```python
import abc
from typing import Union, Callable
__author__ = "<NAME>"
__copyright__ = "Copyright 2019"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1p"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Pre-alpha"
UserListener = Callable[['UserBase', str], Union[None, str]]
class UserBase(abc.ABC):
"""
Abstract base class to be used for all future users.
"""
def __init__(self) -> None:
super().__init__()
self._listeners = []
def on_message(self, message: str) -> None:
for listener in self._listeners:
reply = listener(self, message)
if reply is not None:
self.tell(reply)
return
def add_message_listener(self, listener: UserListener):
self._listeners.append(listener)
@abc.abstractmethod
def tell(self, message: str):
"""
sends a message to the user
        :param message: str message to send to the user.
:return: None
"""
pass
@property
@abc.abstractmethod
def name(self):
"""
:return: the user's name
"""
pass
``` |
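`UserBase` only supplies the listener plumbing; a concrete user has to implement `tell` and `name`. A minimal stand-in (the repository's real `UserCmd` is not shown in this excerpt) could look like this:
```python
# Hypothetical console-backed user, assuming only the UserBase contract above.
class ConsoleUser(UserBase):
    def tell(self, message: str):
        print(message)  # deliver replies on stdout

    @property
    def name(self):
        return "console"

user = ConsoleUser()
user.add_message_listener(lambda u, msg: "Pong!" if msg == "ping" else None)
user.on_message("ping")  # prints "Pong!"
```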
{
"source": "4010k3h4r4/deep-learning-from-scratch-3",
"score": 3
} |
#### File: deep-learning-from-scratch-3/steps/step28.py
```python
import numpy as np
# Import core_simple explicitly
from dezero.core_simple import Variable
from dezero.core_simple import setup_variable
setup_variable()
def rosenbrock(x0, x1):
y = 100 * (x1 - x0 ** 2) ** 2 + (x0 - 1) ** 2
return y
logs = []
x0 = Variable(np.array(0.0))
x1 = Variable(np.array(2.0))
iters = 1000
lr = 0.001
for i in range(iters):
print(x0, x1)
y = rosenbrock(x0, x1)
x0.cleargrad()
x1.cleargrad()
y.backward()
logs.append([float(x0.data), float(x1.data), float(y.data)])
x0.data -= lr * x0.grad
x1.data -= lr * x1.grad
# Plot
import matplotlib.pyplot as plt
R = 0.01
x = np.arange(-2.0, 2.0, R)
y = np.arange(-1.0, 3.0, R)
X, Y = np.meshgrid(x, y)
Z = rosenbrock(X, Y)
XX = [float(d[0]) for d in logs]
YY = [float(d[1]) for d in logs]
ZZ = [float(d[2]) for d in logs]
plt.plot(XX, YY, c='orange', alpha=0.8)
plt.scatter(XX, YY, c='red', alpha=0.5)
plt.scatter([1], [1], marker="*", s=100, linewidths=2, c='blue')
plt.contour(X, Y, Z, alpha=0.5, levels=[0, 1, 2, 4, 8, 16, 32, 64, 128, 256])
plt.show()
``` |
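For readers without the dezero package installed, the same descent can be reproduced with hand-derived gradients; the sketch below computes what `y.backward()` derives automatically (assuming nothing beyond the Rosenbrock definition above):
```python
# Plain-Python equivalent of the optimisation loop above, using the analytic
# gradients of the Rosenbrock function (illustrative sketch only).
x0, x1, lr = 0.0, 2.0, 0.001
for _ in range(1000):
    gx0 = -400 * x0 * (x1 - x0 ** 2) + 2 * (x0 - 1)  # dy/dx0
    gx1 = 200 * (x1 - x0 ** 2)                       # dy/dx1
    x0 -= lr * gx0
    x1 -= lr * gx1
print(x0, x1)  # creeps towards the global minimum at (1, 1)
```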
{
"source": "40127093/Second-Coursework",
"score": 2
} |
#### File: Second-Coursework/src/index.py
```python
import ConfigParser
import logging
import warnings
# to avoid the generation of .pyc files
import sys
sys.dont_write_bytecode = True
# necessary import to ignore any ExtdepricationWarning warnings for external
# libraries
from flask.exthook import ExtDeprecationWarning
warnings.simplefilter('ignore', ExtDeprecationWarning)
# other essential imports
from logging.handlers import RotatingFileHandler
from flask import (Flask, url_for, g, render_template, flash, redirect, abort)
from flask.ext.bcrypt import check_password_hash
from flask.ext.login import (LoginManager, login_user, logout_user,
login_required, current_user)
import models
import forms
app = Flask(__name__)
app.secret_key = 'sefdewfewr43r535rewfwda!'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
try:
return models.User.get(models.User.id == userid)
except models.DoesNotExist:
return None
# to connect to the database before each request
@app.before_request
def before_request():
g.db = models.DATABASE
g.db.connect()
g.user = current_user
# to close the database connection after each request
@app.after_request
def after_request(response):
g.db.close()
return response
# routing to my landing page which is the portfolio section
@app.route("/myprofile/<username>")
@app.route("/myprofile")
@login_required
def profile(username=None):
template='portfolio.html'
try:
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
this_route = url_for('.profile')
app.logger.info( current_user.username + " viewed " + username + "'s personal Profile page " + this_route)
else:
user=current_user
this_route = url_for('.profile')
app.logger.info( current_user.username + " viewed his/her personal Profile page " + this_route)
if username:
template = 'portfolio.html'
except models.DoesNotExist:
abort(404)
else:
return render_template(template, user=user)
# routing to the about section
@app.route("/about/<username>")
@app.route("/about")
@login_required
def about(username=None):
template='about.html'
try:
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
this_route = url_for('.about')
app.logger.info( current_user.username + " viewed " + username + "'s personal About page " + this_route)
else:
user=current_user
this_route = url_for('.about')
app.logger.info( current_user.username + " viewed his/her personal About Me page " + this_route)
if username:
template = 'about.html'
except models.DoesNotExist:
abort(404)
else:
return render_template(template, user=user)
# routing to the create a new post section
@app.route("/new_post", methods=('GET','POST'))
@login_required
def post(username=None):
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
this_route = url_for('.post')
app.logger.info( current_user.username + " created a new post on " +
username + "'s post feed section " + this_route)
else:
user=current_user
this_route = url_for('.post')
app.logger.info( current_user.username + " created a new post on his/her post feed section "
+ this_route)
form = forms.PostForm()
if form.validate_on_submit():
models.Post.create(user=g.user._get_current_object(),
content=form.content.data.strip())
flash("Message posted!", "success")
return redirect(url_for('root'))
return render_template('post.html', form=form, user=user)
# the user is redirected to the root page after posting a new message and can
# view their recent posts on the post feed section
@app.route("/")
def root(username=None):
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
else:
user = current_user
this_route = url_for('.root')
app.logger.info(current_user.username + " was redirected to the root page " + this_route)
stream = models.Post.select().limit(100)
return render_template('stream.html',user=user, stream=stream)
# routing to the posts stream section
@app.route('/stream')
@app.route('/stream/<username>')
def stream(username=None):
template='stream.html'
if username and username != current_user.username:
this_route = url_for('.stream')
app.logger.info(current_user.username + " viewed " + username + "'s Stream section "
+ this_route)
try:
user = models.User.select().where(models.User.username**username).get()
except models.DoesNotExist:
abort(404)
else:
stream=user.posts.limit(100)
else:
stream=current_user.get_stream().limit(100)
user=current_user
this_route = url_for('.stream')
app.logger.info(current_user.username + " viewed his/her Stream section "
+ this_route)
if username:
template = 'user-stream.html'
return render_template(template, stream=stream, user=user)
# routing to each individual post
@app.route('/post/<int:post_id>')
def view_post(post_id, username=None):
if username and username != current_user.username:
user = models.User.select().where(models.User.username**username).get()
else:
user=current_user
posts = models.Post.select().where(models.Post.id == post_id)
if posts.count() == 0:
abort(404)
return render_template('stream.html', stream=posts, user=user)
# function that adds one follower in the relationship table for the selected user
@app.route('/follow/<username>')
@login_required
def follow(username):
try:
to_user = models.User.get(models.User.username**username)
except models.DoesNotExist:
abort(404)
else:
try:
models.Relationship.create(
from_user=g.user._get_current_object(),
to_user=to_user
)
except models.IntegrityError:
pass
else:
flash("You're now following {}!".format(to_user.username),"success")
app.logger.info(current_user.username + " is now following " + username)
return redirect(url_for('stream',username=to_user.username))
# function that deletes the follower instance from the relationship table for
# the selected user
@app.route('/unfollow/<username>')
@login_required
def unfollow(username):
try:
to_user = models.User.get(models.User.username**username)
except models.DoesNotExist:
abort(404)
else:
try:
models.Relationship.get(
from_user=g.user._get_current_object(),
to_user=to_user
).delete_instance()
except models.IntegrityError:
pass
else:
flash("You've unfollowed {}!".format(to_user.username),"success")
app.logger.info(current_user.username + " is now unfollowing " +
username)
return redirect(url_for('stream',username=to_user.username))
# routing to the register page
@app.route('/register', methods=('GET','POST'))
def register():
this_route = url_for('.register')
app.logger.info("Someone visited the Register page " + this_route)
form = forms.RegisterForm()
if form.validate_on_submit():
flash("Congratulations, you have successfully registered!", "success")
models.User.create_user(
username=form.username.data,
email=form.email.data,
password=form.password.data
)
return redirect(url_for('profile'))
return render_template('register.html', form=form)
# routing to the login page
@app.route('/login', methods=('GET','POST'))
def login():
this_route = url_for('.login')
app.logger.info("Someone visited the Login page " + this_route)
form = forms.LoginForm()
if form.validate_on_submit():
try:
user = models.User.get(models.User.email == form.email.data)
except models.DoesNotExist:
flash("Your email or password doesn't match!", "error")
else:
if check_password_hash(user.password, form.password.data):
login_user(user)
flash("You've been logged in!", "success")
return redirect(url_for('profile'))
else:
flash("Your email or password doesn't match!", "error")
return render_template('login.html', form=form)
# routing to the logout page which redirects the user to the login page
@app.route('/logout')
@login_required
def logout():
this_route = url_for('.logout')
app.logger.info( current_user.username + " requested to logout " + this_route)
logout_user()
flash("You've been logged out. Come back soon!","success")
return redirect(url_for('login'))
# parsing configuration details from an external file
def init (app):
config = ConfigParser.ConfigParser()
try:
config_location = "etc/defaults.cfg"
config.read(config_location)
app.config['DEBUG'] = config.get("config", "debug")
app.config['ip_address'] = config.get("config", "ip_address")
app.config['port'] = config.get("config", "port")
app.config['url'] = config.get("config", "url")
app.config['log_file'] = config.get("logging", "name")
app.config['log_location'] = config.get("logging", "location")
app.config['log_level'] = config.get("logging", "level")
except:
print "Could not read configuration file from: " , config_location
# setting up a logging feature to record action logs into a text file
def logs(app):
log_pathname = app.config['log_location']+ app.config['log_file']
file_handler = RotatingFileHandler(log_pathname, maxBytes=1024*1024*10 ,
backupCount=1024)
file_handler.setLevel( app.config['log_level'])
formatter = logging.Formatter("%(levelname)s | %(asctime)s | %(module)s | %(funcName)s | %(message)s")
file_handler.setFormatter(formatter)
app.logger.setLevel(app.config['log_level'])
app.logger.addHandler(file_handler)
# error handling mechanism to catch all the 404 errors and to redirect the user to
# a custom 404 page
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# initialisation function
if __name__ == "__main__":
init(app)
logs(app)
models.initialize()
try:
# first user created to populate the user table
models.User.create_user(
username='poisonphoebe',
email='<EMAIL>',
password='password',
admin=True
)
except ValueError:
pass
app.run(
host = app.config['ip_address'],
port = int(app.config['port']))
``` |
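`init()` expects an `etc/defaults.cfg` with `[config]` and `[logging]` sections; that file is not part of this excerpt, but a placeholder consistent with the keys read above could be generated like this (every value is hypothetical):
```python
# Sketch of a defaults.cfg matching the keys that init() and logs() read.
# All values below are placeholders, not taken from the repository.
import ConfigParser  # named configparser on Python 3

config = ConfigParser.ConfigParser()
config.add_section("config")
for key, value in [("debug", "True"), ("ip_address", "0.0.0.0"),
                   ("port", "5000"), ("url", "http://localhost:5000")]:
    config.set("config", key, value)
config.add_section("logging")
for key, value in [("name", "app.log"), ("location", "log/"), ("level", "INFO")]:
    config.set("logging", key, value)
with open("etc/defaults.cfg", "w") as cfg_file:
    config.write(cfg_file)
```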
{
"source": "401ode/state_time",
"score": 3
} |
#### File: state_time/timesheet-transition/forms.py
```python
from flask_wtf import Form
from flask_security.forms import LoginForm
from wtforms import validators, StringField, TextAreaField, DateField, DateTimeField, SelectField, FormField, FieldList
from wtforms.validators import InputRequired
from models import Break
from werkzeug.datastructures import MultiDict
class ExtendedLoginForm(LoginForm):
email = StringField('Username or Email Address', [InputRequired()])
class ApprovingForm(Form):
pass
class EntryForm(Form):
# breaks = list(Break.select(Break.id, Break.name).tuples())
# breaks.insert(0, (0, ''))
# date = DateField("Date")
# started_at = DateTimeField("Started At", format='%H:%M')
# finished_at = DateTimeField("Finished At", format='%H:%M')
# break_for = SelectField("Break For", choices=breaks)
#title = StringField("Title",[
# validators.Required(),
# validators.Length(max=80)
#])
#body = TextAreaField("Content",[validators.Required(),])
#category = QuerySelectField("Category", query_factory=lambda: Category.query, allow_blank=True)
#new_category = StringField("New Category")
pass
class TimeSheetForm(Form):
pass
# entries = FieldList(FormField(EntryForm), min_entries=7, max_entries=7)
# def fill(self, timesheet):
# for e in timesheet:
# entry = MultiDict([
# ('date', e.date),
# ('started_at', e.started_at),
# ('finished_at', e.finished_at),
# ('break_for', e.break_for.id if e.break_for else None)
# ])
# row = EntryForm(entry)
# self.entries.append_entry(row)
```
#### File: state_time/timesheet-transition/models.py
```python
from datetime import datetime, timedelta
from peewee import Model, CharField, DateTimeField, ForeignKeyField, \
TextField, IntegerField, DateField, TimeField, BooleanField
from state_time import db, FlaskDB, app, current_user, \
current_week_ending_date, str_to_time
from hashlib import md5
from flask_security import PeeweeUserDatastore, UserMixin, \
RoleMixin, login_required
from playhouse.fields import ManyToManyField
from peewee import drop_model_tables, Proxy, CompositeKey, RawQuery
UserRolesProxy = Proxy()
ApproverCompaniesProxy = Proxy()
class Company(db.Model):
name = CharField()
code = CharField()
class Meta:
table_alias = 'c'
def __str__(self):
return self.name
class Role(db.Model, RoleMixin):
name = CharField(unique=True)
description = TextField(null=True)
class Meta:
table_alias = 'r'
class User(db.Model, UserMixin):
username = CharField(unique=True, index=True)
password = <PASSWORD>()
email = CharField()
first_name = CharField()
last_name = CharField()
#confirmed_at = DateTimeField(null=True)
active = BooleanField(default=True)
workplace = ForeignKeyField(Company, related_name='works_for')
roles = ManyToManyField(
Role,
related_name='users',
through_model=UserRolesProxy)
approves_for = ManyToManyField(
Company,
related_name='approved_by',
through_model=ApproverCompaniesProxy)
full_name = property(
lambda self: "%s %s" %
(self.first_name, self.last_name))
def gravatar_url(self, size=80):
return "http://www.gravatar.com/avatar/%s?d=identicon&s=%d" % \
(md5(self.email.strip().lower().encode('utf-8')).hexdigest(), size)
class Meta:
order_by = ('username',)
table_alias = 'u'
def __str__(self):
return self.full_name
class UserRoles(db.Model):
user = ForeignKeyField(User, index=True, db_column='user_id')
role = ForeignKeyField(Role, index=True, db_column='role_id')
name = property(lambda self: self.role.name)
description = property(lambda self: self.role.description)
class Meta:
db_table = "user_role"
table_alias = 'ur'
primary_key = CompositeKey('user', 'role')
UserRolesProxy.initialize(UserRoles)
class ApproverCompanies(db.Model):
user = ForeignKeyField(User, index=True, db_column='user_id')
company = ForeignKeyField(Company, index=True, db_column='company_id')
name = property(lambda self: self.company.name)
code = property(lambda self: self.company.code)
class Meta:
db_table = "approver_company"
table_alias = "ac"
primary_key = CompositeKey('user', 'company')
ApproverCompaniesProxy.initialize(ApproverCompanies)
class Break(db.Model):
code = CharField(unique=True)
name = CharField()
minutes = IntegerField()
alternative_code = CharField(unique=True, null=True)
class Meta:
order_by = ('code',)
table_alias = 'b'
def __str__(self):
return self.name
def __repr__(self):
return "Break(code=%r, name=%r, minutes=%r, alternative_code=%r)" \
% (self.code, self.name, self.minutes, self.alternative_code)
class Entry(db.Model):
date = DateField()
user = ForeignKeyField(User, related_name='reported_by')
approver = ForeignKeyField(User, related_name='approved_by', null=True)
started_at = TimeField()
finished_at = TimeField()
modified_at = DateTimeField(default=datetime.now)
approved_at = DateTimeField(null=True)
comment = TextField(null=True, default="")
break_for = ForeignKeyField(Break, related_name='break_for', null=True)
is_approved = BooleanField(default=False)
break_length = property(
lambda self: self.break_for.minutes if self.break_for else 0)
@property
def total_min(self):
if self.started_at is None or self.finished_at is None:
return None
total = (self.finished_at.hour - self.started_at.hour) * 60
total += (self.finished_at.minute - self.started_at.minute)
total -= self.break_length
return total
@property
def total_time(self):
total = self.total_min
if total is None:
return None
return timedelta(hours=(total / 60), minutes=(total % 60))
def __str__(self):
output = "On %s from %s to %s" % (
self.date.isoformat(),
"N/A" if self.started_at is None else self.started_at.strftime("%H:%M"),
"N/A" if self.finished_at is None else self.finished_at.strftime("%H:%M"))
if self.break_for:
output += " with beak for " + self.break_for.name
total_min = self.total_min
if total_min:
output += ", total: %d:%02d" % (total_min // 60, total_min % 60)
return output
class Meta:
table_alias = 'e'
@classmethod
def get_user_timesheet(cls, *, user=None, week_ending_date=None):
"""
Retrieves timesheet entries for a user a week ending on week_ending_date.
"""
if user is None:
user = current_user
if week_ending_date is None:
week_ending_date = current_week_ending_date()
rq = RawQuery(cls,
"""
WITH
daynums(num) AS (VALUES (6),(5),(4),(3),(2),(1),(0)),
week(day) AS (SELECT date(?, '-'||num||' day') FROM daynums)
SELECT
id,
day as date,
finished_at,
started_at,
user_id,
modified_at,
break_for_id,
is_approved,
approver_id,
approved_at,
comment
FROM week LEFT JOIN entry ON "date" = day AND user_id = ?
ORDER BY "date" ASC
""", week_ending_date.isoformat(), user.id)
return rq.execute()
@classmethod
def get_for_approving(cls, *, user=None, week_ending_date=None):
"""
        Retrieves timesheet entries for approval.
"""
query = Entry.select()
if user:
query = query.where(Entry.user_id == user.id)
if week_ending_date:
week_start_date = week_ending_date - timedelta(days=7)
query = query.where((Entry.date >= week_start_date)
& (Entry.date <= week_ending_date))
return query.order_by(Entry.date).limit(100).execute()
class TimeSheet(object):
def __init__(self, *, user=None, week_ending_date=None):
if user is None:
user = current_user
if week_ending_date is None:
week_ending_date = current_week_ending_date()
self.user = user
self.week_ending_date = week_ending_date
self.entries = Entry.get_user_timesheet(
user=user, week_ending_date=week_ending_date)
def update(self, rows):
"""
Update timesheet entries or create new ones based on the submitted data
based on the list of row values submitted by the user. rows - a list of
dict of update data
"""
for idx, (old, new) in enumerate(zip(self.entries, rows)):
if not new["id"] or new["id"] == "None":
if not new["started_at"] or new["started_at"] == "None" or not new[
"finished_at"] or new["finished_at"] == "None": # Create a new entry
continue # skip if there is no basic data
old.user = User.get(id=current_user.id)
row_date = self.week_ending_date - timedelta(days=(6 - idx))
old.is_approved = False
started_at = str_to_time(new["started_at"])
finished_at = str_to_time(new["finished_at"])
break_for = Break.get(
id=int(new["break_id"])) if new["break_id"] else None
if (old.started_at != started_at or old.finished_at != finished_at
or old.break_for != break_for): # update only if there are changes:
old.started_at = started_at
old.finished_at = finished_at
if break_for:
old.break_for = break_for
old.modified_at = datetime.now()
old.save()
def approve(self, rows):
"""
        Approve timesheet entries based on the list of row values
submitted by the user. rows - a list of dict of update data
"""
for idx, (entry, row) in enumerate(zip(self.entries, rows)):
if not entry.id:
continue
if "is_approved" in row:
entry.is_approved = True
entry.approver = current_user.id
entry.comment = row["comment"]
entry.approved_at = datetime.now()
else:
entry.is_approved = False
entry.approver = None
entry.comment = row["comment"]
entry.approved_at = None
entry.save()
# Setup Flask-Security
user_datastore = PeeweeUserDatastore(db, User, Role, UserRoles)
def create_tables():
"""
Create all DB tables
"""
if isinstance(db, FlaskDB):
_db = db.database
else:
_db = db
_db.connect()
_db.create_tables((
Company,
Role,
User,
Break,
Entry,
UserRoles,
ApproverCompanies,))
def drop_tables():
"""
Drop all model tables
"""
models = (
m for m in globals().values() if isinstance(
m, type) and issubclass(
m, db.Model))
drop_model_tables(models, fail_silently=True)
``` |
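As a quick sanity check of the `total_min` and `total_time` properties above, an entry from 09:00 to 17:30 with a 30-minute break works out as follows (illustrative arithmetic only):
```python
# total_min for started_at=09:00, finished_at=17:30, break_for.minutes=30
total = (17 - 9) * 60   # 480 minutes from the hour difference
total += (30 - 0)       # +30 minutes from the minute difference -> 510
total -= 30             # minus the break -> 480
# total_time == timedelta(hours=480 / 60, minutes=480 % 60) == 8:00:00
```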
{
"source": "401-pi-rates/raspberry-garden",
"score": 3
} |
#### File: raspberry-garden/garden_app/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Temperature(models.Model):
"""To set up Temperature class."""
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='temperature')
temperature = models.CharField(max_length=48)
date_added = models.DateTimeField(default=timezone.now)
# def __repr__(self):
# return ''
def __str__(self):
return f'{self.date_added} ({self.temperature})'
# class WaterLevel(models.Model):
# """To set up WaterLevel class."""
# user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='waterlevel')
# water = models.CharField(max_length=48)
# date_added = models.DateTimeField(default=timezone.now)
# # def __repr__(self):
# # return ''
# def __str__(self):
# return f'{self.date_added} ({self.water})'
```
#### File: raspberry-garden/garden_app/views.py
```python
from .models import Temperature
from garden_api.models import SoilMoisture
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_list_or_404, get_object_or_404
import bokeh.plotting as bk
from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.models import HoverTool, Label, BoxZoomTool, PanTool, ZoomInTool, ZoomOutTool, ResetTool
@login_required
def weekly_view(request):
"""To render weekly_view with its content."""
# To populate temp_list:
temps = Temperature.objects.all()
temp_date = []
temp_read = []
temp_list = []
# Add unique entries to temp_date, temp_read, and temp_list:
for item in temps:
if item.date_added.date() not in temp_date:
temp_date.append(item.date_added.date())
temp_read.append(item.temperature)
for i in range(len(temp_date)):
obj = {'date_added': temp_date[i], 'temperature': temp_read[i]}
temp_list.append(obj)
temp_list.sort(key=lambda x: x['date_added'], reverse=True)
# To select 7 entries from the sorted list:
temp_list_7 = []
for i in range(7):
if len(temp_list) >= i+1:
temp_list_7.append(temp_list[i])
# Add unique entries to water_date, water_read, and water_list:
waters = SoilMoisture.objects.all()
water_date = []
water_read = []
water_list = []
for i in range(len(waters)):
if waters[i].time_stamp.date() not in water_date:
water_date.append(waters[i].time_stamp.date())
if (waters[i].has_moisture):
water_read.append('Has Water')
else:
water_read.append('Dry')
for i in range(len(water_date)):
obj = {'time_stamp': water_date[i], 'has_moisture': water_read[i]}
water_list.append(obj)
water_list.sort(key=lambda x: x['time_stamp'], reverse=True)
# To select 7 entries from the sorted list:
water_list_7 = []
for i in range(7):
if len(water_list) >= i+1:
water_list_7.append(water_list[i])
context = {
# 'temperatures': get_list_or_404(Temperature),
'temperatures': temp_list_7,
# 'waterlevel': get_list_or_404(SoilMoisture),
'waterlevel': water_list_7,
}
return render(request, 'raspberry/weekly.html', context)
@login_required
def monthly_view(request):
"""To render monthly_view with its content."""
# TO POPULATE TEMP_DATE AND TEMP_READ:
temps = Temperature.objects.all()
temp_date = []
temp_read = []
temp_list = []
i = 0
# To append all entries in temp_list, in sorted sequence:
for item in temps:
if item.date_added.date() not in temp_date:
temp_date.append(item.date_added.date())
temp_read.append(item.temperature)
for i in range(len(temp_date)):
obj = {'date_added': temp_date[i], 'temperature': temp_read[i]}
temp_list.append(obj)
temp_list.sort(key=lambda x: x['date_added'], reverse=True)
# To select 30 entries from the sorted list:
temp_list_30 = []
for i in range(30):
if len(temp_list) >= i+1:
temp_list_30.append(temp_list[i])
# To save object key values to lists:
temp_date_graph = []
temp_read_graph = []
for i in range(len(temp_list_30)):
temp_date_graph.append(temp_list_30[i]['date_added'])
temp_read_graph.append(temp_list_30[i]['temperature'])
# TO PLOT TEMPERATURE STOCK_CHART
p1 = bk.figure(title=f'Temperature', x_axis_type="datetime", width=350, height=300)
p1.grid.grid_line_alpha = 0.3
p1.xaxis.axis_label = 'Date'
p1.yaxis.axis_label = 'Temperature'
p1.line(temp_date_graph, temp_read_graph, color='red')
p1.legend.location = "top_left"
script_temperature, div_temperature = components(p1)
# TO POPULATE WATER_DATE AND WATER_READ:
# To append all entries in water_list, in sorted sequence:
waters = SoilMoisture.objects.all()
water_date = []
water_read = []
water_list = []
for i in range(len(waters)):
if waters[i].time_stamp.date() not in water_date:
water_date.append(waters[i].time_stamp.date())
water_read.append(0)
if (waters[i].has_moisture):
                water_read[-1] = 1
for i in range(len(water_date)):
obj = {'time_stamp': water_date[i], 'has_moisture': water_read[i]}
water_list.append(obj)
water_list.sort(key=lambda x: x['time_stamp'], reverse=True)
# To select 30 entries from the sorted list:
water_list_30 = []
for i in range(30):
if len(water_list) >= i+1:
water_list_30.append(water_list[i])
# To save object key values to lists:
water_date_graph = []
water_read_graph = []
for i in range(len(water_list_30)):
water_date_graph.append(water_list_30[i]['time_stamp'])
water_read_graph.append(water_list_30[i]['has_moisture'])
# TO PLOT WATER IRIS_CHART
p3 = figure(title="WaterLevel", x_axis_type="datetime", width=350, height=300)
p3.xaxis.axis_label = 'Date'
p3.yaxis.axis_label = 'WaterLevel'
p3.circle(water_date_graph, water_read_graph, color='blue', fill_alpha=0.2, size=10)
script_water, div_water = components(p3)
context = {
'temperatures': get_list_or_404(Temperature),
'the_script_temperature': script_temperature,
'the_div_temperature': div_temperature,
'the_script_water': script_water,
'the_div_water': div_water,
}
return render(request, 'raspberry/monthly.html', context)
``` |
{
"source": "402853962/meiduo_mall",
"score": 2
} |
#### File: celery_tasks/email/tasks.py
```python
from django.core.mail import send_mail
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from meiduo_mall import settings
from celery_tasks.main import app
@app.task
def celery_send_mail(email,access_token):
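    # Note: the subject and HTML body below are Chinese strings for the
    # Meiduo Mall ("美多商城") verification e-mail; they thank the user and
    # link to the activation URL built from verify_url.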
subject = "美多商城邮箱验证"
message = ''
from_email = '<EMAIL>'
recipient_list = [email]
verify_url = 'http://www.meiduo.site:8080/success_verify_email.html?token=%s' % access_token
html_message = '<p>尊敬的用户您好!</p>' \
'<p>感谢您使用美多商城。</p>' \
'<p>您的邮箱为:%s 。请点击此链接激活您的邮箱:</p>' \
'<p><a href="%s">%s<a></p>' % (email, verify_url, verify_url)
send_mail(subject, message, from_email, recipient_list, html_message=html_message)
```
#### File: meiduo_mall/utils/converters.py
```python
class UsernameConverter:
regex = '[a-zA-Z0-9_-]{5,20}'
def to_python(self,value):
return str(value)
class MobileConverter:
    regex = r'1[3-9]\d{9}'
def to_python(self,value):
return str(value)
class ImageConverter:
    regex = r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}'
def to_python(self,value):
return str(value)
```
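These classes follow Django's path-converter protocol (a `regex` plus `to_python`); they are normally registered in the project's `urls.py`, roughly as in the hypothetical sketch below (the actual `urls.py` is not part of this excerpt):
```python
# Hypothetical registration of the converters above in a Django urls.py.
from django.urls import register_converter
from utils.converters import UsernameConverter, MobileConverter  # assumed import path

register_converter(UsernameConverter, 'username')
register_converter(MobileConverter, 'mobile')

urlpatterns = [
    # e.g. path('usernames/<username:username>/count/', views.UsernameCountView.as_view()),
]
```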
#### File: utils/fastdfs/storage.py
```python
from django.core.files.storage import Storage
class MyStorage(Storage):
# def __init__(self, option=None):
# if not option:
# option = settings.CUSTOM_STORAGE_OPTIONS
    # Django's Storage API looks up _open/_save (single leading underscore).
    def _open(self, name, mode='rb'):
        pass
    def _save(self, name, content, max_length=None):
        pass
def url(self, name):
return 'http://192.168.112.146:8888/' + name
``` |
{
"source": "403712387/ggf",
"score": 2
} |
#### File: ggf/build/BuildService.py
```python
import os
import stat
import copy
import shutil
import time
import sys
currentTime = time.localtime()
strTime = "%d-%02d-%02d %02d:%02d:%02d" % (currentTime.tm_year, currentTime.tm_mon, currentTime.tm_mday, currentTime.tm_hour, currentTime.tm_min,currentTime.tm_sec)
# Service module
serviceName = "ggf"
# Git information
gitBranch = "unknown"
gitCommitId = "unknown"
# Build flags; "debug" and "race" are supported
compileArg = ""
#------------------------ Function definitions -------------------------#
# Clean up a directory
def cleanFiles(path):
if os.path.exists(path):
shutil.rmtree(path)
# Parse command-line arguments
def parseArgs():
global compileArg
if "race" in sys.argv:
compileArg = "-race"
if "debug" in sys.argv:
compileArg = '''-gcflags "-N -l"'''
# Download the third-party dependencies
def downloadThirdLibrary():
librarys = ["github.com/btfak/sntp", "github.com/sirupsen/logrus", "github.com/shirou/gopsutil", "github.com/segmentio/kafka-go", "github.com/mattn/go-sqlite3"]
for library in librarys:
os.system("go get %s"%library)
# Collect git info (current branch and commit id)
def getGitInfo():
global gitBranch, gitCommitId
gitDir = "../.git"
    # Read the branch name
branchFile = os.path.join(gitDir, "HEAD")
if os.path.exists(branchFile):
with open(branchFile, "r") as f:
line = f.readline()
line = line.strip()
splits = line.split("/")
if len(splits) > 0:
gitBranch = splits[-1]
    # Read the commit id
commitIdFile = os.path.join(gitDir + "/refs/heads" , gitBranch)
if os.path.exists(commitIdFile):
with open(commitIdFile) as f:
line = f.readline()
line = line.strip()
gitCommitId = line
# Compile the service
def compileService():
global serviceName, compileArg, gitBranch, gitCommitId
compileSuccessful = False
    # Switch to the source directory
currentPath = os.getcwd()
os.chdir("../src")
    # Format the git info for the linker
git = "-X HostServiceModule.GitBranch=%s -X HostServiceModule.GitCommitID=%s"%(gitBranch, gitCommitId)
    # Get the current GOPATH
currentGoPath = ""
pipe = os.popen("go env GOPATH")
lines = pipe.readlines()
if len(lines) > 0 :
currentGoPath = lines[0].strip("\n")
    # Build
projectPath = os.getcwd()[:-4]
goPathEnv = "export GOPATH=%s:%s"%(currentGoPath,projectPath)
os.system(goPathEnv + " && go clean")
os.system(goPathEnv + " && go clean -r")
os.system(goPathEnv + " && go clean -cache")
compile = '''go build -ldflags "%s" %s -o ./bin/%s/%s main.go'''%(git, compileArg, serviceName, serviceName)
print(goPathEnv + " && " + compile)
if os.system(goPathEnv + " && " + compile) == 0:
compileSuccessful = True
os.chdir(currentPath)
return compileSuccessful
# Copy configuration files
def copyConfigFile():
global serviceName
src = "../config"
dst = "../src/bin/%s"%serviceName
copyFiles(src, dst)
    # The config file must live in the config directory
src = "../src/bin/%s/config.json"%serviceName
dst = "../src/bin/%s/config"%serviceName
copyFiles(src, dst)
os.remove(src)
# Fix file permissions
def processFilePromission(path):
files = os.listdir(path)
for file in files:
fileName = os.path.join(path, file)
if not os.path.isfile(fileName):
continue
        # Make .sh files executable
if fileName.endswith(".sh"):
os.chmod(fileName, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# Copy a file or a directory
def copyFiles(source, destination):
    # Copy a single file (handle permissions and symlinks)
def copyFile(sourceFile, destDir):
if not os.path.exists(sourceFile):
return
if not os.path.exists(destDir):
os.makedirs(destDir)
        if os.path.islink(sourceFile):  # copy a symlink
currentPath = os.getcwd()
symbolLink = os.readlink(sourceFile)
os.chdir(destDir)
os.symlink(symbolLink, os.path.basename(sourceFile))
os.chdir(currentPath)
        elif os.path.isfile(sourceFile):  # copy a regular file
with open(sourceFile, "rb") as input:
with open(os.path.join(destDir, os.path.basename(sourceFile)), "wb") as output:
output.write(input.read())
os.chmod(os.path.join(destDir, os.path.basename(sourceFile)), os.stat(sourceFile).st_mode)
if not os.path.exists(source):
print("copy %s to %s fail, not find %s"%(source, destination, source))
return
# 目标文件夹一定要存在
if not os.path.exists(destination):
os.makedirs(destination)
    if os.path.isdir(source):  # copy everything under the source directory
for path, directorys, files in os.walk(source):
subPath = path[len(source): ]
            # Create the directory
if subPath.startswith("/"):
subPath = subPath[1:]
destinationPath = os.path.join(destination, subPath)
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
            # Copy the files in this directory
for file in files:
copyFile(os.path.join(path, file), destinationPath)
    elif os.path.isfile(source):  # copy a single file
copyFile(source, destination)
# Normalize script line endings: replace \r\n with \n
def formatLineBrak():
global serviceName
fileNames = ["../src/bin/" + serviceName + "/start.sh", "../src/bin/" + serviceName + "/stop.sh"]
for fileName in fileNames:
if not os.path.exists(fileName):
continue
fileData = ""
with open(fileName, "r") as file:
for lineData in file:
lineData = lineData.replace("\r\n", "\n")
fileData += lineData
        # Write the cleaned content back to the script
with open(fileName, "w") as file:
file.write(fileData)
# Build the service
def buildService():
global serviceName
outputDir = "../src/bin/" + serviceName
serviceDir = "./" + serviceName
parseArgs()
downloadThirdLibrary()
cleanFiles(outputDir)
cleanFiles(serviceDir)
getGitInfo()
    # Compile the modules
if not compileService():
print("\n--------------compile fail at %s--------------" % (strTime))
return -1
    # Copy configuration files
copyConfigFile()
    # Process the shell scripts
formatLineBrak()
    # Fix file permissions
processFilePromission(outputDir)
    # Move the output to the current directory
print("move dir %s to %s"%(outputDir, serviceDir))
copyFiles(outputDir, serviceDir)
print("\n--------------compile successful at %s--------------"%(strTime))
return 0
#------------------------ Entry point -------------------------#
buildService()
``` |
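The script is driven entirely by its command-line arguments; per `parseArgs` above, it is invoked roughly as follows:
```python
# Invocation sketch (run from the build directory):
#   python BuildService.py          # default build
#   python BuildService.py debug    # adds -gcflags "-N -l"
#   python BuildService.py race     # adds -race
```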
{
"source": "40423219/cpw3b",
"score": 2
} |
#### File: plugin/liquid_tags/liquid_tags.py
```python
from pelican import signals
from .mdx_liquid_tags import LiquidTags, LT_CONFIG
def addLiquidTags(gen):
if not gen.settings.get('MD_EXTENSIONS'):
from pelican.settings import DEFAULT_CONFIG
gen.settings['MD_EXTENSIONS'] = DEFAULT_CONFIG['MD_EXTENSIONS']
if LiquidTags not in gen.settings['MD_EXTENSIONS']:
configs = dict()
for key,value in LT_CONFIG.items():
configs[key]=value
for key,value in gen.settings.items():
if key in LT_CONFIG:
configs[key]=value
gen.settings['MD_EXTENSIONS'].append(LiquidTags(configs))
def register():
signals.initialized.connect(addLiquidTags)
``` |
{
"source": "404akhan/memnet",
"score": 2
} |
#### File: 404akhan/memnet/memnet.py
```python
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn, autograd
from torch.utils.data import DataLoader
from babi import BabiDataset, pad_collate
from torch.nn.utils import clip_grad_norm
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.fastest = True
class MemoryCell(nn.Module):
def __init__(self, num_mem_slots, embed_dim):
super(MemoryCell, self).__init__()
self.num_mem_slots = num_mem_slots
self.embed_dim = embed_dim
# Memory update linear layers.
self.U = nn.Linear(embed_dim, embed_dim)
self.V = nn.Linear(embed_dim, embed_dim, bias=False)
self.W = nn.Linear(embed_dim, embed_dim, bias=False)
self.prelu_memory = nn.PReLU(init=1)
init.xavier_normal(self.U.weight)
init.xavier_normal(self.V.weight)
init.xavier_normal(self.W.weight)
def forward(self, inputs, keys):
memories = keys
memory_inputs = inputs
for index, sentence in enumerate(memory_inputs):
# Compute memory updates.
sentence = sentence.unsqueeze(1).repeat(1, self.num_mem_slots, 1)
sentence = sentence.view_as(memories)
memory_gates = F.sigmoid((sentence * (memories + keys)).sum(dim=-1))
memory_gates = memory_gates.expand_as(memories)
candidate_memories = self.prelu_memory(self.U(memories) + self.V(sentence) + self.W(keys))
updated_memories = memories + memory_gates * candidate_memories
updated_memories = updated_memories / (
updated_memories.norm(p=2, dim=-1).expand_as(updated_memories) + 1e-12)
memories = updated_memories
return memories
class RecurrentEntityNetwork(nn.Module):
def __init__(self, hidden_dim, max_num_sentences=150, vocab_size=50):
super(RecurrentEntityNetwork, self).__init__()
self.max_num_sentences = max_num_sentences
self.embed_dim = hidden_dim
self.num_mem_slots = 20
self.vocab_size = vocab_size
self.memory_mask = nn.Parameter(torch.randn(max_num_sentences, 1))
self.question_mask = nn.Parameter(torch.randn(max_num_sentences, 1))
self.embedding = nn.Embedding(vocab_size + self.num_mem_slots, hidden_dim, padding_idx=0)
init.uniform(self.embedding.weight, a=-(3 ** 0.5), b=3 ** 0.5)
self.cell = MemoryCell(self.num_mem_slots, hidden_dim)
# Fully connected linear layers.
self.C = nn.Linear(hidden_dim, hidden_dim)
self.H = nn.Linear(hidden_dim, hidden_dim, bias=False)
self.Z = nn.Linear(hidden_dim, vocab_size, bias=False)
self.prelu_outputs = nn.ReLU()
# Initialize weights.
init.xavier_normal(self.C.weight)
init.xavier_normal(self.H.weight)
init.xavier_normal(self.Z.weight)
self.memory_mask.data.fill_(1)
self.question_mask.data.fill_(1)
def forward(self, contexts, questions):
batch_size, context_length, context_num_words = contexts.size()
_, question_length = questions.size()
# List of sentence embeddings for every story in a batch. (num. sentences, batch size, encoder dim.)
contexts = self.embedding(contexts.view(batch_size, -1))
contexts = contexts.view(batch_size, context_length, context_num_words, -1)
questions = self.embedding(questions)
memory_mask = self.memory_mask[:context_length].unsqueeze(0).unsqueeze(2).expand(*contexts.size())
question_mask = self.question_mask[:question_length].unsqueeze(0).expand(*questions.size())
memory_inputs = torch.sum(contexts * memory_mask, dim=2).squeeze().t()
question_inputs = torch.sum(questions * question_mask, dim=1).squeeze()
# Compute memory updates.
keys = torch.arange(self.vocab_size, self.vocab_size + self.num_mem_slots)
keys = torch.autograd.Variable(keys.unsqueeze(0).expand(batch_size, self.num_mem_slots).long().cuda())
keys = self.embedding(keys).view(batch_size * self.num_mem_slots, -1)
network_graph = self.cell(memory_inputs, keys)
network_graph = self.C(network_graph).view(batch_size, self.num_mem_slots, self.embed_dim)
# Apply attention to the entire acyclic graph using the questions.
attention_energies = network_graph * question_inputs.unsqueeze(1).expand_as(network_graph)
attention_energies = attention_energies.sum(dim=-1)
attention_weights = F.softmax(attention_energies).expand_as(network_graph)
attended_network_graph = (network_graph * attention_weights).sum(dim=1).squeeze()
# Condition the fully-connected layer using the questions.
outputs = self.prelu_outputs(question_inputs + self.H(attended_network_graph))
outputs = self.Z(outputs)
return outputs
HIDDEN_DIM = 100
BATCH_SIZE = 100
NUM_EPOCHS = 250
LOG_FILE = "memnet.txt"
if __name__ == '__main__':
dataset = BabiDataset()
vocab_size = len(dataset.QA.VOCAB)
criterion = nn.CrossEntropyLoss(size_average=False)
model = RecurrentEntityNetwork(HIDDEN_DIM, 130, vocab_size)
model.cuda()
early_stopping_counter = 0
best_accuracy = 0
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
for epoch in range(NUM_EPOCHS):
dataset.set_mode('train')
train_loader = DataLoader(
dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pad_collate
)
model.train()
if early_stopping_counter < 20:
total_accuracy = 0
num_batches = 0
for batch_idx, data in enumerate(train_loader):
optimizer.zero_grad()
contexts, questions, answers = data
contexts = autograd.Variable(contexts.long().cuda())
questions = autograd.Variable(questions.long().cuda())
answers = autograd.Variable(answers.cuda())
outputs = model(contexts, questions)
l2_loss = 0
for name, param in model.named_parameters():
l2_loss += 0.001 * torch.sum(param * param)
loss = criterion(outputs, answers) + l2_loss
predictions = F.softmax(outputs).data.max(1)[1]
correct = predictions.eq(answers.data).cpu().sum()
acc = correct * 100. / len(contexts)
loss.backward()
clip_grad_norm(model.parameters(), 40)
total_accuracy += acc
num_batches += 1
if batch_idx % 20 == 0:
print('[Epoch %d] [Training] loss : %f, acc : %f, batch_idx : %d' % (
epoch, loss.data[0], total_accuracy / num_batches, batch_idx
))
optimizer.step()
dataset.set_mode('valid')
valid_loader = DataLoader(
dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
)
model.eval()
total_accuracy = 0
num_batches = 0
for batch_idx, data in enumerate(valid_loader):
contexts, questions, answers = data
contexts = autograd.Variable(contexts.long().cuda())
questions = autograd.Variable(questions.long().cuda())
answers = autograd.Variable(answers.cuda())
outputs = model(contexts, questions)
l2_loss = 0
for name, param in model.named_parameters():
l2_loss += 0.001 * torch.sum(param * param)
loss = criterion(outputs, answers) + l2_loss
predictions = F.softmax(outputs).data.max(1)[1]
correct = predictions.eq(answers.data).cpu().sum()
acc = correct * 100. / len(contexts)
total_accuracy += acc
num_batches += 1
total_accuracy = total_accuracy / num_batches
if total_accuracy > best_accuracy:
best_accuracy = total_accuracy
best_state = model.state_dict()
early_stopping_counter = 0
else:
early_stopping_counter += 1
print('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy))
with open(LOG_FILE, 'a') as fp:
fp.write('[Epoch %d] [Validate] Accuracy : %f' % (epoch, total_accuracy) + '\n')
if total_accuracy == 1.0:
break
else:
print('Early Stopping at Epoch %d, Valid Accuracy : %f' % (epoch, best_accuracy))
break
dataset.set_mode('test')
test_loader = DataLoader(
dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_collate
)
test_acc = 0
num_batches = 0
for batch_idx, data in enumerate(test_loader):
contexts, questions, answers = data
contexts = autograd.Variable(contexts.long().cuda())
questions = autograd.Variable(questions.long().cuda())
answers = autograd.Variable(answers.cuda())
        model.load_state_dict(best_state)  # restore the best validation weights
outputs = model(contexts, questions)
l2_loss = 0
for name, param in model.named_parameters():
l2_loss += 0.001 * torch.sum(param * param)
loss = criterion(outputs, answers) + l2_loss
predictions = F.softmax(outputs).data.max(1)[1]
correct = predictions.eq(answers.data).cpu().sum()
acc = correct * 100. / len(contexts)
test_acc += acc
num_batches += 1
print('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches))
with open(LOG_FILE, 'a') as fp:
fp.write('[Epoch %d] [Test] Accuracy : %f' % (epoch, test_acc / num_batches) + '\n')
``` |
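A brief shape summary for `RecurrentEntityNetwork.forward`, inferred from the code above (CUDA is required by the hard-coded `.cuda()` calls):
```python
# contexts : LongTensor (batch, num_sentences, words_per_sentence)
# questions: LongTensor (batch, question_length)
# returns  : FloatTensor (batch, vocab_size) of answer logits
```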
{
"source": "404alex/AdaptiveNeuralTrees",
"score": 3
} |
#### File: 404alex/AdaptiveNeuralTrees/data.py
```python
import torch
import torchvision
from torchvision import datasets, transforms
from ops import ChunkSampler
from imbalanced import ImbalancedDatasetSampler
import numpy as np
from torch.utils import data as tu
from torch._utils import _accumulate
from torch import randperm
from sklearn.preprocessing import normalize
from helper import Subset
import torch.nn.functional as F
def random_split(dataset, lengths):
r"""
Randomly split a dataset into non-overlapping new datasets of given lengths.
Arguments:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths of splits to be produced
"""
if sum(lengths) != len(dataset):
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
indices = randperm(sum(lengths)).tolist()
return [Subset(dataset, indices[offset - length:offset]) for offset, length in
zip(_accumulate(lengths), lengths)]
def get_dataloaders(
dataset='mnist',
batch_size=128,
augmentation_on=False,
cuda=False, num_workers=0,
):
# TODO: move the dataloader to data.py
kwargs = {
'num_workers': num_workers, 'pin_memory': True,
} if cuda else {}
if dataset == 'mnist':
if augmentation_on:
transform_train = transforms.Compose(
[
transforms.RandomCrop(28, padding=2),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
],
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
],
)
else:
transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
],
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
],
)
mnist_train = datasets.MNIST(
'../data', train=True, download=True, transform=transform_train,
)
mnist_valid = datasets.MNIST(
'../data', train=True, download=True, transform=transform_test,
)
mnist_test = datasets.MNIST(
'../data', train=False, transform=transform_test,
)
TOTAL_NUM = 60000
NUM_VALID = int(round(TOTAL_NUM * 0.1))
NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
train_loader = torch.utils.data.DataLoader(
mnist_train,
batch_size=batch_size,
sampler=ChunkSampler(NUM_TRAIN, 0, shuffle=True),
**kwargs)
valid_loader = torch.utils.data.DataLoader(
mnist_valid,
batch_size=batch_size,
sampler=ChunkSampler(NUM_VALID, NUM_TRAIN, shuffle=True),
**kwargs)
test_loader = torch.utils.data.DataLoader(
mnist_test,
batch_size=1000,
shuffle=False,
**kwargs)
elif dataset == 'cifar10':
if augmentation_on:
transform_train = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010),
),
],
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010),
),
],
)
else:
transform_train = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
],
)
transform_test = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
],
)
cifar10_train = torchvision.datasets.CIFAR10(
root='../data', train=True, download=True,
transform=transform_train,
)
cifar10_valid = torchvision.datasets.CIFAR10(
root='../data', train=True, download=True, transform=transform_test,
)
cifar10_test = torchvision.datasets.CIFAR10(
root='../data', train=False, download=True,
transform=transform_test,
)
TOTAL_NUM = 50000
NUM_VALID = int(round(TOTAL_NUM * 0.02))
NUM_TRAIN = int(round(TOTAL_NUM - NUM_VALID))
train_loader = torch.utils.data.DataLoader(
cifar10_train,
batch_size=batch_size,
sampler=ChunkSampler(NUM_TRAIN, 0, shuffle=True),
**kwargs)
valid_loader = torch.utils.data.DataLoader(
cifar10_valid,
batch_size=batch_size,
sampler=ChunkSampler(NUM_VALID, NUM_TRAIN, shuffle=True),
**kwargs)
test_loader = torch.utils.data.DataLoader(
cifar10_test,
batch_size=1000,
shuffle=False,
**kwargs)
elif dataset == 'iot':
if augmentation_on:
transform_train = transforms.Compose(
[
transforms.Resize((40, 40)),
transforms.RandomCrop(40, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010),
),
],
)
transform_test = transforms.Compose(
[
transforms.Resize((40, 40)),
transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010),
),
],
)
else:
transform_train = transforms.Compose(
[
transforms.Resize((40, 40)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
],
)
transform_test = transforms.Compose(
[
transforms.Resize((40, 40)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
],
)
train_data = datasets.ImageFolder(root="../img", transform=transform_train)
test_data = datasets.ImageFolder(root="../img", transform=transform_test)
val_data = datasets.ImageFolder(root="../img", transform=transform_test)
unseen_data = datasets.ImageFolder(root="../img_test", transform=transform_test)
TOTAL_NUM = 551
NUM_VALID = 11
NUM_TRAIN = TOTAL_NUM - NUM_VALID
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=batch_size,
sampler=ChunkSampler(NUM_TRAIN, 0, shuffle=True),
**kwargs)
valid_loader = torch.utils.data.DataLoader(
val_data,
batch_size=batch_size,
sampler=ChunkSampler(NUM_VALID, NUM_TRAIN, shuffle=True),
**kwargs)
test_loader = torch.utils.data.DataLoader(
unseen_data,
batch_size=51,
shuffle=False,
**kwargs)
# train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, **kwargs)
# valid_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, shuffle=True, **kwargs)
elif dataset == 'batadal':
filename = '../data/ICS/BATADAL/Physical_BATADAL.npy'
data = np.load(filename, allow_pickle=True)
datalen = len(data[0])
X = data[:, 1:datalen - 2]
X = normalize(X, axis=0)
y = data[:, datalen - 1]
re_X = []
for item in X:
z = np.zeros(6)
item = np.concatenate((item, z), axis=0)
temp = np.reshape(item, (1, 7, 7)).tolist()
re_X.append(np.array(temp))
re_y = []
for item in y:
if item == 'Normal':
temp = 0
else:
temp = 1
re_y.append(temp)
tensor_x = torch.Tensor(re_X)
tensor_x = F.upsample(tensor_x, size=28, mode='bilinear')
tensor_y = torch.Tensor(re_y).long()
all_dataset = tu.TensorDataset(tensor_x.data, tensor_y)
total_num = len(y)
val_num = int(round(total_num * 0.1))
train_num = total_num - val_num - 2000
tran_set, test_set, val_set = random_split(all_dataset, [train_num, 2000, val_num])
NUM_VALID = len(val_set)
NUM_TRAIN = len(tran_set)
train_loader = torch.utils.data.DataLoader(
tran_set,
batch_size=batch_size,
sampler=ImbalancedDatasetSampler(tran_set),
**kwargs)
valid_loader = torch.utils.data.DataLoader(
val_set,
batch_size=batch_size,
sampler=ImbalancedDatasetSampler(val_set),
**kwargs)
test_loader = torch.utils.data.DataLoader(
test_set,
batch_size=2000,
shuffle=False,
**kwargs)
elif dataset == 'swat':
filename = '../data/SWAT/Physical_SWAT_2019.npy'
data = np.load(filename, allow_pickle=True)
datalen = len(data[0])
X = data[:, 1:datalen - 2]
X[X == 'Active'] = 1.0
X[X == 'Inactive'] = 0.0
X = normalize(X, axis=0)
y = data[:, datalen - 1]
re_X = []
for item in X:
z = np.zeros(5)
item = np.concatenate((item, z), axis=0)
temp = np.reshape(item, (1, 9, 9)).tolist()
re_X.append(np.array(temp))
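        # Each SWaT feature row is zero-padded with 5 zeros to 81 entries (76 raw features
        # per row) and reshaped into a 1x9x9 "image"; it is upsampled to 54x54 below.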
re_y = []
for item in y:
if item == 'Normal':
temp = 0
else:
temp = 1
re_y.append(temp)
tensor_x = torch.Tensor(re_X)
tensor_x = F.upsample(tensor_x, size=54, mode='bilinear')
tensor_y = torch.Tensor(re_y).long()
all_dataset = tu.TensorDataset(tensor_x.data, tensor_y)
total_num = len(y)
val_num = int(round(total_num * 0.1))
train_num = total_num - val_num - 2000
tran_set, test_set, val_set = random_split(all_dataset, [train_num, 2000, val_num])
NUM_VALID = len(val_set)
NUM_TRAIN = len(tran_set)
train_loader = torch.utils.data.DataLoader(
tran_set,
batch_size=batch_size,
sampler=ImbalancedDatasetSampler(tran_set),
**kwargs)
valid_loader = torch.utils.data.DataLoader(
val_set,
batch_size=batch_size,
sampler=ImbalancedDatasetSampler(val_set),
**kwargs)
test_loader = torch.utils.data.DataLoader(
test_set,
batch_size=2000,
shuffle=False,
**kwargs)
else:
raise NotImplementedError("Specified data set is not available.")
return train_loader, valid_loader, test_loader, NUM_TRAIN, NUM_VALID
def get_dataset_details(dataset):
if dataset == 'mnist':
input_nc, input_width, input_height = 1, 28, 28
classes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
elif dataset == 'cifar10':
input_nc, input_width, input_height = 3, 32, 32
classes = (
'plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck',
)
elif dataset == 'iot':
input_nc, input_width, input_height = 3, 40, 40
classes = (
'benign_img', 'malicious_img'
)
elif dataset == 'batadal':
input_nc, input_width, input_height = 1, 28, 28
classes = (
'Normal', 'Attack'
)
elif dataset == 'swat':
input_nc, input_width, input_height = 1, 54, 54
classes = (
'Normal', 'Attack'
)
else:
raise NotImplementedError("Specified data set is not available.")
return input_nc, input_width, input_height, classes
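# Example: get_dataset_details('cifar10') -> (3, 32, 32, ('plane', 'car', ..., 'truck')).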
```
#### File: 404alex/AdaptiveNeuralTrees/utils.py
```python
import torch
import torchvision
import json
import time
import models
import random
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from ops import \
count_number_transforms, count_number_transforms_after_last_downsample
# ---------------------------- Model configuration ----------------------
def define_node(
args, node_index, level, parent_index, tree_struct, identity=False,
):
""" Define node operations.
    In this function, we assume that the 3 building blocks of node operations,
    i.e. the transformer, solver and router, are of fixed complexity.
"""
# define meta information
num_transforms = 0 if node_index == 0 else count_number_transforms(parent_index, tree_struct)
meta = {'index': node_index,
'parent': parent_index,
'left_child': 0,
'right_child': 0,
'level': level,
'extended': False,
'split': False,
'visited': False,
'is_leaf': True,
'train_accuracy_gain_split': -np.inf,
'valid_accuracy_gain_split': -np.inf,
'test_accuracy_gain_split': -np.inf,
'train_accuracy_gain_ext': -np.inf,
'valid_accuracy_gain_ext': -np.inf,
'test_accuracy_gain_ext': -np.inf,
'num_transforms': num_transforms}
# get input shape before transformation
if not tree_struct: # if it's first node, then set it to the input data size
meta['in_shape'] = (1, args.input_nc, args.input_width, args.input_height)
else:
meta['in_shape'] = tree_struct[parent_index]['out_shape']
# -------------------------- define transformer ---------------------------
# no transformation if the input size is too small.
if meta['in_shape'][2] < 3 or meta['in_shape'][3] < 3:
identity = True
if identity or args.transformer_ver==1:
meta['transformed'] = False
else:
meta['transformed'] = True
# only downsample at the specified frequency:
        # currently assume the initial transform always performs downsampling.
num_downsample = 0 if node_index == 0 else count_number_transforms_after_last_downsample(parent_index, tree_struct)
if args.downsample_interval == num_downsample or node_index == 0:
meta['downsampled'] = True
else:
meta['downsampled'] = False
# get the transformer version:
config_t = {'kernel_size': args.transformer_k,
'ngf': args.transformer_ngf,
'batch_norm': args.batch_norm,
'downsample': meta['downsampled'],
'expansion_rate': args.transformer_expansion_rate,
'reduction_rate': args.transformer_reduction_rate
}
transformer_ver = args.transformer_ver
if identity:
transformer = models.Identity(meta['in_shape'][1], meta['in_shape'][2], meta['in_shape'][3],
**config_t)
else:
transformer = define_transformer(transformer_ver,
meta['in_shape'][1], meta['in_shape'][2], meta['in_shape'][3],
**config_t)
meta['identity'] = identity
# get output shape after transformation:
meta['out_shape'] = transformer.outputshape
print('---------------- data shape before/after transformer -------------')
print(meta['in_shape'], type(meta['in_shape']))
print(meta['out_shape'], type(meta['out_shape']))
# ---------------------------- define solver-------------------------------
config_s = {'no_classes': args.no_classes,
'dropout_prob': args.solver_dropout_prob,
'batch_norm': args.batch_norm}
solver = define_solver(args.solver_ver,
meta['out_shape'][1], meta['out_shape'][2], meta['out_shape'][3],
**config_s)
# ---------------------------- define router ------------------------------
config_r = {'kernel_size': args.router_k,
'ngf': args.router_ngf,
'soft_decision': True,
'stochastic': False,
'dropout_prob':args.router_dropout_prob,
'batch_norm': args.batch_norm}
router = define_router(
args.router_ver,
meta['out_shape'][1], meta['out_shape'][2], meta['out_shape'][3],
**config_r)
# define module:
module = {'transform': transformer,
'classifier': solver,
'router': router}
return meta, module
def define_transformer(version, input_nc, input_width, input_height, **kwargs):
if version == 1: # Identity function
return models.Identity(input_nc, input_width, input_height, **kwargs)
elif version == 2: # 1 conv layer
return models.JustConv(input_nc, input_width, input_height, **kwargs)
elif version == 3: # 1 conv layer + 1 max pooling
return models.ConvPool(input_nc, input_width, input_height, **kwargs)
elif version == 4: # Bottle-neck residual block
return models.ResidualTransformer(input_nc, input_width, input_height, **kwargs)
elif version == 5: # VGG13: 2 conv layer + 1 max pooling
return models.VGG13ConvPool(input_nc, input_width, input_height, **kwargs)
else:
raise NotImplementedError("Specified transformer module not available.")
def define_router(version, input_nc, input_width, input_height, **kwargs):
if version == 1: # Simple router with 1 conv kernel + spatial averaging
return models.Router(input_nc, input_width, input_height, **kwargs)
elif version == 2: # 1 conv layer with global average pooling + fc layer
return models.RouterGAP(input_nc, input_width, input_height, **kwargs)
elif version == 3: # 2 conv with global average pooling + fc layer
return models.RouterGAPwithDoubleConv(input_nc, input_width, input_height, **kwargs)
elif version == 4: # MLP with 1 hidden layer
return models.Router_MLP_h1(input_nc, input_width, input_height, **kwargs)
elif version == 5: # GAP + 2 fc layers (Veit. et al 2017)
return models.RouterGAP_TwoFClayers(input_nc, input_width, input_height, **kwargs)
elif version == 6: # 1 conv + GAP + 2 fc layers
return models.RouterGAPwithConv_TwoFClayers(input_nc, input_width, input_height, **kwargs)
else:
raise NotImplementedError("Specified router module not available!")
def define_solver(version, input_nc, input_width, input_height, **kwargs):
    if version == 1: # Logistic regressor
return models.LR(input_nc, input_width, input_height, **kwargs)
elif version == 2: # MLP with 2 hidden layers:
return models.MLP_LeNet(input_nc, input_width, input_height, **kwargs)
elif version == 3: # MLP with a single hidden layer (MNIST LeNet)
return models.MLP_LeNetMNIST(input_nc, input_width, input_height, **kwargs)
elif version == 4: # GAP + 2 FC layers
return models.Solver_GAP_TwoFClayers(input_nc, input_width, input_height, **kwargs)
elif version == 5: # MLP with a single hidden layer in AlexNet
return models.MLP_AlexNet(input_nc, input_width, input_height, **kwargs)
elif version == 6: # GAP + 1 FC layer
return models.Solver_GAP_OneFClayers(input_nc, input_width, input_height, **kwargs)
else:
raise NotImplementedError("Specified solver module not available!")
def get_scheduler(scheduler_type, optimizer, grow):
if scheduler_type == 'step_lr': # reduce the learning rate
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[100,150], gamma=0.1,
)
elif scheduler_type == 'plateau': # patience based decay of learning rates
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', factor=0.1, patience=10,
)
elif scheduler_type == 'hybrid': # hybrid between step_lr and plateau
if grow: # use 'plateau' during the local growth phase
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', factor=0.1, patience=10,
)
else:
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[100, 150], gamma=0.1,
)
else:
scheduler = None
return scheduler
# --------------------------- Visualisation ----------------------------
# visualise numpy image:
def imshow(img):
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def plot_hist(data, save_as='./figure'):
fig = plt.figure()
plt.hist(data, normed=True, bins=150, range=(0, 1.0))
fig.savefig(save_as)
def plot_hist_root(labels, split_status, save_as='./figures/hist_labels_split.png'):
""" Plot the distribution of labels of a binary routing function.
Args:
labels (np array): labels (N) each entry contains a label
split_status (np array bool): boolean array (N) where 0 indicates the entry
belongs to the right and 1 indicates left.
"""
fig = plt.figure()
plt.hist(labels[split_status], bins=range(11), alpha=0.75, label='right branch')
plt.hist(labels[split_status==False], bins=range(11), alpha=0.5, label='left branch')
plt.legend(loc='upper right')
print('save the histogram as ' + save_as)
fig.savefig(save_as)
# Use to visualise performance of one model:
def print_performance(jasonfile, model_name='model_1', figsize=(5,5)) :
""" Inspect performance of a single model
"""
records = json.load(open(jasonfile, 'r'))
print('\n'+model_name)
print(" train_best_loss: {}".format(records['train_best_loss']))
print(" valid_best_loss: {}".format(records['valid_best_loss']))
print(" test_best_loss: {}".format(records['test_best_loss']))
# Plot train/test loss
fig = plt.figure(figsize=figsize)
plt.plot(np.arange(len(records['test_epoch_loss'])), np.array(records['test_epoch_loss']),
linestyle='-.', color='b', label='test epoch loss')
plt.plot(np.arange(len(records['train_epoch_loss']), dtype=float), np.array(records['train_epoch_loss']),
color='r', linestyle='-', label='train epoch loss')
plt.legend(loc='upper right')
plt.ylabel('epoch wise loss (average CE loss)')
plt.xlabel('epoch number')
def plot_performance(jasonfiles, model_names=[], figsize=(5,5), title='') :
""" Visualise the results for several models
Args:
        jasonfiles (list): List of JSON files
model_names (list): List of model names
"""
# TODO: currently only supports up to 8 models at a time due to color types
fig = plt.figure(figsize=figsize)
color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
if not(model_names):
model_names = [str(i) for i in range(len(jasonfiles))]
for i, f in enumerate(jasonfiles):
# load the information:
records = json.load(open(f, 'r'))
# Plot train/test loss
plt.plot(np.arange(len(records['test_epoch_loss'])), np.array(records['test_epoch_loss']),
color=color[i], linestyle='-.', label='test epoch loss: ' + model_names[i] )
plt.plot(np.arange(len(records['train_epoch_loss']), dtype=float), np.array(records['train_epoch_loss']),
color=color[i], linestyle='-', label='train epoch loss: ' + model_names[i])
plt.ylabel('epoch wise loss (average CE loss)')
plt.xlabel('epoch number')
plt.legend(loc='upper right')
plt.title(title)
def plot_accuracy(jasonfiles, model_names=[], figsize=(5,5), ymax=100.0, title=''):
fig = plt.figure(figsize=figsize)
color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
if not(model_names):
model_names = [str(i) for i in range(len(jasonfiles))]
for i, f in enumerate(jasonfiles):
# load the information:
records = json.load(open(f, 'r'))
# Plot train/test loss
plt.plot(
np.arange(len(records['test_epoch_accuracy']), dtype=float),
np.array(records['test_epoch_accuracy']),
color=color[i], linestyle='-', label=model_names[i],
)
# print(records['train_epoch_accuracy'])
plt.ylabel('test accuracy (%)')
plt.xlabel('epoch number')
plt.ylim(ymax=ymax)
print(model_names[i] + ': accuracy = {}'.format(max(records['test_epoch_accuracy'])))
plt.legend(loc='lower right')
plt.title(title)
def compute_error(model_file, data_loader, cuda_on=False, name = ''):
"""Load a model and compute errors on a held-out dataset
Args:
model_file (str): model parameters
        data_loader (torch.utils.data.DataLoader): data loader
"""
# load the model
model = torch.load(model_file)
if cuda_on:
model.cuda()
# compute the error
model.eval()
test_loss = 0
correct = 0
for data, target in data_loader:
if cuda_on:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).data[
0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[
1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(data_loader.dataset)
print(name + 'Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
def load_tree_model(model_file, cuda_on=False,
soft_decision=True, stochastic=False,
breadth_first=False, fast=False,
):
"""Load a tree model. """
# load the model and set routers stochastic.
map_location = None
if not (cuda_on):
map_location = 'cpu'
tree_tmp = torch.load(model_file, map_location=map_location)
tree_struct, tree_modules = tree_tmp.tree_struct, tree_tmp.update_tree_modules()
for node in tree_modules:
node['router'].stochastic = stochastic
node['router'].soft_decision = soft_decision
node['router'].dropout_prob = 0.0
for node_meta in tree_struct:
if not ('extended' in node_meta.keys()):
node_meta['extended'] = False
model = models.Tree(
tree_struct, tree_modules,
split=False, cuda_on=cuda_on, soft_decision=soft_decision,
breadth_first=breadth_first,
)
if cuda_on:
model.cuda()
return model
def compute_error_general(model_file, data_loader, cuda_on=False,
soft_decision=True, stochastic=False,
breadth_first=False, fast = False,
task="classification",
name = ''):
"""Load a model and perform stochastic inferenc
Args:
model_file (str): model parameters
        data_loader (torch.utils.data.DataLoader): data loader
"""
# load the model and set routers stochastic.
map_location = None
if not (cuda_on):
map_location = 'cpu'
tree_tmp = torch.load(model_file, map_location=map_location)
tree_struct, tree_modules = \
tree_tmp.tree_struct, tree_tmp.update_tree_modules()
for node in tree_modules:
node['router'].stochastic = stochastic
node['router'].soft_decision = soft_decision
node['router'].dropout_prob = 0.0
for node_meta in tree_struct:
if not('extended' in node_meta.keys()):
node_meta['extended']=False
if task == "classification":
model = models.Tree(
tree_struct, tree_modules,
split=False, cuda_on=cuda_on, soft_decision=soft_decision,
breadth_first=breadth_first,
)
if cuda_on:
model.cuda()
# compute the error
model.eval()
test_loss = 0
correct = 0
for data, target in data_loader:
if cuda_on:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
if fast:
output = model.fast_forward_BF(data)
else:
output = model.forward(data)
if task == "classification":
test_loss += F.nll_loss(output, target, size_average=False).data[0]
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
else:
raise NotImplementedError("The specified task is not supported")
# Normalise the loss and print:
if task == "classification":
test_loss /= len(data_loader.dataset)
print(name + 'Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
elif task == "regression":
test_loss = test_loss / 7.0 / len(data_loader.dataset)
print('Average loss: {:.4f}'.format(test_loss))
def compute_error_general_ensemble(model_file_list, data_loader, cuda_on=False,
soft_decision=True, stochastic=False,
breadth_first=False, fast = False,
task="classification",
name = ''):
"""Load an ensemble of models and compute the average prediction. """
# load the model and set routers stochastic.
model_list = []
map_location = None
if not (cuda_on):
map_location = 'cpu'
for model_file in model_file_list:
tree_tmp = torch.load(model_file, map_location=map_location)
tree_struct, tree_modules = tree_tmp.tree_struct, tree_tmp.update_tree_modules()
for node in tree_modules:
node['router'].stochastic = stochastic
node['router'].soft_decision = soft_decision
node['router'].dropout_prob = 0.0
for node_meta in tree_struct:
if not('extended' in node_meta.keys()):
node_meta['extended']=False
if task == "classification":
model = models.Tree(
tree_struct, tree_modules,
split=False, cuda_on=cuda_on, soft_decision=soft_decision,
breadth_first=breadth_first,
)
if cuda_on:
model.cuda()
model_list.append(model)
# compute the error
for model in model_list:
model.eval()
test_loss = 0
correct = 0
for data, target in data_loader:
if cuda_on:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
# compute the average prediction over different models
output = 0.0
for model in model_list:
if fast:
output += model.fast_forward_BF(data)
else:
output += model.forward(data)
output /= len(model_list)
if task == "classification":
test_loss += F.nll_loss(output, target, size_average=False).data[0]
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
elif task == "regression":
# print(test_loss)
test_loss += F.mse_loss(output, target, size_average=False).data[0]
# Normalise the loss and print:
if task == "classification":
test_loss /= len(data_loader.dataset)
print(name + 'Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
elif task == "regression":
test_loss = test_loss / 7.0 / len(data_loader.dataset)
print('Average loss: {:.4f}'.format(test_loss))
def try_different_inference_methods(
model_file, dataset, task="classification",
augmentation_on=False, cuda_on=True,
):
""" Try different inference methods and compute accuracy
"""
if dataset == 'cifar10':
if augmentation_on:
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
else:
transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
cifar10_test = torchvision.datasets.CIFAR10(root='../../data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(cifar10_test, batch_size=100, shuffle=False, num_workers = 2)
elif dataset == 'mnist':
transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
mnist_test = datasets.MNIST('../../data', train=False, transform=transform_test)
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=100, shuffle=False, num_workers=2)
else:
raise NotImplementedError("The specified dataset is not supported")
# soft inferences:
start = time.time()
compute_error_general(
model_file, test_loader, task=task,
cuda_on=cuda_on, soft_decision=True, stochastic=False,
breadth_first=True, name='soft + BF : ',
)
end = time.time()
print('took {} seconds'.format(end - start))
# hard:
compute_error_general(
model_file, test_loader, task=task,
cuda_on=cuda_on, soft_decision=False, stochastic=False,
breadth_first=True,
name='hard + max + BF : ',
)
end = time.time()
print('took {} seconds'.format(end - start))
# stochastic hard
compute_error_general(
model_file, test_loader,
cuda_on=cuda_on, soft_decision=False, stochastic=True,
breadth_first=True, name='hard + stochastic + BF : ',
)
end = time.time()
print('took {} seconds'.format(end - start))
def try_different_inference_methods_ensemble(
model_file_list, dataset, task="classification",
augmentation_on=False, cuda_on=True,
):
""" Try different inference methods and compute accuracy
"""
if dataset == 'cifar10':
if augmentation_on:
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
else:
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(
(0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))])
cifar10_test = torchvision.datasets.CIFAR10(root='../../data',
train=False, download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(cifar10_test, batch_size=100,
shuffle=False, num_workers=2)
elif dataset == 'mnist':
transform_test = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
mnist_test = datasets.MNIST('../../data', train=False,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=100,
shuffle=False, num_workers=2)
else:
raise NotImplementedError("The specified dataset is not availble")
# soft inferences:
start = time.time()
compute_error_general_ensemble(
model_file_list, test_loader, task=task,
cuda_on=cuda_on, soft_decision=True, stochastic=False,
breadth_first=True, name='soft + BF : ',
)
end = time.time()
print('took {} seconds'.format(end - start))
# hard:
compute_error_general_ensemble(
model_file_list, test_loader, task=task,
cuda_on=cuda_on, soft_decision=False, stochastic=False,
breadth_first=True,
name='hard + max + BF : ',
)
end = time.time()
print('took {} seconds'.format(end - start))
# --------------------- Parameter counters -------------------------
def get_total_number_of_params(model, print_on=False):
tree_struct = model.tree_struct
names, params = [], []
for node_idx, node_meta in enumerate(tree_struct):
for name, param in model.named_parameters():
if (( not(node_meta['is_leaf']) and '.'+str(node_idx)+'.router' in name) \
or ('.'+str(node_idx)+'.transform' in name) \
or (node_meta['is_leaf'] and '.'+str(node_idx)+'.classifier' in name)):
names.append(name)
params.append(param)
if print_on:
print("Count the number of parameters below: ")
for name in names: print(' '+name)
return sum(p.numel() for p in params)
def get_number_of_params_path(
model, nodes, print_on=False, include_routers=True,
):
names, params = [], []
if include_routers:
for name, param in model.named_parameters():
if '.'+str(nodes[-1])+'.classifier' in name \
or any(['.'+str(node)+'.transform' in name for node in nodes]) \
or any(['.'+str(node)+'.router' in name for node in nodes[:-1]]):
names.append(name)
params.append(param)
else:
for name, param in model.named_parameters():
if '.' + str(nodes[-1]) + '.classifier' in name \
or any(['.' + str(node) + '.transform' in name for node in nodes]):
names.append(name)
params.append(param)
if print_on:
print("\nCount the number of parameters below: ")
for name in names: print(' '+name)
return sum(p.numel() for p in params)
def get_number_of_params_summary(
model, name='', print_on=True, include_routers=True,
):
# compute the total number
total_num = get_total_number_of_params(model)
# compute min,max,mean number of parameters per branch
paths_list = model.paths_list
num_list = []
for (nodes, _) in paths_list:
num = get_number_of_params_path(
model, nodes, include_routers=include_routers,
)
num_list.append(num)
if print_on:
print('\n' + name)
print('Number of parameters summary:')
print(' Total: {} '.format(total_num))
print(' Max per branch: {} '.format(max(num_list)))
print(' Min per branch: {} '.format(min(num_list)))
print(' Average per branch: {}'.format(sum(num_list)*1.0/len(num_list)))
return total_num, max(num_list), min(num_list), sum(num_list)*1.0/len(num_list)
# --------------------- Others -------------------------
def round_value(value, binary=False):
divisor = 1024. if binary else 1000.
if value // divisor**4 > 0:
return str(round(value / divisor**4, 2)) + 'T'
elif value // divisor**3 > 0:
return str(round(value / divisor**3, 2)) + 'G'
elif value // divisor**2 > 0:
return str(round(value / divisor**2, 2)) + 'M'
elif value // divisor > 0:
return str(round(value / divisor, 2)) + 'K'
return str(value)
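# Examples: round_value(1250000) -> '1.25M'; round_value(2048, binary=True) -> '2.0K'.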
def set_random_seed(seed, cuda):
np.random.seed(seed) # cpu vars
torch.manual_seed(seed) # pytorch cpu vars
random.seed(seed) # Python
if cuda:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # pytorch gpu vars
torch.backends.cudnn.deterministic = True # needed
torch.backends.cudnn.benchmark = False
``` |
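A minimal usage sketch for `get_scheduler` above (not part of the repository; the linear model, optimizer settings and loss value are stand-ins used only to exercise the selection logic):
```python
# Sketch only: assumes the utils.py above is importable as `utils`.
import torch.nn as nn
from torch import optim
from utils import get_scheduler

model = nn.Linear(10, 2)                       # stand-in module
optimizer = optim.SGD(model.parameters(), lr=0.1)

for grow in (True, False):
    scheduler = get_scheduler('hybrid', optimizer, grow)
    for epoch in range(3):
        optimizer.step()                       # dummy training step
        if grow:
            scheduler.step(1.0)                # ReduceLROnPlateau expects a metric
        else:
            scheduler.step()                   # MultiStepLR steps once per epoch
```
With the 'hybrid' type, the plateau-based schedule is used while a node is being grown and the milestone-based schedule otherwise, matching the two branches in `get_scheduler`.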
{
"source": "404CoderNotFound/Aternos-On-Discord-Replit",
"score": 2
} |
#### File: 404CoderNotFound/Aternos-On-Discord-Replit/connect_and_launch.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import ElementNotInteractableException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from helper import can_fire, can_fire_async
import asyncio
import time
from dotenv import load_dotenv
import os
from chromedriver_py import binary_path
if os.path.exists(os.path.relpath(".env")):
load_dotenv()
USER = os.getenv('USERNAME_C')
PASSWORD = os.getenv('PASSWORD_C')
URL = "https://aternos.org/server/"
SERVER_STATUS_URI = URL
connected = False
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument('--ignore-certificate-errors')
options.add_argument("--allow-insecure-localhost")
#options.add_argument("user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36 OPR/68.0.3618.125")
driver = webdriver.Chrome(options=options, executable_path=binary_path)
async def start_server():
""" Starts the server by clicking on the start button.
    The try/except block keeps polling for the confirmation button while the
    server waits in the queue, and clicks it as soon as it becomes interactable."""
# if not connected:
# connect_account()
await asyncio.sleep(5)
element = driver.find_element_by_xpath("/html/body/div[20]/div/div/div/div[3]/div[2]/div[2]")
element.click()
await asyncio.sleep(3)
element = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[4]/div[1]')
element.click()
await asyncio.sleep(10)
element = driver.find_element_by_xpath('/html/body/div[2]/main/div/div/div/main/div/a[2]')
element.click()
state = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[3]/div[1]/div/span[2]/span')
while state.text == "Wachten in de wachtrij":
state = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[3]/div[1]/div/span[2]/span')
try:
element = driver.find_element_by_xpath('/html/body/div[2]/main/section/div[3]/div[4]/div[6]')
element.click()
except ElementNotInteractableException as e:
pass
driver.close()
@can_fire
def connect_account():
""" Connects to the accounts through a headless chrome tab so we don't
have to do it every time we want to start or stop the server."""
    global connected  # update the module-level flag instead of creating a local
    driver.get(URL)
element = driver.find_element_by_xpath('//*[@id="user"]')
element.send_keys(USER)
element = driver.find_element_by_xpath('//*[@id="password"]')
element.send_keys(PASSWORD)
element = driver.find_element_by_xpath('//*[@id="login"]')
element.click()
connected = True
time.sleep(10)
@can_fire
def get_status():
# Returns the status of the server as a string
driver.get(SERVER_STATUS_URI)
time.sleep(2)
if not connected:
connect_account()
time.sleep(2)
element = driver.find_element_by_xpath('/html/body/div/main/section/div/div[2]/div/div[1]')
element.click()
time.sleep(1)
element = driver.find_element_by_class_name('statuslabel-label')
print(element.text)
return element.text
@can_fire
def get_number_of_players():
# Returns the number of players as a string
driver.get(SERVER_STATUS_URI)
if not connected:
connect_account()
time.sleep(5)
element = driver.find_element_by_xpath('/html/body/div/main/section/div/div[2]/div/div[1]')
element.click()
time.sleep(1)
number_of_players = WebDriverWait(driver, 360).until(ec.presence_of_element_located((By.XPATH, '/html/body/div[2]/main/section/div[3]/div[5]/div[2]/div[1]/div/div[2]/div/span')))
return number_of_players.text
async def stop_server():
if not connected:
connect_account()
driver.get(URL)
element = driver.find_element_by_xpath("/html/body/div/main/section/div/div[2]/div[1]/div[1]")
element.click()
await asyncio.sleep(3)
element = driver.find_element_by_xpath('//*[@id="stop"]')
element.click()
```
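A hedged driver sketch for the module above (assuming a populated `.env` with `USERNAME_C`/`PASSWORD_C` and a working headless Chrome; the function names are exactly those defined in `connect_and_launch.py`):
```python
# Sketch only: exercises the login/status/start flow defined above.
import asyncio
import connect_and_launch as cal

cal.connect_account()               # one-time headless login to aternos.org
print(cal.get_status())             # current server status text from the page
print(cal.get_number_of_players())  # player count shown on the server page
asyncio.run(cal.start_server())     # click through the start/confirm queue flow
```
Note that `start_server` closes the shared webdriver when it finishes, so it should be the last call in a session.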
#### File: 404CoderNotFound/Aternos-On-Discord-Replit/helper.py
```python
from dotenv import load_dotenv
import os
import inspect
import asyncio
async def can_fire_async(func):
if os.path.exists(os.path.relpath(".env")):
return await func()
else:
print("Cannot find configuration file.")
def can_fire(func):
if os.path.exists(os.path.relpath(".env")):
return(func)
else:
print("Cannot find configuration file")
``` |
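A behavioural note on `can_fire`: it runs at import time and returns the wrapped function only when a `.env` file exists; otherwise it prints the warning and the decorated name becomes `None`. A small sketch (the `ping` function is hypothetical, used only for illustration):
```python
# Sketch only: shows what the can_fire decorator leaves bound to the name.
from helper import can_fire

@can_fire
def ping():
    return "pong"

if ping is not None:      # .env was found, decorator returned ping unchanged
    print(ping())         # -> "pong"
else:                     # no .env: ping is None, calling it would raise TypeError
    print("configuration missing; ping was not registered")
```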
{
"source": "404devt/menace",
"score": 4
} |
#### File: menace/py/quat_ht.py
```python
import re
import string
from board import *
from ast import literal_eval
class BoardHashTable:
''' Quadratic Hash Class '''
def __init__(self, seedfilename=None):
''' Initialization of Hash Table '''
self.tablesize = 251
self.count = 0
self.arr = []
for i in range(self.tablesize):
self.arr.append(None)
if seedfilename != None:
self.load_file(seedfilename)
def get_conflict_resolved_index(self,key):
''' Gets a free index in the table based on
the key with the proper conflict resolution strategy'''
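        # Probe-sequence sketch: starting from the home slot h = myhash(key), a
        # collision is resolved by trying h + 1, h + 4, h + 9, ... (h + inc**2,
        # modulo the table size) until an empty slot or this key's slot is found.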
indx = self.myhash(key,self.get_tablesize())
orig = indx
inc = 0
while self.arr[indx] is not None and self.arr[indx][0] != key:
inc += 1
indx = orig + inc**2
indx %= self.get_tablesize()
return indx
def put(self, board, movelist=None):
        '''inserts the key/movelist pair into the table, rehashing into a larger table when the load factor exceeds 0.4'''
if self.get_load_fact() > 0.4:
copy = self.arr
oldct = self.count
self.tablesize = self.tablesize*2 + 1
self.count = 0
self.arr = []
for i in range(self.tablesize):
self.arr.append(None)
for tup in copy:
if tup is not None:
self.put(Board(tup[0]),tup[1])
if abs(oldct - self.count) >= 1:
print("old=%d, new=%d" % (oldct,self.count))
raise AssertionError("lost elements in rehash")
# print("ok")
indx = self.get_conflict_resolved_index(board.get_key())
if self.arr[indx] == None:
if movelist is None:
movelist = board.make_movelist()
self.arr[indx] = (board.get_key(),movelist)
self.count += 1
else:
raise AssertionError("double put")
def contains(self,board):
        '''returns True if the board's key is present in the table'''
indx = self.get_conflict_resolved_index(board.get_key())
# print(self.arr[indx])
if self.arr[indx] is None:
return False
if self.arr[indx][0] == board.get_key():
return True
def get_movelist(self, board):
        ''' Uses the given board's key to find and return its stored movelist '''
indx = self.get_conflict_resolved_index(board.get_key())
if self.arr[indx] == None:
raise LookupError()
if self.arr[indx][0] == board.get_key():
return self.arr[indx][1]
def get_tablesize(self):
'''returns Size of Hash Table'''
return self.tablesize
def get_load_fact(self):
'''returns the load factor of the hash table'''
return float(self.count) / float(self.tablesize)
def myhash(self, key, table_size):
'''hashes based on horners rule'''
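        # Worked example: for key '-ox' with table_size 251,
        #   num = 31*0 + strangeord('-') = 0
        #   num = 31*0 + strangeord('o') = 1
        #   num = 31*1 + strangeord('x') = 33   -> home slot 33 % 251 = 33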
num = 0
for i in range(min(len(key),9)):
num = 31*num + self.strangeord(key[i])
return num % table_size
def strangeord(self, char):
if char == '-':
return 0
elif char == 'o':
return 1
elif char == 'x':
return 2
else:
return ord(char)
def print_all_boards(self):
for i in range(self.tablesize):
if self.arr[i] is not None:
Board(self.arr[i][0]).print_board()
print(self.arr[i][1])
def menace_save(self,filename):
f = open(filename, 'w')
for i in range(self.tablesize):
if self.arr[i] is not None:
line = self.arr[i][0] + "|" + str(self.arr[i][1]) + '\n'
f.write(line)
f.close()
def load_file(self,filename):
f = open(filename,'r')
while True:
line = f.readline()
if not line:
break
pretup = line.split('|')
self.put(Board(pretup[0]),literal_eval(pretup[1]))
```
#### File: menace/py/test_quat_ht.py
```python
import unittest
from quat_ht import *
from board import *
class testing_menace(unittest.TestCase):
def test_ht_simple(self):
bht = BoardHashTable()
board = Board('-o-o-o-o-')
bht.put(board)
self.assertTrue(bht.contains(board))
self.assertListEqual(bht.get_movelist(board),[2,0,2,0,2,0,2,0,2])
def test_ht_a_bit_harder(self):
bht = BoardHashTable()
board = Board('-o-o-o-o-')
bht.put(board)
for i in range(6):
self.assertTrue(bht.contains(board.transform(i)))
self.assertListEqual(bht.get_movelist(board),[2,0,2,0,2,0,2,0,2])
def test_ht__harder(self):
bht = BoardHashTable()
board = Board('------xo-')
bht.put(board)
expect = [False,False,False,False,True,False]
topboard = Board('xo-------')
for i in range(6):
self.assertEqual(bht.contains(topboard.transform(i)),expect[i])
self.assertListEqual(bht.get_movelist(board),[2,2,2,2,2,2,0,0,2])
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "404-F21/project",
"score": 2
} |
#### File: project/main/views.py
```python
import os
from django.db.models import query
import requests
from django.contrib.auth.models import User
from django.db.models.query import QuerySet
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from main.models import Author, Comment, Following, FollowNotification, Post, LikePost, Admin, Node, MediaFile, PostNotification
from main.serializers import AuthorSerializer, CommentSerializer, FollowingSerializer, PostNotificationSerializer, FollowNotificationSerializer, PostSerializer
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from main.decorator import need_admin
from main.response import success, failure, no_auth
from django.db.models import F
from django.db.models.query import EmptyQuerySet
from django.core.paginator import Paginator
from django.shortcuts import redirect
from main.response import fetch_posts
from main.response import basic_auth, AUTH_SUCCESS
from social.settings import BASE_DIR
import uuid
import json
import time
import base64
from django.views.decorators.csrf import csrf_exempt
import hashlib
from typing import Dict
# =======================================================
# ============= Methods for startup/helpers =============
# =======================================================
def admin_page_logo(request):
"""
Redirect request for logo to correct path
"""
return redirect('/static/ant-design-pro/logo.svg')
# Create your views here.
def render_html(request):
# create default super user
if User.objects.count() == 0:
user = User.objects.create_user('admin', '<EMAIL>', 'admin123456')
        user.is_staff = True
user.save()
return render(request, 'index.html')
def paginate(objects: QuerySet, params: Dict[str, str]) -> QuerySet:
page = int(params.get('page', '1'))
size = int(params.get('size', '10'))
begin = (page - 1) * size
end = begin + size
return objects[begin:end]
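# Example: paginate(qs, {'page': '2', 'size': '10'}) slices qs[10:20].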
# =======================================================
# ============= Views for the Model classes =============
# =======================================================
class PostList(APIView):
"""
List all Posts in the database
"""
def get(self, request, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
fetcher_id = request.GET.get('fid', None)
public_posts = Post.objects.filter(visibility='public')
all_posts = public_posts.order_by('-publishedOn')
if fetcher_id is not None:
private_posts = Post.objects.filter(visibility='toAuthor', author__id=fetcher_id)
author = Author.objects.get(pk=uuid.UUID(fetcher_id))
followers = (author.follower_set.all()
.values_list('follower__id'))
friends = (author.followed_set
.filter(followee__id__in=followers)
.order_by('followee__displayName')
.values('followee'))
friend_posts = Post.objects.filter(visibility='friends', author__in=friends)
all_posts = (all_posts | private_posts | friend_posts).order_by('-publishedOn')
# not to spec, but cannot change for fear of breaking connections with
# other groups
data = []
for post in all_posts:
data.append(post.dict())
foreign_posts = fetch_posts()
data = data + foreign_posts
response = JsonResponse(data, safe=False)
return response
def post(self, request, *args, **kwargs):
'''
make a post, giving `authorId`, `content`, `title`, and potentially
`contentType` and `visibility`.
'''
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
if request.content_type == "application/json":
author = Author.objects.get(pk=uuid.UUID(request.data['authorId']))
text = request.data['content']
title = request.data['title']
contentType = request.data.get('contentType', 'text/plain')
visibility = request.data.get('visibility', 'public')
new_post = Post(author=author,
content=text,
title=title,
contentType=contentType,
visibility=visibility)
new_post.save()
#friend_post_notification = PostNotification(type = 'friends post', authorId=post_author, senderId=liker, postId = post, sender_display_name=liker_display_name)
#print(f"\n\n\nREQUEST HEADERS: {request.headers}\n request data: {request.data}\n\n\n")
elif request.content_type == "application/x-www-form-urlencoded":
author = Author.objects.all().first()
text = request.data['content']
title = request.data['title']
new_post = Post(author=author, content=text, title=title)
new_post.save()
return HttpResponse("post created")
class Register(APIView):
def post(self, request, format=None):
author = Author.objects.create(
displayName = request.data['displayName'],
password = request.data["password"],
)
author.save()
ser = AuthorSerializer(author)
return Response(ser.data)
class FollowerList(APIView):
def get(self, request, pk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
author = Author.objects.get(pk=uuid.UUID(pk))
follow_pairs = author.follower_set.all().order_by('follower__displayName')
serializer = FollowingSerializer(follow_pairs, many=True)
# this list comprehension is required to keep the serializers consistent
items = [e['follower'] for e in serializer.data]
return Response({ 'type': 'followers', 'items': items })
class FollowerDetail(APIView):
def delete(self, request, pk, fpk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
try:
followee = Author.objects.get(pk=uuid.UUID(pk))
follower = Author.objects.get(pk=uuid.UUID(fpk))
follow_pair = Following.objects.get(followee=followee, follower=follower)
follow_pair.delete()
return Response({ 'success': True })
except (Author.DoesNotExist, Following.DoesNotExist):
return Response({ 'success': False })
def put(self, request, pk, fpk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
follower = Author.objects.get(pk=uuid.UUID(fpk))
followee = Author.objects.get(pk=uuid.UUID(pk))
follow_pair = Following.objects.create(followee=followee, follower=follower)
follow_pair.save()
follower_display_name = follower.displayName
front_end_text = f'Friend request: {follower_display_name} has started following you. Follow them back to become friends with {follower_display_name}.'
#comment_notification = PostNotification(type='comment', postId = post, senderId=author, authorId=post_author, sender_display_name=author.displayName)
follow_notification = FollowNotification(front_end_text=front_end_text, senderId=follower, authorId=followee, sender_display_name=follower_display_name)
follow_notification.save()
serializer = FollowingSerializer(follow_pair)
return Response(serializer.data)
def get(self, request, pk, fpk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
try:
followee = Author.objects.get(pk=uuid.UUID(pk))
follower = Author.objects.get(pk=uuid.UUID(fpk))
#author.followed_set.get(follower=uuid.UUID(fpk))
follow_query= Following.objects.filter(follower=follower, followee=followee)
#print(f"follow_query: {follow_query}")
#print(f"follow_query str: {str(follow_query)}")
if str(follow_query) == '<QuerySet []>':
return Response({ 'isFollower': False })
else:
return Response({ 'isFollower': True })
except (Author.DoesNotExist, Following.DoesNotExist):
return Response({ 'isFollower': False })
class FollowedList(APIView):
def get(self, request, pk, format=None):
author = Author.objects.get(pk=uuid.UUID(pk))
follow_pairs = author.followed_set.all().order_by('followee__displayName')
paged_pairs = paginate(follow_pairs, request.query_params)
serializer = FollowingSerializer(paged_pairs, many=True)
# this list comprehension is required to keep the serializers consistent
items = [e['followee'] for e in serializer.data]
return Response({ 'type': 'followees', 'items': items })
class FollowedDetail(APIView):
def delete(self, request, pk, fpk, format=None):
try:
author = Author.objects.get(pk=uuid.UUID(pk))
follow_pair = author.followed_set.get(followee=uuid.UUID(fpk))
follow_pair.delete()
return Response({ 'success': True })
except (Author.DoesNotExist, Following.DoesNotExist):
return Response({ 'success': False })
def put(self, request, pk, fpk, format=None):
follower = Author.objects.get(pk=uuid.UUID(pk))
followee = Author.objects.get(pk=uuid.UUID(fpk))
follow_pair = Following.objects.create(followee=followee, follower=follower)
follow_pair.save()
serializer = FollowingSerializer(follow_pair)
return Response(serializer.data)
def get(self, request, pk, fpk, format=None):
try:
author = Author.objects.get(pk=uuid.UUID(pk))
author.follower_set.get(followee=uuid.UUID(fpk))
return Response({ 'isFollower': True })
except (Author.DoesNotExist, Following.DoesNotExist):
return Response({ 'isFollower': False })
class FriendList(APIView):
def get(self, request, pk, format=None):
'''
        get a list of all of the friends of the user with id <pk>: those whom
        the user follows and who also follow the user back
'''
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
author = Author.objects.get(pk=uuid.UUID(pk))
followers = author.follower_set.all().values_list('follower__id')
friend_pairs = author.followed_set.filter(followee__id__in=followers).order_by('followee__displayName')
serializer = FollowingSerializer(friend_pairs, many=True)
items = [e['followee'] for e in serializer.data]
return Response({'type': 'friends', 'items': items})
class FriendDetail(APIView):
def get(self, request, pk, fpk):
'''
find out whether the user with id <fpk> is friends with the user with
id <pk>.
'''
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
try:
author = Author.objects.get(pk=uuid.UUID(pk))
followers = author.follower_set.all().values_list('follower__id')
friend_pairs = author.followed_set.filter(followee__id__in=followers)
friend_pairs.get(followee=fpk)
return Response({ 'isFriend': True })
except (Author.DoesNotExist, Following.DoesNotExist):
return Response({ 'isFriend': False })
class PostDetail(APIView):
"""
List an individual post
"""
def get(self, request, pk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
try:
post = Post.objects.get(postId=uuid.UUID(pk))
except Post.DoesNotExist:
return HttpResponse(status=404)
combined_data = []
post_serializer = PostSerializer(post)
return JsonResponse(post_serializer.data)
class AuthorPostList(APIView):
def get(self, request, pk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
author = Author.objects.get(pk=uuid.UUID(pk))
posts = author.post_set.all().order_by('-publishedOn')
result = []
for post in posts:
result.append(post.dict())
return Response({ 'type': 'posts', 'items': result })
def post(self, request, pk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
author = Author.objects.get(pk=uuid.UUID(pk))
text = request.data['content']
title = request.data['title']
        new_post = Post.objects.create(author=author, content=text, title=title)
new_post.save()
return Response({ 'success': True })
class AuthorPostDetail(APIView):
def get(self, request, pk, pid, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
author = Author.objects.get(pk=uuid.UUID(pk))
post = author.post_set.get(pk=uuid.UUID(pid))
serializer = PostSerializer(post)
data = dict({ 'type': 'post' }, **serializer.data)
return Response(data)
def post(self, request, pk, pid, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
text = request.data['content']
title = request.data['title']
post = Post.objects.get(author__id=uuid.UUID(pk), pk=uuid.UUID(pid))
post.content = text
post.title=title
post.save()
return Response({ 'success': True })
def delete(self, request, pk, pid, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
post = Post.objects.get(author__id=uuid.UUID(pk), pk=uuid.UUID(pid))
post.delete()
return Response({ 'success': True })
class CommentList(APIView):
def get(self, request, pk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
# https://docs.djangoproject.com/en/dev/ref/models/querysets/#exists
post = Post.objects.get(pk=pk)
if post is not None:
# Check if the post is visible for public/friends/whatever
#assuming it is visible to all
comments = Comment.objects.filter(postId=post)
if comments.count() > 0:
paged_comments = paginate(comments, request.query_params)
serializer = CommentSerializer(paged_comments, many=True)
return JsonResponse(serializer.data, safe=False)
else:
return Response("There are no comments on the post", status=404)
else:
# return a 404 response
return Response("Post not found", status=404)
def post(self, request, pk, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
post = Post.objects.get(pk=pk)
if post is not None:
if request.data.get('authorId', None) is not None:
# If this api is used by internal frontend
try:
author = Author.objects.get(pk=uuid.UUID(request.data['authorId']), if_foreign=False)
except Author.DoesNotExist:
author = None
else:
# If this api is used by foreign nodes
author_input = request.data['author']
try:
author = Author.objects.get(url=author_input['url'], if_foreign=True)
except Author.DoesNotExist:
# If foreign node want to create comment for our post, create their author
user = User.objects.create_user(author_input['displayName'], 'FOREIGN_INNER_AUTHOR')
author = Author.objects.create(
displayName=author_input['displayName'],
password='<PASSWORD>',
user=user,
url=author_input['url'],
host=author_input['host'],
github=author_input['github'],
if_foreign=True
)
if author is None:
return HttpResponse('Error, no such author')
comment = Comment(
postId = post,
authorId = author,
text = request.data['text']
)
post.commentCount += 1
post.save()
comment.save()
post_author = Author.objects.get(id=post.author.id)
# So you don't get notifications from your own comments
if post_author.id != author.id:
comment_notification = PostNotification(type='comment', postId = post, senderId=author, authorId=post_author, sender_display_name=author.displayName)
front_end_text = f'{author.displayName} has commented on your post.'
comment_notification.front_end_text = front_end_text
comment_notification.comment_text = request.data['text']
comment_notification.save()
print(f"comment notification: {comment_notification}")
serializer = PostNotificationSerializer(comment_notification)
print(f"data: {serializer.data}")
return HttpResponse(str(comment))
else:
# return a 404 response
return Response("Post not found", status=404)
class AuthorDetail(APIView):
def get(self, request, pk):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
author = Author.objects.filter(id=pk)
author_serializer = AuthorSerializer(author.first())
data = dict()
data['type'] = 'author'
data.update(author_serializer.data)
return JsonResponse(data)
def post(self, request, pk):
"""
Update info of a user
"""
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
display_name = request.data['displayName']
github = request.data['github']
# save new profile image
profile_pic_base64 = request.data.get('headPic', None)
file = None
if profile_pic_base64 is not None and profile_pic_base64 != '':
file_path = os.path.join(BASE_DIR, 'media', str(uuid.uuid4()))
file = MediaFile.objects.create(file_path=file_path)
# Save the image
with open(file_path, 'wb+') as f:
f.write(base64.b64decode(profile_pic_base64))
try:
author = Author.objects.get(id=pk)
except Author.DoesNotExist:
return failure('id not found')
author.displayName = display_name
author.github = github
if file:
author.profilePic = request.build_absolute_uri('/') + 'service/media/' + str(file.id) + '/'
author.save()
return Response({
'succ': True
})
class AuthorList(APIView):
"""
List all authors in the server, or register a new author
"""
def get(self, request, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
authors = paginate(Author.objects.filter(if_foreign=False).order_by('displayName'), request.query_params)
serializer = AuthorSerializer(authors, many=True)
data = { 'type': 'authors', 'items': serializer.data }
return Response(data)
def post(self, request, format=None):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
displayName = request.data['displayName']
password = request.data['password']
github = request.data['github']
profile_pic_base64 = request.data.get('headPic', None)
uri = request.build_absolute_uri('/')
file = None
if profile_pic_base64 is not None and profile_pic_base64 != '':
file_path = os.path.join(BASE_DIR, 'media', str(uuid.uuid4()))
file = MediaFile.objects.create(file_path=file_path)
# Save the image
with open(file_path, 'wb+') as f:
f.write(base64.b64decode(profile_pic_base64))
user = User.objects.create_user(displayName, password)
author = Author.objects.create(
displayName=displayName,
password=password,
user=user,
host=uri,
github=github,
profilePic=(request.build_absolute_uri('/') + 'service/media/' + str(file.id) + '/') if file else ''
)
author.save()
ser = AuthorSerializer(author)
return Response(ser.data)
# ============================================================
# ============= Decorators to handle API methods =============
# ============================================================
@api_view(['POST'])
def app_login(request):
name = request.data['displayName']
pwd = request.data['password']
try:
        author = Author.objects.get(displayName=name, password=pwd)
return Response({
'succ': True,
'id': str(author.pk),
'url': author.url,
'host': author.host,
'github': author.github,
'profilePic': author.profilePic
})
except Author.DoesNotExist:
return Response({ 'succ': False })
@api_view(['POST'])
def like_post(request, pk):
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
post_id = uuid.UUID(pk)
post = Post.objects.get(postId=post_id)
if request.data.get('authorId', None) is not None:
# If this api is used by internal frontend
author_id = uuid.UUID(request.data['authorId'])
try:
author = Author.objects.get(id=author_id, if_foreign=False)
except Author.DoesNotExist:
author = None
else:
# If this api is used by foreign nodes
author_input = request.data['author']
try:
author = Author.objects.get(url=author_input['url'], if_foreign=True)
except Author.DoesNotExist:
# If foreign node want to create comment for our post, create their author
user = User.objects.create_user(author_input['displayName'], 'FOREIGN_INNER_AUTHOR')
author = Author.objects.create(
displayName=author_input['displayName'],
password='<PASSWORD>',
user=user,
url=author_input['url'],
host=author_input['host'],
github=author_input['github'],
if_foreign=True
)
# check if already exists
likes = LikePost.objects.filter(postId=post, authorId=author)
if len(likes):
# already liked can not like again
return Response({ 'succ': False })
likepost = LikePost(postId=post, authorId=author)
likepost.save()
if request.data.get('authorId', None) is not None:
author_id = uuid.UUID(request.data['authorId'])
else:
author_id = author.id
#print(f"authorID from request: {author_id}")
#print(f"POST: {post}")
#print(f"POST AUTHOR: {post.author.id}")
liker = Author.objects.get(id=author_id)
post_author = Author.objects.get(id=post.author.id)
#post_author_id = Author.objects.get(id=post.author)
# So you don't get notifications from your own comments
if liker.id != post_author.id:
liker_display_name = liker.displayName
like_notification = PostNotification(type = 'like', authorId=post_author, senderId=liker, postId = post, sender_display_name=liker_display_name)
front_end_text = f'{author.displayName} has liked your post.'
like_notification.front_end_text = front_end_text
like_notification.save()
#serializer = PostNotificationSerializer(like_notification)
#data = serializer.data
#print(f"DATA: {data}")
#like_notification.save()
#print(f"lIKE NOTIFICAITON: {like_notification}")
post.likeCount += 1
post.save()
return Response({
'succ': True,
'count': post.likeCount
})
@api_view(['POST'])
def reshare_post(request, author_id, post_id):
"""
    Reshare a post; share_aid is the author who performs the reshare action
"""
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
if author_id == request.data['shareAid']:
return failure('Cannot share post by yourself')
try:
reshare_author = Author.objects.get(id=request.data['shareAid'])
except Author.DoesNotExist:
return failure('Author not found')
try:
target_post = Post.objects.get(postId=post_id, author__id=author_id)
except Post.DoesNotExist:
return failure('Post not found')
Post.objects.create(
author=reshare_author,
title=target_post.title,
content=target_post.content,
contentType=target_post.contentType
)
return success(None)
@api_view(['POST'])
def reshare_post_foreign(request, reshare_aid):
"""
Reshare foreign post to target author
"""
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
title = request.data['title']
content = request.data['content']
content_type = request.data['contentType']
try:
reshare_author = Author.objects.get(id=reshare_aid)
except Author.DoesNotExist:
return failure('Author not found')
Post.objects.create(
author=reshare_author,
title=title,
content=content,
contentType=content_type
)
return success(None)
@api_view(['GET','POST'])
def comment_list(request, pk):
"""
List all Comments of a Post
"""
# get simply gets all comments from a single post
if request.method == 'GET':
comments = Comment.objects.filter(postId=uuid.UUID(pk))
serializer = CommentSerializer(comments, many=True)
return JsonResponse(serializer.data, safe=False)
"""
Create a Comment of a Post
"""
# post will be used to comment on a Post
if request.method == 'POST':
post = Post.objects.filter(postId=uuid.UUID(request.data['postId']))
author = Author.objects.filter(id=uuid.UUID(request.data['authorId']))
comment = Comment(
postId = post[0],
authorId = author[0],
text = request.data['text']
)
post.update(commentCount=F('commentCount') + 1)
comment.save()
#nOT USED
#comment_notification = Notification(type='comment', aut)
#print(f"\n\nREQUEST DATAAAAA FOR COMMENT: {request.data} \n\n")
#print("\n\nauthor: {author}, post author: {post.author}")
return HttpResponse(str(comment))
serializer = PostSerializer(data=request.data['post'])
if serializer.is_valid():
        serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Inbox will be composed of notifications & friend requests seperately
@api_view(['GET'])
def post_notifications(request, pk):
"""
List all post notification items
"""
author = Author.objects.get(id=uuid.UUID(pk))
author_notifications = PostNotification.objects.filter(authorId=author).order_by('-publishedOn')
serializer = PostNotificationSerializer(author_notifications, many=True)
return JsonResponse(serializer.data, safe=False)
@api_view(['GET'])
def follow_notifications(request, pk):
"""
    List all follow notification items
"""
author = Author.objects.get(id=uuid.UUID(pk))
author_notifications = FollowNotification.objects.filter(authorId=author).order_by('-sentOn')
serializer = FollowNotificationSerializer(author_notifications, many=True)
return JsonResponse(serializer.data, safe=False)
@csrf_exempt
def get_foreign_data(request, node_id, url_base64):
"""
Get foreign data (used as a proxy)
    Post data to a foreign url (used as a proxy)
"""
# check if user is authenticated and if not return a 401
r_a = basic_auth(request)
if r_a != AUTH_SUCCESS:
return no_auth()
url = base64.b64decode(url_base64).decode()
try:
node = Node.objects.get(nodeId=node_id)
except Node.DoesNotExist:
return failure('Node not found')
username = node.http_username
password = node.http_password
if 'http://' in url:
url = url.replace('http:', 'https:')
if request.method == 'GET':
# GET
result = requests.get(url, auth=(username, password))
return JsonResponse(result.json(), safe=False)
elif request.method == 'POST':
# POST
try:
data: dict = json.loads(request.body.decode())
except json.JSONDecodeError:
return failure('json data format incorrect')
result = requests.post(url, json=data, auth=(username, password))
if not result.text:
return JsonResponse({})
return JsonResponse(result.json(), safe=False)
else:
return failure('GET')
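# Hedged illustration (not part of the original code): get_foreign_data() expects the
# target URL base64-encoded in the path. A caller could build that path segment with a
# helper like this; encode_proxy_url() is only a sketch, and the route wiring lives in urls.py.
def encode_proxy_url(url):
    # base64 is already imported at the top of this module (it is used above).
    return base64.b64encode(url.encode()).decode()
# e.g. encode_proxy_url('https://example.com/authors/') -> value for the url_base64 parameter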
# APIs for admin functions
# ============================
@csrf_exempt
def admin_login(request):
"""
Admin login
"""
if request.method == 'POST':
json_obj = json.loads(request.body.decode())
username = json_obj.get('username')
        password = json_obj.get('password')
if not username or not password:
return failure('arguments not enough')
# Check if the user exists
try:
admin = Admin.objects.get(username=username)
except Admin.DoesNotExist:
return failure('user not exists')
password_md5 = hashlib.md5(password.encode()).hexdigest()
if admin.password_md5 == password_md5:
# Password correct, admin login
request.session['id'] = str(admin.id)
request.session['username'] = username
request.session['role'] = 'admin'
return HttpResponse(json.dumps({
'status': 'ok',
'type': 'account',
'currentAuthority': 'admin',
}))
else:
# Password incorrect, fail
return HttpResponse(json.dumps({
'status': 'error',
'type': 'account',
'currentAuthority': 'guest',
}))
else:
return failure('POST')
@csrf_exempt
def admin_current_user(request):
"""
Get current login user
"""
# Init first admin
try:
Admin.objects.get(username='admin')
except Admin.DoesNotExist:
Admin.objects.create(username='admin', password_md5=hashlib.md5('<PASSWORD>'.encode()).hexdigest())
if request.method == 'GET':
if request.session.get('username', None) is not None:
return HttpResponse(json.dumps({
'success': True,
'data': {
'id': request.session['id'],
'name': request.session['username'],
'avatar': 'https://gw.alipayobjects.com/zos/antfincdn/XAosXuNZyF/BiazfanxmamNRoxxVxka.png',
'access': request.session['role']
}
}))
else:
r = HttpResponse(json.dumps({
'data': {
'isLogin': False,
},
'errorCode': '401',
'errorMessage': 'Login please!',
'success': True
}))
r.status_code = 401
return r
else:
return failure('GET')
@csrf_exempt
def admin_logout(request):
"""
Admin logout
"""
del request.session['username']
del request.session['role']
return success(None)
@csrf_exempt
@need_admin
def admin_list(request):
"""
Get admin users list
"""
if request.method == 'GET':
# Pagination
current = request.GET.get('current')
page_size = request.GET.get('pageSize')
if not current or not page_size:
return failure('arguments not enough')
admins = Admin.objects.all()
page = Paginator(admins, page_size).page(current)
obj_list = page.object_list
results = []
for admin in obj_list:
results.append(admin.dict())
return success({
'data': results,
'total': admins.count()
})
else:
return failure('GET')
@csrf_exempt
@need_admin
def admin_create_admin(request):
"""
Create admin
"""
if request.method == 'POST':
json_obj = json.loads(request.body.decode())
username = json_obj.get('username')
password = json_obj.get('password')
if not username or not password:
return failure('arguments not enough')
# Check if the user exists
try:
Admin.objects.get(username=username)
except Admin.DoesNotExist:
password_md5 = hashlib.md5(password.encode()).hexdigest()
Admin.objects.create(username=username, password_md5=password_md5)
return success(None)
return failure('User already exists.')
else:
return failure('POST')
@csrf_exempt
@need_admin
def admin_change_password(request, admin_id):
"""
Change admin user's password
"""
if request.method == 'POST':
json_obj = json.loads(request.body.decode())
        password = json_obj.get('password')
if not admin_id or not password:
return failure('arguments not enough')
# Check if the user exists
try:
admin = Admin.objects.get(id=admin_id)
except Admin.DoesNotExist:
return failure('user not exists')
# Change the password
password_md5 = hashlib.md5(password.encode()).hexdigest()
admin.password_md5 = password_md5
admin.save()
return success(None)
else:
return failure('POST')
@csrf_exempt
@need_admin
def admin_node_list(request, node_type):
"""
Get node list
"""
if request.method == 'GET':
# Pagination
current = request.GET.get('current')
page_size = request.GET.get('pageSize')
if not current or not page_size or not node_type:
return failure('arguments not enough')
nodes = Node.objects.filter(node_type=node_type)
page = Paginator(nodes, page_size).page(current)
obj_list = page.object_list
results = []
for node in obj_list:
results.append(node.dict())
return success({
'data': results,
'total': nodes.count()
})
else:
return failure('GET')
@csrf_exempt
@need_admin
def admin_create_node(request, node_type):
"""
Create node
"""
if request.method == 'POST':
json_obj = json.loads(request.body.decode())
host = json_obj.get('host')
node_id = json_obj.get('nodeId')
if not node_id:
node_id = uuid.uuid4()
if node_type == 'SHARE':
password = json_obj.get('password')
if not host or not password or not node_type:
return failure('arguments not enough')
try:
Node.objects.get(host=host)
except Node.DoesNotExist:
password_md5 = hashlib.md5(password.encode()).hexdigest()
node = Node(
nodeId=node_id,
host=host,
                    password_md5=password_md5,
create_time=time.time(),
node_type=node_type
)
node.save()
return success(None)
else:
            # If the type of the new node is FETCH, this operation needs extra arguments: a username for HTTP Basic Auth and the author/post fetch URLs
username = json_obj.get('username')
            password = json_obj.get('password')
author_url = json_obj.get('authorUrl')
post_url = json_obj.get('postUrl')
if not host or not username or not password or not author_url or not post_url:
return failure('arguments not enough')
try:
Node.objects.get(host=host)
except Node.DoesNotExist:
node = Node(
nodeId=node_id,
host=host,
password_md5='',
create_time=time.time(),
node_type=node_type,
fetch_author_url=author_url,
fetch_post_url=post_url,
http_username=username,
http_password=password
)
node.save()
return success(None)
return failure('This host address already exists.')
else:
return failure('POST')
@csrf_exempt
@need_admin
def admin_delete_node(request, node_id):
"""
Delete node
"""
if request.method == 'DELETE':
if not node_id:
return failure('arguments not enough')
try:
node = Node.objects.get(nodeId=node_id)
except Node.DoesNotExist:
return failure('The node not exists')
node.delete()
return success(None)
else:
return failure('DELETE')
@csrf_exempt
@need_admin
def admin_set_node_approved(request, node_id):
"""
Set if a node is allowed to connect
"""
if request.method == 'POST':
json_obj = json.loads(request.body.decode())
if_approved = json_obj.get('approved')
if not node_id or not if_approved:
return failure('arguments not enough')
# Check if particular node already exists
try:
node = Node.objects.get(nodeId=node_id)
except Node.DoesNotExist:
return failure('The node not exists.')
node.if_approved = str(if_approved) == '1'
node.save()
return success(None)
else:
return failure('POST')
@csrf_exempt
def get_public_post(request):
"""
Get public data on this server, used for providing data to other nodes
Every different node has its own access password
"""
if request.method == 'GET':
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
node_id, password = base64.b64decode(auth[1]).decode().split(':')
password_md5 = hashlib.md5(password.encode()).hexdigest()
try:
node = Node.objects.get(nodeId=node_id)
except Node.DoesNotExist:
return failure('id not found')
if not node.if_approved or node.password_md5 != password_md5:
# Password is incorrect
return no_auth()
public_posts = Post.objects.filter(visibility='public').order_by('-publishedOn')
result = []
for post in public_posts:
p = post.dict()
p['type'] = 'post'
result.append(p)
return JsonResponse({
'type': 'posts',
'items': result
})
return no_auth()
else:
return failure('GET')
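# Hedged illustration (not part of the original code): a peer node consuming this endpoint
# authenticates with HTTP Basic auth, username = its nodeId and password = the shared secret
# registered by our admin. The '/public/posts/' path below is a placeholder; the real route
# is defined in urls.py. requests is already imported at the top of this module.
def fetch_public_posts_example(host, node_id, password):
    return requests.get(host + '/public/posts/', auth=(str(node_id), password)).json()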
@csrf_exempt
def get_public_author(request):
"""
Get authors on this server, used for providing data to other nodes
Every different node has its own access password
"""
if request.method == 'GET':
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
node_id, password = base64.b64decode(auth[1]).decode().split(':')
password_md5 = hashlib.md5(password.encode()).hexdigest()
try:
node = Node.objects.get(nodeId=node_id)
except Node.DoesNotExist:
return failure('id not found')
                    if not node.if_approved or node.password_md5 != password_md5:
# Password is incorrect
return no_auth()
authors = Author.objects.filter(if_foreign=False)
result = []
for author in authors:
a = author.dict()
a['type'] = 'author'
result.append(a)
return JsonResponse({
"type": "authors",
"items": result
})
return no_auth()
else:
return failure('GET')
@api_view(['GET'])
def download_media_file(request, file_id):
"""
Download media file (for user profile pic)
"""
try:
file = MediaFile.objects.get(id=file_id)
except MediaFile.DoesNotExist:
return failure('file not found')
with open(file.file_path, 'rb') as f:
file_content = f.read()
r = HttpResponse(file_content, content_type='image/png')
r['Content-Disposition'] = "attachment; filename=default.png"
r["Access-Control-Allow-Origin"] = '*'
return r
``` |
{
"source": "404ic/HRNet_for_MARS",
"score": 2
} |
#### File: HRNet_for_MARS/data_utils/visualize.py
```python
import glob
import yaml
import os
import json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.colors as colors
import matplotlib.cm as cm
import math
def plot_frame(project, config_file, frame_num, save=True, markersize=8, figsize=[15, 10]):
config_fid = config_file
with open(config_fid) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
legend_flag=[False,False]
image = glob.glob(os.path.join(project, cfg['DATASET']['ROOT'], 'images', 'MARS_' + cfg['DATASET']['VIEW'] + '_' + f'{frame_num:05d}' + '.jpg'))
if not image:
print("I couldn't find image " + str(frame_num))
return
matched_id = frame_num
config_file_name = config_file.split('/')[-1].split('.')[0]
infile = os.path.join(project, cfg['OUTPUT_DIR'], cfg['DATASET']['DATASET'], cfg['MODEL']['NAME'], config_file_name, 'results', 'MARS_format.json')
with open(infile) as jsonfile:
cocodata = json.load(jsonfile)
pred = [i for i in cocodata['pred_keypoints'] if i['category_id'] == 1 and int(i['image_id']) == matched_id]
gt = [i for i in cocodata['gt_keypoints']['annotations'] if i['category_id'] == 1 and int(i['image_id']) == matched_id]
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:brown', 'tab:pink', 'tab:olive', 'tab:cyan']
im = mpimg.imread(image[0])
plt.figure(figsize=figsize)
plt.imshow(im, cmap='gray')
for pt in gt:
for i, [x, y] in enumerate(zip(pt['keypoints'][::3], pt['keypoints'][1::3])):
plt.plot(x, y, color=colors[i], marker='o', markeredgecolor='k',
markeredgewidth=math.sqrt(markersize)/4, markersize=markersize, linestyle='None',
label='ground truth' if not legend_flag[0] else None)
legend_flag[0]=True
for pt in pred:
for i,[x,y] in enumerate(zip(pt['keypoints'][::3], pt['keypoints'][1::3])):
plt.plot(x, y, color=colors[i], marker='^', markeredgecolor='w',
markeredgewidth=math.sqrt(markersize)/2, markersize=markersize, linestyle='None',
label='predicted' if not legend_flag[1] else None)
legend_flag[1] = True
plt.legend(prop={'size': 14})
    if save:
        save_dir = os.path.join(project, cfg['OUTPUT_DIR'], cfg['DATASET']['DATASET'], cfg['MODEL']['NAME'], config_file_name, 'results')
        # Save before plt.show(); showing (and closing) the window can leave an empty figure behind
        plt.savefig(os.path.join(save_dir, 'frame_' + str(frame_num) + '.jpg'))
    plt.show()
``` |
{
"source": "404neko/bridge",
"score": 2
} |
#### File: 404neko/bridge/main.py
```python
import sys
import json
import hashlib
import datetime
from datetime import timedelta
from flask import Flask
from flask import render_template
from flask import request
from flask import flash
from flask import url_for
from flask import redirect
from flask import session
import requests
sys.path.append('..')
from mod.database import *
import mod.whois
app = Flask(__name__)
app.secret_key = '2333'
SALT = '2333'
DOMAIN = 'http://csrss.tk/'
FORM = '''
<form action="w_" method="GET">
<input name="domain"></input><br>
<input type="checkbox" name="brief">brief</input><br>
<button>Submit</button>
</form>'''
BASE_SITE = '<html><head>%s</head><body>%s</body></html>'
LINK_SITE = '<html><body><p><a href="%s&_=">%s&_=</a></p></body></html>'
JUMP_SITE = '''
<html>
<head>
<title>%s</title>
<script type="text/javascript">
var t = 1;
function showTime(){
t -= 1;
if(t==0){
location.href='%s';
}
setTimeout("showTime()",1000);
}
showTime();
</script>
</head>
<body>Nyan</body></html>
'''
def uhash(password,salt=SALT):
pre_hash = password[0]+salt+password[1:]
Hash=hashlib.md5()
Hash.update(pre_hash)
return Hash.hexdigest()
@app.before_request
def _db_connect():
database.connect()
@app.teardown_request
def _db_close(exc):
if not database.is_closed():
database.close()
@app.teardown_appcontext
def close_database(error):
database.close()
@app.route('/w')
def w():
#@content =
content ='''
<form action="w_" method="GET">
<input name="domain"></input><br>
<input type="checkbox" name="brief">brief</input><br>
<button>Submit</button>
</form>
'''
return BASE_SITE % ('',content,)
@app.route('/w_')
def w_():
domain = request.args.get('domain','')
brief = request.args.get('brief',False)
if domain=='':
return '500'
return BASE_SITE % ('',FORM+'<hr>'+mod.whois.whois(domain,brief),)
@app.route('/',methods=['GET','POST'])
def index():
return 'Nyan'
@app.route('/l233')
def l():
return 'Nyan'
#uid = request.args.get('page',None)
@app.route('/a233',methods=['GET','POST'])
def a():
if request.method=='POST':
url = request.form.get('url','')
if url=='':
return '500'
hash_ = uhash(url)
        content = requests.get(url)
try:
title = content.content.split('<title>')[1].split('</title>')[0]
except:
title = ''
#content
new_url_info = Pool(time=datetime.datetime.now(),uid=hash_,url=url,title=title,content='')
new_url_info.save()
return LINK_SITE % (DOMAIN+'r?uid='+hash_,DOMAIN+'r?uid='+hash_,)
else:
url = request.args.get('url','')
if url=='':
return '500'
hash_ = uhash(url)
respon = requests.get(url)
try:
title = respon.content.split('<title>')[1].split('</title>')[0]
except:
title = ''
#content
new_url_info = Pool(time=datetime.datetime.now(),uid=hash_,url=url,title=title,content='')
new_url_info.save()
return LINK_SITE % (DOMAIN+'r?uid='+hash_,DOMAIN+'r?uid='+hash_,)
@app.route('/r',methods=['GET'])
def r():
uid = request.args.get('uid',None)
for char in uid:
if char not in 'qwertyuioplkjhgfdsazxcvbnm0123456789':
return redirect(url_for('index'))
ip = request.headers.get('X-Real-IP','127.0.0.1')
ua = ''
for tuple_ in request.headers:
if tuple_[0].lower()=='user-agent':
ua = tuple_[1]
referer = ''
if request.referrer==None:
pass
else:
referer = request.referrer
new_record = Record(ip=ip,ua=ua,referer=referer,time=datetime.datetime.now())
new_record.save()
url_info = Pool.select().where(Pool.uid==uid)
for url_info_ in url_info:
return JUMP_SITE % (url_info_.title,url_info_.url,)
return '404'
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5000,debug=1)
``` |
{
"source": "404neko/Marisa",
"score": 3
} |
#### File: Marisa/interface/error.py
```python
class SystemCallFault(Exception):
def __init__(self):
pass
def __str__(self):
        return 'System did not give the expected response.'
class NecessaryLibraryNotFound(Exception):
def __init__(self,value = '?'):
self.value = value
def __str__(self):
return 'Necessary library \'%s\' not found.'%self.value
```
#### File: Marisa/Module/__twitter__.py
```python
import requests
import os
import re
import socket
class Action:
def __init__(self,text,env):
self.env = env
self.text = text
def proxy(self,port = 1080):
def scan(port):
s = socket.socket()
if s.connect_ex(('localhost', port)) == 0:
s.close()
return True
return False
if scan(port):
import socks
socks.set_default_proxy(socks.SOCKS5, 'localhost', 1080, rdns=True)
socket.socket = socks.socksocket
return True
else:
return False
def run(self):
if self.proxy():
re_get_pic = 'data-image-url="((https|http)://pbs.twimg.com/media/.*?.(jpg|png|gif))"'
print 1
s_url = re.findall(re_get_pic,requests.get(self.text,verify=False).content)[0]
url = s_url+':orig'
print 2
data = requests.get(url,verify=False)
f = open(s_url.split('/')[-1],'wb')
f.write(data.content)
f.close()
return {'value':url,'notice':['twitter','Picture has been downloaded.'],}
else:
f = open('record.txt','a+')
f.write(self.text)
f.write('\n')
f.close()
return {'value':'','notice':['twitter','No proxy, url has been recorded.'],}
@staticmethod
def test(text):
import re
return len(re.findall('((https|http)://twitter.com/.*?/status/.*?)',text))>0
``` |
{
"source": "404neko/Risu",
"score": 2
} |
#### File: 404neko/Risu/Risu.py
```python
import traceback
import io
import hashlib
import shutil
import os
import base64
from PIL import Image
from requests import Session
class RisuExtension:
def __init__(self, session):
self.session = session
def can_handle(self, url):
return False
def handle(self, url):
raise Exception
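# Hedged example (not part of the original code): a minimal extension that handles base64
# data: URLs itself, so Risu never falls back to the disk or network paths for them.
class DataUrlExtension(RisuExtension):
    def can_handle(self, url):
        return url.startswith('data:image/')
    def handle(self, url):
        # "data:image/png;base64,<payload>" -> raw image bytes
        return base64.b64decode(url.split(',', 1)[1])
# Usage sketch: Risu(extensions=[DataUrlExtension(Session())]).get_image('data:image/png;base64,...')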
class Risu:
session = None
path = 'risu'
extensions = []
cached = {}
default_image = 'iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAIAAADTED8xAAAACXBIWXMAAAsTAAALEwEAmpwYAAAB+0lEQVR42u3TQQ0AAAjEMED5SeeNBloJS9ZJCr4aCTAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAGAAOAAcAAYAAwABgADAAGAAOAAcAAYAAwABgADAAGAAOAAcAAYAAwABgADAAGAAOAAcAAYAAwABgADAAGAAOAAcAAYAAwABgADAAGAAOAAcAAYAAwABgADAAGAAOAATAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAYAA4ABwABgADAAGAAMAAbAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHAAGAAMAAYAAwABgADgAHgWu7LA4CJx71QAAAAAElFTkSuQmCC'
def initialize_filesystem(self):
for item in ['', 'image', 'thumbnail']:
try:
os.mkdir(self.path + os.sep + item)
except:
pass
for item in os.listdir(os.sep.join([self.path, 'image'])):
self.cached[item] = 0
def on_fail(self):
expected_path = os.sep.join([self.path, 'default'])
if os.path.isfile(expected_path):
pass
else:
with open(expected_path, 'wb') as file_handle:
                file_handle.write(base64.b64decode(self.default_image))
return expected_path
def __init__(self, session=None, path='risu', extensions=[]):
if session==None:
session = Session()
self.session = session
self.path = path
self.extensions = extensions
self.initialize_filesystem()
def get_hash(self, content):
md5 = hashlib.md5()
md5.update(content.encode('UTF-8'))
return md5.hexdigest()
def get_image(self, url):
if url.startswith('file:///'): url = url[len('file:///'):]
hash_ = self.get_hash(url)
expected_path = os.sep.join([self.path, 'image', hash_])
if hash_ in self.cached:
return expected_path
for extension in self.extensions:
if extension.can_handle(url):
                content = extension.handle(url)
                with open(expected_path, 'wb') as file_handle:
                    file_handle.write(content)
                return expected_path  # the extension produced the image; skip the file/http fallbacks
if os.path.isfile(url):
storage_path = expected_path
shutil.copy2(url, storage_path)
return storage_path
if url[:7] in ['https:/', 'http://']:
try:
response = self.session.get(url)
if response:
with open(expected_path, 'wb') as file_handle:
file_handle.write(response.content)
return expected_path
except:
return self.on_fail()
# todo: base64
return self.on_fail()
def get_thumbnail(self, url, _size=[256, 256]):
if type(_size)==int:
size = [_size, _size]
else:
size = _size
if url.startswith('file:///'): url = url[len('file:///'):]
hash_ = self.get_hash(url)
expected_path = os.sep.join([self.path, 'thumbnail', hash_])
if os.path.isfile(expected_path):
return expected_path
image_path = self.get_image(url)
data = None
with open(image_path, 'rb') as file_handle:
data = file_handle.read()
file_handle = io.BytesIO(data)
file_handle.seek(0)
image = Image.open(file_handle)
image.thumbnail(size)
image.save(expected_path, 'PNG')
file_handle.close()
return expected_path
if __name__=='__main__':
risu = Risu()
print(risu.get_image('http://wx4.sinaimg.cn/large/006z09Zkly1gana5bcf62j30yi0w4gzm.jpg'))
print(risu.get_image('https://wx4.sinaimg.cn/large/006z09Zkly1gana5bcf62j30yi0w4gzm.jpg'))
print(risu.get_thumbnail('http://wx4.sinaimg.cn/large/006z09Zkly1gana5bcf62j30yi0w4gzm.jpg'))
print(risu.get_thumbnail('https://wx4.sinaimg.cn/large/006z09Zkly1gana5bcf62j30yi0w4gzm.jpg'))
``` |
{
"source": "404nofound/CS521-Info-Str-Python",
"score": 4
} |
#### File: Homework/Homework4/10_9.py
```python
import math
def mean(x):
sum = 0
for number in x:
sum += float(number)
mean = sum / len(x)
return mean
def deviation(x):
mean1 = mean(x)
    middle = 0
for number in x:
middle += pow(float(number) - mean1, 2)
deviation = math.sqrt(middle/(len(x)-1))
return deviation
input = input('Enter numbers: ')
list = input.split()
print ('The mean is %.2f' %mean(list))
print ('The standard deviation is %.5f' %deviation(list))
```
#### File: Homework/Homework4/6_14.py
```python
def m(i):
result = 0
for n in range(1,i+1):
result += pow((-1), n+1)/(2 * n - 1)
return 4 * result
list = [1, 101, 201, 301, 401, 501, 601, 701, 801, 901]
print ('i\tm(i)')
for number in list:
print ('%d\t%.4f' %(number, m(number)))
```
#### File: Homework/Homework5/new_7_3.py
```python
class Account:
def __init__(self, id = 0, balance = 100, annualInterestRate = 0):
self.__id = id
self.__balance = balance
exception = Exception()
if exception.check(annualInterestRate) == False:
print ("The annual interest rate cannot be negative!")
self.__annualInterestRate = 0
else:
self.__annualInterestRate = annualInterestRate
def get_id(self):
return self.__id
def get_balance(self):
return self.__balance
def get_annualInterestRate(self):
return self.annualInterestRate
def set_id(self, value):
self.__id = value
def set_balance(self, value):
self.__balance = value
def set_annualInterestRate(self, value):
self.__annualInterestRate = value
def getMonthlyInterestRate(self):
return (self.__annualInterestRate / 12)
def getMonthlyInterest(self):
return self.__balance * self.getMonthlyInterestRate() / 100
def withdraw(self, amount):
exception = Exception()
if exception.check(amount) == False:
print ("The withdraw amount cannot be negative!")
else:
self.__balance -= amount
def deposit(self, amount):
exception = Exception()
if exception.check(amount) == False:
print ("The deposit amount cannot be negative!")
else:
self.__balance += amount
class Exception:
def check(self, amount):
        if amount < 0:
            return False
        return True
def main():
print ("******************************************\nCondition 1: Input negative annual interest")
account1 = Account(1122, 20000, -4.5)
print ("******************************************\nCondition 2: Withdraw negative amount")
account2 = Account(1122, 20000, 4.5)
account2.withdraw(-2500)
print ("******************************************\nCondition 3: Deposit negative amount")
account2.deposit(-3000)
print ("******************************************\nCondition 4: Everything is correct")
account2.withdraw(2500)
account2.deposit(3000)
print ("id: %d \nbalance: $%f \nmonthly interest rate: %f%% \nmonthly interest: $%f"
% (account2.get_id(), account2.get_balance(), account2.getMonthlyInterestRate(), account2.getMonthlyInterest()))
if __name__ == '__main__':
main()
```
#### File: Homework/Homework6/12_1.py
```python
import math
from GeometricObject import GeometricObject
class Triangle(GeometricObject):
def __init__(self, side1 = 1.0, side2 = 1.0, side3 = 1.0, *args):
super().__init__(*args)
self.__side1 = side1
self.__side2 = side2
self.__side3 = side3
def get_side1(self):
return self.__side1
def get_side2(self):
return self.__side2
def get_side3(self):
return self.__side3
def getArea(self):
p = self.getPerimeter() / 2
area = math.sqrt(
p *
(p - self.__side1) *
(p - self.__side2) *
(p - self.__side3)
)
return area
def getPerimeter(self):
return self.__side1 + self.__side2 + self.__side3
def __str__(self):
return "Triangle: side1 = " + str(self.__side1) + " side2 = " + str(self.__side2) + " side3 = " + str(self.__side3)
def main():
sides = input("Please enter three sides' length (Using ',' to split): ")
side = [float(i) for i in sides.replace(' ', '').split(',')]
color = input("Please enter a color: ")
filled = eval(input("please enter filled or not(1 mean Yes, 0 mean No): "))
fill = True
if filled == 0:
fill = False
triangle = Triangle(side[0], side[1], side[2], color, fill)
print (triangle.__str__())
print ("The Area: %.1f\nThe Perimeter: %.1f\nThe Color: %s\nThe filled: %s" %(triangle.getArea(), triangle.getPerimeter(), triangle.get_color(), triangle.is_filled()))
if __name__ == '__main__':
main()
```
#### File: Homework/Homework6/12_3.py
```python
from account import Account
class atm(Account):
def __init__(self, id):
super().__init__()
self.__id = id
def show_menu(self):
print ("\nMain menu\n1: check balance\n2: withdraw\n3: deposit\n4: exit")
def main():
again = True
next = True
while again:
id = eval(input("Enter an account id: "))
if id in range(0,10):
again = False
else:
print ("Please enter a correct id")
ATM = atm(id)
while next:
ATM.show_menu()
choice = eval(input("Enter a choice: "))
if choice == 1:
print ("The balance is ", ATM.get_balance())
elif choice == 2:
wit = eval(input("Enter an amount to withdraw: "))
ATM.withdraw(wit)
elif choice == 3:
dep = eval(input("Enter an amount to deposit: "))
ATM.deposit(dep)
elif choice == 4:
print ("")
next = False
main()
if __name__ == '__main__':
main()
```
#### File: Homework/Homework6/GeometricObject.py
```python
class GeometricObject:
def __init__(self, color="green", filled=True):
self.__color = color
self.__filled = filled
def get_color(self):
return self.__color
def set_color(self, color):
self.__color = color
def is_filled(self):
return self.__filled
def set_filled(self, filled):
self.__filled = filled
def __str__(self):
return "color: " + self.__color + \
" and filled: " + str(self.__filled)
```
#### File: Lecture/Lecture12/L12_Fib_gen.py
```python
from itertools import islice
def fib():
prev, curr = 0, 1
while True:
yield curr
prev, curr = curr, prev + curr
f = fib()
print(list(islice(f, 0, 10)))
```
#### File: Lecture/Lecture12/L12_iter3.py
```python
class Bookshelf:
def __init__(self):
self.__book_list = []
def addBook(self, author, title, price):
self.__book_list.append((author, title, price))
class BookIter:
def __init__(self, bookshelf):
self.reflist = [b for b in bookshelf._Bookshelf__book_list]
def __iter__(self):
return self
def __next__(self):
raise StopIteration
class BookAuthorIter(BookIter):
def __iter__(self):
return iter(sorted(self.reflist, key=lambda x:x[0]))
# dictionary for lookingup country of the author
author_country = {"<NAME>" : "UK", "<NAME>" : "UK", \
"<NAME>" : "US", "<NAME>" : "Russia", \
"<NAME>" : "Russia"}
class BookCountryIter(BookIter):
def __init__(self, bookshelf):
super().__init__(bookshelf)
self.book_iter = \
iter(sorted([(b, author_country[b[0]]) for b in self.reflist],
key=lambda x : x[1]))
#books_with_country = [(b, author_country[b[0]]) for b in self.reflist]
#books_sort_country = sorted(books_with_country, key=lambda x : x[1])
#self.book_iter = iter(books_sort_country)
def __next__(self):
return next(self.book_iter)[0]
myshelf = Bookshelf()
myshelf.addBook("<NAME>", "Pride and Prejudice", 6.95)
myshelf.addBook("<NAME>", "Romeo and Juliet", 10.99)
myshelf.addBook("<NAME>", "Adventures of Huckleberry Finn", 5.95)
myshelf.addBook("<NAME>", "Crime and Punishment", 12.95)
myshelf.addBook("<NAME>", "<NAME>", 9.56)
myshelf.addBook("<NAME>", "Macbeth", 5.99)
myshelf.addBook("<NAME>", "War and Peace", 10.36)
for b in BookAuthorIter(myshelf):
print(b)
print()
for b in BookCountryIter(myshelf):
print(b)
```
#### File: Lecture/Lecture12/L12_toh.py
```python
def toh(n, origin, target, interim):
if n < 1:
raise Exception('invalid input')
if n == 1:
print('Move Disc', n, 'from Pole', origin, 'to Pole', target)
return
toh(n-1, origin, interim, target)
print('Move Disc', n, 'from Pole', origin, 'to Pole', target)
toh(n-1, interim, target, origin)
return
import sys
if __name__ == '__main__':
toh(int(sys.argv[1]), 0, 1, 2)
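# Worked example (added for clarity): `python L12_toh.py 2` prints the classic three-move solution:
#   Move Disc 1 from Pole 0 to Pole 2
#   Move Disc 2 from Pole 0 to Pole 1
#   Move Disc 1 from Pole 2 to Pole 1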
``` |
{
"source": "404nofound/MBTA_Python",
"score": 4
} |
#### File: MBTA_Python/MBTA/step1_group.py
```python
import pandas as pd
import numpy as np
import os
# Function: divides all data into groups by time period, like [1AM-3AM; 3AM-5AM ...]
def binning(column, points, labels=None, month=0, stop=0):
'''
    Notes: The raw data comes from the MBTA website.
    The time format runs from 3:00 to 27:00, meaning 3:00 AM today through 3:00 AM the next day.
    In the csv file, times are stored as integers, e.g. 300 means 3:00 AM and 1500 means 3:00 PM.
:param column: use which column to divide, here we use TIME_PERIOD column
:param points: the break points we use to divide
:param labels: the labels for result groups that have been divided
:param month: used to record error
:param stop: used to record error
'''
# Get max time and min time from data
minval = column.min()
maxval = column.max()
# Handle break points and labels errors and print
while maxval <= points[len(points)-1]:
print ('Month: ' + str(month) + ' Stop: ' + stop)
del points[len(points)-1]
del labels[len(points)-1]
while minval >= points[0]:
print ('Month: ' + str(month) + ' Stop: ' + stop)
del points[0]
del labels[0]
# The full break points includes min, max time
break_points = [minval] + points + [maxval]
    # If the user doesn't provide labels, fall back to integer labels; labels are provided here, so this branch is not used
if not labels:
labels = range(len(points)+1)
# cut() function to divide data into groups and return them
columnbin = pd.cut(column, bins=break_points, labels=labels, include_lowest=True)
return columnbin
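# Hedged usage example (illustrative only; nothing in the pipeline below calls it):
def _binning_demo():
    # Four raw TIME_PERIOD values and two break points give three labelled bins.
    demo = pd.Series([330, 600, 800, 1000])
    return binning(demo, [500, 700], ['3AM-5AM', '5AM-7AM', '7AM-9AM']).tolist()
    # -> ['3AM-5AM', '5AM-7AM', '7AM-9AM', '7AM-9AM']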
# Function, make directory. if exist, do nothing
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
# Use Pandas to read each month's raw data from January to July; MBTA has only published 7 months of data for this year so far
for month in range(1,8):
csvfile = pd.read_csv('/Users/Eddy/Desktop/Python_MBTA/MBTA_Raw_Entry_Data/2018_0' + str(month) + '.csv')
# Format file to prepare data analysis
df = pd.DataFrame(csvfile)
# Divide data into different part group by stop id
grouped = df.groupby('GTFS_STOP_ID', as_index=False)
# For every stop's data, using binning() function to divide into different time period
for stop, group in grouped:
# Define break points
points = [500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100, 2300, 2500]
# Define labels
labels = ['3AM-5AM', '5AM-7AM', '7AM-9AM', '9AM-11AM', '11AM-1PM', '1PM-3PM', '3PM-5PM', '5PM-7PM', '7PM-9PM',
'9PM-11PM', '11PM-1AM', '1AM-3AM']
# Create new column [TIME_PERIOD_Bin] for the result returned by binning() function
group['TIME_PERIOD_Bin'] = binning(group['TIME_PERIOD'], points, labels, month, stop)
# Format all the data again
df_station = pd.DataFrame(group)
        # At this point the data has been grouped by stop_id; now group it again by the time period we created
group_time = df_station.groupby('TIME_PERIOD_Bin')
# Make directory to store new csv files
mkdir('/Users/Eddy/Desktop/Python_MBTA/Step1/' + str(month))
# Calculate the sum of entry people number for every stops and every periods
data1 = pd.DataFrame(group_time['STATION_ENTRIES'].agg(np.sum))
# Write into the csv files
data1.to_csv('/Users/Eddy/Desktop/Python_MBTA/Step1/' + str(month) + "/" + stop + '.csv')
``` |
{
"source": "404notfound-3/ig-profile-scraper",
"score": 3
} |
#### File: 404notfound-3/ig-profile-scraper/scraper.py
```python
import csv
import time
import json
import random
import subprocess
from os import path, name
from selenium import webdriver
from traceback import format_exc
from bs4 import BeautifulSoup as bs
from pyvirtualdisplay import Display
from config import URL, slack, proxies, date, timee, weekday, lps, lprint, get_tor_ip, renew_tor_ip, profile, OUTPUT_FOLDER, usernames, ids, check_tor_conn
## This block will check for your OS.
if name == "nt":## To wake up Tor executables(only windows).
subprocess.Popen(r".\Tor\tor.exe -f .\Tor\torrc", shell = False)
elif name == "posix":## To create virtual display(only linux).
    display = Display(visible = False, size = (1366, 768))
display.start()
lprint("[+]", "Successfully started virtual display", "green")
check_tor_conn()
browser = webdriver.Firefox(firefox_profile = profile)
browser.delete_all_cookies() ## just in case if your browser...
## this function will take a soup as input and will return some valuable data.
def parse_data(page_source_soup):
data = {}
body = page_source_soup.find("body")
script = body.find("script", text=lambda t: t.startswith("window._sharedData"))
script_json = script.string.split(" = ", 1)[1].rstrip(";")
script_json = json.loads(script_json)
data["full_name"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["full_name"].encode())
data["biography"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["biography"].encode())
data["followers"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_followed_by"]["count"])
data["following"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_follow"]["count"])
data["date"], data["time"], data["weekday"] = date(), timee(), weekday()
data["is_private"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_private"])
data["is_business_account"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_business_account"])
data["is_verified"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_verified"])
data["logging_page_id"] = script_json["entry_data"]["ProfilePage"][0]["logging_page_id"]
data["id"] = script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"]
data["has_channel"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["has_channel"])
data["has_blocked_viewer"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["has_blocked_viewer"])
data["joined_recently"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_joined_recently"])
data["external_url"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["external_url"])
data["external_url_linkshimmed"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["external_url_linkshimmed"])
data["connected_fb_page"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["connected_fb_page"])
data["edge_felix_video_timeline"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_felix_video_timeline"]["count"])
data["edge_owner_to_timeline_media"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["count"])
data["edge_saved_media"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_saved_media"]["count"])
data["edge_media_collections"] = str(script_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_media_collections"]["count"])
## All values below for further data analysis
data["viewerId"] = script_json["config"]["viewerId"]
data["csrf_token"] = script_json["config"]["csrf_token"]
data["device_id"] = script_json["device_id"]
data["platform"] = script_json["platform"]
data["rollout_hash"] = script_json["rollout_hash"]
data["nonce"] = script_json["nonce"]
return data
## Passes the parsed html to the parse_data function above and returns its result.
def scrape_data(username):
browser.get(URL.format(username))
return parse_data(bs(browser.page_source, "html.parser"))
## This function takes the data dictionary as input and saves all of the data "smartly" to a csv file.
def check_csv_file(data, filename):
## Check for the existing csv file
if path.isfile(OUTPUT_FOLDER + filename):
with open(OUTPUT_FOLDER + filename, "r") as rf:
last_line_items, true_false_list, specific_key_index = rf.readlines()[-1].split(","), [], [0, 2, 3, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
for key_index in specific_key_index:
true_false_list.append(data[list(data.keys())[key_index]] == last_line_items[key_index])
if False in true_false_list:
with open(OUTPUT_FOLDER + filename, "a", newline = "") as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow([data[keys] for keys in data.keys()])
## This block will create an csv file if you are runnig this script first time.
else:
with open(OUTPUT_FOLDER + filename, "a", newline = "") as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(list(data.keys()))
csvwriter.writerow([data[keys] for keys in data.keys()])
## this function will help us to create randomness in scraper, so we can save our little a** from facebook's anti scraping tools.
def random_sleep_time(secends):
return random.randint(secends-(secends * 0.3), secends+(secends * 0.4))
## Exit options will called if you pressed "CTRL+C"
def exit_options():
print("\n\nWelcome to the exit options, you have choices below\n1). Exit and close this script\n\n2). Do nothing, I intrupted accidently")
your_choice = int(input("\nYour command >>> "))
if your_choice == 1:
exit()
else: pass
## This function takes instagram credentials as input and logs that id into the browser.
## Warning: please make sure you are using temporary ids for signing in and scraping profiles.
def ig_login(email, password):
lps("[*]", f"Trying to logging in using {email}", "yellow")
browser.get("https://instagram.com")
username_input_box = browser.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input')
username_input_box.click()
time.sleep(3.7)
for words in email:
username_input_box.send_keys(words)
time.sleep(0.3)
password_input_box = browser.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input')
password_input_box.click()
time.sleep(1.6)
for words in password:
password_input_box.send_keys(words)
time.sleep(0.25)
browser.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button/div').click()
lps("[+]", f"Successfully logged in {email}", "green")
time.sleep(15)
browser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button').click()
time.sleep(15)
browser.find_element_by_xpath('/html/body/div[4]/div/div/div/div[3]/button[2]').click()
time.sleep(15)
## Ask the browser to take and save a screenshot "smartly".
t1 = None
def save_ss(event = None):
global t1
t2 = time.time()
if not t1 or ((t2-t1)>120):# 2 Minutes
browser.get_screenshot_as_file(f'./ss_log/browser/{date()}__{timee()}.png')
lps("[+]", "Browser Screenshot Successfully saved at ./ss_log/browser/", "green")
t1 = time.time() ## t1 will be replaced with current time if we succeed to take a screenshot.
else: lprint("[-]", f"Screenshot event cancelled, last ss was taken just {t2-t1} secends ago", "yellow")
## To send alerts to your slack installed devices
t3 = None
def slack_hook(username, exception):
global t3
t4 = time.time()
if not t3 or ((t4-t3)>120):# 2 Minutes
lps("[!]", f"Exception Occurred while scraping for {username}\n{str(exception)}", "red")
t3 = time.time() ## t3 will be replaced with current time.
else: lprint("[-]", f"Slack text cancelled, last text was just posted {t4-t3} secends ago", "yellow")
## This function logs your id out of the browser; after logout succeeds, all cookies are deleted so a new id can log in.
def ig_logout(email):
browser.get("https://instagram.com/")
lps("[*]", f"Trying to logging out {email} ", "yellow")
browser.find_element_by_xpath('//*[@id="react-root"]/section/nav/div[2]/div/div/div[3]/div/div[5]/span').click()
time.sleep(1)
browser.find_element_by_xpath('//div[text() = "Log Out"]').click()
browser.delete_all_cookies()
lps("[+]", f"Successfully logged out {email}", "green")
temp_id1 = random.choice([keys for keys in ids.keys()]) ## To load credentials from ids dictionary (randomly).
ig_login(email = temp_id1, password = ids[temp_id1]) ## To login your temprary id in browser
t5 = time.time()
while True: ## To keep running this scraper.
    for username in usernames: ## load an instagram username from the usernames list loaded previously
filename = f"{username}.csv" ## name of csv file
try:
data = scrape_data(username)
check_csv_file(data, filename)
lprint("[+]", f"\n{username} {data}", random.choice(["magenta", "cyan", "blue"]))
sleep_time = random_sleep_time(120)# 2 Minutes
lprint("[*]", f"Sleeping for {sleep_time} secends", "green")
time.sleep(sleep_time)
            ## Log out automatically if the script has been running for the past 8 hours, otherwise instagram will block our temporary id.
if (time.time()-t5) > (8*3600):# 8 Hours
ig_logout(email = temp_id1)
t5, temp_id2 = time.time(), random.choice([keys for keys in ids.keys()])
ig_login(email = temp_id2, password = ids[temp_id2]) ## login new id
                temp_id1 = temp_id2 ## variable is replaced once the automatic re-login succeeds.
except KeyboardInterrupt:
lprint("[-]", "CTRL+C detected, Taking you to exit options...", "red")
exit_options()
except Exception as e:
            exception = format_exc() ## format the current exception's traceback
lprint("[!]", f"Exception Occurred while scraping for {username}\n{str(exception)}", "red")
save_ss()
slack_hook(username, exception)
``` |
{
"source": "404NotFound-401/DjangoTutorial",
"score": 2
} |
#### File: HelloWorldWebsite/searchTest/views.py
```python
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.template import loader
from django.http import HttpResponse
from django.views import generic
from .models import Movie
from . import searchapi
from django.urls import reverse
# Create your views here.
class Search(generic.DetailView):
model = Movie
template_name = 'searchTest.html'
def get(self, request, *args, **kwargs):
context = {'movieId':'Please enter the film name'}
return render(request, self.template_name, context)
def post(self, request, *args, **kwargs):
if 'movieName' in request.POST:
print("Get movie name")
context = searchapi.getName(request.POST['movieName'])
else:
print("Wrong act")
return redirect('mainpage')
return render(request, "home/result.html", context)
``` |
{
"source": "404NotFoundJ/ytubr",
"score": 2
} |
#### File: youtube_dl/extractor/viewlift.py
```python
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
determine_ext,
int_or_none,
js_to_json,
parse_duration,
)
class ViewLiftBaseIE(InfoExtractor):
_DOMAINS_REGEX = r'(?:snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|monumentalsportsnetwork|vayafilm)\.com|kesari\.tv'
class ViewLiftEmbedIE(ViewLiftBaseIE):
_VALID_URL = r'https?://(?:(?:www|embed)\.)?(?:%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f-]{36})' % ViewLiftBaseIE._DOMAINS_REGEX
_TESTS = [{
'url': 'http://embed.snagfilms.com/embed/player?filmId=74849a00-85a9-11e1-9660-123139220831&w=500',
'md5': '2924e9215c6eff7a55ed35b72276bd93',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
}, {
# invalid labels, 360p is better that 480p
'url': 'http://www.snagfilms.com/embed/player?filmId=17ca0950-a74a-11e0-a92a-0026bb61d036',
'md5': '882fca19b9eb27ef865efeeaed376a48',
'info_dict': {
'id': '17ca0950-a74a-11e0-a92a-0026bb61d036',
'ext': 'mp4',
'title': 'Life in Limbo',
}
}, {
'url': 'http://www.snagfilms.com/embed/player?filmId=0000014c-de2f-d5d6-abcf-ffef58af0017',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:embed\.)?(?:%s)/embed/player.+?)\1' % ViewLiftBaseIE._DOMAINS_REGEX,
webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>This film is not playable in your area.<' in webpage:
raise ExtractorError(
'Film %s is not playable in your area.' % video_id, expected=True)
formats = []
has_bitrate = False
for source in self._parse_json(js_to_json(self._search_regex(
r'(?s)sources:\s*(\[.+?\]),', webpage, 'json')), video_id):
file_ = source.get('file')
if not file_:
continue
type_ = source.get('type')
ext = determine_ext(file_)
format_id = source.get('label') or ext
if all(v == 'm3u8' or v == 'hls' for v in (type_, ext)):
formats.extend(self._extract_m3u8_formats(
file_, video_id, 'mp4', m3u8_id='hls'))
else:
bitrate = int_or_none(self._search_regex(
[r'(\d+)kbps', r'_\d{1,2}x\d{1,2}_(\d{3,})\.%s' % ext],
file_, 'bitrate', default=None))
if not has_bitrate and bitrate:
has_bitrate = True
height = int_or_none(self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None))
formats.append({
'url': file_,
'format_id': 'http-%s%s' % (format_id, ('-%dk' % bitrate if bitrate else '')),
'tbr': bitrate,
'height': height,
})
field_preference = None if has_bitrate else ('height', 'tbr', 'format_id')
self._sort_formats(formats, field_preference)
title = self._search_regex(
[r"title\s*:\s*'([^']+)'", r'<title>([^<]+)</title>'],
webpage, 'title')
return {
'id': video_id,
'title': title,
'formats': formats,
}
class ViewLiftIE(ViewLiftBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?P<domain>%s)/(?:films/title|show|(?:news/)?videos?)/(?P<id>[^?#]+)' % ViewLiftBaseIE._DOMAINS_REGEX
_TESTS = [{
'url': 'http://www.snagfilms.com/films/title/lost_for_life',
'md5': '19844f897b35af219773fd63bdec2942',
'info_dict': {
'id': '0000014c-de2f-d5d6-abcf-ffef58af0017',
'display_id': 'lost_for_life',
'ext': 'mp4',
'title': 'Lost for Life',
'description': 'md5:fbdacc8bb6b455e464aaf98bc02e1c82',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 4489,
'categories': ['Documentary', 'Crime', 'Award Winning', 'Festivals']
}
}, {
'url': 'http://www.snagfilms.com/show/the_world_cut_project/india',
'md5': 'e6292e5b837642bbda82d7f8bf3fbdfd',
'info_dict': {
'id': '00000145-d75c-d96e-a9c7-ff5c67b20000',
'display_id': 'the_world_cut_project/india',
'ext': 'mp4',
'title': 'India',
'description': 'md5:5c168c5a8f4719c146aad2e0dfac6f5f',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 979,
'categories': ['Documentary', 'Sports', 'Politics']
}
}, {
# Film is not playable in your area.
'url': 'http://www.snagfilms.com/films/title/inside_mecca',
'only_matching': True,
}, {
# Film is not available.
'url': 'http://www.snagfilms.com/show/augie_alone/flirting',
'only_matching': True,
}, {
'url': 'http://www.winnersview.com/videos/the-good-son',
'only_matching': True,
}, {
'url': 'http://www.kesari.tv/news/video/1461919076414',
'only_matching': True,
}, {
# Was once Kaltura embed
'url': 'https://www.monumentalsportsnetwork.com/videos/john-carlson-postgame-2-25-15',
'only_matching': True,
}]
def _real_extract(self, url):
domain, display_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, display_id)
if ">Sorry, the Film you're looking for is not available.<" in webpage:
raise ExtractorError(
'Film %s is not available.' % display_id, expected=True)
film_id = self._search_regex(r'filmId=([\da-f-]{36})"', webpage, 'film id')
snag = self._parse_json(
self._search_regex(
r'Snag\.page\.data\s*=\s*(\[.+?\]);', webpage, 'snag'),
display_id)
for item in snag:
if item.get('data', {}).get('film', {}).get('id') == film_id:
data = item['data']['film']
title = data['title']
description = clean_html(data.get('synopsis'))
thumbnail = data.get('image')
duration = int_or_none(data.get('duration') or data.get('runtime'))
categories = [
category['title'] for category in data.get('categories', [])
if category.get('title')]
break
else:
title = self._search_regex(
r'itemprop="title">([^<]+)<', webpage, 'title')
description = self._html_search_regex(
r'(?s)<div itemprop="description" class="film-synopsis-inner ">(.+?)</div>',
webpage, 'description', default=None) or self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
duration = parse_duration(self._search_regex(
r'<span itemprop="duration" class="film-duration strong">([^<]+)<',
webpage, 'duration', fatal=False))
categories = re.findall(r'<a href="/movies/[^"]+">([^<]+)</a>', webpage)
return {
'_type': 'url_transparent',
'url': 'http://%s/embed/player?filmId=%s' % (domain, film_id),
'id': film_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'categories': categories,
'ie_key': 'ViewLiftEmbed',
}
``` |
{
"source": "404-NOTFOUND/SHARESPACE-Server",
"score": 2
} |
#### File: sharespace/views/main.py
```python
from flask import (
render_template,
flash,
redirect,
request,
abort,
url_for
)
from flask_login import (
current_user,
login_user,
logout_user,
login_required
)
from werkzeug.utils import secure_filename
import os, base64, random
import image_classification
from server import app
from server.database import *
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])  # compared against the part after the last '.', so no leading dot
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload', methods=['GET', 'POST'])
@login_required
def upload():
if request.method == 'POST':
if 'file' not in request.files: # no file part in POST request
return redirect(request.url)
file = request.files['file']
if file.filename == '': # no file selected
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# return redirect('/view/' + get_img_id(filename))
newpost = Post(
title = 'test',
author = current_user.username,
article = 'test',
image_url = url_for('static', filename='uploads/' + filename),
tags = ['a', 'b', 'c']
)
return redirect(url_for('backdoor'))
return render_template('vrview/upload.html')
``` |
{
"source": "404-Program-not-found/Discord-AntiScamBot",
"score": 3
} |
#### File: 404-Program-not-found/Discord-AntiScamBot/Moderation.py
```python
import json
import re
from discord.ext import commands
from thefuzz import process
class moderation(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Default Values. It is not recommended to edit this. Use the built-in commands instead
self.defaultUpper = 98
self.defaultLower = 88
@commands.Cog.listener("on_message")
async def phishing_detection(self, message):
with open("database.json", "r") as f:
master = json.load(f)
detect = master.get("server_settings").get(str(message.guild.id))
if detect and detect.get("detect_phishing"):
links = re.findall(r'(https?://[^\s]+)', message.content) # Find all links in a message
if links and not message.author.bot:
# Get Upper and Lower Bounds
upper = detect.get("upper") or self.defaultUpper
lower = detect.get("lower") or self.defaultLower
links = [re.sub(r'https?://', '', s).split("/")[0].lower() for s in
links] # Clean up link for better fuzzy match
# Loops through every link to see if it is a suspected phishing domain
for i in links:
extracted = process.extractOne(i, master["domain_whitelist"])
if upper >= extracted[1] >= lower:
await message.delete() # Delete the Message
await message.channel.send(
f"{message.author.mention} **Uh Oh.**\n"
f"I detected a possible phishing link and have automatically removed it. "
f"Your link matched a known domain by {extracted[1]}%") # Send a message to the author telling them that the message has been removed
break # Stop looping as it is no longer necessary
@commands.group(description="Main settings for moderation related commands and features")
@commands.check_any(commands.has_guild_permissions(administrator=True))
async def mod_settings(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send('Invalid sub command passed')
@mod_settings.command(
description="Toggles the link detection feature. Off by default")
async def toggle_link_detect(self, ctx):
# Get the Database file
with open('database.json', 'r') as f:
channels = json.load(f)
channels.setdefault("server_settings", {})
settings = channels["server_settings"]
settings.setdefault(str(ctx.guild.id), {}) # In case if the ctx.guild.id entry does not exist
if not settings.get(str(ctx.guild.id)).get("detect_phishing"):
settings[str(ctx.guild.id)]["detect_phishing"] = True
await ctx.send(f'Phishing link detection is now **on**')
elif settings.get(str(ctx.guild.id)).get("verify_channel"):
settings[str(ctx.guild.id)]["detect_phishing"] = False
await ctx.send(f'Phishing link detection is now **off**')
# Save the edits
with open('database.json', 'w') as f:
channels["server_settings"] = settings
json.dump(channels, f, indent=4)
@mod_settings.command(
description="Sets the upper bound for the detection algorithm in comparison to the similarity to any link (Default = 98).")
async def set_upper_bound(self, ctx, number: int):
with open('database.json', 'r') as f:
channels = json.load(f)
channels.setdefault("server_settings", {})
settings = channels["server_settings"]
settings.setdefault(str(ctx.guild.id), {}) # In case if the ctx.guild.id entry does not exist
lower = settings[str(ctx.guild.id)].get("lower") or self.defaultLower # Get the lower bound
        if number <= lower:
            await ctx.send(f"Please select a number higher than {lower}")
return
settings[str(ctx.guild.id)]["upper"] = number
await ctx.send(f"Upper bound is {number}")
with open('database.json', 'w') as f:
channels["server_settings"] = settings
json.dump(channels, f, indent=4)
@mod_settings.command(
description="Sets the lower bound for the detection algorithm in comparison to the similarity to any link (Default = 88).")
async def set_lower_bound(self, ctx, number: int):
with open('database.json', 'r') as f:
channels = json.load(f)
channels.setdefault("server_settings", {})
settings = channels["server_settings"]
settings.setdefault(str(ctx.guild.id), {}) # In case if the ctx.guild.id entry does not exist
upper = settings[str(ctx.guild.id)].get("upper") or self.defaultUpper # Get the upper bound
if upper <= number:
ctx.send(f"Please select a number lower than {upper}")
return
settings[str(ctx.guild.id)]["lower"] = number
await ctx.send(f"Lower bound is {number}")
with open('database.json', 'w') as f:
channels["server_settings"] = settings
json.dump(channels, f, indent=4)
def setup(bot):
bot.add_cog(moderation(bot))
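# Hedged illustration (only runs if this module is executed directly, never when the cog is
# loaded): how the fuzzy match in phishing_detection behaves on a lookalike domain. The
# whitelist below is made up; the exact score depends on thefuzz's default scorer.
if __name__ == '__main__':
    demo_whitelist = ['discord.com', 'discordapp.com', 'steamcommunity.com']
    print(process.extractOne('dlscord.com', demo_whitelist))
    # -> roughly ('discord.com', 91): inside the default 88-98 window, so a message
    #    containing that link would be deleted as suspected phishing.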
``` |
{
"source": "404Room/go-in-python",
"score": 3
} |
#### File: go-in-python/tests/test_defer.py
```python
from gopy import (
golang,
defer
)
import unittest
import os
class TestFunctionDefer(unittest.TestCase):
def test_close_file(self):
@golang
def read_file(src):
f = open(src, 'r')
defer(f.close)
assert f.read()
return f
src = os.path.abspath(__file__)
f = read_file(src)
assert f.closed is True
def test_reverse_order(self):
@golang
def append_to_list(order):
defer(order.append, 1)
defer(order.append, 2)
order.append(3)
order = []
append_to_list(order)
assert [3, 2, 1] == order
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "404-wave/404-project",
"score": 2
} |
#### File: 404-project/posts/forms.py
```python
from django import forms
from mimetypes import guess_type
import base64
import os
from .models import Post
from users.models import User
from friends.models import FollowManager
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"content",
"image",
"privacy",
"accessible_users",
"content_type",
"unlisted",
"user",
"publish"
]
widgets = {'accessible_users': forms.CheckboxSelectMultiple}
def __init__(self, *args, **kwargs):
user_details = kwargs.pop('user_details', None)
super(PostForm, self).__init__(*args, **kwargs)
self.fields['user'].widget = forms.HiddenInput()
self.fields['publish'].widget = forms.HiddenInput()
self.fields['content_type'].choices = (('text/plain', 'text/plain'), ('text/markdown','text/markdown'))
self.fields['accessible_users'] = forms.MultipleChoiceField(
label="Accessible Users",
required=False,
widget=forms.CheckboxSelectMultiple,
choices=self.choices(user_details)
)
self.set_placeholder('content', 'What\'s on your mind?')
self.set_form_class()
def choices(self, user_req):
users = User.objects.all().exclude(id=user_req.id).exclude(is_node=True)
options = []
followManager = FollowManager()
friends = followManager.get_friends(user_req)
for user in users:
if (user not in friends):
options.append((user.host+'/author/'+str(user.id),user.username))
friends = ((item, item) for item in friends)
options = options + list(friends)
return options
#add placeholder text to fields
def set_placeholder(self, field, text):
self.fields[field].widget.attrs['placeholder'] = text
#add class for css
def set_form_class(self):
self.fields['content'].widget.attrs['class'] = "create_post"
self.fields['unlisted'].widget.attrs['class'] = "create_post"
"""
Creates the objects for the accessible users and then saves them to the form
"""
def save(self, commit=True):
#accessible_users = self.cleaned_data
post = super().save(commit)
username = post.user.username
timestamp = post.timestamp.strftime("%b %-d, %Y, at %H:%M %p")
post.title = username+" - "+timestamp
if (post.privacy == 1):
if (isinstance(post.accessible_users, list)):
post.accessible_users = post.user.host+'/author/'+str(post.user.id)
#post.accessible_users.add(*accessible_users)
post.accessible_users = post.accessible_users[:-1] +', \''+post.user.host+'/author/'+str(post.user.id)+'\']'
post.save()
return post
class ImageForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"image",
"privacy",
"accessible_users",
"user",
"publish"
]
def __init__(self, *args, **kwargs):
user_details = kwargs.pop('user_details', None)
super().__init__(*args, **kwargs)
self.fields['user'].widget = forms.HiddenInput()
self.fields['publish'].widget = forms.HiddenInput()
self.fields['accessible_users'] = forms.MultipleChoiceField(
label="question",
required=False,
widget=forms.CheckboxSelectMultiple,
choices=self.choices(user_details)
)
"""
Creates the objects for the accessible users and then saves them to the form
"""
def save(self, commit=True):
#accessible_users = self.cleaned_data.pop('accessible_users', [])
print("KK")
post = super().save(commit)
username = post.user.username
timestamp = post.timestamp.isoformat()
post.title = username+" - "+timestamp
if (post.privacy == 1):
if (isinstance(post.accessible_users, list)):
post.accessible_users = post.user.host+'/author/'+str(post.user.id)
post.accessible_users = post.accessible_users[:-1] +', \''+post.user.host+'/author/'+str(post.user.id)+'\']'
post.save()
return post
def choices(self, user_req):
users = User.objects.all().exclude(id=user_req.id).exclude(is_node=True)
options = []
followManager = FollowManager()
friends = followManager.get_friends(user_req)
for user in users:
if (user not in friends):
options.append((user.host+'/author/'+str(user.id),user.username))
friends = ((item, item) for item in friends)
options = options + list(friends)
return options
``` |
{
"source": "406345/centernet-keras",
"score": 2
} |
#### File: 406345/centernet-keras/FPS_test.py
```python
import colorsys
import copy
import math
import os
import pickle
import time
import numpy as np
from keras.applications.imagenet_utils import preprocess_input
from keras.layers import Input
from PIL import Image
from tqdm import tqdm
from centernet import CenterNet
from nets.centernet import centernet
from utils.utils import centernet_correct_boxes, letterbox_image, nms
'''
This FPS test does not include preprocessing (normalization and resizing) or drawing.
It covers network inference, score-threshold filtering, and non-maximum suppression.
The image 'img/street.jpg' is used for the test; the methodology follows https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch
The FPS measured in video.py will be lower than this value, because the camera read rate is limited and that pipeline also includes preprocessing and drawing.
'''
def preprocess_image(image):
mean = [0.40789655, 0.44719303, 0.47026116]
std = [0.2886383, 0.27408165, 0.27809834]
return ((np.float32(image) / 255.) - mean) / std
class FPS_CenterNet(CenterNet):
def get_FPS(self, image, test_interval):
image_shape = np.array(np.shape(image)[0:2])
#---------------------------------------------------------#
#   Pad the image with gray bars so it can be resized without distortion
#---------------------------------------------------------#
crop_img = letterbox_image(image, [self.input_shape[0],self.input_shape[1]])
#----------------------------------------------------------------------------------#
#   Convert RGB to BGR, because the original centernet_hourglass weights were trained on BGR images
#----------------------------------------------------------------------------------#
photo = np.array(crop_img,dtype = np.float32)[:,:,::-1]
#-----------------------------------------------------------#
#   Preprocess and normalize the image; the resulting photo has shape [1, 512, 512, 3]
#-----------------------------------------------------------#
photo = np.reshape(preprocess_image(photo), [1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
preds = self.centernet.predict(photo)
if self.nms:
preds = np.array(nms(preds, self.nms_threhold))
if len(preds[0])>0:
preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)
det_label = preds[0][:, -1]
det_conf = preds[0][:, -2]
det_xmin, det_ymin, det_xmax, det_ymax = preds[0][:, 0], preds[0][:, 1], preds[0][:, 2], preds[0][:, 3]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= self.confidence]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(det_xmin[top_indices],-1),np.expand_dims(det_ymin[top_indices],-1),np.expand_dims(det_xmax[top_indices],-1),np.expand_dims(det_ymax[top_indices],-1)
boxes = centernet_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.input_shape[0],self.input_shape[1]]),image_shape)
t1 = time.time()
for _ in range(test_interval):
preds = self.centernet.predict(photo)
if self.nms:
preds = np.array(nms(preds, self.nms_threhold))
if len(preds[0])>0:
preds[0][:, 0:4] = preds[0][:, 0:4] / (self.input_shape[0] / 4)
det_label = preds[0][:, -1]
det_conf = preds[0][:, -2]
det_xmin, det_ymin, det_xmax, det_ymax = preds[0][:, 0], preds[0][:, 1], preds[0][:, 2], preds[0][:, 3]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= self.confidence]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(det_xmin[top_indices],-1),np.expand_dims(det_ymin[top_indices],-1),np.expand_dims(det_xmax[top_indices],-1),np.expand_dims(det_ymax[top_indices],-1)
boxes = centernet_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.input_shape[0],self.input_shape[1]]),image_shape)
t2 = time.time()
tact_time = (t2 - t1) / test_interval
return tact_time
centernet = FPS_CenterNet()
test_interval = 100
img = Image.open('img/street.jpg')
tact_time = centernet.get_FPS(img, test_interval)
print(str(tact_time) + ' seconds, ' + str(1/tact_time) + 'FPS, @batch_size 1')
``` |
{
"source": "406345/SharedPortProxy",
"score": 2
} |
#### File: SharedPortProxy/scripts/debug.py
```python
def filter(x):
print("filter called")
print(x)
def detect(x):
print(x)
return False
```
#### File: SharedPortProxy/scripts/mstsc.py
```python
def filter(x):
print("filter called")
print(x)
def detect(x):
if len(x) < 4: return False
if x[0] == 3 and x[1] == 0 and x[2] == 0 and x[3] == 19 : return True
return False
```
#### File: SharedPortProxy/scripts/openvpn.py
```python
def order():
return 2
def filter(x):
print(x)
def detect(x):
if len(x) < 4: return False
if x[0] == 22 and x[1] == 3 and x[2] == 1 and x[3] == 0 : return True
return False
``` |
{
"source": "40647045S/nnUNet",
"score": 3
} |
#### File: training/network_training/my_trainer.py
```python
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class my_trainer(nnUNetTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.max_num_epochs = 300
``` |
{
"source": "4069430/burger_war_dev",
"score": 2
} |
#### File: burger_war_dev/scripts/randomRun.py
```python
import rospy
import random
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from mask1 import detect_red_color
from mask1 import detect_green_color
from mask1 import detect_blue_color
from mask1 import detect_yellow_color
from mask1 import detect_white_color
#from mask import mask_green
#from mask import mask_red
from sensor_msgs.msg import LaserScan
class RandomBot():
def __init__(self, bot_name="NoName", use_camera=False,use_lidar=False):
# bot name
self.name = bot_name
# velocity publisher
self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)
if use_camera:
# for convert image topic to opencv obj
self.img_red = 0
self.img_green = 0
self.img_blue = 0
self.img_yellow = 0
self.img_white = 0
self.img = None
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)
self.back_count = 0
self.blue_count = 0
if use_lidar:
self.scan = LaserScan()
self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)
def calcTwist(self):
if (230 < self.img_white) and (self.img_blue < 10) and (self.img_red < 55): #CASE1:near only the white wall
x = -1
r_int = random.randint(-1,1)
th = r_int
self.back_count += 1
print('--------')
print('| 2 |')
print('--------')
print(self.back_count)
elif (self.img_blue > 10): #CASE2:Hit the blue wall
x = 0.1
th = 0
self.blue_count += 1
if (self.blue_count > 3):
x = -1.5
if (self.blue_count > 5):
self.back_count = 0
print('--------')
print('| 3 |')
print(self.blue_count)
print('--------')
elif (self.img_white > 150) and (self.img_red > 55):
x = -2
th = 0
print('--------')
print('| 4 |')
print('--------')
else: #CASE1:Don't see the enemy, random
r_int = random.randint(-1,1)
th = r_int
x = 0.5
print('--------')
print('| 1 |')
print('--------')
twist = Twist()
twist.linear.x = x; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th
return twist
def strategy(self):
r = rospy.Rate(1) # change speed 1fps
target_speed = 0
target_turn = 0
control_speed = 0
control_turn = 0
while not rospy.is_shutdown():
twist = self.calcTwist()
print(twist)
self.vel_pub.publish(twist)
r.sleep()
def imageCallback(self, data):
try:
self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logerr(e)
#masked_img_r = mask_red(self.img)
#masked_img_g = mask_green(self.img)
self.img_red = detect_red_color(self.img)
self.img_green = detect_green_color(self.img)
self.img_blue = detect_blue_color(self.img)
self.img_yellow = detect_yellow_color(self.img)
self.img_white = detect_white_color(self.img)
print('Red')
print(self.img_red)
print('green')
print(self.img_green)
print('blue')
print(self.img_blue)
print('yellow')
print(self.img_yellow)
print('white')
print(self.img_white)
#grayimg_r = cv2.cvtColor(masked_img_r, cv2.COLOR_BGR2GRAY) # convert to grayscale
#grayimg_g = cv2.cvtColor(masked_img_g, cv2.COLOR_BGR2GRAY) # convert to grayscale
#self.gray_pix_num = len(grayimg_r[grayimg_r<255])+len(grayimg_g[grayimg_g<255]) # count the black pixels
#neg_grayimg_r = cv2.bitwise_not(grayimg_r)
#self.mu_r = cv2.moments(neg_grayimg_r, False)
#neg_grayimg_g = cv2.bitwise_not(grayimg_g)
#self.mu_g = cv2.moments(neg_grayimg_g, False)
cv2.waitKey(1)
def lidarCallback(self, data):
self.scan = data
#rospy.loginfo(self.scan)
#print(self.scan)
if __name__ == '__main__':
rospy.init_node('random_run')
bot = RandomBot('Random', use_camera=True,use_lidar=True)
bot.strategy()
``` |
{
"source": "408794550/Mechine-Learning-In-Action",
"score": 3
} |
#### File: Mechine-Learning-In-Action/Ch02/KNNDating.py
```python
from Utility import *
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import KNearestNeighbor
def file2_matrix(file_name):
# f = open(file_name)
f = open(file_name)
array_lines = f.readlines()
file_lines = len(array_lines)
logging.info(file_lines)
# zeros(): create a matrix of the given shape, initialized to 0
return_mat = zeros((file_lines,3))
class_label_vec = []
index = 0
for line in array_lines:
# strip(char) removes the given characters from both ends of the string and returns the result
line = line.strip() # strip the trailing newline
list_from_line = line.split('\t') # split into a list
return_mat[index,:] = list_from_line[0:3]
# index -1 gets the last element; without an explicit int() the interpreter would treat it as a string
class_label_vec.append(int(list_from_line[-1]))
index += 1
return return_mat,class_label_vec
mat, dating_labels = file2_matrix('datingTestSet2.txt')
logging.info(mat[:15])
# Create the figure
fig = plt.figure()
# add_subplot(111): the argument 111 splits the canvas into 1 row and 1 column and draws in the 1st cell;
# 349 would mean 3 rows by 4 columns (12 cells), drawing in the 9th cell
ax = fig.add_subplot(111)
# scatter(x, y, s, c, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, hold, **kwargs):
# x, y are the input data; s is the marker size; c is the marker color; marker is the marker shape
# see http://blog.csdn.net/u013634684/article/details/49646311 for the remaining parameters
ax.scatter(mat[:, 1], mat[:, 2], 15.0*array(dating_labels), 15.0*array(dating_labels))
plt.show()
def auto_norm(data_set):
# Get the minimum of each column (an argument of 1 would give the row minimums instead)
min_vals = data_set.min(0)
# Maximums
max_vals = data_set.max(0)
ranges = max_vals - min_vals
# Create an all-zero matrix with the same shape as data_set
norm_data_set = zeros(shape(data_set))
# shape[0] gives the number of rows, shape[1] the number of columns
m = data_set.shape[0]
# min_vals is a single row here; tile() repeats it m times so it matches the shape of data_set
# Normalization formula, step 1: subtract the minimum from the original values
norm_data_set = data_set - tile(min_vals,(m,1))
# Step 2: divide by the value range (max minus min); like min_vals, ranges is a single row
# Although the code below divides two matrices, this is element-wise division, not matrix division (numpy has other methods for that)
norm_data_set = norm_data_set / tile(ranges, (m,1))
return norm_data_set, ranges, min_vals
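# Small worked example of the normalization above (toy values, not from datingTestSet2.txt):
# for a column with minimum 10 and maximum 110, a raw value of 30 maps to
# (30 - 10) / (110 - 10) = 0.2, so every feature lands in [0, 1] regardless of its original scale.
#
#   toy = array([[10.0], [30.0], [110.0]])
#   normed, rng, mins = auto_norm(toy)   # normed would be [[0.0], [0.2], [1.0]]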
def dating_class_test():
ho_rotio = 0.10
dating_data_mat, dating_labels = file2_matrix('datingTestSet2.txt')
norm_mat, ranges, min_vals = auto_norm(dating_data_mat)
m = norm_mat.shape[0]
# Number of rows used for testing
num_test_vecs = int(m * ho_rotio)
error_count = 0
# Test each of these rows (100 rows here)
for i in range(num_test_vecs):
# Classify each row norm_mat[i, :]; since we already know which class these rows belong to, we can measure the classifier's error rate
classifier_result = KNearestNeighbor.classify0(norm_mat[i, :],
norm_mat[num_test_vecs:m, :],
dating_labels[num_test_vecs:m],
3)
logging.info('came back with:%d,the real answer is: %d', classifier_result, dating_labels[i])
if classifier_result != dating_labels[i]:
error_count += 1.0
logging.info('total error rate is:%f', (error_count/float(num_test_vecs)))
dating_class_test()
def classify_person():
result_list = ['not at all', 'in small doses', 'in large doses']
# Since Python 3.0, input() is equivalent to the old raw_input(); raw_input() no longer exists
percent_tats = float(input('percentage of time spent playing video games'))
ff_miles = float(input('frequent flier miles earned per year?'))
ice_cream = float(input('liters of ice cream consumed per year?'))
dating_data_mat, dating_labels = file2_matrix('datingTestSet2.txt')
norm_mat, ranges, min_vals = auto_norm(dating_data_mat)
in_arr = array([ff_miles, percent_tats, ice_cream])
classifier_result = KNearestNeighbor.classify0((in_arr - min_vals)/ranges,
norm_mat,
dating_labels,
3)
# Difference between the two calls below: logging fails if the %s placeholder is missing, while print seems to handle the trailing argument without %s automatically
logging.info('you will probably like this person:%s',result_list[classifier_result - 1])
# print('you will probably like this person:',result_list[classifier_result - 1])
# classify_person()
```
#### File: Mechine-Learning-In-Action/Ch06/SVM.py
```python
from Utility import logging
from numpy import *
import random
def load_dataset(file_name):
data_mat = []
label_mat = []
with open(file_name) as fr:
for line in fr.readlines():
line_arr = line.strip().split('\t')
data_mat.append([float(line_arr[0]), float(line_arr[1])])
label_mat.append(float(line_arr[2]))
return data_mat, label_mat
def selectJ_rand(i, m):
j = i
while(j == i):
j = int(random.uniform(0, m))
return j
def clip_alpha(aj, H, L):
if aj > H:
aj = H
if L > aj:
aj = L
return aj
data_arr, label_arr = load_dataset('testSet.txt')
logging.info('{}'.format(label_arr))
def smo_simple(data_mat_in, # data set
class_labels, # class labels
C, # constant C
toler, # error tolerance
max_iter): # maximum number of passes before giving up
data_matrix = mat(data_mat_in) # the list becomes a matrix
label_mat = mat(class_labels).transpose() # transpose into a column vector so each row of the data matrix lines up with its class label
b = 0
m, n = shape(data_matrix) # 100 rows, 2 columns
alphas = mat(zeros((m, 1))) # build the alphas column matrix (100 rows, 1 column, all zeros)
iter_count = 0
while iter_count < max_iter:
alpha_pairs_changed = 0 # records whether any alpha pair was optimized; checked at the end of each pass
for i in range(m):
# multiply() performs element-wise multiplication (same as using the * operator here)
# .T has the same effect as .transpose(); for a single-row matrix it returns the matrix itself
f_Xi = float(multiply(alphas, label_mat).T * (data_matrix * data_matrix[i, :].T)) + b
Ei = f_Xi - float(label_mat[i]) # compute the error Ei
if ((label_mat[i] * Ei < -toler) and (alphas[i] < C)) or ((label_mat[i] * Ei) > toler) and (alphas[i] > 0):
j = selectJ_rand(i, m) # randomly pick the second alpha
f_Xj = float(multiply(alphas, label_mat).T * (data_matrix * data_matrix[j, :].T)) + b
Ej = f_Xj - float(label_mat[j]) # likewise compute the error of the second alpha
alphaI_old = alphas[i].copy()
alphaJ_old = alphas[j].copy()
# L and H are used to clamp alphas[j] into the interval [0, C]
if label_mat[i] != label_mat[j]:
L = max(0, alphas[j] - alphas[i])
H = min(C, C + alphas[j] - alphas[i])
else:
L = max(0, alphas[j] + alphas[i] - C)
H = min(C, alphas[j] + alphas[i])
if L == H:
logging.info('L == H')
continue
# eta is the optimal amount of change
eta = 2.0 * data_matrix[i, :] * data_matrix[j, :].T - data_matrix[i, :] * data_matrix[i, :].T - data_matrix[j, :] *data_matrix[j, :].T
if eta >= 0:
logging.info('eta >= 0')
continue
alphas[j] -= label_mat[j] * (Ei - Ej) / eta
alphas[j] = clip_alpha(alphas[j], H, L)
if abs(alphas[j] - alphaJ_old) < 0.00001 :
logging.info('j not moving enough')
continue
alphas[i] += label_mat[j] * label_mat[i] * (alphaJ_old - alphas[j])
b1 = b - Ei - label_mat[i] * (alphas[i] - alphaI_old) * data_matrix[i, :] * data_matrix[i, :].T - label_mat[j] * (alphas[j] - alphaJ_old) * data_matrix[i, :] * data_matrix[j, :].T
b2 = b - Ej - label_mat[i] * (alphas[i] - alphaI_old) * data_matrix[i, :] * data_matrix[j, :].T - label_mat[j] * (alphas[j] - alphaJ_old) * data_matrix[j, :] * data_matrix[j, :].T
if 0 < alphas[i] < C:
b = b1
elif 0 < alphas[j] < C:
b = b2
else:
b = (b1 + b2) / 2.0
alpha_pairs_changed += 1
logging.info('iter:{} i:{},pairs changed {}'.format(iter_count, i, alpha_pairs_changed))
if alpha_pairs_changed == 0:
iter_count += 1
else:
iter_count = 0
logging.info('iteration number: {}'.format(iter_count))
return b, alphas
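# A worked illustration of the L/H clamp used above (made-up numbers, not from testSet.txt):
# with C = 0.6, alphas[i] = 0.1, alphas[j] = 0.4 and labels of opposite sign,
#   L = max(0, alphas[j] - alphas[i]) = 0.3
#   H = min(C, C + alphas[j] - alphas[i]) = 0.6
# so after the unconstrained update, clip_alpha keeps alphas[j] inside [0.3, 0.6],
# e.g. clip_alpha(0.75, 0.6, 0.3) == 0.6.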
b, alphas = smo_simple(data_arr, label_arr, 0.6, 0.001, 40)
logging.info('{}'.format(b))
logging.info('{}'.format(alphas[alphas > 0]))
''' The essence of the SMO algorithm was not fully understood here, so the complete SMO implementation is skipped for now.
class opt_struct:
def __init__(self, datamat_in, class_labels, C, toler):
self.X = datamat_in
self.label_mat = class_labels
self.C = C
self.tol = toler
self.m = shape(datamat_in)[0]
self.b = 0
self.alphas = mat(zeros(self.m, 1))
self.e_cache = mat(zeros(self.m, 2))
def calculate_Ek(opt_stru, k):
f_Xk = float(multiply(opt_stru.alphas, opt_stru.label_mat).T * (opt_stru.X * opt_stru.X[k, :].T)) + opt_stru.b
Ek = f_Xk - float(opt_stru.label_mat[k])
return Ek
def select_j(i, opt_stru, Ei):
max_k = -1
max_delta_E = 0
Ej = 0
opt_stru.e_cache[i] = [1, Ei]
validE_ecache_list = nonzero(opt_stru.e_cache[:, 0].A)[0]
if len(validE_ecache_list) > 1:
for k in validE_ecache_list:
if k == i:
continue
Ek = calculate_Ek(opt_stru, k)
delta_E = abs(Ei - Ek)
if delta_E > max_delta_E:
max_k = k
max_delta_E = delta_E
Ej = Ek
return max_k, Ej
else:
j = selectJ_rand(i, opt_stru.m)
Ej = calculate_Ek(opt_stru, j)
return j, Ej
def update_Ek(opt_stru, k):
Ek = calculate_Ek(opt_stru, k)
opt_stru.e_cache[k] = [1, Ek]
'''
``` |
{
"source": "409729-diploma-thesis/similarity",
"score": 2
} |
#### File: 409729-diploma-thesis/similarity/language_model.py
```python
from __future__ import division
from heapq import heappush, heappop
import logging
from math import sqrt, floor, ceil
from multiprocessing import cpu_count, Pool
import pickle
import re
from datetime import timedelta
from time import monotonic as time
from gensim import corpora, models
from gensim.matutils import cossim
from numpy import mean, seterr, save, load, diag
from scipy.sparse import lil_matrix, identity, save_npz, load_npz, diags, coo_matrix
# from sparsesvd import sparsesvd
from filenames import UNANNOTATED_DATASET_FNAME, \
UNANNOTATED_DATASET_DICTIONARY_FNAME, \
UNANNOTATED_DATASET_DOCUMENT_TERM_MATRIX_FNAME as C_FNAME, \
UNANNOTATED_DATASET_WEIGHTED_DOCUMENT_TERM_MATRIX_FNAME as W_C_FNAME, \
UNANNOTATED_DATASET_SOFT_WEIGHTED_DOCUMENT_TERM_MATRIX_FNAME as M_W_C_FNAME, \
UNANNOTATED_DATASET_TFIDF_FNAME, \
UNANNOTATED_DATASET_TFIDF_MATRIX_FNAME as W_FNAME, \
UNANNOTATED_DATASET_W2V_FNAME, \
UNANNOTATED_DATASET_SOFT_MREL_MATRIX_FNAME as MREL_FNAME, \
UNANNOTATED_DATASET_SOFT_MLEV_MATRIX_FNAME as MLEV_FNAME, \
UNANNOTATED_DATASET_SOFT_SVD_MATRIX_UT as SOFT_UT_FNAME, \
UNANNOTATED_DATASET_SOFT_SVD_MATRIX_S as SOFT_S_FNAME, \
UNANNOTATED_DATASET_SOFT_SVD_MATRIX_VT as SOFT_VT_FNAME, \
UNANNOTATED_DATASET_SVD_MATRIX_UT as UT_FNAME, \
UNANNOTATED_DATASET_SVD_MATRIX_S as S_FNAME, \
UNANNOTATED_DATASET_SVD_MATRIX_VT as VT_FNAME, \
EXTERNAL_TERM_SIMILARITY_MODEL_FILENAMES
from preprocessing import documents, SegmentIterator
from workers import levsim
W2V_RANDOM_STATE = 12345
FLOAT_DTYPE = "float32"
LSI_NUM_FEATURES = 300
W2V_NUM_WORKERS = cpu_count()
W2V_NUM_FEATURES = 300
MREL_POWER_FACTOR = 2.0
MLEV_NUM_WORKERS = cpu_count()
MLEV_POOL = Pool(MLEV_NUM_WORKERS)
MLEV_MAX_LENGTH_RATIO = 1.5
LOGGER = logging.getLogger(__name__)
def density(M):
"""Returns the density of a sparse matrix M."""
return M.getnnz() / (M.shape[0] * M.shape[1])
class LanguageModel(object):
"""A tf-idf language model using the unannotated SemEval 2016/2017 Task 3 dataset."""
def __init__(self, similarity, technique="hard_topics", soft_matrices=[("mrel", 1.0)],
w2v_min_count=5, m_knn=100, m_threshold=0.0, term_similarity="w2v.ql"):
"""
Sets up a tf-idf language model using the unannotated SemEval 2016/2017 Task 3 dataset.
Attributes:
similarity The similarity model that will be used to compute the similarity
between two documents.
technique The model that will be employed when computing the similarity of two
documents. The following values are admissible:
(i) "hard_terms" -- cosine similarity in term space,
(ii) "soft_terms" -- soft cosine similarity in term space,
(iii) "hard_topics" -- cosine similarity in topic space,
(iv) "soft_topics" -- cosine similarity in soft topic space.
soft_matrices An iterable of (weight, metric) 2-tuples that specifies a weighted
average of similarity matrices that we will be using to model soft terms
and topics. The following similarity matrices are available:
(i) "mrel" -- mij = max(cossim(vi, vj), m_threshold)**MREL_POWER_FACTOR,
where vi, vj are word2vec vectors corresponding to terms wi, wj.
(ii) "mlev" -- mij = MLEV_ALPHA*((1-edit_distance(wi, wj))
/max(|wi|, |wj|))**MLEV_BETA, where |wi|, |wj| are the character
lengths of terms wi and wj.
w2v_min_count The minimum number of occurences of a term to be included in the word2vec
model dictionary.
m_knn The number of nearest neighbors of a term that are considered when
building the term similarity matrix M. Note that this imposes an upper
limit on the number of nonzero elements in any column / row of M.
m_threshold The minimum similarity that is recorded inside the sparse term similarity
matrix M.
term_similarity The term similarity model and the associated dictionary that should be
used when computing the local part of the similarity between two
documents (X^TM) with late weighting. The following values are
admissible:
(i) "w2v.ql" -- the Qatar Living word2vec model.
(ii) "w2v.googlenews" -- the Google News word2vec model.
(iii) "glove.enwiki_gigaword5" -- the English Wikipedia 2014 +
Gigaword 5 glove model.
(iv) "glove.common_crawl" -- the Common Crawl dictionary glove model.
(v) "glove.twitter" -- the Twitter glove model.
(vi) "fasttext.enwiki" -- the English Wikipedia fasttext model.
"""
assert technique in ("hard_terms", "soft_terms", "hard_topics", "soft_topics")
self.technique = technique
assert isinstance(similarity, Similarity)
self.similarity = similarity
assert isinstance(w2v_min_count, int)
assert isinstance(m_knn, int)
assert isinstance(m_threshold, float)
if technique == "soft_terms" or technique == "soft_topics":
assert soft_matrices
soft_matrices_config_string = ','.join(["%s_%.10f" % (matrix, weight) \
for matrix, weight in soft_matrices])
use_mrel = False
mrel_weight = 0.0
use_mlev = False
mlev_weight = 0.0
for matrix, weight in soft_matrices:
assert matrix in ("mrel", "mlev")
if matrix == "mrel":
use_mrel = True
mrel_weight = weight
else:
use_mlev = True
mlev_weight = weight
assert use_mrel or use_mlev
assert term_similarity in ("w2v.ql", "w2v.googlenews", "glove.enwiki_gigaword5",
"glove.common_crawl", "glove.twitter", "fasttext.enwiki")
try:
self.dictionary = corpora.Dictionary.load(UNANNOTATED_DATASET_DICTIONARY_FNAME,
mmap='r')
except IOError:
LOGGER.info("Building the dictionary.")
file_handler = logging.FileHandler("%s.log" % UNANNOTATED_DATASET_DICTIONARY_FNAME,
encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
self.dictionary = corpora.Dictionary(document.tokens for document in documents())
avg_tokens, avg_terms = mean(list(zip(*((len(document.tokens), len(document.terms)) \
for document in documents()))), axis=1)
LOGGER.info("Average number of tokens per a document: %f" % avg_tokens)
LOGGER.info("Average number of terms per a document: %f" % avg_terms)
self.dictionary.save(UNANNOTATED_DATASET_DICTIONARY_FNAME)
self.dictionary = corpora.Dictionary.load(UNANNOTATED_DATASET_DICTIONARY_FNAME,
mmap='r')
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
logging.getLogger().removeHandler(file_handler)
m = len(self.dictionary) # number of terms
self.m = m
n = self.dictionary.num_docs # number of documents
self.n = n
try:
self.tfidf = models.TfidfModel.load(UNANNOTATED_DATASET_TFIDF_FNAME, mmap='r')
except IOError:
LOGGER.info("Building the tf-idf model.")
file_handler = logging.FileHandler("%s.log" % UNANNOTATED_DATASET_TFIDF_FNAME,
encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
corpus_bow = [self.dictionary.doc2bow(document.tokens) for document in documents()]
self.tfidf = models.TfidfModel(corpus_bow)
self.tfidf.save(UNANNOTATED_DATASET_TFIDF_FNAME)
self.tfidf = models.TfidfModel.load(UNANNOTATED_DATASET_TFIDF_FNAME, mmap='r')
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
logging.getLogger().removeHandler(file_handler)
try:
self.W = load_npz("%s.npz" % W_FNAME)
except:
LOGGER.info("Building the diagonal IDF matrix W.")
file_handler = logging.FileHandler("%s.log" % W_FNAME, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
W = lil_matrix((m, m), dtype=FLOAT_DTYPE)
for i in range(m):
W[i,i] = self.tfidf.idfs[i]
self.W = W.tocoo()
save_npz("%s.npz" % W_FNAME, self.W)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
logging.getLogger().removeHandler(file_handler)
self.W = self.W.todia()
del self.tfidf
if technique == "soft_terms" or technique == "soft_topics":
self.M = lil_matrix((m, m), dtype=FLOAT_DTYPE)
if use_mrel:
if term_similarity == "w2v.ql":
w2v_full_fname = "%s-%d" % (UNANNOTATED_DATASET_W2V_FNAME, w2v_min_count)
try:
self.term_similarity = models.Word2Vec.load(w2v_full_fname, mmap='r').wv
except IOError:
LOGGER.info("Building the word2vec model.")
file_handler = logging.FileHandler("%s.log" % w2v_full_fname, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
self.term_similarity = models.Word2Vec(sentences=SegmentIterator(),
size=W2V_NUM_FEATURES,
seed=W2V_RANDOM_STATE,
min_count=w2v_min_count, sg=0,
workers=W2V_NUM_WORKERS)
self.term_similarity.save(w2v_full_fname)
self.term_similarity = models.Word2Vec.load(w2v_full_fname, mmap='r').wv
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
LOGGER.info("Number of terms in the model: %d" % len(self.term_similarity.vocab))
logging.getLogger().removeHandler(file_handler)
elif term_similarity in ("glove.enwiki_gigaword5", "glove.common_crawl",
"glove.twitter", "fasttext.enwiki"):
self.term_similarity = models.KeyedVectors.load_word2vec_format( \
EXTERNAL_TERM_SIMILARITY_MODEL_FILENAMES[term_similarity], binary=False)
elif term_similarity == "w2v.googlenews":
self.term_similarity = models.KeyedVectors.load_word2vec_format( \
EXTERNAL_TERM_SIMILARITY_MODEL_FILENAMES[term_similarity], binary=True)
m_rel = len(self.term_similarity.vocab) # number of terms in the term similarity model
Mrel_full_fname = "%s-%s-%d-%d-%f-%f" % (MREL_FNAME, term_similarity, w2v_min_count,
m_knn, m_threshold, MREL_POWER_FACTOR)
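# Illustration with made-up numbers (not taken from the Qatar Living data): with m_threshold = 0.0
# and MREL_POWER_FACTOR = 2.0, a word2vec cosine similarity of 0.6 between two dictionary terms
# puts 0.6 ** 2 = 0.36 into the corresponding off-diagonal cell of Mrel below, while pairs whose
# similarity does not exceed the threshold contribute nothing, so Mrel stays sparse apart from
# its unit diagonal.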
try:
self.Mrel = load_npz("%s.npz" % Mrel_full_fname)
except FileNotFoundError:
LOGGER.info("Building the term similarity matrix Mrel.")
file_handler = logging.FileHandler("%s.log" % Mrel_full_fname, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
Mrel = identity(m, dtype=FLOAT_DTYPE, format="lil")
for k, term_i in enumerate(self.term_similarity.vocab.keys()):
if k % 10000 == 0:
LOGGER.info("Processing term number %d." % (k+1))
i = self.dictionary.doc2bow([term_i])
if not i:
continue
for _, (term_j, similarity) in \
zip(range(m_knn),
self.term_similarity.most_similar(positive=[term_i], topn=m_knn)):
j = self.dictionary.doc2bow([term_j])
if not j:
continue
if similarity > m_threshold:
Mrel[i[0][0],j[0][0]] = similarity**2
self.Mrel = Mrel.tocoo()
save_npz("%s.npz" % Mrel_full_fname, self.Mrel)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
LOGGER.info("Matrix density:\n- %.10f by word2vec,\n- %.10f by kNN," \
% (m_rel**2/m**2, ((m_knn+1)*m_rel + 1*(m-m_rel))/m**2) \
+ "\n- %.10f by thresholding" % density(self.Mrel))
logging.getLogger().removeHandler(file_handler)
del self.term_similarity
self.M = self.M + mrel_weight * self.Mrel
del self.Mrel
if use_mlev:
Mlev_full_fname = "%s-%d-%f" % (MLEV_FNAME, m_knn, m_threshold)
try:
self.Mlev = load_npz("%s.npz" % Mlev_full_fname)
except FileNotFoundError:
LOGGER.info("Building the term similarity matrix Mlev.")
file_handler = logging.FileHandler("%s.log" % Mlev_full_fname, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
Mlev = identity(m, dtype=FLOAT_DTYPE, format="lil")
min_terms = m
avg_terms = []
max_terms = 0
for k, (i, term_i) in enumerate(self.dictionary.items()):
if k % 10 == 0:
LOGGER.info("Processing term number %d." % (k+1))
terms = [(term_i, term_j, j) for j, term_j \
in self.dictionary.items() \
if i != j and max(len(term_i), len(term_j)) \
/ min(len(term_i), len(term_j)) < MLEV_MAX_LENGTH_RATIO]
Mlev_chunksize = max(1, ceil(len(terms)/MLEV_NUM_WORKERS))
similarities = []
for term_num, (similarity, term_j, j) in \
enumerate(MLEV_POOL.imap_unordered(levsim, terms, Mlev_chunksize)):
heappush(similarities, (-similarity, term_j, j))
min_terms = min(min_terms, term_num+1)
avg_terms.append(term_num+1)
max_terms = max(max_terms, term_num+1)
for similarity, term_j, j in (heappop(similarities) for _ \
in range(min(m_knn, len(similarities)))):
similarity = -similarity
if similarity > m_threshold:
Mlev[i,j] = similarity
self.Mlev = Mlev.tocoo()
save_npz("%s.npz" % Mlev_full_fname, self.Mlev)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
LOGGER.info("Minimum number of terms considered: %d", min_terms)
LOGGER.info("Average number of terms considered: %d", mean(avg_terms))
LOGGER.info("Maximum number of terms considered: %d", max_terms)
LOGGER.info("Matrix density:\n- %.10f by kNN," % (((m_knn+1)*m)/m**2) \
+ "\n- %.10f by thresholding" % density(self.Mlev))
logging.getLogger().removeHandler(file_handler)
self.M = self.M + mlev_weight * self.Mlev
del self.Mlev
if technique == "hard_topics" or technique == "soft_topics":
try:
self.C = load_npz("%s.npz" % C_FNAME)
except FileNotFoundError:
LOGGER.info("Building the (unweighted) term-document matrix C.")
file_handler = logging.FileHandler("%s.log" % C_FNAME, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
Ct = lil_matrix((n, m), dtype=FLOAT_DTYPE)
for i, document in enumerate(documents()):
if i % 10000 == 0:
LOGGER.info("Processing document number %d." % (i+1))
for j, ct_ij in self.dictionary.doc2bow(document.tokens):
Ct[i,j] = ct_ij
self.C = Ct.tocoo().transpose()
del Ct
save_npz(C_FNAME, self.C)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
LOGGER.info("Matrix density: %f" % density(self.C))
logging.getLogger().removeHandler(file_handler)
W_C_full_fname = "%s-%d-%d-%f-%f" % (W_C_FNAME, w2v_min_count, m_knn, \
m_threshold, MREL_POWER_FACTOR)
try:
self.W_C = load_npz("%s.npz" % W_C_full_fname)
except FileNotFoundError:
LOGGER.info("Building the weighted term-document matrix W*C.")
file_handler = logging.FileHandler("%s.log" % W_C_full_fname, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
W_C = self.W.tocsr().dot(self.C.tocsc())
self.W_C = W_C.tocoo()
save_npz("%s.npz" % W_C_full_fname, self.W_C)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
LOGGER.info("Matrix density: %f" % density(self.W_C))
logging.getLogger().removeHandler(file_handler)
del self.C
del self.W
if technique == "soft_topics":
M_W_C_full_fname = "%s-%s-%s-%d-%d-%f-%f" % (M_W_C_FNAME, soft_matrices_config_string, \
term_similarity, \
w2v_min_count, m_knn, m_threshold, \
MREL_POWER_FACTOR)
try:
self.M_W_C = load_npz("%s.npz" % M_W_C_full_fname)
except FileNotFoundError:
LOGGER.info("Building the weighted soft term-document matrix M*W*C.")
file_handler = logging.FileHandler("%s.log" % M_W_C_full_fname, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
M_W_C = self.M.tocsr().dot(self.W_C.tocsc())
self.M_W_C = M_W_C.tocoo()
save_npz("%s.npz" % M_W_C_full_fname, self.M_W_C)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
LOGGER.info("Matrix density: %f" % density(self.M_W_C))
logging.getLogger().removeHandler(file_handler)
del self.W_C
del self.M
soft_Ut_full_fname = "%s-%s-%s-%d-%d-%f-%f" % (SOFT_UT_FNAME, soft_matrices_config_string,\
term_similarity, w2v_min_count, m_knn, \
m_threshold, MREL_POWER_FACTOR)
soft_S_full_fname = "%s-%s-%s-%d-%d-%f-%f" % (SOFT_S_FNAME, soft_matrices_config_string, \
term_similarity, w2v_min_count, m_knn, \
m_threshold, MREL_POWER_FACTOR)
soft_Vt_full_fname = "%s-%s-%s-%d-%d-%f-%f" % (SOFT_VT_FNAME, soft_matrices_config_string,\
term_similarity, w2v_min_count, m_knn, \
m_threshold, MREL_POWER_FACTOR)
try:
self.UT = load("%s.npy" % soft_Ut_full_fname)
self.S = load("%s.npy" % soft_S_full_fname)
self.VT = load("%s.npy" % soft_Vt_full_fname)
except FileNotFoundError:
LOGGER.info("Building the SVD of M*W*C.")
file_handler = logging.FileHandler("%s.log" % soft_Ut_full_fname, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
self.UT, self.S, self.VT = sparsesvd(self.M_W_C.tocsc(), LSI_NUM_FEATURES)
save("%s.npy" % soft_Ut_full_fname, self.UT)
save("%s.npy" % soft_S_full_fname, self.S)
save("%s.npy" % soft_Vt_full_fname, self.VT)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
logging.getLogger().removeHandler(file_handler)
del self.M_W_C
if technique == "hard_topics":
try:
self.UT = load("%s.npy" % UT_FNAME)
self.S = load("%s.npy" % S_FNAME)
self.VT = load("%s.npy" % VT_FNAME)
except FileNotFoundError:
LOGGER.info("Building the SVD of W*C.")
file_handler = logging.FileHandler("%s.log" % UT_FNAME, encoding='utf8')
logging.getLogger().addHandler(file_handler)
start_time = time()
self.UT, self.S, self.VT = sparsesvd(self.W_C.tocsc(), LSI_NUM_FEATURES)
save("%s.npy" % UT_FNAME, self.UT)
save("%s.npy" % S_FNAME, self.S)
save("%s.npy" % VT_FNAME, self.VT)
LOGGER.info("Time elapsed: %s" % timedelta(seconds=time()-start_time))
logging.getLogger().removeHandler(file_handler)
del self.W_C
if technique == "hard_topics" or technique == "soft_topics":
self.Sinv_UT = diag(1/self.S).dot(self.UT)
del self.UT
del self.S
del self.VT
def sparse2scipy(self, input):
"""Converts a sparse key-value list representation of a document to a sparse scipy array."""
col = [0] * len(input)
row, data = zip(*input)
return coo_matrix((data, (row, col)), shape=(self.m, 1), dtype=FLOAT_DTYPE)
def compare(self, query, result):
"""Returns similarity between a query and a result document."""
X = self.sparse2scipy(self.dictionary.doc2bow(query.qsubject.tokens + query.qbody.tokens))
Y = self.sparse2scipy(self.dictionary.doc2bow(result.qsubject.tokens + result.qbody.tokens))
if self.technique == "hard_topics" or self.technique == "soft_topics":
X = self.Sinv_UT * X
Y = self.Sinv_UT * Y
return self.similarity.compare(self, X, Y)
class Similarity(object):
"""An interface for an object that represents some measure of similarity between two
documents."""
def compare(self, language_model, X, Y):
"""Computes cosine similarity between the query vector X and a result vector Y, where
language_model is a language model."""
class TopicCosineSimilarity(Similarity):
"""A class that represents the cosine similarity between two documents
represented by dense topic vectors."""
def __init__(self):
"""Sets up an object that represents the cosine similarity between two documents."""
pass
def compare(self, language_model, X, Y):
"""Computes cosine similarity between the query vector X and a result vector Y, where
language_model is a language model that provides the term weighting matrix."""
X_tX = (X.T.dot(X))[0,0]
Y_tY = (Y.T.dot(Y))[0,0]
if X_tX == 0.0 or Y_tY == 0.0:
return 0.0
X_tY = (X.T.dot(Y))[0,0]
result = X_tY / (sqrt(X_tX) * sqrt(Y_tY))
return result
class TermHardCosineSimilarity(Similarity):
"""A class that represents the cosine similarity between two documents
represented by sparse term vectors."""
def __init__(self):
"""Sets up an object that represents the cosine similarity between two documents."""
pass
def compare(self, language_model, X, Y):
"""Computes cosine similarity between the query vector X and a result vector Y, where
language_model is a language model that provides the term weighting matrix."""
WX = language_model.W.tocsr() * X.tocsc()
WY = language_model.W.tocsr() * Y.tocsc()
_WX_tWX = (WX.transpose().tocsr() * WX.tocsc())[0,0]
_WY_tWY = (WY.transpose().tocsr() * WY.tocsc())[0,0]
if _WX_tWX == 0.0 or _WY_tWY == 0.0:
return 0.0
_WX_tWY = (WX.transpose().tocsr() * WY.tocsc())[0,0]
result = _WX_tWY / (sqrt(_WX_tWX) * sqrt(_WY_tWY))
return result
class TermSoftCosineSimilarity(Similarity):
"""A class that represents the soft cosine similarity between two documents
represented by sparse term vectors."""
def __init__(self, weighting="early", rounding=None, normalization="soft"):
"""Sets up an object that represents the soft cosine similarity between two documents.
Attributes:
weighting Whether a query vector will be weighted before its transpose has been
multiplied with the term similarity matrix ("early"), after ("late"),
or never (None).
rounding Whether the term frequencies in the query vector will be rounded
("round", "ceil", "floor") after the vector's transpose has been
multiplied with the term similarity matrix or not (None). The rounding
will only be applied with the "late" weighting.
normalization Whether the final product will be normalized using the soft cosine
norm ("soft"), just the cosine norm ("hard"), or not at all (None).
"""
assert weighting in ("early", "late", None)
self.weighting = weighting
if self.weighting == "early":
assert rounding is None
self.rounding = None
else:
assert rounding in (None, "round", "ceil", "floor")
if rounding == "round":
self.rounding = round
elif rounding == "ceil":
self.rounding = ceil
elif rounding == "floor":
self.rounding = floor
else:
self.rounding = None
assert normalization in ("soft", "hard", None)
self.normalization = normalization
def compare(self, language_model, X, Y):
"""Computes cosine similarity between the query vector X and a result vector Y, where
language_model is a language model that provides the term weighting and term similarity
matrices."""
# Precompute commonly used data.
if self.weighting is None:
_WX_tM = (X.transpose().tocsr() * language_model.M.tocsc())
else:
WX = language_model.W.tocsr() * X.tocsc()
WY = language_model.W.tocsr() * Y.tocsc()
if self.weighting == "early":
_WX_tM = (WX.transpose().tocsr() * language_model.M.tocsc())
else:
XtM = X.transpose().tocsr() * language_model.M.tocsc()
if self.rounding is not None:
XtM = XtM.tocsr()
for coord in zip(*XtM.nonzero()):
XtM[coord] = self.rounding(XtM[coord])
W_XtM_t = language_model.W.tocsr() * XtM.transpose().tocsc()
# Compute the norm.
if self.normalization == "soft":
if self.weighting is None or self.weighting == "early":
if self.weighting is None:
_WY_tM = (Y.transpose().tocsr() * language_model.M.tocsc())
_WX_tMWX = (_WX_tM.tocsr() * X.tocsc())[0,0]
_WY_tMWY = (_WY_tM.tocsr() * Y.tocsc())[0,0]
elif self.weighting == "early":
_WY_tM = (WY.transpose().tocsr() * language_model.M.tocsc())
_WX_tMWX = (_WX_tM.tocsr() * WX.tocsc())[0,0]
_WY_tMWY = (_WY_tM.tocsr() * WY.tocsc())[0,0]
if _WX_tMWX == 0.0 or _WY_tMWY == 0.0:
return 0.0
norm = sqrt(_WX_tMWX) * sqrt(_WY_tMWY)
else:
YtM = Y.transpose().tocsr() * language_model.M.tocsc()
W_YtM_t = language_model.W.tocsr() * YtM.transpose().tocsc()
_W_XtM_t_t_WX = (W_XtM_t.transpose().tocsr() * WX.tocsc())[0,0]
_W_YtM_t_t_WY = (W_YtM_t.transpose().tocsr() * WY.tocsc())[0,0]
if _W_XtM_t_t_WX == 0.0 or _W_YtM_t_t_WY == 0.0:
return 0.0
norm = sqrt(_W_XtM_t_t_WX) * sqrt(_W_YtM_t_t_WY)
elif self.normalization == "hard":
if self.weighting is None:
_WX_tWX = (X.transpose().tocsr() * X.tocsc())[0,0]
_WY_tWY = (Y.transpose().tocsr() * Y.tocsc())[0,0]
else:
_WX_tWX = (WX.transpose().tocsr() * WX.tocsc())[0,0]
_WY_tWY = (WY.transpose().tocsr() * WY.tocsc())[0,0]
if _WX_tWX == 0.0 or _WY_tWY == 0.0:
return 0.0
norm = sqrt(_WX_tWX) * sqrt(_WY_tWY)
else:
norm = 1.0
# Compute the product.
if self.weighting is None or self.weighting == "early":
if self.weighting is None:
_WX_tMWY = (_WX_tM.tocsr() * Y.tocsc())[0,0]
if self.weighting == "early":
_WX_tMWY = (_WX_tM.tocsr() * WY.tocsc())[0,0]
product = _WX_tMWY
else:
_W_XtM_t_t_WY = (W_XtM_t.transpose().tocsr() * WY.tocsc())[0,0]
product = _W_XtM_t_t_WY
return product / norm
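# A compact sketch of the quantity computed above for the default "early" weighting with "soft"
# normalization (notation only: W is the diagonal idf matrix, M the term similarity matrix,
# X and Y the raw term-frequency column vectors):
#
#   soft_cos(X, Y) = (WX)^T M (WY) / ( sqrt((WX)^T M (WX)) * sqrt((WY)^T M (WY)) )
#
# With M equal to the identity this reduces to the ordinary cosine similarity implemented in
# TermHardCosineSimilarity.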
``` |
{
"source": "40hood/zulip",
"score": 2
} |
#### File: zulip/zproject/backends.py
```python
from __future__ import absolute_import
from django.contrib.auth.backends import RemoteUserBackend
from django.conf import settings
import django.contrib.auth
from django_auth_ldap.backend import LDAPBackend
from zerver.models import UserProfile, get_user_profile_by_id, \
get_user_profile_by_email, remote_user_to_email, email_to_username
from apiclient.sample_tools import client as googleapiclient
from oauth2client.crypt import AppIdentityError
def password_auth_enabled(realm):
if realm is not None:
if realm.domain == 'zulip.com' and settings.PRODUCTION:
# the dropbox realm is SSO only, but the unit tests still need to be
# able to login
return False
for backend in django.contrib.auth.get_backends():
if isinstance(backend, EmailAuthBackend):
return True
return False
def dev_auth_enabled():
for backend in django.contrib.auth.get_backends():
if isinstance(backend, DevAuthBackend):
return True
return False
def google_auth_enabled():
for backend in django.contrib.auth.get_backends():
if isinstance(backend, GoogleMobileOauth2Backend):
return True
return False
class ZulipAuthMixin(object):
def get_user(self, user_profile_id):
""" Get a UserProfile object from the user_profile_id. """
try:
return get_user_profile_by_id(user_profile_id)
except UserProfile.DoesNotExist:
return None
class ZulipDummyBackend(ZulipAuthMixin):
"""
Used when we want to log you in but we don't know which backend to use.
"""
def authenticate(self, username=None, use_dummy_backend=False):
if use_dummy_backend:
try:
return get_user_profile_by_email(username)
except UserProfile.DoesNotExist:
pass
return None
class EmailAuthBackend(ZulipAuthMixin):
"""
Email Authentication Backend
Allows a user to sign in using an email/password pair rather than
a username/password pair.
"""
def authenticate(self, username=None, password=None):
""" Authenticate a user based on email address as the user name. """
if username is None or password is None:
# Return immediately. Otherwise we will look for a SQL row with
# NULL username. While that's probably harmless, it's needless
# exposure.
return None
try:
user_profile = get_user_profile_by_email(username)
if not password_auth_enabled(user_profile.realm):
return None
if user_profile.check_password(password):
return user_profile
except UserProfile.DoesNotExist:
return None
class GoogleMobileOauth2Backend(ZulipAuthMixin):
"""
Google Apps authentication for mobile devices
Allows a user to sign in using a Google-issued OAuth2 token.
Ref:
https://developers.google.com/+/mobile/android/sign-in#server-side_access_for_your_app
https://developers.google.com/accounts/docs/CrossClientAuth#offlineAccess
This backend is not currently supported on voyager.
"""
def authenticate(self, google_oauth2_token=None, return_data={}):
try:
token_payload = googleapiclient.verify_id_token(google_oauth2_token, settings.GOOGLE_CLIENT_ID)
except AppIdentityError:
return None
if token_payload["email_verified"] in (True, "true"):
try:
return get_user_profile_by_email(token_payload["email"])
except UserProfile.DoesNotExist:
return_data["valid_attestation"] = True
return None
else:
return_data["valid_attestation"] = False
class ZulipRemoteUserBackend(RemoteUserBackend):
create_unknown_user = False
def authenticate(self, remote_user):
if not remote_user:
return
email = remote_user_to_email(remote_user)
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return None
if user_profile.is_mirror_dummy:
# mirror dummies can not login, but they can convert to real users
return None
return user_profile
class ZulipLDAPAuthBackend(ZulipAuthMixin, LDAPBackend):
def django_to_ldap_username(self, username):
if settings.LDAP_APPEND_DOMAIN is not None:
return email_to_username(username)
return username
def ldap_to_django_username(self, username):
if settings.LDAP_APPEND_DOMAIN is not None:
return "@".join((username, settings.LDAP_APPEND_DOMAIN))
return username
def get_or_create_user(self, username, ldap_user):
try:
return get_user_profile_by_email(username), False
except UserProfile.DoesNotExist:
return UserProfile(), False
class ZulipLDAPUserPopulator(ZulipLDAPAuthBackend):
# Just like ZulipLDAPAuthBackend, but doesn't let you log in.
def authenticate(self, username, password):
return None
class DevAuthBackend(ZulipAuthMixin):
# Allow logging in as any user without a password.
# This is used for convenience when developing Zulip.
def authenticate(self, username):
try:
return get_user_profile_by_email(username)
except UserProfile.DoesNotExist:
return None
``` |
{
"source": "40i4/receiptDetection",
"score": 3
} |
#### File: 40i4/receiptDetection/detection.py
```python
import cv2
import numpy as np
import imutils
from transform import four_point_transform
from skimage.filter import threshold_adaptive
font = cv2.FONT_HERSHEY_SIMPLEX
image_name = "img1.jpg"
imageIn = cv2.imread(image_name)
ratio = imageIn.shape[0] / 500.0
orig = imageIn.copy()
def use_image_contour(all_vertices, image):
height, width, _ = image.shape
print "height: " + str(height) + " width: " + str(width)
first_vertex = all_vertices[0]
second_vertex = all_vertices[1]
# check if the curve is horizontal or vertical
if abs(first_vertex[1] - second_vertex[1]) < 0.1 * height:
print "horizontal curve"
# if y of the first vertex is on the upper side of image
if first_vertex[1] < height/2:
new_vertices = [[first_vertex[0], height], [second_vertex[0], height]]
# if y of the first vertex is on the bottom side of image
else:
new_vertices = [[first_vertex[0], 0], [second_vertex[0], 0]]
else:
print "vertical curve"
# if y of the first vertex is on the left side of image
if first_vertex[0] < width/2:
new_vertices = [[width, first_vertex[1]], [width, second_vertex[1]]]
# if y of the first vertex is on the right side of image
else:
new_vertices = [[0, first_vertex[1]], [0, second_vertex[1]]]
all_vertices = all_vertices + new_vertices
return all_vertices
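# Example of the completion above (hypothetical coordinates, image of width 400 and height 300):
# if the only detected curve is a roughly horizontal edge with vertices [[40, 20], [300, 25]]
# in the upper half, the receipt is assumed to run off the bottom of the frame, so
# [[40, 300], [300, 300]] is appended and the four points together outline the receipt.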
# argument: image to transform
def transform_image(image):
image = imutils.resize(image, height=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
cv2.imshow("edged", edged)
# find contours; len(cnts) returns no. of contours found
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# if there are no contours found
if len(cnts) < 1:
height, width, _ = image.shape
print "height: " + str(height) + " width: " + str(width)
# return coordinates of the whole image
all_vertices = [[0, 0], [width, 0], [width, height], [0, height]]
contours = all_vertices
# print vertices' coordinates
for elem in contours:
text2 = str(elem[0]) + " " + str(elem[1])
cv2.putText(image, text2, (elem[0], elem[1]), font, 0.5, (255, 255, 0), 2)
contours_copy = contours
contours_copy_np = np.array(contours_copy)
cv2.drawContours(image, [contours_copy_np], -1, (0, 255, 0), 2)
warped = four_point_transform(orig, contours_copy_np.reshape(4, 2) * ratio)
return warped, image
# if there are some contours found
else:
# sort from the longest perimeter to the shortest
cnts_sorted = sorted(cnts, key=lambda x: cv2.arcLength(x, True), reverse=True)
# print "cnts sorted: " + str(cnts_sorted)
peri_arr2 = []
for elem in cnts_sorted:
perii = cv2.arcLength(elem, True)
peri_arr2.append(perii)
# length of the longest perimeter
peri_max = peri_arr2[0]
# approxPolyDP returns coordinates of vertices of the longest perimeter
approx2 = cv2.approxPolyDP(cnts_sorted[0], 0.02 * peri_max, True)
# find vertices and put them into array all_vertices
all_vertices = []
for a in approx2:
aa = a[0]
x_coord = aa[0]
y_coord = aa[1]
two_vertices = [x_coord, y_coord]
all_vertices.append(two_vertices)
# if only one curve was found
if len(all_vertices) == 2:
# but if there are other curves
if len(peri_arr2) > 1:
peri_max2 = peri_arr2[1]
print "peri max 2 length: " + str(peri_max2)
approx3 = cv2.approxPolyDP(cnts_sorted[1], 0.02 * peri_max2, True)
print "no of ver: " + str(len(approx3))
# find another vertical contour
if len(approx3) == 2:
all_vertices2 = []
for a in approx3:
aa = a[0]
x_coord = aa[0]
y_coord = aa[1]
two_vertices = [x_coord, y_coord]
all_vertices2.append(two_vertices)
print "all vertices 2: " + str(all_vertices2)
all_vertices = all_vertices + all_vertices2
# if there is no another vertical contour - use image contour
else:
all_vertices = use_image_contour(all_vertices, image)
# if there is no other curve found
else:
print 'there is no other curve found'
all_vertices = use_image_contour(all_vertices, image)
# find vertices that are most likely to be receipt vertices
br = ul = bl = ur = []
max_sum = 0
min_sum = 10000
max_sub_x_y = 0
max_sub_y_x = 0
for elem in all_vertices:
sum_x_and_y = elem[0] + elem[1]
if sum_x_and_y > max_sum:
max_sum = sum_x_and_y
br = elem
if sum_x_and_y < min_sum:
min_sum = sum_x_and_y
ul = elem
if elem[0] - elem[1] > 0:
if elem[0] - elem[1] > max_sub_x_y:
max_sub_x_y = elem[0] - elem[1]
ur = elem
if elem[1] - elem[0] > 0:
if elem[1] - elem[0] > max_sub_y_x:
max_sub_y_x = elem[1] - elem[0]
bl = elem
print "ul: " + str(ul)
print "ur: " + str(ur)
print "br: " + str(br)
print "bl: " + str(bl)
contours = []
contours.append(ul)
contours.append(ur)
contours.append(br)
contours.append(bl)
# if there are any empty vertices, assign their values to [0,0]
for elem, val in enumerate(contours):
if val == []:
contours[elem] = [0, 0]
print "coordinates of vertices ul, ur, br, bl: " + str(contours)
# print vertices' coordinates
for elem in contours:
text2 = str(elem[0]) + " " + str(elem[1])
cv2.putText(image, text2, (elem[0], elem[1]), font, 0.5, (255, 255, 0), 2)
contours_copy = contours
for elem, val in enumerate(contours_copy):
tab = []
tab.append(val)
contours_copy[elem] = tab
contours_copy_np = np.array(contours_copy)
cv2.drawContours(image, [contours_copy_np], -1, (0, 255, 0), 2)
warped = four_point_transform(orig, contours_copy_np.reshape(4, 2) * ratio)
return warped, image
cv2.waitKey(0)
cv2.destroyAllWindows()
def main():
warped, cut_image = transform_image(imageIn)
cv2.imshow("cut_image", cut_image)
cv2.imshow("warped", imutils.resize(warped, height=500))
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
``` |
{
"source": "40th-SOC/SpyderBot",
"score": 3
} |
#### File: SpyderBot/cogs/help.py
```python
import os, sys, discord
from discord.ext import commands
if not os.path.isfile("config.py"):
sys.exit("'config.py' not found! Please add it and try again.")
else:
import config
class Help(commands.Cog, name="help"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="help")
async def help(self, context):
# Note that commands made only for the owner of the bot are not listed here.
embed = discord.Embed(
title="Bot",
description="List of commands are:",
color=0x00FF00
)
embed.add_field(
name="Invite",
value=f"Usage: {config.BOT_PREFIX}invite",
inline=False
)
embed.add_field(
name="Server",
value=f"Usage: {config.BOT_PREFIX}server",
inline=False
)
embed.add_field(
name="Poll",
value=f"Usage: {config.BOT_PREFIX}poll <Idea>",
inline=False
)
embed.add_field(
name="8ball",
value=f"Usage: {config.BOT_PREFIX}8ball <Question>",
inline=False)
embed.add_field(
name="Bitcoin",
value=f"Usage: {config.BOT_PREFIX}bitcoin",
inline=False
)
embed.add_field(
name="Info",
value=f"Usage: {config.BOT_PREFIX}info",
inline=False
)
embed.add_field(
name="status",
value=f"Usage: {config.BOT_PREFIX}status\n Desc: Get DCS Mission, Uptime, Players for servers",
inline=False
)
embed.add_field(
name="mlist",
value=f"Usage: {config.BOT_PREFIX}mlist\n Desc: Get DCS Mission Server last 10 missions",
inline=False
)
embed.add_field(
name="attendance",
value=f"Usage: {config.BOT_PREFIX}attendance <mission ID> \n Desc: Get DCS attendance using ID from mlist",
inline=False
)
embed.add_field(
name="Purge",
value=f"Usage: {config.BOT_PREFIX}purge <Number>",
inline=False
)
embed.add_field(
name="Help",
value=f"Usage: {config.BOT_PREFIX}help",
inline=False
)
await context.send(embed=embed)
def setup(bot):
bot.add_cog(Help(bot))
``` |
{
"source": "40uf411/English-Grammar-Checker",
"score": 3
} |
#### File: 40uf411/English-Grammar-Checker/web.py
```python
import os
import re
from flask import Flask, request, json
from grammarChecker import GrammarChecker
from parser import TextTokenizing
import _nlp_
app = Flask(__name__)
@app.route('/')
def home():
file = ''
with open('./static/html/home.html', 'r') as file:
html = file.read()
return html
@app.route('/check', methods=['POST', 'GET'])
def check():
text = request.form.get('text')
print(text)
taggedTokens = TextTokenizing.parse(text)
print(taggedTokens)
# execute the testing process
p = [ 0 < _nlp_.test([taggedToken], GrammarChecker.grammar, GrammarChecker.fdist, GrammarChecker.n_gram_nbr) for taggedToken in taggedTokens]
#r = GrammarChecker.checkGrammar(taggedTokens)
result = False not in p
sents = list()
for i in range(len(p)):
if not p[i]:
sents.append(' '.join(taggedTokens[i]))
data = {'result': result, 'errors': sents}
response = app.response_class(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
return response
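# Rough usage sketch (assuming the default Flask host/port; the sentence and the exact shape of
# the tagged tokens are illustrative only):
#   curl -X POST -d "text=She go to school yesterday." http://127.0.0.1:5000/check
# would return JSON of the form {"result": false, "errors": [...]}, where "errors" lists the
# sentences (as space-joined tagged tokens) that the trained n-gram grammar rejected.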
@app.route('/train', methods=['POST', 'GET'])
def train():
text = request.form.get('text')
print(text)
taggedTokens = TextTokenizing.parse(text)
print(taggedTokens)
# execute the training process
grammar, fdist, n_gram_nbr = _nlp_.train(taggedTokens)
GrammarChecker.grammar = grammar
GrammarChecker.fdist = fdist
GrammarChecker.n_gram_nbr = n_gram_nbr
result = GrammarChecker.checkGrammar(taggedTokens)
print(result)
data = {'result': result}
response = app.response_class(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
return response
if __name__ == '__main__':
app.run()
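# Usage sketch (illustrative, not part of the original file): with the app running
# on Flask's default port, POST the form field 'text' to /check and the endpoint
# replies with JSON of the form {"result": bool, "errors": [...]}:
#   curl -X POST -d "text=She go to school yesterday." http://127.0.0.1:5000/check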
``` |
{
"source": "410063005/AndroidViewClient",
"score": 2
} |
#### File: android/uiautomator/uiautomatorhelpertests.py
```python
import io
import os
import random
import sys
from PIL import Image
try:
sys.path.insert(0, os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
pass
from com.dtmilano.android.viewclient import ViewClient
from com.dtmilano.android.uiautomator.uiautomatorhelper import UiAutomatorHelper, UiObject2
__author__ = 'diego'
import unittest
DEBUG = False
class UiAutomatorHelperTests(unittest.TestCase):
def setUp(self):
if DEBUG:
print("@@@ UiAutomatorHelperTests.setUp", file=sys.stderr)
(self.device, self.serialno) = ViewClient.connectToDeviceOrExit(serialno='.*')
self.assertIsNotNone(self.device)
self.uiAutomatorHelper = UiAutomatorHelper(self.device)
def tearDown(self):
if DEBUG:
print("@@@ UiAutomatorHelperTests.tearDown", file=sys.stderr)
self.uiAutomatorHelper.quit()
def testDumpWindowHierarchy(self):
dump = self.uiAutomatorHelper.dumpWindowHierarchy()
self.assertIsNotNone(dump)
def testDumpWindowHierarchy_repeat(self):
for _ in range(10):
dump = self.uiAutomatorHelper.dumpWindowHierarchy()
self.assertIsNotNone(dump)
def testPressKeyCode(self):
        response = self.uiAutomatorHelper.pressKeyCode(4)  # 4 is KEYCODE_BACK
if DEBUG:
print("response=", response, file=sys.stderr)
def testTakeScreenshot(self):
buf = self.uiAutomatorHelper.takeScreenshot()
self.assertIsNotNone(buf)
self.assertTrue(len(buf) > 0)
        image = Image.open(io.BytesIO(buf))  # the screenshot buffer holds binary PNG data
self.assertIsNotNone(image)
self.assertEqual(image.format, 'PNG')
def testClick_random(self):
x = random.randint(0, 1000)
y = random.randint(0, 1000)
response = self.uiAutomatorHelper.click(x=x, y=y)
if DEBUG:
print("response=", response, file=sys.stderr)
def testSwipe_random(self):
x0 = random.randint(0, 1000)
y0 = random.randint(0, 1000)
x1 = random.randint(0, 1000)
y1 = random.randint(0, 1000)
steps = random.randint(10, 100)
response = self.uiAutomatorHelper.swipe(startX=x0, startY=y0, endX=x1, endY=y1, steps=steps)
if DEBUG:
print("response=", response, file=sys.stderr)
def testSetText_UiObject2_Chinese_text(self):
# This enters a Reminder using Calendar
# See https://github.com/dtmilano/AndroidViewClient/issues/242
uio = self.uiAutomatorHelper.findObject(
bySelector='res@<EMAIL>.<EMAIL>.calendar:id/title_edit_text,[email protected],text@$Remind me to…,<EMAIL>')
self.assertIsNotNone(uio)
self.assertTrue(isinstance(uio, UiObject2))
uio.setText("提醒我包括中文支持")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "411522/mppcca",
"score": 2
} |
#### File: 411522/mppcca/mppcca.py
```python
import numpy as np
from numpy.linalg import inv
from scipy.special import logsumexp  # logsumexp lives in scipy.special (no longer in scipy.misc)
def init_params(nb_K, dim_y1, dim_y2, dim_x):
list_params = [{} for k in range(nb_K)]
dim_t = min(dim_y1, dim_y2)
Σπ = 0
for params in list_params:
params["μ"] = np.transpose(np.random.randn(dim_y1 + dim_y2))
params["Wx"] = np.random.randn(dim_x, dim_y1 + dim_y2)
params["μx"] = np.transpose(np.zeros(dim_x))
params["Ψx"] = np.zeros((dim_x, dim_x))
params["π"] = np.random.randn() ** 2
Σπ += params["π"]
Wt = np.matrix(np.random.randn(dim_y1 + dim_y2, dim_t))
Ψ1 = np.random.randn(dim_y1, dim_y1)
Ψ2 = np.random.randn(dim_y2, dim_y2)
Ψ1 = Ψ1 * Ψ1
Ψ2 = Ψ2 * Ψ2
temp_zero_mat1 = np.zeros((dim_y1, dim_y2))
temp_zero_mat2 = np.zeros((dim_y2, dim_y1))
Ψ = np.r_[np.c_[Ψ1, temp_zero_mat1], np.c_[temp_zero_mat2, Ψ2]]
params["C"] = Ψ + Wt * Wt.T
for key, value in params.items():
temp = np.matrix(value)
params[key] = temp
for params in list_params:
params["π"] = params["π"] / Σπ
return list_params
def calc_μ(params_k, γ_N, y_N, x_N):
# Σₙ(γₙ(yₙ-Wₓxₙ)) / Σₙγₙ
return np.dot(γ_N, (y_N - np.einsum("jk,ij->ik", params_k["Wx"], x_N))) / np.sum(γ_N)
def calc_π(γ_N):
return np.sum(γ_N) / len(γ_N)
def calc_Wx(y_tilde_N, x_tilde_N, γ_N):
temp1 = np.einsum("ijk,i->jk",
np.einsum("ij,ik->ijk", y_tilde_N, x_tilde_N),
γ_N)
temp2 = np.einsum("ijk,i->jk",
np.einsum("ij,ik->ijk", x_tilde_N, x_tilde_N),
γ_N)
return np.dot(temp1, inv(temp2)).transpose()
def calc_C(params_k, y_tilde_N, x_tilde_N, γ_N):
temp = y_tilde_N - np.einsum("jk,ij->ik", params_k["Wx"], x_tilde_N)
return np.einsum("i,ijk->jk", γ_N,
np.einsum("ij,ik->ijk", temp, temp)) / np.sum(γ_N)
def calc_lpdf_norm(y_N, x_N, params_k):
sign, logdet = np.linalg.slogdet(2 * np.pi * params_k["C"])
mean = np.einsum("jk,ij->ik", params_k["Wx"], x_N) + params_k["μ"]
covariance_inv = inv(params_k["C"])
# temp = (y-mean).T * C.I * (y-mean)
temp_N = np.einsum("ij,ij->i",
np.einsum("ij,jk->ik", y_N - mean, covariance_inv),
y_N - mean)
return np.array(-0.5 * logdet - 0.5 * temp_N + np.log(params_k["π"])).reshape(len(y_N))
def E_step(y_N, x_N, params, K):
lpdf_K_N = [calc_lpdf_norm(y_N, x_N, params[k]) for k in range(K)]
lpdf_N = logsumexp(lpdf_K_N, axis=0)
lpdf_K_N -= lpdf_N
γ_K_N = np.exp(lpdf_K_N)
return γ_K_N
def M_step(γ_K_N, y_N, x_N, params):
for k, (γ_N, params_k) in enumerate(zip(γ_K_N, params)):
μ_k = calc_μ(params_k, γ_N, y_N, x_N)
y_tilde_N = y_N - np.dot(γ_N, y_N) / np.sum(γ_N)
x_tilde_N = x_N - np.dot(γ_N, x_N) / np.sum(γ_N)
Wx_k = calc_Wx(y_tilde_N, x_tilde_N, γ_N)
C_k = calc_C(params_k, y_tilde_N, x_tilde_N, γ_N)
π_k = calc_π(γ_N)
params[k]["μ"] = μ_k
params[k]["Wx"] = Wx_k
params[k]["C"] = C_k
params[k]["π"] = π_k
def mppcca(y1_N, y2_N, x_N, nb_K):
params = init_params(nb_K,
len(y1_N[0]),
len(y2_N[0]),
len(x_N[0]))
y_N = np.concatenate([y1_N, y2_N], axis=1)
history_labels = []
while True:
log_γ = E_step(y_N, x_N, params, nb_K)
M_step(log_γ, y_N, x_N, params)
history_labels.append(np.argmax(log_γ, axis=0))
if len(history_labels) < 2:
continue
if np.array_equal(history_labels[-2], history_labels[-1]):
break
print("%d step - updated %d labels" % (len(history_labels), (np.count_nonzero(history_labels[-1] - history_labels[-2]))))
return params, history_labels[-1]
``` |
{
"source": "412b/cvat",
"score": 2
} |
#### File: datumaro/plugins/image_dir.py
```python
import os
import os.path as osp
from datumaro.components.extractor import DatasetItem, SourceExtractor, Importer
from datumaro.components.converter import Converter
from datumaro.util.image import save_image
class ImageDirImporter(Importer):
EXTRACTOR_NAME = 'image_dir'
def __call__(self, path, **extra_params):
from datumaro.components.project import Project # cyclic import
project = Project()
if not osp.isdir(path):
raise Exception("Can't find a directory at '%s'" % path)
source_name = osp.basename(osp.normpath(path))
project.add_source(source_name, {
'url': source_name,
'format': self.EXTRACTOR_NAME,
'options': dict(extra_params),
})
return project
class ImageDirExtractor(SourceExtractor):
_SUPPORTED_FORMATS = ['.png', '.jpg']
def __init__(self, url):
super().__init__()
assert osp.isdir(url), url
items = []
for dirpath, _, filenames in os.walk(url):
for name in filenames:
path = osp.join(dirpath, name)
if not self._is_image(path):
continue
item_id = osp.relpath(osp.splitext(path)[0], url)
items.append(DatasetItem(id=item_id, image=path))
self._items = items
def __iter__(self):
for item in self._items:
yield item
def __len__(self):
return len(self._items)
def _is_image(self, path):
if not osp.isfile(path):
return False
for ext in self._SUPPORTED_FORMATS:
if path.endswith(ext):
return True
return False
class ImageDirConverter(Converter):
def __call__(self, extractor, save_dir):
os.makedirs(save_dir, exist_ok=True)
for item in extractor:
if item.has_image and item.image.has_data:
save_image(osp.join(save_dir, item.id + '.jpg'),
item.image.data, create_dir=True)
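# Usage sketch (illustrative only; the directory path below is an example):
#   extractor = ImageDirExtractor('/path/to/images')
#   for item in extractor:
#       print(item.id)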
``` |
{
"source": "4-1-2/BIOBOT",
"score": 2
} |
#### File: BIOBOT/server/app.py
```python
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify
from ibm_botocore.client import Config
import ibm_boto3
import numpy as np
import atexit
import os
import json
import io as libio
from PIL import Image
app = Flask(__name__)
from biobot.model import predict, get_model
import base64
from io import BytesIO
# Write image STORAGE IBM
def cgsWriteImage(client, bucket, file, image):
n = image.ndim
if (n==3):
img = Image.fromarray(image,'RGB')
else:
if (image.max()==1):
img = Image.fromarray(image,'1').convert('RGB')
else:
img = Image.fromarray(image,'L').convert('RGB')
bufImage = libio.BytesIO()
img.save(bufImage,"JPEG")
bufImage.seek(0)
isr = client.put_object(Bucket=bucket,
Body = bufImage,
Key = file,
ContentType = 'image/jpeg')
print("""cgsWriteImage:
\n\tBucket=%s
\n\tFile=%s
\n\tArraySize=%d %s
RawSize=%d\n""" % (
bucket, file, image.size, image.shape, bufImage.getbuffer().nbytes))
# DB IBM
client = Cloudant.iam(
"0543c3c0-716a-4fe4-8deb-bb2fd61dcd8e-bluemix",
"<KEY>",
connect=True
)
database_bot = client['biobot']
# STORAGE IBM
cgsClient = ibm_boto3.client(service_name='s3',
ibm_api_key_id = '<KEY>',
ibm_auth_endpoint='https://iam.cloud.ibm.com/identity/token',
config=Config(signature_version='oauth'),
endpoint_url='https://s3.ap.cloud-object-storage.appdomain.cloud')
#!im = numpy.array(pic)
# On IBM Cloud (Cloud Foundry), get the port number from the PORT environment
# variable; when running this app on the local machine, default the port to 8000.
# Create the model:
# ResNet9 : classifier
# Input size : [56 x 256 x 3]
# Output size : [38]
model = get_model()
port = int(os.getenv('PORT', 8000))
@app.route('/', methods=['GET', 'POST'])
def basic():
if request.method == 'POST':
name = request.form['name']
partition_key = 'Humans'
document_key = 'julia30'
database_bot.create_document({
'_id': ':'.join((partition_key, document_key)),
'name': name
})
return render_template('index.html', t=name)
return render_template('index.html')
# Diagnosis
@app.route('/diagnosis', methods=['GET', 'POST'])
def run_diagnosis():
if request.method == 'POST':
#import pdb; pdb.set_trace()
image = request.files['img']
image_ = Image.open(image)
new_width, new_height = 256, 256
width, height = image_.size # Get dimensions
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
# Crop the center of the image
image_cropped = image_.crop((left, top, right, bottom))
im_file = BytesIO()
# -*- coding: utf-8 -*-
image_cropped.save(im_file, format='JPEG')
binary_data = im_file.getvalue()
io_image = base64.b64encode(binary_data)
#io_image = base64.b64encode(image_cropped.read()).decode('utf-8')
res1, res2 = predict(model, io_image)
return render_template('upload_image.html', image_up= res1 +' - '+ res2)
return render_template('upload_image.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port, debug=True)
```
#### File: biobot/model/modules.py
```python
import os
import torch
import torch.nn as nn
import numpy as np
import base64
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from biobot.model import network
def ConvBlock(in_channels, out_channels, pool=False):
"""
Convolutional Block: Conv2d + BatchNorm2d + ReLU
Parameters:
in_channels : input channels
out_channels : output channels
pool : use pooling operation
"""
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)]
if pool:
layers.append(nn.MaxPool2d(4))
return nn.Sequential(*layers)
def create_network(device):
"""
Create ResNet9 network
Parameters:
device: the device to which the model parameters are sent.
"""
model = network.ResNet9(3, 38).to(device)
return model
def load_model(device, load_path):
"""
Parameters:
device : the device to which the model parameters are sent.
load_path : a string containing the 'state_dict' of the model.
"""
model = create_network(device)
    checkpoint = torch.load(load_path, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint)
return model
def get_db(path):
"""
Parameters:
path: a filename that contains the test dataset.
"""
test_db = ImageFolder(path, transform=transforms.ToTensor())
return test_db, test_db.classes
def load_test_db(path):
"""
Parameters:
path: a filename that contains the test dataset.
"""
test_db, _ = get_db(path)
test_dl = DataLoader(test_db, 1, num_workers=2, pin_memory=True)
return test_dl, len(test_db)
def eval_accuracy(model, path, device):
"""
Parameters:
model : an 'torch.nn.Module' model.
path : a filename that contains the test dataset.
device : the device to which the model parameters are sent.
"""
model.eval()
data_generator, data_size = load_test_db(path)
acc = []
for it, batch in enumerate(data_generator):
image_batch, label_batch = batch
y = model(image_batch.to(device))
acc_it = (torch.max(y.detach().cpu(), dim=1)[1] == label_batch)
acc.append(float(acc_it.item()))
print('\r[{:4d}/{:4d}] acc = {:3.5f}%'.format(
it, data_size, np.mean(acc) * 100.), end='')
accuracy = np.mean(acc)
print('\r[{:4d}/{:4d}] acc = {:3.5f}%'.format(
it, data_size, accuracy * 100.))
return accuracy
def test_classes_ids():
test_path = 'dataset/test'
test1 = sorted(os.listdir(test_path))
test2 = get_db(test_path)[1]
for t1, t2 in zip(test1, test2):
assert t1 == t2
def accuracy_performance(path, device):
"""
Parameters:
        path : a filename that contains the test dataset.
        device : the device to which the model parameters are sent.
"""
model = load_model(device, os.path.join('params', 'model.pth'))
assert eval_accuracy(model, path, device) >= 0.95
def predict_random_image(path, device):
"""
Parameters:
        path : a filename that contains the test dataset.
        device : the device to which the model parameters are sent.
"""
model = load_model(device, os.path.join('params', 'model.pth'))
model.eval()
test_db, classes_id = get_db(path)
x, pred_y = test_db[np.random.randint(len(test_db))]
y_hat = model(x.to(device).unsqueeze(0))
_, pred_y_hat = torch.max(y_hat, dim=1)
return classes_id[pred_y], classes_id[pred_y_hat[0].item()]
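# --- usage sketch for ConvBlock (illustrative, not part of the original file) ---
# The block stacks Conv2d + BatchNorm2d + ReLU and, with pool=True, a MaxPool2d(4);
# the input shape below is an arbitrary example value.
if __name__ == "__main__":
    block = ConvBlock(3, 16, pool=True)
    out = block(torch.randn(1, 3, 64, 64))
    print(out.shape)  # torch.Size([1, 16, 16, 16])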
``` |
{
"source": "413612/PL-0.5",
"score": 3
} |
#### File: 413612/PL-0.5/my_parser.py
```python
from llvmlite import ir
from llvmlite import binding
import ast
import utils
from lexer import Lexer
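# Convert an expression's token stream to reverse Polish notation with a
# shunting-yard pass; returns the index of the terminating token, the RPN
# order of token indices, and an error message (empty string on success).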
def get_rpn(i, tokens):
res, stack, error = [], [], ''
while tokens[i].type not in ['SEMI', 'COLON']:
if tokens[i].type == 'INT_LIT' or tokens[i].type == 'FLOAT_LIT' or tokens[i].type == 'CHAR_LIT':
res.append(i)
i += 1
continue
elif tokens[i].type == 'ID':
res.append(i)
if tokens[i + 1].type == 'LPAREN':
i += 1
while tokens[i].type != 'RPAREN':
i += 1
else:
i += 1
continue
elif utils.is_operator(tokens[i].type):
if len(stack) == 0:
stack.append(i)
else:
while tokens[stack[-1]].type != 'LPAREN' and utils.op_priority(tokens[i].type) >= utils.op_priority(tokens[stack[-1]].type):
res.append(stack.pop())
if len(stack) == 0:
break
stack.append(i)
i += 1
continue
elif tokens[i].type == 'LPAREN':
stack.append(i)
i += 1
continue
elif tokens[i].type == 'RPAREN':
while tokens[stack[-1]].type != 'LPAREN':
res.append(stack.pop())
if len(stack) == 0:
break
if len(stack) > 0:
if tokens[stack[-1]].type == 'LPAREN':
stack.pop()
else:
error = "В выражении неправильно расставлены скобки"
i += 1
continue
else:
break
while len(stack) != 0:
res.append(stack.pop())
return i, res, error
def var_parse(i: int, tokens, parent):
v = ast.VarDecAST()
v.set_parent(parent)
if tokens[i].type == 'VAR':
i += 1
if tokens[i].type == 'ID':
obj = parent.get_children(tokens[i].value)
if obj is not None:
error = "Переменная с именем " + tokens[i].value + " существует."
print(error)
return None, i, error
else:
parent.add_child(tokens[i].value, v)
v.set_name(tokens[i].value)
else:
error = "Ошибка объявления переменной. Не указано имя."
print(error)
return None, i, error
i += 1
if utils.is_type(tokens[i].type):
v.set_type(tokens[i].type)
else:
error = "Ошибка объявления переменной. Некорректно указан тип."
print(error)
return None, i, error
i += 1
if tokens[i].type == 'SEMI':
return v, i, ""
else:
error = "Ошибка. Нет точки с запятой."
print(error)
return None, i, error
def func_call_parse(i, tokens, parent):
error = ""
name = ""
args = []
if tokens[i].type == 'ID':
name = tokens[i].value
i += 1
if tokens[i].type == 'LPAREN':
i += 1
while tokens[i].type != 'RPAREN':
if utils.is_number(tokens[i].type):
if tokens[i].type == 'INT_LIT':
numb = ast.IntLiteralAST(tokens[i].value)
args.append(numb)
elif tokens[i].type == 'FLOAT_LIT':
numb = ast.FloatLiteralAST(tokens[i].value)
args.append(numb)
elif tokens[i].type == 'CHAR_LIT':
char = ast.CharLiteralAST(tokens[i].value)
args.append(char)
elif tokens[i].type == 'ID':
obj = parent.get_children(tokens[i].value)
if obj is None:
error = "Переменная с имененем " + tokens[i].value + " не объявлена."
print(error)
return None, i, error
var_def_obj = ast.VarDefAST(parent)
var_def_obj.set_var_dec(obj)
args.append(var_def_obj)
i += 1
if name != "":
obj = parent.get_children(name)
if obj is not None:
f = ast.FunctionCallAST(obj, args)
f.set_parent(parent)
return f, i, error
else:
error = "Не объявлена функция с именем " + name
print(error)
return None, i, error
else:
error = "Не корректное объявление функции"
print(error)
return None, i, error
def proc_call_parse(i, tokens, parent):
error = ""
name = ""
args = []
if tokens[i].type == 'ID':
name = tokens[i].value
i += 1
while tokens[i].type != 'SEMI':
if utils.is_number(tokens[i].type):
if tokens[i].type == 'INT_LIT':
numb = ast.IntLiteralAST(tokens[i].value)
args.append(numb)
elif tokens[i].type == 'FLOAT_LIT':
numb = ast.FloatLiteralAST(tokens[i].value)
args.append(numb)
elif tokens[i].type == 'CHAR_LIT':
char = ast.CharLiteralAST(tokens[i].value)
args.append(char)
elif tokens[i].type == 'ID':
obj = parent.get_children(tokens[i].value)
if obj is None:
error = "Переменная с имененем " + tokens[i].value + " не объявлена."
print(error)
return None, i, error
var_def_obj = ast.VarDefAST(parent)
var_def_obj.set_var_dec(obj)
args.append(var_def_obj)
i += 1
if name != "":
obj = parent.get_children(name)
if obj is not None:
p = ast.ProcedureCallAST(obj, args)
p.set_parent(parent)
return p, i, error
else:
error = "Не объявлена процедура с именем " + name
print(error)
return None, i, error
else:
error = "Требуется имя процедуры"
print(error)
return None, i, error
def bin_op_parse(i: int, tokens, parent: ast.BaseAST):
error = ""
root = None
j, rpn, error = get_rpn(i, tokens)
if error != "":
print(error)
return None, i, error
stack = []
for k in range(len(rpn)):
if tokens[rpn[k]].type == 'INT_LIT':
hs = ast.IntLiteralAST(tokens[rpn[k]].value)
stack.append(hs)
continue
elif tokens[rpn[k]].type == 'FLOAT_LIT':
hs = ast.FloatLiteralAST(tokens[rpn[k]].value)
stack.append(hs)
continue
elif tokens[rpn[k]].type == 'ID':
obj = parent.get_children(tokens[rpn[k]].value)
if obj is None:
error = "Переменная с именем " + tokens[rpn[k]].value + " не объявлена."
print(error)
return None, rpn[k], error
else:
if tokens[rpn[k] + 1].type == 'LPAREN':
call_obj, i, error = func_call_parse(rpn[k], tokens, parent)
if call_obj is None:
error = "Функция с именем " + tokens[i].value + " вызвана некорректно."
print(error)
return None, i, error
else:
stack.append(call_obj)
else:
var_def_obj = ast.VarDefAST(parent)
var_def_obj.set_var_dec(obj)
stack.append(var_def_obj)
elif utils.is_operator(tokens[rpn[k]].type):
bin_op = ast.BinaryAST()
bin_op.set_op(tokens[rpn[k]].type)
rhs = stack.pop()
lhs = stack.pop()
rhs.set_parent(bin_op)
lhs.set_parent(bin_op)
bin_op.set_rhs(rhs)
bin_op.set_lhs(lhs)
stack.append(bin_op)
if len(stack) == 1:
root = stack.pop()
root.set_parent(parent)
return root, j, error
def base_parse(tokens):
base = ast.CompoundExpression(None)
i = 0
error = ""
while i < len(tokens):
base, i, error = top_expression_parse(i, tokens, base)
if error != "":
print(error)
return None, i, error
i += 1
return base, i, error
def func_parse(i, tokens, parent=None):
func = ast.FunctionDefAST(parent)
error = ""
while tokens[i].type != 'END':
if tokens[i].type == 'FUNC':
i += 1
continue
elif tokens[i].type == 'ID':
obj = parent.get_children(tokens[i].value)
if obj is not None:
error = "Переменная с именем " + tokens[i].value + " уже объявлена."
print(error)
return None, i, error
parent.add_child(tokens[i].value, func)
func.set_name(tokens[i].value)
i += 1
elif tokens[i].type == 'LPAREN':
i += 1
while tokens[i].type != 'RPAREN':
if tokens[i].type == 'ID':
a = parent.get_children(tokens[i].value)
if a is not None:
error = "Переменная с именем " + tokens[i].value + " уже объявлена во внешней области видимости."
print(error)
return None, i, error
a = ast.VarDecAST(func)
a.set_name(tokens[i].value)
func.add_arg(a)
i += 1
if utils.is_type(tokens[i].type):
a.set_type(tokens[i].type)
else:
error = "Не указан тип у переменной с именем " + tokens[i].value + "."
print(error)
return None, i, error
i += 1
i += 1
continue
elif utils.is_type(tokens[i].type):
func.set_type(tokens[i].type)
i += 1
continue
elif tokens[i].type == 'COLON':
if func.type is None:
error = "Не указан возвращаемый тип у функции с именем " + func.name + "."
print(error)
return None, i, error
i += 1
while tokens[i].type != 'END':
_, i, error = compound_expression_parse(i, tokens, func)
i += 1
if error != "":
print(error)
return None, i, error
return func, i, error
def proc_parse(i, tokens, parent=None):
proc = ast.ProcedureDefAST(parent)
error = ""
while tokens[i].type != 'END':
if tokens[i].type == 'PROC':
i += 1
continue
elif tokens[i].type == 'ID':
obj = parent.get_children(tokens[i].value)
if obj is not None:
error = "Переменная с именем " + tokens[i].value + " уже объявлена."
print(error)
return None, i, error
parent.add_child(tokens[i].value, proc)
proc.set_name(tokens[i].value)
i += 1
elif tokens[i].type == 'LPAREN':
i += 1
while tokens[i].type != 'RPAREN':
if tokens[i].type == 'ID':
a = parent.get_children(tokens[i].value)
if a is not None:
error = "Переменная с именем " + tokens[i].value + " уже объявлена во внешней области видимости."
print(error)
return None, i, error
a = ast.VarDecAST(proc)
a.set_name(tokens[i].value)
proc.add_arg(a)
i += 1
if utils.is_type(tokens[i].type):
a.set_type(tokens[i].type)
else:
error = "Не указан тип у переменной с именем " + tokens[i].value + "."
print(error)
return None, i, error
i += 1
i += 1
continue
elif tokens[i].type == 'COLON':
i += 1
while tokens[i].type != 'END':
_, i, error = compound_expression_parse(i, tokens, proc)
i += 1
if error != "":
print(error)
return None, i, error
return proc, i, error
def compound_expression_parse(i, tokens, compound_expression):
obj, i, error = parse(i, tokens, parent=compound_expression)
if error != "":
print(error)
return obj, i, error
compound_expression.set_child(obj)
return compound_expression, i, error
def top_expression_parse(i, tokens, compound_expression):
obj, i, error = top_parse(i, tokens, parent=compound_expression)
if error != "":
print(error)
return obj, i, error
compound_expression.set_child(obj)
return compound_expression, i, error
def parse(i, tokens, parent=None):
obj = None
error = ""
if tokens[i].type == 'VAR':
obj, i, error = var_parse(i, tokens, parent)
if obj is None:
print(error)
return None, i, error
elif tokens[i].type == 'SEMI':
i += 1
elif tokens[i].type == 'ID':
        if tokens[i + 1].type == 'LPAREN':  # an identifier followed by '(' is a function call
obj, i, error = func_call_parse(i, tokens, parent)
if obj is None:
print(error)
return None, i, error
elif tokens[i + 1].type == 'EQ':
assignment = ast.AssignmentAST(parent)
var_dec_obj = parent.get_children(tokens[i].value)
var_def_obj = ast.VarDefAST(parent)
var_def_obj.set_var_dec(var_dec_obj)
assignment.set_lval(var_def_obj)
obj, i, error = bin_op_parse(i + 2, tokens, parent)
if obj is None:
print(error)
return None, i, error
assignment.set_rval(obj)
obj = assignment
else:
obj, i, error = proc_call_parse(i, tokens, parent)
if obj is None:
print(error)
return None, i, error
elif tokens[i].type == 'IF':
obj, i, error = expr_if_parse(i, tokens, parent)
elif tokens[i].type == 'WHILE':
obj, i, error = expr_while_parse(i, tokens, parent)
elif tokens[i].type == 'DO':
obj, i, error = expr_do_while_parse(i, tokens, parent)
elif tokens[i].type == 'RETURN':
i += 1
obj, i, error = bin_op_parse(i, tokens, parent)
if obj is None:
print(error)
return None, i, error
if isinstance(parent, ast.FunctionDefAST):
if obj.get_type() == parent.type:
parent.set_return_value(obj)
ret_obj = ast.ReturnAst(parent)
ret_obj.set_value(obj)
if tokens[i].value != ';':
i += 1
return ret_obj, i, error
else:
error = "Ожидается возвращаемый тип " + str(parent.type) + " актуальный тип - " + str(obj.get_type())
print(error)
return None, i, error
else:
error = "Недопустимая конструкция: return в " + type(parent)
print(error)
return None, i, error
return obj, i, error
def top_parse(i, tokens, parent=None):
obj = None
error = ""
if tokens[i].type == 'VAR':
obj, i, error = var_parse(i, tokens, parent)
if obj is None:
print(error)
return None, i, error
elif tokens[i].type == 'SEMI':
i += 1
elif tokens[i].type == 'FUNC':
obj, i, error = func_parse(i, tokens, parent)
if error != "":
print(error)
return None, i, error
elif tokens[i].type == 'PROC':
obj, i, error = proc_parse(i, tokens, parent)
if error != "":
print(error)
return None, i, error
return obj, i, error
def expr_if_parse(i, tokens, parent=None):
error = ""
if tokens[i].type == 'IF':
if_ast = ast.ExprIfAST(parent=parent)
orig_if_ast = if_ast
i += 1
if tokens[i].type == 'ID' or tokens[i].type == 'INT_LIT' or tokens[i].type == 'FLOAT_LIT':
obj, i, error = bin_op_parse(i, tokens, if_ast)
if_ast.set_expression(obj)
else:
error = "Ожидается выражение"
print(error)
return None, i, error
if tokens[i].type == 'COLON':
i += 1
then_body = ast.CompoundExpression(parent=if_ast)
while tokens[i].type not in ('ELIF','ELSE','END'):
then_body, i, error = compound_expression_parse(i, tokens, then_body)
i += 1
if error != "":
print(error)
return None, i, error
if_ast.set_then(then_body)
while tokens[i].type not in ('ELSE', 'END'):
if tokens[i].type == 'ELIF':
i += 1
else_body = ast.CompoundExpression(if_ast)
if_ast.set_else(else_body)
if_ast = ast.ExprIfAST(else_body)
else_body.set_child(if_ast)
if tokens[i].type == 'ID' or tokens[i].type == 'INT_LIT' or tokens[i].type == 'FLOAT_LIT':
obj, i, error = bin_op_parse(i, tokens, if_ast)
if_ast.set_expression(obj)
else:
error = "Ожидается выражение"
print(error)
return None, i, error
if tokens[i].type == 'COLON':
i += 1
then_body = ast.CompoundExpression(parent=if_ast)
while tokens[i].type not in ('ELIF','ELSE','END'):
then_body, i, error = compound_expression_parse(i, tokens, then_body)
i += 1
if error != "":
print(error)
return None, i, error
if_ast.set_then(then_body)
else:
error = "Ожидается двоеточие"
print(error)
return None, i, error
if tokens[i].type == 'ELSE':
i += 1
if tokens[i].type == 'COLON':
i += 1
else_body = ast.CompoundExpression(parent=if_ast)
while tokens[i].type != 'END':
else_body, i, error = compound_expression_parse(i, tokens, else_body)
i += 1
if error != "":
print(error)
return None, i, error
if_ast.set_else(else_body)
else:
error = "Ожидается двоеточие"
print(error)
return None, i, error
return orig_if_ast, i, error
def expr_while_parse(i, tokens, parent=None):
while_expr = None
error = ""
while tokens[i].type != 'END':
if tokens[i].type == 'WHILE':
while_expr = ast.ExprWhileAST(parent)
i += 1
continue
elif (tokens[i].type == 'ID') or (tokens[i].type == 'INT_LIT') or (tokens[i].type == 'FLOAT_LIT'):
expr = ast.BinaryAST(while_expr)
expr, i, error = bin_op_parse(i, tokens, expr)
if error != "":
print(error)
return None, i, error
while_expr.set_expression(expr)
elif tokens[i].type == 'COLON':
i += 1
compound_expression = ast.CompoundExpression(parent=while_expr)
while tokens[i].type != 'END':
compound_expression, i, error = compound_expression_parse(i, tokens, compound_expression)
i += 1
if error != "":
print(error)
return None, i, error
# i += 1
while_expr.set_body(compound_expression)
break
return while_expr, i, error
def expr_do_while_parse(i, tokens, parent=None):
error = ""
expr_do = None
while tokens[i].type != 'SEMI':
if tokens[i].type == 'DO':
expr_do = ast.ExprDoWhileAST(parent)
compound_expression = ast.CompoundExpression(parent=expr_do)
i += 1
continue
else:
while tokens[i].type != 'WHILE':
compound_expression, i, error = compound_expression_parse(i, tokens, compound_expression)
i += 1
if error != "":
print(error)
return None, i, error
if tokens[i].type == 'WHILE':
j = i
while tokens[j].type not in ['SEMI', 'COLON']:
j += 1
if tokens[j].type == 'COLON':
compound_expression, i, error = compound_expression_parse(i, tokens, compound_expression)
continue
elif tokens[j].type == 'SEMI':
expr = ast.BinaryAST(expr_do)
expr, i, error = bin_op_parse(i + 1, tokens, expr)
if error != "":
print(error)
return None, i, error
expr_do.set_body(compound_expression)
expr_do.set_expression(expr)
break
return expr_do, i, error
def print_result(root, i=0):
print(' ' * i, type(root).__name__)
if root is None:
return
elif isinstance(root, (ast.ExprWhileAST, ast.ExprDoWhileAST)):
print_result(root.expression, i + 1)
print_result(root.body, i + 1)
elif isinstance(root, ast.ExprIfAST):
print_result(root.expression, i + 1)
print_result(root.then_body, i + 1)
print_result(root.else_body, i + 1)
elif isinstance(root, ast.BinaryAST):
print(' ' * i, root.operator)
print_result(root.lhs, i + 1)
print_result(root.rhs, i + 1)
elif isinstance(root, ast.AssignmentAST):
print_result(root.lval, i + 1)
print_result(root.rval, i + 1)
elif isinstance(root, ast.VarDecAST):
print(' ' * i, root.name, root.type)
elif isinstance(root, ast.VarDefAST):
print(' ' * i, root.var_dec.name)
elif isinstance(root, (ast.IntLiteralAST, ast.FloatLiteralAST, ast.CharLiteralAST)):
print(' ' * i, root.value)
elif isinstance(root, ast.FunctionCallAST):
print(' ' * i, root.func_callee.name, root.args)
elif isinstance(root, ast.ProcedureCallAST):
print(' ' * i, root.proc_callee.name, root.args)
elif isinstance(root, ast.CompoundExpression):
if isinstance(root, (ast.FunctionDefAST, ast.ProcedureDefAST)):
print(' ' * i, root.name)
for op in root.order_operations:
print_result(op, i + 1)
if __name__ == '__main__':
lexer = Lexer()
with open("examples\\correct\\1.txt", 'r', encoding='utf-8') as f:
code = f.read()
tokens = lexer.lex(code)
root, i, errors = base_parse(tokens)
print_result(root)
binding.initialize()
binding.initialize_all_targets()
binding.initialize_all_asmprinters()
triple = binding.get_default_triple() # 'mips-PC-Linux-GNU'
module = ir.Module('module')
module.triple = triple
target = binding.Target.from_triple(triple)
target_machine = target.create_target_machine()
root.code_gen(module)
llvm_ir = str(module)
mod = binding.parse_assembly(llvm_ir)
mod.verify()
pass_builder = binding.create_pass_manager_builder()
mod_pass = binding.create_module_pass_manager()
# pass_builder.opt_level = 2
# pass_builder.populate(mod_pass)
mod_pass.add_constant_merge_pass()
mod_pass.add_dead_arg_elimination_pass()
mod_pass.add_function_inlining_pass(225)
mod_pass.add_global_dce_pass()
mod_pass.add_global_optimizer_pass()
mod_pass.add_ipsccp_pass()
mod_pass.add_dead_code_elimination_pass()
mod_pass.add_cfg_simplification_pass()
mod_pass.add_gvn_pass()
mod_pass.add_instruction_combining_pass()
mod_pass.add_licm_pass()
mod_pass.add_sccp_pass()
mod_pass.add_type_based_alias_analysis_pass()
mod_pass.add_basic_alias_analysis_pass()
print(mod_pass.run(mod))
print(str(mod))
asm = target_machine.emit_assembly(mod)
print(asm)
with open("examples\\correct\\1.s", 'w') as asm_file:
asm_file.write(asm)
with open("examples\\correct\\1.elf", 'wb') as obj_file:
obj_file.write(target_machine.emit_object(mod))
``` |
{
"source": "4144414D/zinky",
"score": 2
} |
#### File: 4144414D/zinky/zinky.py
```python
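# The usage docstring below is a reconstruction (it appears to be missing from
# this copy of the file); the option names are taken from the docopt lookups at
# the bottom of the script, so the exact wording is an assumption.
"""zinky - inspect timestamps and zip metadata of Office Open XML documents.

Usage:
  zinky.py [--not-safe] [--track-changes] <files>...
  zinky.py --version

Options:
  --not-safe       Also pretty-print the raw docProps/core.xml content.
  --track-changes  List track-change timestamps found inside each document.
"""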
VERSION="BETA 0.0.1"
from docopt import docopt
import zipfile
import re
import os
import xml.dom.minidom as xml
import time
from datetime import datetime
def find_track_changes(raw_data):
data = ""
for line in raw_data:
data = data + line
track_changes = re.findall('date="(.*?)"', data)
return track_changes
def read_core_docProps(docprops,zipinfo,not_safe):
data = ""
for line in docprops:
data = data + line
dates = re.findall('<dcterms:(.*?) xsi:type="dcterms:W3CDTF">(.*?)</', data)
print "DATES FOUND"
for date in dates:
print str(date[0]) + ":",
print date[1]
print
print "ZIP DETAILS"
print_details(zipinfo)
if not_safe:
print "\nXML"
print xml.parseString(data).toprettyxml()
def find_content_xml(path):
f = open(path,'rb')
f.seek(30)
data = f.read(19)
print "[Content_Types].xml POSITION"
print "---------------------------------------------"
if data == "[Content_Types].xml":
print "It looks to be in the right place"
else:
print "It isn't found at offset 0x1D... this is hinky!"
print
f.close()
def test_document(path,safe,track_changes):
if os.path.isfile(os.path.abspath(path)):
print "============================================="
print path
print
find_content_xml(path)
with zipfile.ZipFile(path, 'r') as zip:
#list all details
all_file_info = zip.infolist()
if track_changes:
print "TRACK CHANGES"
print "---------------------------------------------"
for file in all_file_info:
results = find_track_changes(zip.open(file.filename))
if results:
print file.filename
for item in results:
print item
print
print "docProps/core.xml"
print "---------------------------------------------"
try:
read_core_docProps(zip.open('docProps/core.xml'),zip.getinfo('docProps/core.xml'),safe)
except KeyError:
print "ERROR! Cannot find docProps/core.xml. We would expect to see this is a post 2007 office document"
for file in all_file_info:
if file.filename != 'docProps/core.xml':
print file.filename
print "---------------------------------------------"
print_details(file)
print
else:
print "WARNING!",
print path,
print "does not exist"
def format_zip_date(date):
return time.strftime("%Y-%m-%dT%H:%M:%S", date + (0, 0, 0,))#convert zip date to something human
def print_details(zipinfo):
print 'date_time: ' + format_zip_date(zipinfo.date_time)
print 'compress_type: ' + str(zipinfo.compress_type)
print 'comment: ' + str(zipinfo.comment)
print 'extra: ' + str(zipinfo.extra)
print 'create_system: ' + str(zipinfo.create_system)
print 'create_version: ' + str(zipinfo.create_version)
print 'extract_version: ' + str(zipinfo.extract_version)
print 'reserved: ' + str(zipinfo.reserved)
print 'flag_bits: ' + str(zipinfo.flag_bits)
print 'volume: ' + str(zipinfo.volume)
print 'internal_attr: ' + str(zipinfo.internal_attr)
print 'external_attr: ' + str(zipinfo.external_attr)
print 'header_offset: ' + str(zipinfo.header_offset)
print 'CRC: ' + str(zipinfo.CRC)
print 'compress_size: ' + str(zipinfo.compress_size)
print 'file_size: ' + str(zipinfo.file_size)
if __name__ == '__main__':
arguments = docopt(__doc__, version=VERSION)
for file in arguments['<files>']:
test_document(file,arguments['--not-safe'],arguments['--track-changes'])
``` |
{
"source": "4144/rich",
"score": 2
} |
#### File: rich/rich/style.py
```python
from functools import lru_cache
import sys
from typing import Any, Dict, Iterable, List, Mapping, Optional, Type, Union
from . import errors
from .color import blend_rgb, Color, ColorParseError, ColorSystem
from .terminal_theme import TerminalTheme, DEFAULT_TERMINAL_THEME
class _Bit:
"""A descriptor to get/set a style attribute bit."""
def __init__(self, bit_no: int) -> None:
self.bit = 1 << bit_no
def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]:
if obj._set_attributes & self.bit:
return obj._attributes & self.bit != 0
return None
class Style:
"""A terminal style."""
_color: Optional[Color]
_bgcolor: Optional[Color]
_attributes: int
_set_attributes: int
def __init__(
self,
*,
color: Union[Color, str] = None,
bgcolor: Union[Color, str] = None,
bold: bool = None,
dim: bool = None,
italic: bool = None,
underline: bool = None,
blink: bool = None,
blink2: bool = None,
reverse: bool = None,
conceal: bool = None,
strike: bool = None,
):
def _make_color(color: Union[Color, str]) -> Color:
return color if isinstance(color, Color) else Color.parse(color)
self._color = None if color is None else _make_color(color)
self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
self._attributes = (
(bold or 0)
| (dim or 0) << 1
| (italic or 0) << 2
| (underline or 0) << 3
| (blink or 0) << 4
| (blink2 or 0) << 5
| (reverse or 0) << 6
| (conceal or 0) << 7
| (strike or 0) << 8
)
self._set_attributes = (
(bold is not None)
| (dim is not None) << 1
| (italic is not None) << 2
| (underline is not None) << 3
| (blink is not None) << 4
| (blink2 is not None) << 5
| (reverse is not None) << 6
| (conceal is not None) << 7
| (strike is not None) << 8
)
bold = _Bit(0)
dim = _Bit(1)
italic = _Bit(2)
underline = _Bit(3)
blink = _Bit(4)
blink2 = _Bit(5)
reverse = _Bit(6)
conceal = _Bit(7)
strike = _Bit(8)
def __str__(self) -> str:
"""Re-generate style definition from attributes."""
attributes: List[str] = []
append = attributes.append
if self.bold is not None:
append("bold" if self.bold else "not bold")
if self.dim is not None:
append("dim" if self.dim else "not dim")
if self.italic is not None:
append("italic" if self.italic else "not italic")
if self.underline is not None:
append("underline" if self.underline else "not underline")
if self.blink is not None:
append("blink" if self.blink else "not blink")
if self.blink2 is not None:
append("blink2" if self.blink2 else "not blink2")
if self.reverse is not None:
append("reverse" if self.reverse else "not reverse")
if self.conceal is not None:
append("conceal" if self.conceal else "not conceal")
if self.strike is not None:
append("strike" if self.strike else "not strike")
if self._color is not None:
append(self._color.name)
if self._bgcolor is not None:
append("on")
append(self._bgcolor.name)
return " ".join(attributes) or "none"
@classmethod
@lru_cache(maxsize=1000)
def normalize(cls, style: str) -> str:
"""Normalize a style definition so that styles with the same effect have the same string
representation.
Args:
style (str): A style definition.
Returns:
str: Normal form of style definition.
"""
try:
return str(cls.parse(style))
except errors.StyleSyntaxError:
return style.strip().lower()
@classmethod
def pick_first(cls, *values: Optional[Union["Style", str]]) -> Union["Style", str]:
"""Pick first non-None style."""
for value in values:
if value is not None:
return value
raise ValueError("expected at least one non-None style")
def __repr__(self) -> str:
"""Render a named style differently from an anonymous style."""
return f'<style "{self}">'
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Style):
return NotImplemented
return (
self._color == other._color
and self._bgcolor == other._bgcolor
and self._set_attributes == other._set_attributes
and self._attributes == other._attributes
)
def __hash__(self) -> int:
return hash(
(self._color, self._bgcolor, self._attributes, self._set_attributes,)
)
@property
def color(self) -> Optional[Color]:
"""Get the style foreground color or None if it is not set."""
return self._color
@property
def bgcolor(self) -> Optional[Color]:
"""Get the style background color or None if it is not set."""
return self._bgcolor
@classmethod
@lru_cache(maxsize=1000)
def parse(cls, style_definition: str) -> "Style":
"""Parse style name(s) in to style object."""
style_attributes = {
"dim",
"bold",
"italic",
"underline",
"blink",
"blink2",
"reverse",
"conceal",
"strike",
}
color: Optional[str] = None
bgcolor: Optional[str] = None
attributes: Dict[str, Optional[bool]] = {}
words = iter(style_definition.split())
for original_word in words:
word = original_word.lower()
if word == "on":
word = next(words, "")
if not word:
raise errors.StyleSyntaxError("color expected after 'on'")
try:
                    Color.parse(word)  # validate; raises ColorParseError if the color is invalid
except ColorParseError as error:
raise errors.StyleSyntaxError(
f"unable to parse {word} in {style_definition!r}; {error}"
)
bgcolor = word
elif word == "not":
word = next(words, "")
if word not in style_attributes:
raise errors.StyleSyntaxError(
f"expected style attribute after 'not', found {original_word!r}"
)
attributes[word] = False
elif word in style_attributes:
attributes[word] = True
else:
try:
Color.parse(word)
except ColorParseError as error:
raise errors.StyleSyntaxError(
f"unable to parse {word!r} in style {style_definition!r}; {error}"
)
color = word
style = Style(color=color, bgcolor=bgcolor, **attributes)
return style
@lru_cache(maxsize=1000)
def get_html_style(self, theme: TerminalTheme = None) -> str:
"""Get a CSS style rule."""
theme = theme or DEFAULT_TERMINAL_THEME
css: List[str] = []
append = css.append
color = self.color
bgcolor = self.bgcolor
if self.reverse:
color, bgcolor = bgcolor, color
if self.dim:
foreground_color = (
theme.foreground_color if color is None else color.get_truecolor(theme)
)
color = Color.from_triplet(
blend_rgb(foreground_color, theme.background_color, 0.5)
)
if color is not None:
theme_color = color.get_truecolor(theme)
append(f"color: {theme_color.hex}")
if bgcolor is not None:
theme_color = bgcolor.get_truecolor(theme, foreground=False)
append(f"background-color: {theme_color.hex}")
if self.bold:
append("font-weight: bold")
if self.italic:
append("font-style: italic")
if self.underline:
append("text-decoration: underline")
if self.strike:
append("text-decoration: line-through")
return "; ".join(css)
@classmethod
    def combine(cls, styles: Iterable["Style"]) -> "Style":
"""Combine styles and get result.
Args:
styles (Iterable[Style]): Styles to combine.
Returns:
Style: A new style instance.
"""
style = Style()
update = style._update
for _style in styles:
update(_style)
return style
@classmethod
    def chain(cls, *styles: "Style") -> "Style":
        """Combine styles from positional arguments into a single style.
Args:
styles (Iterable[Style]): Styles to combine.
Returns:
Style: A new style instance.
"""
style = Style()
update = style._update
for _style in styles:
update(_style)
return style
def copy(self) -> "Style":
"""Get a copy of this style.
Returns:
Style: A new Style instance with identical attributes.
"""
style = self.__new__(Style)
style.__dict__ = self.__dict__.copy()
return style
def render(
self,
text: str = "",
*,
color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
reset=False,
) -> str:
"""Render the ANSI codes to implement the style."""
if color_system is None:
return text
attrs: List[str] = []
if self._color is not None:
attrs.extend(self._color.downgrade(color_system).get_ansi_codes())
if self._bgcolor is not None:
attrs.extend(
self._bgcolor.downgrade(color_system).get_ansi_codes(foreground=False)
)
set_bits = self._set_attributes
if set_bits:
append = attrs.append
bits = self._attributes
for bit_no in range(0, 9):
bit = 1 << bit_no
if set_bits & bit:
append(str(1 + bit_no) if bits & bit else str(21 + bit_no))
reset = "\x1b[0m" if reset else ""
if attrs:
return f"\x1b[{';'.join(attrs)}m{text or ''}{reset}"
else:
return f"{text or ''}{reset}"
def test(self, text: Optional[str] = None) -> None:
"""Write test text with style to terminal.
Args:
text (Optional[str], optional): Text to style or None for style name.
Returns:
None:
"""
text = text or str(self)
sys.stdout.write(f"{self.render(text)}\x1b[0m\n")
def _apply(self, style: "Style") -> "Style":
"""Merge this style with another.
Args:
style (Optional[Style]): A style object to copy attributes from.
If `style` is `None`, then a copy of this style will be returned.
Returns:
(Style): A new style with combined attributes.
"""
new_style = self.__new__(Style)
new_style.__dict__ = {
"_color": style._color or self._color,
"_bgcolor": style._bgcolor or self._bgcolor,
"_attributes": (
(self._attributes & ~style._set_attributes)
| (style._attributes & style._set_attributes)
),
"_set_attributes": self._set_attributes | style._set_attributes,
}
return new_style
def _update(self, style: "Style") -> None:
"""Update this style with another.
Args:
style (Style): Style to combine to this instance.
"""
self._color = style._color or self._color
self._bgcolor = style._bgcolor or self._bgcolor
self._attributes = (self._attributes & ~style._set_attributes) | (
style._attributes & style._set_attributes
)
self._set_attributes = self._set_attributes | style._set_attributes
def __add__(self, style: Optional["Style"]) -> "Style":
if style is None:
return self
if not isinstance(style, Style):
return NotImplemented # type: ignore
return self._apply(style)
def __iadd__(self, style: Optional["Style"]) -> "Style":
if style is None:
return self
if not isinstance(style, Style):
return NotImplemented # type: ignore
self._update(style)
return self
class StyleStack:
"""A stack of styles that maintains a current style."""
def __init__(self, default_style: "Style") -> None:
self._stack: List[Style] = [default_style]
self.current = default_style
def __repr__(self) -> str:
return f"<stylestack {self._stack!r}>"
def push(self, style: Style) -> None:
"""Push a new style on to the stack.
Args:
style (Style): New style to combine with current style.
"""
self.current = self.current + style
self._stack.append(self.current)
def pop(self) -> Style:
"""Pop last style and discard.
Returns:
Style: New current style (also available as stack.current)
"""
self._stack.pop()
self.current = self._stack[-1]
return self.current
if __name__ == "__main__": # pragma: no cover
import sys
# style = Style(color="blue", bold=True, italic=True, reverse=False, dim=True)
from .console import Console
c = Console()
style = Style.parse("bold not italic #6ab825")
print(bin(style._attributes), bin(style._set_attributes))
print(repr(style.bold))
print(repr(style.italic))
print(style.render("hello", reset=True))
c.print("hello", style=style)
print(Style.parse("dim cyan").render("COLOR", reset=True))
print(Style.parse("dim cyan+").render("COLOR", reset=True))
print(Style.parse("cyan").render("COLOR", reset=True))
print(Style.parse("cyan+").render("COLOR", reset=True))
print(Style.parse("bold blue on magenta+ red").render("COLOR", reset=True))
print(Style.parse("bold blue on magenta+ red").get_html_style())
# style.italic = True
# print(style._attributes, style._set_attributes)
# print(style.italic)
# print(style.bold)
# # style = Style.parse("bold")
# # print(style)
# # print(repr(style))
# # style.test()
# style = Style.parse("bold on black")
# print(style.bold)
# print(style)
# print(repr(style))
```
#### File: rich/rich/theme.py
```python
import configparser
from typing import Dict, IO
from .style import Style
class Theme:
def __init__(self, styles: Dict[str, Style] = None):
self.styles = styles or {}
@property
def config(self) -> str:
"""Get contents of a config file for this theme."""
config_lines = ["[styles]"]
append = config_lines.append
for name, style in sorted(self.styles.items()):
append(f"{name} = {style}")
config = "\n".join(config_lines)
return config
@classmethod
def from_file(cls, config_file: IO[str], source: str = None) -> "Theme":
config = configparser.ConfigParser()
config.read_file(config_file, source=source)
styles = {name: Style.parse(value) for name, value in config.items("styles")}
theme = Theme(styles)
return theme
@classmethod
def read(cls, path: str) -> "Theme":
with open(path, "rt") as config_file:
return cls.from_file(config_file, source=path)
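if __name__ == "__main__":  # pragma: no cover
    # Usage sketch (illustrative only; the style names below are made up).
    example = Theme({"info": Style.parse("dim cyan"), "warning": Style.parse("bold magenta")})
    print(example.config)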
``` |
{
"source": "4144/s3viewer",
"score": 2
} |
#### File: src/providers/httpindex_provider.py
```python
import sys
import os
import re
import time
import argparse
import requests
import collections
import urllib.parse
import bs4
from utils import show_message_box
from consts import HTTP_MAX_RECURSE_LEVEL, USER_AGENT
from providers.base_provider import StorageProvider
##################################################################################
### Most of the code here is from https://github.com/gumblex/htmllisting-parser ##
##################################################################################
HEADERS = {"User-Agent": USER_AGENT}
RE_ISO8601 = re.compile(r'\d{4}-\d+-\d+T\d+:\d{2}:\d{2}Z')
DATETIME_FMTs = (
(re.compile(r'\d+-[A-S][a-y]{2}-\d{4} \d+:\d{2}:\d{2}'), "%d-%b-%Y %H:%M:%S"),
(re.compile(r'\d+-[A-S][a-y]{2}-\d{4} \d+:\d{2}'), "%d-%b-%Y %H:%M"),
(re.compile(r'\d{4}-\d+-\d+ \d+:\d{2}:\d{2}'), "%Y-%m-%d %H:%M:%S"),
(RE_ISO8601, "%Y-%m-%dT%H:%M:%SZ"),
(re.compile(r'\d{4}-\d+-\d+ \d+:\d{2}'), "%Y-%m-%d %H:%M"),
(re.compile(r'\d{4}-[A-S][a-y]{2}-\d+ \d+:\d{2}:\d{2}'), "%Y-%b-%d %H:%M:%S"),
(re.compile(r'\d{4}-[A-S][a-y]{2}-\d+ \d+:\d{2}'), "%Y-%b-%d %H:%M"),
(re.compile(r'[F-W][a-u]{2} [A-S][a-y]{2} +\d+ \d{2}:\d{2}:\d{2} \d{4}'), "%a %b %d %H:%M:%S %Y"),
(re.compile(r'[F-W][a-u]{2}, \d+ [A-S][a-y]{2} \d{4} \d{2}:\d{2}:\d{2} .+'), "%a, %d %b %Y %H:%M:%S %Z"),
(re.compile(r'\d{4}-\d+-\d+'), "%Y-%m-%d"),
(re.compile(r'\d+/\d+/\d{4} \d{2}:\d{2}:\d{2} [+-]\d{4}'), "%d/%m/%Y %H:%M:%S %z"),
(re.compile(r'\d{2} [A-S][a-y]{2} \d{4}'), "%d %b %Y")
)
RE_FILESIZE = re.compile(r'\d+(\.\d+)? ?[BKMGTPEZY]|\d+|-', re.I)
RE_ABSPATH = re.compile(r'^((ht|f)tps?:/)?/')
RE_COMMONHEAD = re.compile('Name|(Last )?modifi(ed|cation)|date|Size|Description|Metadata|Type|Parent Directory', re.I)
RE_HASTEXT = re.compile('.+')
RE_HEAD_NAME = re.compile('name$|^file|^download')
RE_HEAD_MOD = re.compile('modifi|^uploaded|date|time')
RE_HEAD_SIZE = re.compile('size|bytes$')
FileEntry = collections.namedtuple('FileEntry', 'name modified size description')
def human2bytes(s):
"""
>>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824
"""
if s is None:
return None
try:
return int(s)
except ValueError:
symbols = 'BKMGTPEZY'
letter = s[-1:].strip().upper()
num = float(s[:-1])
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i+1)*10
return int(num * prefix[letter])
def aherf2filename(a_href):
isdir = ('/' if a_href[-1] == '/' else '')
return os.path.basename(urllib.parse.unquote(a_href.rstrip('/'))) + isdir
def parse(soup):
'''
Try to parse apache/nginx-style directory listing with all kinds of tricks.
    Exceptions or an empty listing suggest a failure.
We strongly recommend generating the `soup` with 'html5lib'.
Returns: Current directory, Directory listing
'''
cwd = None
listing = []
if soup.title and soup.title.string and soup.title.string.startswith('Index of '):
cwd = soup.title.string[9:]
elif soup.h1:
title = soup.h1.get_text().strip()
if title.startswith('Index of '):
cwd = title[9:]
[img.decompose() for img in soup.find_all('img')]
file_name = file_mod = file_size = file_desc = None
pres = [x for x in soup.find_all('pre') if
x.find('a', string=RE_HASTEXT)]
tables = [x for x in soup.find_all('table') if
x.find(string=RE_COMMONHEAD)] if not pres else ()
heads = []
if pres:
pre = pres[0]
started = False
for element in (pre.hr.next_siblings if pre.hr else pre.children):
if element.name == 'a':
if not element.string or not element.string.strip():
continue
elif started:
if file_name:
listing.append(FileEntry(
file_name, file_mod, file_size, file_desc))
file_name = aherf2filename(element['href'])
file_mod = file_size = file_desc = None
elif (element.string in ('Parent Directory', '..', '../') or
element['href'][0] not in '?/'):
started = True
elif not element.name:
line = element.string.replace('\r', '').split('\n', 1)[0].lstrip()
for regex, fmt in DATETIME_FMTs:
match = regex.match(line)
if match:
file_mod = time.strptime(match.group(0), fmt)
line = line[match.end():].lstrip()
break
match = RE_FILESIZE.match(line)
if match:
sizestr = match.group(0)
if sizestr == '-':
file_size = None
else:
file_size = human2bytes(sizestr.replace(' ', '').replace(',', ''))
line = line[match.end():].lstrip()
if line:
file_desc = line.rstrip()
if file_name and file_desc == '/':
file_name += '/'
file_desc = None
else:
continue
if file_name:
listing.append(FileEntry(file_name, file_mod, file_size, file_desc))
elif tables:
started = False
for tr in tables[0].find_all('tr'):
status = 0
file_name = file_mod = file_size = file_desc = None
if started:
if tr.parent.name in ('thead', 'tfoot') or tr.th:
continue
for td in tr.find_all('td'):
if status >= len(heads):
raise AssertionError("can't detect table column number")
if td.get('colspan'):
continue
elif heads[status] == 'name':
if not td.a:
continue
a_str = td.a.get_text().strip()
a_href = td.a['href']
if not a_str or not a_href or a_href[0] == '#':
continue
elif a_str == 'Parent Directory' or a_href == '../':
break
else:
file_name = aherf2filename(a_href)
status = 1
elif heads[status] == 'modified':
if td.time:
timestr = td.time.get('datetime', '')
if RE_ISO8601.match(timestr):
file_mod = time.strptime(timestr, "%Y-%m-%dT%H:%M:%SZ")
status += 1
continue
timestr = td.get_text().strip()
if timestr:
for regex, fmt in DATETIME_FMTs:
if regex.match(timestr):
file_mod = time.strptime(timestr, fmt)
break
else:
if td.get('data-sort-value'):
file_mod = time.gmtime(int(td['data-sort-value']))
# else:
# raise AssertionError(
# "can't identify date/time format")
status += 1
elif heads[status] == 'size':
sizestr = td.get_text().strip().replace(',', '')
if sizestr == '-' or not sizestr:
file_size = None
elif td.get('data-sort-value'):
file_size = int(td['data-sort-value'])
else:
match = RE_FILESIZE.match(sizestr)
if match:
file_size = human2bytes(
match.group(0).replace(' ', ''))
else:
file_size = None
status += 1
elif heads[status] == 'description':
file_desc = file_desc or ''.join(map(str, td.children)
).strip(' \t\n\r\x0b\x0c\xa0') or None
status += 1
elif status:
# unknown header
status += 1
if file_name:
listing.append(FileEntry(
file_name, file_mod, file_size, file_desc))
elif tr.hr:
started = True
continue
elif tr.find(string=RE_COMMONHEAD):
namefound = False
colspan = False
for th in (tr.find_all('th') if tr.th else tr.find_all('td')):
if th.get('colspan'):
colspan = True
continue
name = th.get_text().strip(' \t\n\r\x0b\x0c\xa0↑↓').lower()
if not name:
continue
elif not namefound and RE_HEAD_NAME.search(name):
heads.append('name')
namefound = True
elif name in ('size', 'description'):
heads.append(name)
elif RE_HEAD_MOD.search(name):
heads.append('modified')
elif RE_HEAD_SIZE.search(name):
heads.append('size')
elif name.endswith('signature'):
heads.append('signature')
else:
heads.append('description')
if colspan:
continue
if not heads:
heads = ('name', 'modified', 'size', 'description')
elif not namefound:
heads[0] = 'name'
started = True
continue
elif soup.ul:
for li in soup.ul.find_all('li'):
a = li.a
if not a or not a.get('href'):
continue
file_name = urllib.parse.unquote(a['href'])
if (file_name in {'Parent Directory', '.', './', '..', '../', '#'}
or RE_ABSPATH.match(file_name)):
continue
else:
listing.append(FileEntry(file_name, None, None, None))
return cwd, listing
def fetch_listing(url, timeout=30):
req = requests.get(url, headers=HEADERS, timeout=timeout)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.content, 'html5lib')
return parse(soup)
def is_directory(entry):
return entry.description == "Directory" or (not entry.description and not entry.size)
def print_fetch_dir(url, max_recurse_level=HTTP_MAX_RECURSE_LEVEL, recurse_level=0):
if recurse_level == 0:
print(url)
print("-----------------------")
if recurse_level == max_recurse_level:
return
recurse_level += 1
cwd, listing = fetch_listing(url)
# Fix cwd to support inner starting point
# cwd shouldn't start with /, but it should end with one
if cwd:
cwd = cwd.strip("/") + "/"
else:
cwd = ""
for f in listing:
filename_print = cwd + f.name
if is_directory(f):
if not filename_print.endswith("/"):
filename_print = filename_print + "/"
date_format = time.strftime('%Y-%m-%d %H:%M:%S', f.modified)
size_format = f.size or "0"
print("{}{:>13} {}".format(date_format, size_format, filename_print))
if is_directory(f):
print_fetch_dir(url=url + f.name, max_recurse_level=max_recurse_level, recurse_level=recurse_level)
# BFS and DFS mixture - output the entire content of each directory encountered
def yield_fetch_dir(url, max_recurse_level=HTTP_MAX_RECURSE_LEVEL, recurse_level=0):
if recurse_level == max_recurse_level:
return
queue_process = []
recurse_level += 1
try:
cwd, listing = fetch_listing(url)
except Exception as e:
# Skip bad entries
return
# Fix cwd to support inner starting point
# cwd shouldn't start with /, but it should end with one
if cwd:
cwd = cwd.strip("/") + "/"
else:
cwd = ""
for f in listing:
filename_output = cwd + f.name
if is_directory(f):
if not filename_output.endswith("/"):
filename_output = filename_output + "/"
date_format = time.strftime('%Y-%m-%d %H:%M:%S', f.modified)
size_format = f.size or "0"
yield "{}{:>13} {}".format(date_format, size_format, filename_output) + os.linesep
queue_process.append(f)
for f in queue_process:
if is_directory(f):
yield from yield_fetch_dir(url=url + f.name, max_recurse_level=max_recurse_level, recurse_level=recurse_level)
class HTTPIndexStorageProvider(StorageProvider):
NODE_BATCH_UPDATE_COUNT = 100
@staticmethod
def is_provider(url):
url = url.lower()
scheme = urllib.parse.urlparse(url).scheme
if scheme and "http" in scheme:
return True
return False
def check(self):
try:
cwd, listing = fetch_listing(self.url)
return len(listing) > 0
except Exception as e:
show_message_box(self.get_default_error_message())
return False
def get_download_url(self, relative_path):
uri_obj = urllib.parse.urlparse(self.url)
return '{uri.scheme}://{uri.netloc}/{relative_path}'.format(uri=uri_obj, relative_path=relative_path)
def yield_dirlist(self):
url = self.url
if not self.url.endswith("/"):
url = self.url + "/"
for dirlist_line in yield_fetch_dir(url):
# Stop
if self.should_stop:
break
yield dirlist_line
def get_default_error_message(self):
return "Could not parse Apache/nginx-style directory listing. Are you sure it's a valid HTTP dir index?"
def hostname(self):
return urllib.parse.urlparse(self.url).netloc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DirLister IndexOf/')
parser.add_argument('-u', '--url', dest='url', help='URL')
parser.add_argument('--max_level', dest='max_level', type=int, default=HTTP_MAX_RECURSE_LEVEL, help='Max recurse level')
args = parser.parse_args()
print_fetch_dir(url=args.url, max_recurse_level=args.max_level)
``` |
{
"source": "415905716/MQBench",
"score": 3
} |
#### File: intrinsic/modules/fused.py
```python
from torch.nn.intrinsic import _FusedModule
from torch.nn import Linear, BatchNorm1d
class LinearBn1d(_FusedModule):
r"""This is a sequential container which calls the Linear and Batch Norm 1d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, bn):
assert type(linear) == Linear and type(bn) == BatchNorm1d, \
'Incorrect types for input modules{}{}'.format(
type(linear), type(bn))
super().__init__(linear, bn)
``` |
{
"source": "416104443/QUANTAXIS",
"score": 2
} |
#### File: QUANTAXIS/QAFetch/__init__.py
```python
from . import QAWind as QAWind
from . import QATushare as QATushare
from . import QATdx as QATdx
from . import QAThs as QAThs
#import QAFetch.QAGmsdk as QAGmsdk
#import QAFetch.QACrawlData as QACD
class QA_Fetcher():
"""
    A generic data-fetching helper class.
"""
def __init__(self, *args, **kwargs):
pass
@property
def security_list(self):
return self.security_list
def use(package):
if package in ['wind']:
from WindPy import w
# w.start()
return QAWind
elif package in ['tushare', 'ts']:
return QATushare
elif package in ['tdx', 'pytdx']:
return QATdx
elif package in ['ths', 'THS']:
return QAThs
def QA_fetch_get_stock_day(package, code, startDate, endDate, if_fq='01', level='day', type_='json'):
Engine = use(package)
if package in ['ths', 'THS', 'wind']:
return Engine.QA_fetch_get_stock_day(code, startDate, endDate, if_fq)
elif package in ['ts', 'tushare']:
return Engine.QA_fetch_get_stock_day(code, startDate, endDate, if_fq, type_)
elif package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_day(code, startDate, endDate, if_fq, level)
else:
return Engine.QA_fetch_get_stock_day(code, startDate, endDate)
def QA_fetch_get_stock_realtime(package, code):
Engine = use(package)
return Engine.QA_fetch_get_stock_realtime(code)
def QA_fetch_get_stock_indicator(package, code, startDate, endDate):
Engine = use(package)
return Engine.QA_fetch_get_stock_indicator(code, startDate, endDate)
def QA_fetch_get_trade_date(package, endDate, exchange):
Engine = use(package)
return Engine.QA_fetch_get_trade_date(endDate, exchange)
def QA_fetch_get_stock_min(package, code, start, end, level='1min'):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_min(code, start, end, level)
else:
return 'Unsupport packages'
def QA_fetch_get_stock_list(package, type_='stock'):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_list(type_)
else:
return 'Unsupport packages'
def QA_fetch_get_stock_transaction(package, code, start, end, retry=2):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_transaction(code, start, end, retry)
else:
return 'Unsupport packages'
def QA_fetch_get_stock_xdxr(package, code):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_xdxr(code)
else:
return 'Unsupport packages'
def QA_fetch_get_index_day(package, code, start, end, level='day'):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_index_day(code, start, end, level)
else:
return 'Unsupport packages'
def QA_fetch_get_index_min(package, code, start, end, level='1min'):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_index_min(code, start, end, level)
else:
return 'Unsupport packages'
def QA_fetch_get_stock_block(package):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_block()
else:
return 'Unsupport packages'
def QA_fetch_get_stock_info(package,code):
Engine = use(package)
if package in ['tdx', 'pytdx']:
return Engine.QA_fetch_get_stock_info(code)
else:
return 'Unsupport packages'
``` |
{
"source": "416e64726579/fast-example-github-dvwa-integration-jira",
"score": 2
} |
#### File: 416e64726579/fast-example-github-dvwa-integration-jira/create_ticket.py
```python
from jira import JIRA
import fnmatch
import os
def find_txt():
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*.txt'):
return file
jira = JIRA(basic_auth=(str(os.environ['LOGIN']), str(os.environ['PASSWORD'])), server=str(os.environ['SERVER']))
with open(f'./{find_txt()}', 'r') as report:
description = report.read()
new_issue = jira.create_issue(project='FAST', summary='Automatic report from FAST Security testing',
description=description, issuetype={'name': 'Task'}, labels=['FAST', 'WALLARM'])
``` |
{
"source": "418sec/BentoML",
"score": 2
} |
#### File: bentoml/marshal/utils.py
```python
import pickle
from functools import lru_cache
from typing import Sequence
from bentoml import config as bentoml_config
from bentoml.types import HTTPRequest, HTTPResponse
BATCH_REQUEST_HEADER = bentoml_config("apiserver").get("batch_request_header")
import io
import builtins
safe_builtins = {
'range',
'complex',
'set',
'frozenset',
'slice',
}
class RestrictedUnpickler(pickle.Unpickler):
def find_class(self, module, name):
"""Only allow safe classes from builtins"""
if module == "builtins" and name in safe_builtins:
return getattr(builtins, name)
"""Forbid everything else"""
raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
(module, name))
def restricted_loads(s):
"""Helper function analogous to pickle.loads()"""
return RestrictedUnpickler(io.BytesIO(s)).load()
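# Editor's illustration of the whitelist above (hedged sketch, not part of the original API):
# restricted_loads(pickle.dumps({1, 2, 3})) returns {1, 2, 3}, because 'set' is in
# safe_builtins, while unpickling a payload that references any other global (for
# example a function) raises pickle.UnpicklingError("global '...' is forbidden").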
class PlasmaDataLoader:
"""
Transfer datas with plasma store, in development now
"""
@classmethod
@lru_cache(maxsize=1)
def get_plasma(cls):
import pyarrow.plasma as plasma
import subprocess
object_store_size = 2 * 10 * 1000 * 1000
plasma_path = "/tmp/store"
subprocess.Popen(
["plasma_store", "-s", plasma_path, "-m", str(object_store_size)]
)
return plasma.connect(plasma_path)
@classmethod
def merge_requests(cls, reqs) -> bytes:
merged_reqs = tuple((b, h) for h, b in reqs)
oid = cls.get_plasma().put(merged_reqs)
return oid.binary()
@classmethod
def split_responses(cls, raw: bytes):
import pyarrow.plasma as plasma
oid = plasma.ObjectID(raw)
merged_responses = cls.get_plasma().get(oid)
cls.get_plasma().delete((oid,))
return merged_responses
@classmethod
def split_requests(cls, raw: bytes):
import pyarrow.plasma as plasma
oid = plasma.ObjectID(raw)
info_list = cls.get_plasma().get(oid)
cls.get_plasma().delete((oid,))
return info_list
@classmethod
def merge_responses(cls, resps) -> bytes:
merged_resps = tuple((r, tuple()) for r in resps)
oid = cls.get_plasma().put(merged_resps)
return oid.binary()
class PickleDataLoader:
@classmethod
def merge_requests(cls, reqs: Sequence[HTTPRequest]) -> bytes:
return pickle.dumps(reqs)
@classmethod
def split_requests(cls, raw: bytes) -> Sequence[HTTPRequest]:
restricted_loads(raw)
return pickle.loads(raw)
@classmethod
def merge_responses(cls, resps: Sequence[HTTPResponse]) -> bytes:
return pickle.dumps(list(resps))
@classmethod
def split_responses(cls, raw: bytes) -> Sequence[HTTPResponse]:
try:
return pickle.loads(raw)
except pickle.UnpicklingError:
raise ValueError(
f"Batching result unpacking error: \n {raw[:1000]}"
) from None
DataLoader = PickleDataLoader
``` |
{
"source": "418sec/computervision-recipes",
"score": 3
} |
#### File: utils_cv/common/deployment.py
```python
import os
import yaml
from azureml.core.conda_dependencies import CondaDependencies
def generate_yaml(
directory: str,
ref_filename: str,
needed_libraries: list,
conda_filename: str,
):
"""
Creates a deployment-specific yaml file as a subset of
the image classification environment.yml
Also adds extra libraries, if not present in environment.yml
Args:
directory (string): Directory name of reference yaml file
ref_filename (string): Name of reference yaml file
needed_libraries (list of strings): List of libraries needed
in the Docker container
conda_filename (string): Name of yaml file to be deployed
in the Docker container
Returns: Nothing
"""
with open(os.path.join(directory, ref_filename), "r") as f:
yaml_content = yaml.load(f, Loader=yaml.SafeLoader)
# Extract libraries to be installed using conda
extracted_libraries = [
depend
for depend in yaml_content["dependencies"]
if any(lib in depend for lib in needed_libraries)
]
# Extract libraries to be installed using pip
if any(isinstance(x, dict) for x in yaml_content["dependencies"]):
# if the reference yaml file contains a "pip" section,
# find where it is in the list of dependencies
ind = [
yaml_content["dependencies"].index(depend)
for depend in yaml_content["dependencies"]
if isinstance(depend, dict)
][0]
extracted_libraries += [
depend
for depend in yaml_content["dependencies"][ind]["pip"]
if any(lib in depend for lib in needed_libraries)
]
# Check whether additional libraries are needed
not_found = [
lib
for lib in needed_libraries
if not any(lib in ext for ext in extracted_libraries)
]
# Create the deployment-specific yaml file
conda_env = CondaDependencies()
for ch in yaml_content["channels"]:
conda_env.add_channel(ch)
for library in extracted_libraries + not_found:
conda_env.add_conda_package(library)
# Display the environment
print(conda_env.serialize_to_string())
# Save the file to disk
conda_env.save_to_file(
base_directory=os.getcwd(), conda_file_path=conda_filename
)
# Note: For users interested in creating their own environments,
# the only commands needed are:
# conda_env = CondaDependencies()
# conda_env.add_channel()
# conda_env.add_conda_package()
# conda_env.save_to_file()
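    # A hypothetical invocation of generate_yaml (editor's sketch; the file names and
    # library list are assumptions, not taken from the repository):
    # generate_yaml(
    #     directory=".",
    #     ref_filename="environment.yml",
    #     needed_libraries=["pytorch", "fastai", "pillow"],
    #     conda_filename="deployment_env.yml",
    # )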
``` |
{
"source": "418sec/pypicloud",
"score": 3
} |
#### File: pypicloud/pypicloud/auth.py
```python
import binascii
from base64 import b64decode
# pylint: disable=E0611
from paste.httpheaders import AUTHORIZATION, WWW_AUTHENTICATE
# pylint: enable=E0611
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.httpexceptions import HTTPForbidden, HTTPUnauthorized
from pyramid.security import Everyone, authenticated_userid
# Copied from
# http://docs.pylonsproject.org/projects/pyramid_cookbook/en/latest/auth/basic.html
def get_basicauth_credentials(request):
"""Get the user/password from HTTP basic auth"""
authorization = AUTHORIZATION(request.environ)
try:
authmeth, auth = authorization.split(" ", 1)
except ValueError: # not enough values to unpack
return None
if authmeth.lower() == "basic":
try:
auth = b64decode(auth.strip()).decode("utf8")
except (TypeError, binascii.Error): # can't decode
return None
try:
login, password = auth.split(":", 1)
except ValueError: # not enough values to unpack
return None
return {"login": login, "password": password}
return None
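# Editor's illustration of the decoding above (hedged example, not from the source):
# an environ carrying HTTP_AUTHORIZATION = "Basic dXNlcjpodW50ZXIy" base64-decodes to
# "user:hunter2" and yields {"login": "user", "password": "hunter2"}; a missing or
# non-Basic header, or an undecodable value, returns None instead.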
class BasicAuthenticationPolicy(object):
"""A :app:`Pyramid` :term:`authentication policy` which
obtains data from basic authentication headers.
Constructor Arguments
``check``
A callback passed the credentials and the request,
expected to return None if the userid doesn't exist or a sequence
of group identifiers (possibly empty) if the user does exist.
Required.
"""
def authenticated_userid(self, request):
"""Verify login and return the authed userid"""
credentials = get_basicauth_credentials(request)
if credentials is None:
return None
userid = credentials["login"]
if request.access.verify_user(credentials["login"], credentials["password"]):
return userid
return None
def unauthenticated_userid(self, request):
"""Return userid without performing auth"""
return request.userid
def effective_principals(self, request):
"""Get the authed groups for the active user"""
if request.userid is None:
return [Everyone]
return request.access.user_principals(request.userid)
def remember(self, request, principal, **kw):
"""HTTP Headers to remember credentials"""
return []
def forget(self, request):
"""HTTP headers to forget credentials"""
return []
class SessionAuthPolicy(object):
"""Simple auth policy using beaker sessions"""
def authenticated_userid(self, request):
"""Return the authenticated userid or ``None`` if no
authenticated userid can be found. This method of the policy
should ensure that a record exists in whatever persistent store is
used related to the user (the user should not have been deleted);
if a record associated with the current id does not exist in a
persistent store, it should return ``None``."""
return request.session.get("user", None)
def unauthenticated_userid(self, request):
"""Return the *unauthenticated* userid. This method performs the
same duty as ``authenticated_userid`` but is permitted to return the
userid based only on data present in the request; it needn't (and
shouldn't) check any persistent store to ensure that the user record
related to the request userid exists."""
return request.userid
def effective_principals(self, request):
"""Return a sequence representing the effective principals
including the userid and any groups belonged to by the current
user, including 'system' groups such as
``pyramid.security.Everyone`` and
``pyramid.security.Authenticated``."""
if request.userid is None:
return [Everyone]
return request.access.user_principals(request.userid)
def remember(self, request, principal, **_):
"""
This implementation is slightly different than expected. The
application should call remember(userid) rather than
remember(principal)
"""
request.session["user"] = principal
return []
def forget(self, request):
"""Return a set of headers suitable for 'forgetting' the
current user on subsequent requests."""
request.session.delete()
return []
def _is_logged_in(request):
"""Check if there is a logged-in user in the session"""
return request.userid is not None
def _request_login(request):
"""Return a 401 to force pip to upload its HTTP basic auth credentials"""
response = HTTPUnauthorized()
realm = WWW_AUTHENTICATE.tuples('Basic realm="%s"' % request.registry.realm)
response.headers.update(realm)
return response
def _forbid(request):
"""
Return a 403 if user is logged in, otherwise return a 401.
This is required to force pip to upload its HTTP basic auth credentials
"""
if request.is_logged_in:
return HTTPForbidden()
else:
return _request_login(request)
def includeme(config):
"""Configure the app"""
config.set_authorization_policy(ACLAuthorizationPolicy())
config.set_authentication_policy(config.registry.authentication_policy)
config.add_authentication_policy(SessionAuthPolicy())
config.add_authentication_policy(BasicAuthenticationPolicy())
config.add_request_method(authenticated_userid, name="userid", reify=True)
config.add_request_method(_forbid, name="forbid")
config.add_request_method(_request_login, name="request_login")
config.add_request_method(_is_logged_in, name="is_logged_in", reify=True)
settings = config.get_settings()
realm = settings.get("pypi.realm", "pypi")
config.registry.realm = realm
``` |
{
"source": "41alderson/Sinister.ly-Notifier",
"score": 3
} |
#### File: 41alderson/Sinister.ly-Notifier/sinister.py
```python
import requests
import ctypes
from win10toast import ToastNotifier
from time import sleep
from bs4 import BeautifulSoup
from colorama import Fore, init
init(convert=True)
last_posts_dup = []
last_posts_clean = []
latest_post = []
new_posts = []
topic = []
latest_topic = []
ctypes.windll.kernel32.SetConsoleTitleW(f"Sinister.ly Posts Notifier")
Notify = ToastNotifier()
class c:
r = Fore.RED
re = Fore.RESET
y = Fore.YELLOW
w = Fore.WHITE
b = Fore.BLUE
g = Fore.GREEN
def get_stats():
url = 'https://sinister.ly/index.php'
r = requests.get(url)
scraper = BeautifulSoup(r.text, 'html.parser')
for links in scraper.find_all('a'):
try:
if 'Thread' in links['href']:
if 'https' in links['href']:
last_posts_dup.append(links['href'])
topic.append(links['title'])
else:
pass
except Exception:
pass
last_posts_dup.pop(0)
clean_links = list(dict.fromkeys(last_posts_dup))
for sublist in clean_links:
last_posts_clean.append(sublist)
latest_post.append(last_posts_clean[0])
latest_topic.append(topic[0])
for i in last_posts_clean:
if i == latest_post[0]:
break
else:
new_posts.append(i)
def check_stats():
try:
if new_posts[0] is not None:
latest_post.clear()
latest_post.append(new_posts[0])
print(f'\n\t\t{c.y}Found {len(new_posts)} Post{c.re}')
send_notifications(posts=len(new_posts))
except IndexError:
print(f'\n\t\t{c.r}No New Posts{c.re}')
def send_notifications(posts):
if posts == 0:
pass
elif posts == 1:
Notify.show_toast(title='New Posts Available', msg=f'Topic: {latest_topic[0]}',
duration=10)
else:
Notify.show_toast(title='New Posts Available', msg=f'There Are {posts} New Posts Being Discussed On Forum.',
duration=10)
def get_data(post):
if post == 1:
r = requests.get(new_posts[0])
soup = BeautifulSoup(r.text, 'html.parser')
for span in soup.findAll('span'):
span.unwrap()
for div in soup.findAll('div'):
div.unwrap()
content = soup.find('div', {'class': 'post_content'})
content = str(content).split('post_body scaleimages')
content = content[1].split('</div>')
content = content[0].split('">')
content = content[1].replace('<br/>', '')
show = f'''\n
TOPIC: {c.y}{latest_topic[0]}
{content}{c.re}
'''
print(show)
input(f'\n\n{c.b}Press Enter To Continue...')
else:
pass
def cleaner():
last_posts_dup.clear()
last_posts_clean.clear()
new_posts.clear()
topic.clear()
latest_topic.clear()
def runner(duration):
get_stats()
check_stats()
sleep(duration)
cleaner()
sleep(1)
    return runner(duration)
def about():
about = f'''{Fore.MAGENTA}
\t\t _ _ _____ _____ _____________ __
\t\t| \ | | _ |_ _|_ _| ___\ \ / /
\t\t| \| | | | | | | | | | |_ \ V /
\t\t| . ` | | | | | | | | | _| \ /
\t\t| |\ \ \_/ / | | _| |_| | | |
\t\t\_| \_/\___/ \_/ \___/\_| \_/
\t\t{c.re}by ALDERSON41
'''
contact = f'''
{c.r}CONTACT ME
{c.y}sinister.ly -> {c.w}sefefew
{c.y}instagram -> {c.w}john_snow__41
{c.y}Telegram -> {c.w}@Thanos_Did_Nothing_Wrong{c.re}
'''
print(about)
sleep(1)
print(contact)
input(f'\n\n{c.b}Press Enter To Continue...')
if '__main__' == __name__:
about()
sleep(1)
dur = int(input('Enter Time To Refresh Feeds: '))
runner(dur)
``` |
{
"source": "41WhiteElephants/glow-tts",
"score": 3
} |
#### File: 41WhiteElephants/glow-tts/attentions.py
```python
import copy
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import commons
import modules
from modules import LayerNorm
class Encoder(nn.Module):
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=None, block_length=None, **kwargs):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.block_length = block_length
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size, p_dropout=p_dropout, block_length=block_length))
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
self.norm_layers_2.append(LayerNorm(hidden_channels))
def forward(self, x, x_mask):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
for i in range(self.n_layers):
x = x * x_mask
y = self.attn_layers[i](x, x, attn_mask)
y = self.drop(y)
x = self.norm_layers_1[i](x + y)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = self.norm_layers_2[i](x + y)
x = x * x_mask
return x
class CouplingBlock(nn.Module):
def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0, sigmoid_scale=False):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.sigmoid_scale = sigmoid_scale
start = torch.nn.Conv1d(in_channels//2, hidden_channels, 1)
start = torch.nn.utils.weight_norm(start)
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
self.wn = modules.WN(in_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels, p_dropout)
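        # Editor's note (hedged): because `end` is zero-initialised, the first forward
        # pass gives m == 0 and logs == 0, so with sigmoid_scale=False
        # z_1 = (0 + exp(0) * x_1) * x_mask = x_1 * x_mask and logdet == 0; the
        # coupling starts out as a (masked) identity map, which is the stability trick
        # mentioned in the comment above.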
def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
b, c, t = x.size()
if x_mask is None:
x_mask = 1
attn_mask = None
else:
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
x_0, x_1 = x[:,:self.in_channels//2], x[:,self.in_channels//2:]
x = self.start(x_0) * x_mask
x = self.wn(x, x_mask, g)
out = self.end(x)
z_0 = x_0
m = out[:, :self.in_channels//2, :]
logs = out[:, self.in_channels//2:, :]
if self.sigmoid_scale:
logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
if reverse:
z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
logdet = None
else:
z_1 = (m + torch.exp(logs) * x_1) * x_mask
logdet = torch.sum(logs * x_mask, [1, 2])
z = torch.cat([z_0, z_1], 1)
return z, logdet
def store_inverse(self):
self.wn.remove_weight_norm()
class MultiHeadAttention(nn.Module):
def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0., block_length=None, proximal_bias=False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels**-0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
nn.init.xavier_uniform_(self.conv_v.weight)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s, t_t = (*key.size(), query.size(2))
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
if self.window_size is not None:
assert t_s == t_t, "Relative attention is only available for self-attention."
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, "Proximal bias is only available for self-attention."
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
if self.block_length is not None:
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
scores = scores * block_mask + -1e4*(1 - block_mask)
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings,
commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
return x_final
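    # Editor's shape walk-through for the padding trick above (illustrative, l = 3):
    # the input [b, h, 3, 5] is padded to [b, h, 3, 6], flattened to 18 elements,
    # tail-padded by l-1 = 2 to 20 elements, reshaped to [b, h, 4, 5], and the slice
    # [:, :, :3, 2:] leaves the absolute-position scores of shape [b, h, 3, 3].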
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
        # pad along column
x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
x_flat = x.view([batch, heads, length**2 + length*(length -1)])
# add 0's in the beginning that will skew the elements after reshape
x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
class FFN(nn.Module):
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size//2)
self.drop = nn.Dropout(p_dropout)
def forward(self, x, x_mask):
x = self.conv_1(x * x_mask)
if self.activation == "gelu":
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)
x = self.conv_2(x * x_mask)
return x * x_mask
```
#### File: 41WhiteElephants/glow-tts/train.py
```python
import os
import json
import argparse
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
from apex import amp
from data_utils import TextMelLoader, TextMelCollate
import models
import commons
import utils
from text.symbols import symbols
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = 1 # torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '80000'
hps = utils.get_hparams()
mp.spawn(train_and_eval, nprocs=n_gpus, args=(n_gpus, hps,))
def train_and_eval(rank, n_gpus, hps):
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextMelLoader(hps.data.training_files, hps.data)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset,
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextMelCollate(1)
train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=True, collate_fn=collate_fn, sampler=train_sampler)
if rank == 0:
val_dataset = TextMelLoader(hps.data.validation_files, hps.data)
val_loader = DataLoader(val_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=True, collate_fn=collate_fn)
generator = models.FlowGenerator(
n_vocab=len(symbols),
out_channels=hps.data.n_mel_channels,
**hps.model).cuda(rank)
optimizer_g = commons.Adam(generator.parameters(), scheduler=hps.train.scheduler, dim_model=hps.model.hidden_channels, warmup_steps=hps.train.warmup_steps, lr=hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps)
if hps.train.fp16_run:
generator, optimizer_g._optim = amp.initialize(generator, optimizer_g._optim, opt_level="O1")
generator = DDP(generator)
epoch_str = 1
global_step = 0
try:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), generator, optimizer_g)
epoch_str += 1
optimizer_g.step_num = (epoch_str - 1) * len(train_loader)
optimizer_g._update_learning_rate()
global_step = (epoch_str - 1) * len(train_loader)
except:
if hps.train.ddi and os.path.isfile(os.path.join(hps.model_dir, "ddi_G.pth")):
_ = utils.load_checkpoint(os.path.join(hps.model_dir, "ddi_G.pth"), generator, optimizer_g)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank==0:
train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer)
evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval)
utils.save_checkpoint(generator, optimizer_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(epoch)))
else:
train(rank, epoch, hps, generator, optimizer_g, train_loader, None, None)
def train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer):
train_loader.sampler.set_epoch(epoch)
global global_step
generator.train()
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
# Train Generator
optimizer_g.zero_grad()
(z, y_m, y_logs, logdet), attn, logw, logw_, x_m, x_logs = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = 0.5 * math.log(2 * math.pi) + (torch.sum(y_logs) + 0.5 * torch.sum(torch.exp(-2 * y_logs) * (z - y_m)**2) - torch.sum(logdet)) / (torch.sum(y_lengths // hps.model.n_sqz) * hps.model.n_sqz * hps.data.n_mel_channels)
l_length = torch.sum((logw - logw_)**2) / torch.sum(x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if hps.train.fp16_run:
with amp.scale_loss(loss_g, optimizer_g._optim) as scaled_loss:
scaled_loss.backward()
grad_norm = commons.clip_grad_value_(amp.master_params(optimizer_g._optim), 5)
else:
loss_g.backward()
grad_norm = commons.clip_grad_value_(generator.parameters(), 5)
optimizer_g.step()
if rank==0:
if batch_idx % hps.train.log_interval == 0:
(y_gen, *_), *_ = generator.module(x[:1], x_lengths[:1], gen=True)
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss_g.item()))
logger.info([x.item() for x in loss_gs] + [global_step, optimizer_g.get_lr()])
scalar_dict = {"loss/g/total": loss_g, "learning_rate": optimizer_g.get_lr(), "grad_norm": grad_norm}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(loss_gs)})
utils.summarize(
writer=writer,
global_step=global_step,
images={"y_org": utils.plot_spectrogram_to_numpy(y[0].data.cpu().numpy()),
"y_gen": utils.plot_spectrogram_to_numpy(y_gen[0].data.cpu().numpy()),
"attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()),
},
scalars=scalar_dict)
global_step += 1
if rank == 0:
logger.info('====> Epoch: {}'.format(epoch))
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval):
if rank == 0:
global global_step
generator.eval()
losses_tot = []
with torch.no_grad():
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(val_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
(z, y_m, y_logs, logdet), attn, logw, logw_, x_m, x_logs = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = 0.5 * math.log(2 * math.pi) + (torch.sum(y_logs) + 0.5 * torch.sum(torch.exp(-2 * y_logs) * (z - y_m)**2) - torch.sum(logdet)) / (torch.sum(y_lengths // hps.model.n_sqz) * hps.model.n_sqz * hps.data.n_mel_channels)
l_length = torch.sum((logw - logw_)**2) / torch.sum(x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if batch_idx == 0:
losses_tot = loss_gs
else:
losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]
if batch_idx % hps.train.log_interval == 0:
logger.info('Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(val_loader.dataset),
100. * batch_idx / len(val_loader),
loss_g.item()))
logger.info([x.item() for x in loss_gs])
losses_tot = [x/len(val_loader) for x in losses_tot]
loss_tot = sum(losses_tot)
scalar_dict = {"loss/g/total": loss_tot}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_tot)})
utils.summarize(
writer=writer_eval,
global_step=global_step,
scalars=scalar_dict)
logger.info('====> Epoch: {}'.format(epoch))
if __name__ == "__main__":
main()
``` |
{
"source": "41xu/GeneticAlgorithm",
"score": 3
} |
#### File: 41xu/GeneticAlgorithm/GA.py
```python
import math
import random
populationNum = 50000
chromoLen = 21 * 2 # 2D X in f1, therefore the chromosome len will be 21*2, 1-21: x1, 22-42: x2
maxX = 100
minX = -100
crossRate=0.7
mutationRate=0.01
def f1(x1, x2):
return x1 * x1 + x2 * x2
def f3(x1,x2):
return x1*x1-10*math.cos(2*math.pi*x1)+10+\
x2*x2-10*math.cos(2*math.pi*x2)+10
def f4(x1,x2):
return (x1*x1+x2*x2)/4000-math.cos(x1/1)*math.cos(x2/2)+1
def EncodingInit(populationNum, chromoLen):
population = []
for i in range(populationNum):
temp = []
for j in range(chromoLen):
temp.append(random.randint(0, 1))
population.append(temp)
return population
def Decoding(population, chromoLen):
X1 = []
X2 = []
for i in range(len(population)):
temp = 0
for j in range(0,chromoLen // 2):
temp += population[i][j] * math.pow(2, j)
X1.append(temp)
temp = 0
for j in range(chromoLen // 2, chromoLen):
temp += population[i][j] * math.pow(2, j - chromoLen // 2)
X2.append(temp)
X1_ = [minX + xi * (maxX - minX) / (pow(2, chromoLen // 2) - 1) for xi in X1]
X2_ = [minX + xi * (maxX - minX) / (pow(2, chromoLen // 2) - 1) for xi in X2]
return X1_, X2_
def fitness1(X1, X2,population):
    # calculate Y
    # Y holds the raw values of function f1, where smaller is better
    # for the fitness, however, the larger value should be the one favoured,
    # i.e. it should have the higher probability of being selected
    # therefore we apply a small trick to Y:
    # we use (max of Y) - Y[i] to represent the fitness of Y[i]
    # so the larger the fitness, the better
Y = []
for i in range(len(population)):
Y.append(f1(X1[i], X2[i]))
maxY = max(Y)
for i in range(len(population)):
Y[i] = maxY - Y[i]
return Y
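    # Editor's worked example of the max-minus trick (illustrative values):
    # raw f1 values Y = [25, 4, 0] give maxY = 25, so the returned fitnesses are
    # [0, 21, 25]; the smallest function value now carries the largest fitness,
    # which is what the roulette-wheel selection below expects.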
def fitness3(X1,X2,population):
Y=[]
for i in range(len(population)):
Y.append(f3(X1[i],X2[i]))
maxY=max(Y)
for i in range(len(population)):
Y[i]=maxY-Y[i]
return Y
def fitness4(X1,X2,population):
Y=[]
for i in range(len(population)):
Y.append(f4(X1[i],X2[i]))
maxY=max(Y)
for i in range(len(population)):
Y[i]=maxY-Y[i]
return Y
def selection(population, Y): # Roulette Wheel Selection
newPopulation = []
if sum(Y)!=0:
probY = [y / sum(Y) for y in Y]
else:
return None
c = 0
for (index, item) in enumerate(probY):
c += item
r = random.random()
if r < c:
newPopulation.append(population[index])
return newPopulation
def crossover(newPopulation):
for i in range(len(newPopulation)-1):
r=random.random()
if r<crossRate:
point=random.randint(0,len(newPopulation[0])-1)
temp1=[]
temp2=[]
temp1.extend(newPopulation[i][:point])
temp1.extend(newPopulation[i+1][point:])
temp2.extend(newPopulation[i+1][:point])
temp2.extend(newPopulation[i][point:])
newPopulation[i]=temp1
newPopulation[i+1]=temp2
return newPopulation
def mutation(newPopulation):
for i in range(len(newPopulation)):
r=random.random()
if r<mutationRate:
position=random.randint(0,chromoLen//2-1)
if newPopulation[i][position]==1:
newPopulation[i][position]=0
else:
newPopulation[i][position]=1
return newPopulation
if __name__ == '__main__':
totalPop=[]
X=[]
Y=[]
population = EncodingInit(populationNum, chromoLen)
totalPop.append(population)
for i in range(populationNum):
X1, X2 = Decoding(totalPop[-1], chromoLen)
# print(X1,X2)
y = fitness1(X1, X2,totalPop[-1])
X.append([X1,X2])
Y.append(y)
newpop = selection(totalPop[-1], y)
if newpop!=None:
newpop = crossover(newpop)
newpop = mutation(newpop)
totalPop.append(newpop)
# print(len(totalPop[-1]))
else:
break
print("f1(x)=sum(xi)^2 for i in range(1,3)")
print("x1,x2:",X[-1])
print("y:",Y[-1])
print("Precision for 10^(-4)")
populationNum = 50000
chromoLen = 17 * 2 # 2D X in f1, therefore the chromosome len will be 21*2, 1-21: x1, 22-42: x2
maxX = 5.12
minX = -5.12
crossRate = 0.7
mutationRate = 0.01
totalPop=[]
X=[]
Y=[]
population = EncodingInit(populationNum, chromoLen)
totalPop.append(population)
for i in range(populationNum):
X1, X2 = Decoding(totalPop[-1], chromoLen)
# print(X1,X2)
y = fitness3(X1, X2,totalPop[-1])
X.append([X1,X2])
Y.append(y)
newpop = selection(totalPop[-1], y)
if newpop!=None:
newpop = crossover(newpop)
newpop = mutation(newpop)
totalPop.append(newpop)
# print(len(totalPop[-1]))
else:
break
print("f3(x)=sum(xi^2-10*cos(2*pi*xi)+10) for i in range(1,3)")
print("x1,x2=",X[-1])
print("y:",Y[-1])
print("Precision for 10^(-4)")
populationNum = 50000
chromoLen = 24 * 2 # 2D X in f1, therefore the chromosome len will be 21*2, 1-21: x1, 22-42: x2
maxX = 600
minX = -600
crossRate = 0.7
mutationRate = 0.01
totalPop=[]
X=[]
Y=[]
population = EncodingInit(populationNum, chromoLen)
totalPop.append(population)
for i in range(populationNum):
X1, X2 = Decoding(totalPop[-1], chromoLen)
# print(X1,X2)
y = fitness4(X1, X2,totalPop[-1])
X.append([X1,X2])
Y.append(y)
newpop = selection(totalPop[-1], y)
if newpop!=None:
newpop = crossover(newpop)
newpop = mutation(newpop)
totalPop.append(newpop)
# print(len(totalPop[-1]))
else:
break
print("f4(x)=(x1*x1+x2*x2)/4000-math.cos(x1/1)*math.cos(x2/2)+1")
print("x1,x2=",X[-1])
print("y:",Y[-1])
print("Precision for 10^(-4)")
``` |
{
"source": "41y08h/warah-killer",
"score": 3
} |
#### File: resources/classes/powerup.py
```python
import os
import pygame
import random
import globals
from os import listdir
from os.path import isfile, join
class BulletPlus(pygame.sprite.Sprite):
appeared = 0
# Constructor
def __init__(self):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
# Image
self.image = pygame.image.load(os.path.join(globals.data_dir, 'img/pwrp/pwr_bullets.png')).convert_alpha()
self.rect = self.image.get_rect()
# Create mask
self.mask = pygame.mask.from_surface(self.image)
# Place on screen
self.rect.center = (random.randint(1, globals.winWidth - 140), random.randint(100, globals.winHeight - 140))
def kill(self):
self.rect.center = (-300, -300)
class BulletPlusGone(pygame.sprite.Sprite):
frames = []
# Constructor
def __init__(self, x, y):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
# Get all frames
path = "data/img/pwrp/pwr_bullets_kill/"
frames = [f for f in listdir(path) if isfile(join(path, f))]
# Put all frames in a list of Pygame images
self.images = []
for frame in frames:
self.images.append(pygame.image.load(path + frame).convert_alpha())
self.index = 0
self.image = self.images[self.index]
self.rect = self.image.get_rect()
# Position
self.rect.x = x
self.rect.y = y
def update(self):
# ANIMATE AND KILL AFTER ANIMATION
if self.index >= len(self.images):
self.kill()
else:
self.image = self.images[self.index]
self.index += 1
class Bomb(pygame.sprite.Sprite):
appeared = 0
# Constructor
def __init__(self):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
# Image
self.image = pygame.image.load(os.path.join(globals.data_dir, 'img/pwrp/pwr_bomb.png')).convert_alpha()
self.rect = self.image.get_rect()
# Create mask
self.mask = pygame.mask.from_surface(self.image)
# Place on screen
self.rect.center = (random.randint(1, globals.winWidth - 140), random.randint(100, globals.winHeight - 140))
def kill(self):
self.rect.center = (-300, -300)
class BombGone(pygame.sprite.Sprite):
frames = []
# Constructor
def __init__(self, x, y):
# Call the parent class (Sprite) constructor
pygame.sprite.Sprite.__init__(self)
# Get all frames
path = "data/img/pwrp/pwr_bomb_kill/"
frames = [f for f in listdir(path) if isfile(join(path, f))]
# Put all frames in a list of Pygame images
self.images = []
for frame in frames:
self.images.append(pygame.image.load(path + frame).convert_alpha())
self.index = 0
self.image = self.images[self.index]
self.rect = self.image.get_rect()
# Position
self.rect.x = x
self.rect.y = y
def update(self):
# ANIMATE AND KILL AFTER ANIMATION
if self.index >= len(self.images):
self.kill()
else:
self.image = self.images[self.index]
self.index += 1
``` |
{
"source": "4201337/InstagramChecker",
"score": 3
} |
#### File: 4201337/InstagramChecker/app.py
```python
from multiprocessing.dummy import Pool as ThreadPool
from threading import Lock as LockPool
import requests
import time
import sys
myList = open('app.txt').readlines()
myThreads = 50
myLock = LockPool()
myPool = ThreadPool(myThreads)
def myRun(username):
username = username.strip()
url = 'https://www.instagram.com/'
req = requests.get(url + username)
if req.status_code == 200:
with myLock:
            print('[Unavailable]', username)
with open('Unavailable.txt', 'a') as Unavailable:
Unavailable.write(username + '\n')
elif req.status_code == 404:
with myLock:
            print('[Available]', username)
with open('Available.txt', 'a') as Available:
Available.write(username + '\n')
else:
with myLock:
            print('[Error]', username)
            with open('Error.txt', 'a') as Error:
                Error.write(username + '|' + str(req.status_code) + '\n')
startTime = time.time()
if __name__ == '__main__':
myPool.map(myRun, myList)
myPool.close()
myPool.join()
endTime = time.time()
print('=============================')
print('Done!')
print('=============================')
print('Total Time :', round(endTime - startTime, 2), 'Seconds')
print('Total Threads :', myThreads)
print('Total Tries :', len(myList))
print('============================')
print('Thank you, come again!')
print('============================')
``` |
{
"source": "420tushar/text_to_handwriting",
"score": 3
} |
#### File: 420tushar/text_to_handwriting/imagehand.py
```python
import numpy as np
from PIL import Image
def make_image(list1,count):
imgs = [ Image.open(i) for i in list1 ]
#min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
min_shape=(10*y,10*y)
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray( imgs_comb)
imgs_comb.save( str(count)+'.jpg' )
def multiple_lines(count):
list1=[]
for i in range(1,count+1,1):
z=str(i)+".jpg"
list1.append(z)
imgs = [ Image.open(i) for i in list1 ]
min_shape = sorted( [(np.sum(i.size), i.size ) for i in imgs])[0][1]
# min_shape=(300*y,200*y)
    imgs_comb = np.vstack([np.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray( imgs_comb)
imgs_comb.save( 'final.jpg')
#list_im = ['Test1.jpg', 'Test2.jpg', 'Test3.jpg']
x=open("new.txt","r")
#print("ENTER TEXT TO CONVERT")
#x.write(input()+"\n")
list1=[]
listx=[i for i in x.read()]
y=0
count=0
for i in listx:
if i=='\n':
count=count+1
if(len(list1)<50):
for i in range(len(list1),50,1):
list1.append('space.jpg')
make_image(list1,count)
list1.clear()
continue
if(i>='a' and i<= 'z') :
z=i+".jpg"
else:
z='space.jpg'
list1.append(z)
y=y+1
if(count>0):
multiple_lines(count)
``` |
{
"source": "421408/tudocomp",
"score": 3
} |
#### File: tudocomp/etc/genregistry.py
```python
import collections
import os
import argparse
import re
import sys
from textwrap import dedent, indent
parser = argparse.ArgumentParser()
parser.add_argument('config')
parser.add_argument('config_path')
parser.add_argument('out_path')
parser.add_argument('--print_deps', action='store_true')
parser.add_argument('--generate_files', action='store_true')
args = parser.parse_args()
pyconfig = args.config
config_path = args.config_path
out_path = args.out_path
root_filename = os.path.join(out_path, 'root.cpp')
def eval_config(path, globs):
file0 = open(path, 'r')
content = file0.read()
file0.flush()
file0.close()
exec(content, globs)
def config_match(pattern):
textfile = open(config_path, 'r')
filetext = textfile.read()
textfile.close()
pattern = re.compile(pattern)
for line in filetext.splitlines():
for match in re.finditer(pattern, line):
return True
return False
AlgorithmConfig = collections.namedtuple(
'AlgorithmConfig', ['name', 'header', 'sub'])
AlgorithmConfig.__new__.__defaults__ = ('', '', [])
class RegistryConfig:
compressors = []
decompressors = []
generators = []
def get_kinds(self):
return [
("Compressor", self.compressors),
("Decompressor", self.decompressors),
("Generator", self.generators),
]
def code(s, i = 0, r = {}):
s = indent(dedent(s), ' ' * i)
for key in r:
s = s.replace(key, r[key])
return s
class Code:
def __init__(self, file):
self.file = file
def code(self, s, i = 0, r = {}):
self.file.write(code(s, i, r) + '\n')
def emptyline(self):
self.file.write('\n')
autogen_disclaimer = '''
/* Autogenerated file by genregistry.py */
'''
tdc = RegistryConfig()
eval_config(pyconfig, {
"config_match": config_match,
"AlgorithmConfig": AlgorithmConfig,
"tdc": tdc
})
def cpp_template_instance(instance):
str = ""
level = 0
level_num = [1]
level_rank = [0]
for (name, nsubs) in instance:
if level_rank[level] > 0:
str += ", "
str += name
level_rank[level] += 1
if nsubs > 0:
str += '<'
level += 1
level_num.append(nsubs)
level_rank.append(0)
while level > 0 and level_rank[level] >= level_num[level]:
del level_rank[level]
del level_num[level]
level -= 1
str += '>'
return str
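# Editor's worked example for the flattening above (illustrative names only):
# cpp_template_instance([("Compressor", 2), ("CoderA", 0), ("CoderB", 0)]) returns
# "Compressor<CoderA, CoderB>", while a leaf-only instance such as [("ASCIICoder", 0)]
# comes back unchanged as "ASCIICoder".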
def dfs(algo, f, q = [], instance = [], headers = []):
(name, header, sub) = algo
instance_ = instance + [(name,len(sub))]
headers_ = headers + [header]
q_ = sub + q
if len(q_) > 0:
next = q_[0]
q_ = q_[1:]
for sub_algo in next:
dfs(sub_algo, f, q_, instance_, headers_)
else:
# reached a leaf
f(instance_, headers_)
def print_instance(instance, headers):
print(cpp_template_instance(instance))
#######################
### Instance groups ###
#######################
groups = []
max_groupsize = 100;
for (classname, algos) in tdc.get_kinds():
class_groups = []
for algo in algos:
(name, _, _) = algo
group_name = name.replace(':', '_')
group_rank = 1
group_count = 0
group_headers = []
group_instances = []
def write_group():
global class_groups
class_groups.append(group_name)
if args.generate_files:
group_headers.sort()
with open(os.path.join(out_path, group_name + '.cpp'), 'w') as f:
cpp = Code(f)
cpp.code(autogen_disclaimer)
cpp.code('#include <tudocomp_driver/Registry.hpp>')
for h in group_headers:
cpp.code('#include <tudocomp/' + h + '>')
cpp.emptyline()
cpp.code('namespace tdc_algorithms {')
cpp.emptyline()
cpp.code('using namespace tdc;')
cpp.emptyline()
cpp.code('void register_$GROUP(RegistryOf<$TYPE>& r) {',
0, { '$GROUP': group_name, '$TYPE': classname})
for i in group_instances:
cpp.code('r.register_algorithm<$INSTANCE>();',
1, { '$INSTANCE': i })
cpp.code('}')
cpp.emptyline()
cpp.code('} // namespace')
def process_instance(instance, headers):
global group_name, group_rank, group_count, group_headers, group_instances
group_headers = list(set(group_headers + headers))
group_instances.append(cpp_template_instance(instance))
group_count += 1
if group_count > max_groupsize:
write_group()
group_headers = []
group_instances = []
group_count = 0
group_rank += 1
group_name = name + '_' + str(group_rank)
dfs(algo, process_instance)
if group_count > 0:
write_group()
groups.append((classname, class_groups))
################
### root.cpp ###
################
if args.generate_files:
with open(root_filename, 'w') as f:
root = Code(f)
root.code(autogen_disclaimer)
root.code('#include <tudocomp_driver/Registry.hpp>')
root.emptyline()
root.code('namespace tdc_algorithms {')
root.emptyline()
root.code('using namespace tdc;')
root.emptyline()
root.code('// forward declarations')
for (classname, class_groups) in groups:
for group_name in class_groups:
root.code('void register_$GROUP(RegistryOf<$TYPE>&);',
0, { '$GROUP': group_name, '$TYPE': classname })
root.emptyline()
root.code('void register_algorithms() {', 0)
for (classname, class_groups) in groups:
root.code('// $TYPEs', 1, { '$TYPE': classname })
root.code('{', 1)
if len(class_groups) > 0:
root.code('auto& r = Registry::of<$TYPE>();',
2, { '$TYPE': classname })
for group_name in class_groups:
root.code('register_$GROUP(r);', 2, { '$GROUP': group_name })
root.code('}', 1)
root.code('}')
root.emptyline()
root.code('} //namespace')
if args.print_deps:
files = [root_filename]
for (_, class_groups) in groups:
for group_name in class_groups:
files.append(os.path.join(out_path, group_name + '.cpp'))
sys.stdout.write(';'.join(files))
sys.stdout.flush()
os.sync()
``` |
{
"source": "4220182/py-flask",
"score": 4
} |
#### File: 4220182/py-flask/test.py
```python
def getval(a):
return a * 3
a = {'a':1, 'b':2, 'c':3}
s = {k: getval(s) for k,s in a.items() if s>2}
print(s)
a = [1,2,3]
s = [k * 2 for k in a]
print(s)
``` |
{
"source": "422562/cert-verifier",
"score": 3
} |
#### File: cert-verifier/certverifier/cert_verifier.py
```python
from certverifier.print_helper import *
from certverifier.classify_features import *
import os, sys, getopt, codecs, csv
import pandas
from OpenSSL import crypto
from time import time
def main():
argv = sys.argv[1:]
infile = ""
outfile = "output.csv"
mode = "123"
strict = False
cert_count = 0
try:
opts, args = getopt.getopt(argv, "hi:o:m:s", ["infile=","outfile=","mode=","strict"])
except getopt.GetoptError:
print_help()
sys.exit()
for opt, arg in opts:
if opt == "-h":
print_help()
sys.exit()
if opt == "-i":
infile = arg
if opt == "-o":
outfile = arg
if opt == "-m":
mode = arg
if opt == "-s":
strict = True
patterns_df = pandas.read_csv(pkgfile("data/malicious-patterns-with-malware.csv"))
malicious_patterns = patterns_df.drop('malware', axis = 1).values.tolist()
malicious_subjectCNs = pandas.read_csv(pkgfile("data/malicious_subjectCN.csv"))['subject.CN'].values.tolist()
classifiers = load_classifiers()
count_vectorizers = load_count_vectorizers()
# classify a single .PEM file
if os.path.isfile(infile):
with codecs.open(infile, 'r', 'utf-8') as certfile:
certdata = certfile.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, certdata)
class_dict = get_class_dict(cert, classifiers, count_vectorizers)
cert_type = classify_cert(cert, mode, strict, class_dict, malicious_patterns, malicious_subjectCNs)
print_cert_result(infile, cert_type)
# classify a folder with .PEM certificates
if os.path.isdir(infile):
start_time = time()
cert_counts = [0] * 5
lst = os.listdir(infile)
lst.sort()
with codecs.open(outfile, 'w', 'utf-8') as out:
print_header(out)
for file in lst:
with codecs.open(os.path.join(infile, file), 'r', 'utf-8') as certfile:
certdata = certfile.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, certdata)
class_dict = get_class_dict(cert, classifiers, count_vectorizers)
cert_type = classify_cert(cert, mode, strict, class_dict, malicious_patterns, malicious_subjectCNs)
print_to_file(out, file, cert_type)
cert_counts[cert_type] += 1
# print information to output about the progress
if sum(cert_counts) % 100 == 0:
print_certificate_counts(cert_counts)
print_classification_time(start_time)
print_certificate_counts(cert_counts)
if __name__ == "__main__":
    main()
``` |
{
"source": "422926799/SecistSploit",
"score": 3
} |
#### File: modules/listener/connection_vnc.py
```python
import os,sys,shutil
from secistsploit.core.exploit import *
from secistsploit.core.tcp.tcp_client import TCPClient
platform = sys.platform  # determine the host operating system
if platform == "darwin":
path=os.getcwd()+'/secistsploit/data/vnc/vncviewer_linux'
elif platform == "linux":
path=os.getcwd()+'/secistsploit/data/vnc/vncviewer_linux'
elif platform == "windows":
path=os.getcwd()+'/secistsploit/data/vnc/vncviewer_win.exe'
class Exploit(TCPClient):
    __info__ = {
        "name": "connection_vnc",
        "\033[91mDescription\033[0m": "connection module description:\n"
                "1. Running runvnc.exe starts VNC on the default port 5900; x64 and x86 are supported\n"
                "2. The connection password is <PASSWORD>\n",
        "\033[91mReference\033[0m": (
" \n"
        ),
        "\033[91mAuthor\033[0m": (
"WBGlIl",
),
}
    rhost = OptIP("", "Remote target IP address")
target = OptString("Windows")
rport= OptPort("5900","port")
def run(self):
RHOST=(self.rhost)
TARGET=(self.target)
RPORT=(self.rport)
ip = " "+RHOST+":"
if TARGET == "Windows":
file=os.getcwd()+'/secistsploit/data/vnc/runvnc.exe'
shutil.copy(file,os.getcwd())
            print("\033[92m[+]\033[0m Client output path: " + os.getcwd() + "/runvnc.exe \n")
            yes = input("\033[94m[*]\033[0m Type yes once the client has been executed: ")
if yes =="yes":
runvnc=path+ip+str(RPORT)
os.system(runvnc)
else:
                print("Please run the client program on the target machine first")
```
#### File: 422926799/SecistSploit/SSF.py
```python
from __future__ import print_function
import logging.handlers
import sys
if sys.version_info.major < 3:
print("secistsploit supports only Python3. Rerun application in Python3 environment.")
exit(0)
from secistsploit.main import secistsploitInterpreter
log_handler = logging.handlers.RotatingFileHandler(filename="secistsploit_attack.log", maxBytes=500000)
log_formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")
log_handler.setFormatter(log_formatter)
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
LOGGER.addHandler(log_handler)
def secistsploit():
ssf = secistsploitInterpreter()
ssf.start()
if __name__ == "__main__":
secistsploit()
``` |
{
"source": "423s22/G1",
"score": 4
} |
#### File: src/Python_Files/controller.py
```python
import directory_scanner as scanner
import converter
import directory_controller as directory
def collect_scannable_files():
acceptable_file_extensions = [".xlsx", ".csv"]
return scanner.remove_invalid_file_types(scanner.retrieve_files("./data_files/machine_readable_files/*.*"), acceptable_file_extensions)
PATH = './data_files/machine_readable_files/'
documents = collect_scannable_files()
for files in documents:
extension = "." + files.split(".")[-1]
hospital = scanner.strip_path_and_ending(files)
print("Scanning: ", hospital)
try:
converter.output_to_csv(converter.scan_document(hospital, PATH, extension), hospital)
except Exception as e:
directory.generate_directory("logs")
print("Error scanning", hospital)
print("Item aded to log")
with open("./logs/error_log.txt", "a") as file:
file.write("Error Processing: " + hospital + " ")
file.write(str(e))
print("Completed")
print()
``` |
{
"source": "423s22/G2",
"score": 3
} |
#### File: G2/LocalServer/correctOutputTest.py
```python
import fileParser
def testValidDocValidates():
validate = fileParser.validatorMain()
validate.parse("Completed_Example_ETD.docx")
fp = open("Changes/requiredChanges.txt")
firstLine = fp.readline()
assert "All good" in firstLine
if __name__ == '__main__': # Code to test fileParser independently
testValidDocValidates()
``` |
{
"source": "425741639/gssresources",
"score": 4
} |
#### File: 425741639/gssresources/Part_Edition.py
```python
import random
# Generate the operator/operand lists for an arithmetic expression (without parentheses)
def getList(Operator,max_num,decimal_status,decimal_digit):
operator = []
calculate_num = []
    operate_num = random.randint(2, 3)  # number of operands
    operator_num = operate_num - 1  # number of operators
for i in range(operate_num):
if decimal_status == 1:
calculate_num.append(random.randint(0, max_num))
elif decimal_status ==0:
if random.randint(0,1) == 1:
calculate_num.append(random.randint(0, max_num))
else:
calculate_num.append(round(random.uniform(0, max_num),decimal_digit))
for i in range(operator_num):
operator.append(Operator[random.randint(0, len(Operator) - 1)])
if operator[i] == '/':
while calculate_num[i+1] == 0:
if decimal_status == 1:
calculate_num[i + 1] = random.randint(0, max_num)
elif decimal_status == 0:
if random.randint(0,1) == 1:
calculate_num[i+1] = random.randint(0, max_num)
else:
calculate_num[i + 1] = round(random.uniform(0, max_num),decimal_digit)
if decimal_status == 1:
for i in range(len(operator)):
if i == 0:
if operator[i] == '*':
temp = calculate_num[i] * calculate_num[i + 1]
elif operator[i] == '/':
while calculate_num[i] % calculate_num[i + 1] != 0:
calculate_num[i] = random.randint(0, max_num)
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = calculate_num[i] // calculate_num[i + 1]
else:
if operator[i] == '*':
if operator[i - 1] == '*' or operator[i - 1] == '/':
temp = temp * calculate_num[i + 1]
else:
temp = calculate_num[i] * calculate_num[i + 1]
elif operator[i] == '/':
if operator[i - 1] == '*' or operator[i - 1] == '/':
while temp % calculate_num[i + 1] != 0:
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = temp // calculate_num[i + 1]
else:
while calculate_num[i] % calculate_num[i + 1] != 0:
calculate_num[i] = random.randint(0, max_num)
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = calculate_num[i] // calculate_num[i + 1]
return operator,calculate_num
# Generate the operator/operand lists for an arithmetic expression (with parentheses)
def getList2(Operator,max_num,decimal_status,decimal_digit):
operator = []
calculate_num = []
    operate_num = random.randint(3, 4)  # number of operands
    operator_num = operate_num - 1  # number of operators
for i in range(operate_num):
if decimal_status == 1:
calculate_num.append(random.randint(0, max_num))
elif decimal_status == 0:
if random.randint(0,1) == 1:
calculate_num.append(random.randint(0, max_num))
else:
calculate_num.append(round(random.uniform(0, max_num),decimal_digit))
for i in range(operator_num):
operator.append(Operator[random.randint(0, len(Operator) - 1)])
if operator[i] == '/':
while calculate_num[i+1] == 0:
if decimal_status == 1:
calculate_num[i+1] = random.randint(0,max_num)
elif decimal_status == 0:
if random.randint(0,1) == 1:
calculate_num[i+1] = random.randint(0, max_num)
else:
calculate_num[i+1] = round(random.uniform(0, max_num),decimal_digit)
if decimal_status == 1:
for i in range(len(operator)):
if i == 0:
if operator[i] == '*':
temp = calculate_num[i] * calculate_num[i + 1]
elif operator[i] == '/':
while calculate_num[i] % calculate_num[i + 1] != 0:
calculate_num[i] = random.randint(0, max_num)
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = calculate_num[i] // calculate_num[i + 1]
elif operator[i] == '+':
temp = calculate_num[i] + calculate_num[i + 1]
elif operator[i] == '-':
temp = calculate_num[i] - calculate_num[i + 1]
elif i == 1:
if operator[i] == '*':
temp = temp * calculate_num[i + 1]
elif operator[i] == '/':
while temp % calculate_num[i + 1] != 0:
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = temp // calculate_num[i + 1]
else:
if operator[i] == '*':
if operator[i - 1] == '*' or operator[i - 1] == '/':
temp = temp * calculate_num[i + 1]
else:
temp = calculate_num[i] * calculate_num[i + 1]
elif operator[i] == '/':
if operator[i - 1] == '*' or operator[i - 1] == '/':
while temp % calculate_num[i + 1] != 0:
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = temp // calculate_num[i + 1]
else:
while calculate_num[i] % calculate_num[i + 1] != 0:
calculate_num[i] = random.randint(0, max_num)
calculate_num[i + 1] = random.randint(0, max_num)
while calculate_num[i + 1] == 0:
calculate_num[i + 1] = random.randint(0, max_num)
temp = calculate_num[i] // calculate_num[i + 1]
return operator,calculate_num
# Assemble the expression string from the operators and operands
def getFormula(operator,calculate_num,brackets=0):
formula = ''
if brackets == 0 or brackets == 1:
formula = str(calculate_num[0])
for i in range(len(operator)):
formula = formula + operator[i] + str(calculate_num[i + 1])
elif brackets == 2:
formula = '(' + str(calculate_num[0])
for i in range(len(operator)):
if i == 0:
formula = formula + operator[i] + str(calculate_num[i + 1]) + ')'
else:
formula = formula + operator[i] + str(calculate_num[i + 1])
return formula
# Expression-list generator; takes program_num, Operator, max_num, decimal_status, decimal_digit and returns ProgramList
def getProgramList(program_num,Operator,max_num,decimal_status,decimal_digit,blackets):
ProgramList = []
    while len(ProgramList) != program_num:  # set uniqueness guarantees no duplicate expressions
if blackets == 0:
            brackets_probability = random.randint(0, 2)  # 2/3 chance of no parentheses, 1/3 chance of parentheses
if brackets_probability==0 or brackets_probability == 1:
operator,calculate_num = getList(Operator,max_num,decimal_status,decimal_digit)
ProgramList.append(getFormula(operator,calculate_num,brackets_probability))
elif brackets_probability==2:
operator,calculate_num = getList2(Operator,max_num,decimal_status,decimal_digit)
ProgramList.append(getFormula(operator,calculate_num,brackets_probability))
elif blackets == 1:
operator, calculate_num = getList(Operator, max_num, decimal_status, decimal_digit)
ProgramList.append(getFormula(operator, calculate_num))
        ProgramList = set(ProgramList)  # list -> set to drop duplicates
        ProgramList = list(ProgramList)  # set -> list
return ProgramList
# Answer computation; takes program_num, decimal_status, decimal_digit, ProgramList and returns Program_Answer
def Compute_Answer(program_num, decimal_status, decimal_digit, ProgramList):
ProgramList_Temp = []
Program_Answer = []
    if decimal_status == 1:  # decimals are not included (integer-only mode)
for i in ProgramList:
ProgramList_Temp.append(i.replace('/', '//'))
for i in range(program_num):
Program_Answer.append(eval(ProgramList_Temp[i]))
elif decimal_status == 0:
for i in range(len(ProgramList)):
Program_Answer.append(round(eval(ProgramList[i]),decimal_digit))
return Program_Answer
# Output module; takes program_num, ProgramList, Program_Answer
def Output_Module(program_num,ProgramList,Program_Answer):
print(str(program_num) + "道题目已生成,请选择操作方式:\n"
"1-输出至屏幕同时存为文件 2-仅输出至屏幕")
while True:
try:
Output_Mode = int(input())
while Output_Mode not in [1,2]:
Output_Mode = int(input("无该选项,请重新选择:1-输出至屏幕同时存为文件 2-仅输出至屏幕\n"))
except ValueError:
print("请输入正确选项!!!")
else:
break
if Output_Mode == 1:
print_screen(program_num,ProgramList,Program_Answer)
count1 = count2 = 1
with open('Program.txt','w',encoding='UTF-8') as f:
f.write("题目如下:\n")
for i in range(program_num):
f.write(str(count1) + '、' + str(ProgramList[i]) + '='+'\n')
count1 += 1
with open('Answer.txt', 'w', encoding='UTF-8') as f:
f.write("答案如下:\n")
for i in range(program_num):
f.write(str(count2) + '、' + str(Program_Answer[i])+'\n')
count2 += 1
else:
print_screen(program_num,ProgramList,Program_Answer)
def print_screen(program_num,ProgramList,Program_Answer):
count1 = count2 = 1
print("题目如下:")
for i in range(program_num):
print(str(count1)+'、'+ str(ProgramList[i]) + '=')
count1+=1
while True:
try:
Answer_Print = int(input("是否输出答案:0-是 1-否"))
while Answer_Print not in [0,1]:
Answer_Print = int(input("无该选项,请重新选择:0-是 1-否"))
except ValueError:
print("请输入正确选项!")
else:
break
if Answer_Print == 0:
print("答案如下:")
for i in range(program_num):
print(str(count2) + '、' + str(Program_Answer[i]))
count2+=1
def main():
while True:
try:
program_num = int(input("请输入题目数量:"))
while program_num <= 0:
program_num = int(input("输入为非正整数!!!\n请输入有效题目数量:"))
except ValueError:
print("请输入整数!!!")
else:
break
while True:
try:
max_num = int(input("请输入最大操作数:"))
while max_num < 0:
max_num = int(input("输入为负数!!!\n请输入有效最大操作数:"))
except ValueError:
print("请输入非负整数!")
else:
break
while True:
try:
blackets = int(input("是否包含括号:0-是 1-否\n"))
while blackets not in [0,1]:
blackets = int(input("无该选项,请重新选择:0-是 1-否\n"))
except ValueError:
print("请输入正确选项!")
else:
break
while True:
try:
decimal_status = int(input("是否包括小数:0-是 1-否\n"))
while decimal_status not in [0,1]:
decimal_status = int(input("无该选项,请重新选择:0-是 1-否\n"))
except ValueError:
print("请输入正确选项!")
else:
break
decimal_digit = 0
if decimal_status == 0:
while True:
try:
decimal_digit = int(input("请输入小数位数:"))
while decimal_digit < 0:
decimal_digit = int(input("输入为非正整数!!!\n请输入有效小数位数:"))
except ValueError:
print("请输入非负整数!")
else:
break
Operator_str_temp = input("请输入操作符,以空格隔开:")
Operator_str = Operator_str_temp.strip(' ')
Operator = Operator_str.split(' ')
while True:
count = 0
for i in Operator:
if i not in ['+','-','*','/']:
Operator_str_temp = input("输入含非运算符!!!\n请输入正确的操作符:")
Operator_str = Operator_str_temp.strip(' ')
Operator = Operator_str.split(' ')
break
else:
count += 1
if count == len(Operator):
break
ProgramList = getProgramList(program_num,Operator,max_num,decimal_status,decimal_digit,blackets)
Program_Answer = Compute_Answer(program_num, decimal_status, decimal_digit, ProgramList)
Output_Module(program_num, ProgramList, Program_Answer)
if __name__ == '__main__':
main()
``` |
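For a quick sense of how these pieces fit together, here is a minimal, hedged usage sketch. The import path is an assumption (the file sits at the repository root), and it sticks to `+ - *` in integer-only mode so the interactive prompts and the division re-rolling loops are never exercised.

```python
# Hedged usage sketch of the generator above; the import path is an assumption.
from Part_Edition import getProgramList, Compute_Answer

# 5 integer-only expressions (decimal_status=1 means "no decimals"),
# operands up to 20, no parentheses (blackets=1), no division operator.
programs = getProgramList(5, ['+', '-', '*'], 20, 1, 0, 1)
answers = Compute_Answer(5, 1, 0, programs)
for expr, ans in zip(programs, answers):
    print(f"{expr} = {ans}")
```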
{
"source": "425776024/CoolNERTool",
"score": 2
} |
#### File: CoolNERTool/src/load_json.py
```python
import json
def get_json(path_json: str):
with open(path_json, encoding='utf-8') as f:
return json.load(f)
``` |
{
"source": "425776024/MOEAD",
"score": 3
} |
#### File: src/utils/Draw_Utils.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
'''
Plotting utilities
'''
fig = plt.figure()
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
ax = 0
def show():
plt.show()
def draw_MOEAD_Pareto(moead, name):
Pareto_F_ID = moead.EP_X_ID
Pop_F_Data = moead.Pop_FV
Len = len(Pop_F_Data[0])
if Len == 2:
r_x = Pop_F_Data[0][:]
r_y = Pop_F_Data[0][:]
for pi, pp in enumerate(Pop_F_Data):
plt.scatter(pp[0], pp[1], c='black', s=5)
for pid in Pareto_F_ID:
p = Pop_F_Data[pid]
if p[0] < r_x[0]:
r_x[0] = p[0]
if p[0] > r_x[1]:
r_x[1] = p[0]
            if p[1] < r_y[0]:
                r_y[0] = p[1]
if p[1] > r_y[1]:
r_y[1] = p[1]
# cc=(np.random.rand(),np.random.rand(),np.random.rand())
plt.scatter(p[0], p[1], c='r', s=20)
# plt.text(p[0], p[1]+0.2*np.random.rand(), str(pid), fontsize=10)
plt.xlabel('Function 1', fontsize=15)
plt.ylabel('Function 2', fontsize=15)
plt.title(name)
# plt.xlim(r_x[0] - 0.1, r_x[1] + 0.1)
# plt.ylim(r_y[0] - 0.1, r_y[1] + 0.1)
# if Len == 3:
# global ax, fig
# if ax == 0:
# ax = Axes3D(fig)
# ax.set_xlabel('Function 1')
# ax.set_ylabel('Function 2')
# ax.set_zlabel('Function 3')
# for pp in Pop_F_Data:
# ax.scatter(pp[0], pp[1], pp[2], c='black', s=5)
# for pid in Pareto_F_ID:
# p = Pop_F_Data[pid]
# ax.scatter(p[0], p[1], p[2], c='red', s=10)
# ax.set_xlim([0,20])
# ax.set_ylim([0,20])
# ax.set_zlim([0,20])
def draw_W(moead):
Start_Pts = moead.Z
path = moead.csv_file_path + '/' + moead.name + '.csv'
data = np.loadtxt(path)
Pareto_F_ID = moead.EP_X_ID
Pop_F_Data = moead.Pop_FV
if data.shape[1] == 3:
global ax, fig
if ax == 0:
ax = Axes3D(fig)
x, y, z = data[:, 0], data[:, 1], data[:, 2]
VecStart_x = Start_Pts[0]
VecStart_y = Start_Pts[1]
VecStart_z = Start_Pts[2]
VecEnd_x = data[:, 0]
VecEnd_y = data[:, 1]
VecEnd_z = data[:, 2]
ax.scatter(x, y, z, marker='.', s=50, label='', color='r')
for i in range(VecEnd_x.shape[0]):
ax.plot([VecStart_x, VecEnd_x[i]], [VecStart_y, VecEnd_y[i]], zs=[VecStart_z, VecEnd_z[i]])
if data.shape[1] == 2:
# x, y = data[:, 0], data[:, 1]
# plt.xlabel('X')
# plt.xlabel('Y')
# plt.scatter(x, y, marker='.', s=50, label='', color='r')
VecStart_x = Start_Pts[0]
VecStart_y = Start_Pts[1]
VecEnd_x = data[:, 0]
VecEnd_y = data[:, 1]
for i in range(VecEnd_y.shape[0]):
if i == moead.now_y:
plt.plot([VecEnd_x[i], Pop_F_Data[i][0]], [VecEnd_y[i], Pop_F_Data[i][1]])
plt.plot([VecStart_x, VecEnd_x[i]], [VecStart_y, VecEnd_y[i]])
```
#### File: src/utils/Mean_Vector_Util.py
```python
import numpy as np
'''
Generate a set of evenly distributed weight (mean) vectors
'''
class Mean_vector:
    # For an m-dimensional objective space with H divisions per objective
def __init__(self, H=5, m=3, path='out.csv'):
self.H = H
self.m = m
self.path = path
self.stepsize = 1 / H
def perm(self, sequence):
        # !!! all permutations of the sequence, without duplicates
l = sequence
if (len(l) <= 1):
return [l]
r = []
for i in range(len(l)):
if i != 0 and sequence[i - 1] == sequence[i]:
continue
else:
s = l[:i] + l[i + 1:]
p = self.perm(s)
for x in p:
r.append(l[i:i + 1] + x)
return r
def get_mean_vectors(self):
H = self.H
m = self.m
sequence = []
for ii in range(H):
sequence.append(0)
for jj in range(m - 1):
sequence.append(1)
ws = []
pe_seq = self.perm(sequence)
for sq in pe_seq:
s = -1
weight = []
for i in range(len(sq)):
if sq[i] == 1:
w = i - s
w = (w - 1) / H
s = i
weight.append(w)
nw = H + m - 1 - s
nw = (nw - 1) / H
weight.append(nw)
if weight not in ws:
ws.append(weight)
return ws
def save_mv_to_file(self, mv):
f = np.array(mv, dtype=np.float64)
np.savetxt(fname=self.path, X=f)
def generate(self):
m_v = self.get_mean_vectors()
self.save_mv_to_file(m_v)
# mv = Mean_vector(10, 3, 'test.csv')
# mv.generate()
``` |
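As a hedged sanity check of the generator above (the import path is assumed from the file location), H=2 divisions over m=2 objectives should yield exactly three evenly spaced weight vectors:

```python
# Hedged sanity check; the import path and output file name are assumptions.
from Mean_Vector_Util import Mean_vector

mv = Mean_vector(H=2, m=2, path='weights.csv')  # 'weights.csv' is an arbitrary name
print(mv.get_mean_vectors())  # expected: [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]]
```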
{
"source": "428lab/debug-shrine",
"score": 2
} |
#### File: debug-shrine/server/main.py
```python
from flask import Flask
from flask_cors import CORS
import router
def create_app():
# Generate Flask App Instance
app = Flask(__name__)
app.register_blueprint(router.router)
    app.config['JSON_AS_ASCII'] = False  # keep non-ASCII (Japanese) characters intact in JSON
    app.config["JSON_SORT_KEYS"] = False  # keep key order as-is
CORS(app, origins=["http://localhost", "http://localhost:3000"])
return app
app = create_app()
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=80, threaded=True, use_reloader=False)
```
#### File: debug-shrine/server/router.py
```python
from flask import Blueprint
# from controller import user_controller
from logging import config
from json import load
# import auth
# import logger
# Generate Router Instance
router = Blueprint('router', __name__)
@router.route("/", methods=['GET'])
# @logger.http_request_logging
# @auth.requires_auth
def hello_world():
return "Hello World!!"
# @router.route("/api/v1/users/getUserList", methods=['GET'])
# @logger.http_request_logging
# @auth.requires_auth
# def api_v1_users_get_user_list():
# return user_controller.get_user()
# @router.after_request
# def after_request(response):
# # response.headers.add('Access-Control-Allow-Origin', '*')
# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
# return response
``` |
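A hedged way to exercise the "/" route above without starting a real server; it assumes `main.py` and `router.py` are importable from the same directory and that flask and flask_cors are installed.

```python
# Hedged local check of the "/" route; the import path is an assumption.
from main import create_app

client = create_app().test_client()
resp = client.get("/")
print(resp.status_code, resp.get_data(as_text=True))  # 200 Hello World!!
```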
{
"source": "428lab/reception_bot",
"score": 2
} |
#### File: reception_bot/plugins/log.py
```python
class Handler:
def __init__(self, db):
self.db = db
self.info = {
"name":"log",
"permission":"user",
"type":"command",
"commands":["*"],
"version":"0.0.1",
}
def get_plugin_info(self):
return self.info
def on_message(self, server_id, server_name, user_id, user_name, channel_id, channel_name, content, command):
self.db.discord_log_chat(server_id, channel_id, user_id, content)
reaction = {
"message": None,
"embed": None,
"processed": True,
"through": True,
"file": None
}
return reaction
``` |
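A hedged illustration of how the hook above might be driven; the `FakeDB` stub and the import path are made up purely so the call is runnable.

```python
# Hedged illustration only: FakeDB and the import path are assumptions.
from plugins.log import Handler

class FakeDB:
    def discord_log_chat(self, server_id, channel_id, user_id, content):
        print(f"log: server={server_id} channel={channel_id} user={user_id}: {content}")

handler = Handler(FakeDB())
print(handler.get_plugin_info()["name"])  # log
reaction = handler.on_message(1, "guild", 2, "alice", 3, "general", "hello world", None)
print(reaction["processed"], reaction["through"])  # True True
```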
{
"source": "428s/kamonohashi",
"score": 2
} |
#### File: rest/models/tenant_api_models_edit_input_model.py
```python
import pprint
import re # noqa: F401
import six
class TenantApiModelsEditInputModel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'available_infinite_time_notebook': 'bool',
'default_git_id': 'int',
'default_registry_id': 'int',
'display_name': 'str',
'git_ids': 'list[int]',
'registry_ids': 'list[int]',
'storage_id': 'int'
}
attribute_map = {
'available_infinite_time_notebook': 'availableInfiniteTimeNotebook',
'default_git_id': 'defaultGitId',
'default_registry_id': 'defaultRegistryId',
'display_name': 'displayName',
'git_ids': 'gitIds',
'registry_ids': 'registryIds',
'storage_id': 'storageId'
}
def __init__(self, available_infinite_time_notebook=None, default_git_id=None, default_registry_id=None, display_name=None, git_ids=None, registry_ids=None, storage_id=None): # noqa: E501
"""TenantApiModelsEditInputModel - a model defined in Swagger""" # noqa: E501
self._available_infinite_time_notebook = None
self._default_git_id = None
self._default_registry_id = None
self._display_name = None
self._git_ids = None
self._registry_ids = None
self._storage_id = None
self.discriminator = None
if available_infinite_time_notebook is not None:
self.available_infinite_time_notebook = available_infinite_time_notebook
if default_git_id is not None:
self.default_git_id = default_git_id
if default_registry_id is not None:
self.default_registry_id = default_registry_id
self.display_name = display_name
self.git_ids = git_ids
self.registry_ids = registry_ids
self.storage_id = storage_id
@property
def available_infinite_time_notebook(self):
"""Gets the available_infinite_time_notebook of this TenantApiModelsEditInputModel. # noqa: E501
:return: The available_infinite_time_notebook of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: bool
"""
return self._available_infinite_time_notebook
@available_infinite_time_notebook.setter
def available_infinite_time_notebook(self, available_infinite_time_notebook):
"""Sets the available_infinite_time_notebook of this TenantApiModelsEditInputModel.
:param available_infinite_time_notebook: The available_infinite_time_notebook of this TenantApiModelsEditInputModel. # noqa: E501
:type: bool
"""
self._available_infinite_time_notebook = available_infinite_time_notebook
@property
def default_git_id(self):
"""Gets the default_git_id of this TenantApiModelsEditInputModel. # noqa: E501
:return: The default_git_id of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: int
"""
return self._default_git_id
@default_git_id.setter
def default_git_id(self, default_git_id):
"""Sets the default_git_id of this TenantApiModelsEditInputModel.
:param default_git_id: The default_git_id of this TenantApiModelsEditInputModel. # noqa: E501
:type: int
"""
self._default_git_id = default_git_id
@property
def default_registry_id(self):
"""Gets the default_registry_id of this TenantApiModelsEditInputModel. # noqa: E501
:return: The default_registry_id of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: int
"""
return self._default_registry_id
@default_registry_id.setter
def default_registry_id(self, default_registry_id):
"""Sets the default_registry_id of this TenantApiModelsEditInputModel.
:param default_registry_id: The default_registry_id of this TenantApiModelsEditInputModel. # noqa: E501
:type: int
"""
self._default_registry_id = default_registry_id
@property
def display_name(self):
"""Gets the display_name of this TenantApiModelsEditInputModel. # noqa: E501
:return: The display_name of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this TenantApiModelsEditInputModel.
:param display_name: The display_name of this TenantApiModelsEditInputModel. # noqa: E501
:type: str
"""
if display_name is None:
raise ValueError("Invalid value for `display_name`, must not be `None`") # noqa: E501
self._display_name = display_name
@property
def git_ids(self):
"""Gets the git_ids of this TenantApiModelsEditInputModel. # noqa: E501
:return: The git_ids of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: list[int]
"""
return self._git_ids
@git_ids.setter
def git_ids(self, git_ids):
"""Sets the git_ids of this TenantApiModelsEditInputModel.
:param git_ids: The git_ids of this TenantApiModelsEditInputModel. # noqa: E501
:type: list[int]
"""
if git_ids is None:
raise ValueError("Invalid value for `git_ids`, must not be `None`") # noqa: E501
self._git_ids = git_ids
@property
def registry_ids(self):
"""Gets the registry_ids of this TenantApiModelsEditInputModel. # noqa: E501
:return: The registry_ids of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: list[int]
"""
return self._registry_ids
@registry_ids.setter
def registry_ids(self, registry_ids):
"""Sets the registry_ids of this TenantApiModelsEditInputModel.
:param registry_ids: The registry_ids of this TenantApiModelsEditInputModel. # noqa: E501
:type: list[int]
"""
if registry_ids is None:
raise ValueError("Invalid value for `registry_ids`, must not be `None`") # noqa: E501
self._registry_ids = registry_ids
@property
def storage_id(self):
"""Gets the storage_id of this TenantApiModelsEditInputModel. # noqa: E501
:return: The storage_id of this TenantApiModelsEditInputModel. # noqa: E501
:rtype: int
"""
return self._storage_id
@storage_id.setter
def storage_id(self, storage_id):
"""Sets the storage_id of this TenantApiModelsEditInputModel.
:param storage_id: The storage_id of this TenantApiModelsEditInputModel. # noqa: E501
:type: int
"""
if storage_id is None:
raise ValueError("Invalid value for `storage_id`, must not be `None`") # noqa: E501
self._storage_id = storage_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TenantApiModelsEditInputModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TenantApiModelsEditInputModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
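A hedged usage sketch of the generated model above; the import path is taken from the file header and every field value is made up for illustration.

```python
# Hedged usage sketch; import path and values are assumptions.
from rest.models.tenant_api_models_edit_input_model import TenantApiModelsEditInputModel

payload = TenantApiModelsEditInputModel(
    display_name="my-tenant",
    git_ids=[1, 2],
    registry_ids=[1],
    storage_id=1,
    default_git_id=1,
    default_registry_id=1,
)
print(payload.to_dict())
```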
#### File: rest/models/training_api_models_create_input_model.py
```python
import pprint
import re # noqa: F401
import six
class TrainingApiModelsCreateInputModel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'container_image': 'ComponentsContainerImageInputModel',
'cpu': 'int',
'data_set_id': 'int',
'entry_point': 'str',
'git_model': 'ComponentsGitCommitInputModel',
'gpu': 'int',
'memo': 'str',
'memory': 'int',
'name': 'str',
'options': 'dict(str, str)',
'parent_id': 'int',
'partition': 'str',
'zip': 'bool'
}
attribute_map = {
'container_image': 'containerImage',
'cpu': 'cpu',
'data_set_id': 'dataSetId',
'entry_point': 'entryPoint',
'git_model': 'gitModel',
'gpu': 'gpu',
'memo': 'memo',
'memory': 'memory',
'name': 'name',
'options': 'options',
'parent_id': 'parentId',
'partition': 'partition',
'zip': 'zip'
}
def __init__(self, container_image=None, cpu=None, data_set_id=None, entry_point=None, git_model=None, gpu=None, memo=None, memory=None, name=None, options=None, parent_id=None, partition=None, zip=None): # noqa: E501
"""TrainingApiModelsCreateInputModel - a model defined in Swagger""" # noqa: E501
self._container_image = None
self._cpu = None
self._data_set_id = None
self._entry_point = None
self._git_model = None
self._gpu = None
self._memo = None
self._memory = None
self._name = None
self._options = None
self._parent_id = None
self._partition = None
self._zip = None
self.discriminator = None
self.container_image = container_image
self.cpu = cpu
self.data_set_id = data_set_id
self.entry_point = entry_point
self.git_model = git_model
self.gpu = gpu
if memo is not None:
self.memo = memo
self.memory = memory
self.name = name
if options is not None:
self.options = options
if parent_id is not None:
self.parent_id = parent_id
if partition is not None:
self.partition = partition
if zip is not None:
self.zip = zip
@property
def container_image(self):
"""Gets the container_image of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The container_image of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: ComponentsContainerImageInputModel
"""
return self._container_image
@container_image.setter
def container_image(self, container_image):
"""Sets the container_image of this TrainingApiModelsCreateInputModel.
:param container_image: The container_image of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: ComponentsContainerImageInputModel
"""
if container_image is None:
raise ValueError("Invalid value for `container_image`, must not be `None`") # noqa: E501
self._container_image = container_image
@property
def cpu(self):
"""Gets the cpu of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The cpu of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: int
"""
return self._cpu
@cpu.setter
def cpu(self, cpu):
"""Sets the cpu of this TrainingApiModelsCreateInputModel.
:param cpu: The cpu of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: int
"""
if cpu is None:
raise ValueError("Invalid value for `cpu`, must not be `None`") # noqa: E501
self._cpu = cpu
@property
def data_set_id(self):
"""Gets the data_set_id of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The data_set_id of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: int
"""
return self._data_set_id
@data_set_id.setter
def data_set_id(self, data_set_id):
"""Sets the data_set_id of this TrainingApiModelsCreateInputModel.
:param data_set_id: The data_set_id of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: int
"""
if data_set_id is None:
raise ValueError("Invalid value for `data_set_id`, must not be `None`") # noqa: E501
self._data_set_id = data_set_id
@property
def entry_point(self):
"""Gets the entry_point of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The entry_point of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: str
"""
return self._entry_point
@entry_point.setter
def entry_point(self, entry_point):
"""Sets the entry_point of this TrainingApiModelsCreateInputModel.
:param entry_point: The entry_point of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: str
"""
if entry_point is None:
raise ValueError("Invalid value for `entry_point`, must not be `None`") # noqa: E501
self._entry_point = entry_point
@property
def git_model(self):
"""Gets the git_model of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The git_model of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: ComponentsGitCommitInputModel
"""
return self._git_model
@git_model.setter
def git_model(self, git_model):
"""Sets the git_model of this TrainingApiModelsCreateInputModel.
:param git_model: The git_model of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: ComponentsGitCommitInputModel
"""
if git_model is None:
raise ValueError("Invalid value for `git_model`, must not be `None`") # noqa: E501
self._git_model = git_model
@property
def gpu(self):
"""Gets the gpu of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The gpu of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: int
"""
return self._gpu
@gpu.setter
def gpu(self, gpu):
"""Sets the gpu of this TrainingApiModelsCreateInputModel.
:param gpu: The gpu of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: int
"""
if gpu is None:
raise ValueError("Invalid value for `gpu`, must not be `None`") # noqa: E501
self._gpu = gpu
@property
def memo(self):
"""Gets the memo of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The memo of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: str
"""
return self._memo
@memo.setter
def memo(self, memo):
"""Sets the memo of this TrainingApiModelsCreateInputModel.
:param memo: The memo of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: str
"""
self._memo = memo
@property
def memory(self):
"""Gets the memory of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The memory of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: int
"""
return self._memory
@memory.setter
def memory(self, memory):
"""Sets the memory of this TrainingApiModelsCreateInputModel.
:param memory: The memory of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: int
"""
if memory is None:
raise ValueError("Invalid value for `memory`, must not be `None`") # noqa: E501
self._memory = memory
@property
def name(self):
"""Gets the name of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The name of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TrainingApiModelsCreateInputModel.
:param name: The name of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def options(self):
"""Gets the options of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The options of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: dict(str, str)
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this TrainingApiModelsCreateInputModel.
:param options: The options of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: dict(str, str)
"""
self._options = options
@property
def parent_id(self):
"""Gets the parent_id of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The parent_id of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: int
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""Sets the parent_id of this TrainingApiModelsCreateInputModel.
:param parent_id: The parent_id of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: int
"""
self._parent_id = parent_id
@property
def partition(self):
"""Gets the partition of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The partition of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: str
"""
return self._partition
@partition.setter
def partition(self, partition):
"""Sets the partition of this TrainingApiModelsCreateInputModel.
:param partition: The partition of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: str
"""
self._partition = partition
@property
def zip(self):
"""Gets the zip of this TrainingApiModelsCreateInputModel. # noqa: E501
:return: The zip of this TrainingApiModelsCreateInputModel. # noqa: E501
:rtype: bool
"""
return self._zip
@zip.setter
def zip(self, zip):
"""Sets the zip of this TrainingApiModelsCreateInputModel.
:param zip: The zip of this TrainingApiModelsCreateInputModel. # noqa: E501
:type: bool
"""
self._zip = zip
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TrainingApiModelsCreateInputModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TrainingApiModelsCreateInputModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
``` |
{
"source": "42-AI/DemocracyWatcher",
"score": 3
} |
#### File: DemocracyWatcher/.42AI/test_environment.py
```python
import sys
REQUIRED_PYTHON = "python3.8"
required_major = 3
required_minor = 8
def main():
system_major = sys.version_info.major
system_minor = sys.version_info.minor
if system_major != required_major:
raise TypeError(
"This project requires Python{}.{}. Found: Python{}.{}".format(
required_major, required_minor, system_major, system_minor))
elif system_minor != required_minor:
raise TypeError(
"This project requires Python{}.{}. Found: Python{}.{}".format(
required_major, required_minor, system_major, system_minor))
else:
print(">>> Development environment passes all tests!")
if __name__ == '__main__':
main()
```
#### File: data/load_dataset/twitter_predict.py
```python
import pandas as pd
import numpy as np
from unidecode import unidecode
import os
from src import config
def get_tweets_csv(csv_path: str) -> np.array:
df = pd.read_csv(csv_path)
X = df['text'].apply(lambda row: unidecode(row)).to_numpy()
return X
def get_tweets_df(df) -> np.array:
X = df['text'].apply(lambda row: unidecode(row)).to_numpy()
return X
```
#### File: src/models/main.py
```python
from src.models.huggingface.roberta import HuggingFaceModel
from src.models.sklearn.Naive_Bayes import Naive_Bayes
from src.models.random.predict_model_random import RandomModel
import argparse
from src.models.pytorch.local_main import torch_main
from src.models.ModelManager import ModelManager
def add_models_args(parser):
parser.add_argument('--model',
help="Training based on the model entered",
default='random',
choices=['random', 'naive-bayes',
'huggingface', 'torch']
)
parser.add_argument('--task',
required=True,
help="Task to be perforemed",
choices=['train', 'test', 'predict']
)
parser.add_argument('--dataset_type',
default="bi",
choices=['bi', 'tri', 'predict'],
help="Wether the dataset has 2 or 3 label",
)
parser.add_argument('--flat_y',
default=False,
action="store_true",
help="Wether the dataset has 2 or 3 label",
)
parser.add_argument('--in_csv',
required=True,
help="The input csv",
# type=argparse.FileType('r')
)
parser.add_argument('--out_csv',
help="Path of output csv file: must finish by '.csv'\
Required if --task is test or predict"
)
parser.add_argument('--weights_in',
help="Only if --task is test or predict.\
If no weights are passed and --task is test, \
the model will train first on the test_file.csv",
default=None
)
parser.add_argument('--weights_out',
help="Path to save the weights if --task is train",
default=None
)
parser.add_argument('--score',
help="Path to save the weights if --task is train",
choices=['accuracy'],
default='accuracy'
)
def models_main(args):
"""Redirect args to the asking model in the CLI
Args:
args: args passed in CLI
"""
if args.model == "torch":
torch_main(args)
return
if args.model == "naive-bayes":
mdl = Naive_Bayes()
elif args.model == "huggingface":
mdl = HuggingFaceModel()
elif args.model == "random":
mdl = RandomModel()
mm = ModelManager(mdl, args.dataset_type, args.flat_y)
if args.weights_in:
mm.load(args.weights_in)
if args.task == "test":
mm.test(args.in_csv, args.score)
elif args.task == "train":
mm.train(args.in_csv, args.weights_out)
elif args.task == "predict":
mm.predict(args.in_csv, args.out_csv)
```
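A hedged sketch of driving this entry point programmatically; the CSV paths below are made up and `ModelManager` (not shown here) is assumed to resolve them.

```python
# Hedged sketch; file paths are made up and only illustrate the CLI surface.
import argparse
from src.models.main import add_models_args, models_main

parser = argparse.ArgumentParser()
add_models_args(parser)
args = parser.parse_args([
    "--model", "random",
    "--task", "predict",
    "--in_csv", "data/tweets_to_label.csv",   # hypothetical input
    "--out_csv", "data/tweets_labelled.csv",  # hypothetical output
])
models_main(args)
```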
#### File: models/sklearn/Naive_Bayes.py
```python
import sklearn
from sklearn.naive_bayes import MultinomialNB as NB
import pandas as pd
import numpy as np
import scipy
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import joblib
from src.data.load_dataset.base_dataset_tri_label import get_X_y_tri
from src.data.load_dataset.base_dataset_bi_label import get_X_y_bi
from src.models.BaseModel import BaseModel
class Naive_Bayes(BaseModel):
"""_summary_
Multinomial Naive Bayes model based on hyperparam alpha = 1.
- Multinomial Model only selected beacuse seems to be common practice
for NLP (no thorough research done)
- aplha set to 1 because default value (no iteration on alpha done)
"""
def __init__(self):
self.__clf = NB(alpha=1)
self.trained_preprocessing = False
def train(self, X_train: scipy.sparse.csr.csr_matrix,
y_train: scipy.sparse.csr.csr_matrix):
"""
X_train: {array-like, sparse matrix} of shape (n_samples, n_features)
y_train: array-like of shape (n_samples,)
"""
self.__clf = self.__clf.fit(X_train, y_train)
def preprocess(self, X):
if not self.trained_preprocessing:
self.vec = CountVectorizer()
# 1st step of vectorization: CountVectorizer vectorizes X_train and X_test
_ = self.vec.fit_transform(X)
# # 2nd step: TF-IDF improves the vectorization of vectors created by
# # CountVectorizer
# self.tf_transformer = TfidfTransformer(use_idf=False).fit(X)
self.trained_preprocessing = True
X_vec = self.vec.transform(X)
# X_tf = self.tf_transformer.transform(X_vec)
return X_vec
def one_hot_y(self, y):
nb_features = 3
b = np.zeros((y.size, nb_features))
b[np.arange(y.size), y] = 1
return b
def predict(self, X: scipy.sparse.csr.csr_matrix):
predicted = self.__clf.predict(X)
matrix_predicted = self.one_hot_y(predicted)
return matrix_predicted
def add_predictions_to_df(self, df, y):
y_preds = pd.DataFrame(y,
columns=[
'predict_Positive',
'predict_Negative',
'predict_Neutral',
]
)
return pd.concat([df, y_preds], axis=1)
# def naive_bayes_train(csv_in, weights_out, weights_in=None):
# df = pd.read_csv(csv_in)
# X, y = get_X_y_tri(df, flat_y=True)
# if weights_in:
# nb = Naive_Bayes().load(weights_in)
# else:
# nb = Naive_Bayes()
# X_prep = nb.preprocess(X)
# nb.train(X_prep, y)
# nb.save(weights_out)
# def naive_bayes_predict(csv_in, csv_out, weights_in):
# df = pd.read_csv(csv_in)
# X, _ = get_X_y_tri(df, flat_y=True)
# nb = Naive_Bayes().load(weights_in)
# X_prep = nb.preprocess(X)
# y_pred = nb.predict(X_prep)
# df = add_predictions_to_df(df, y_pred)
# df.to_csv(csv_out)
# print(f"\nCsv with predictions created at {csv_out}\n")
# def naive_bayes_test(csv_in, csv_out, weights_in, score='accuracy'):
# df = pd.read_csv(csv_in)
# X, y = get_X_y_tri(df, flat_y=True)
# nb = Naive_Bayes().load(weights_in)
# X_prep = nb.preprocess(X)
# y_pred = nb.predict(X_prep)
# accuracy = nb.get_score(y, y_pred)
# print(f"Accuracy: {accuracy}")
# df.to_csv(csv_out)
# print(f"\nCsv with predictions created at {csv_out}\n")
# def naive_bayes_alex(csv_in, csv_out, weights_in=None, score='accuracy'):
# """
# Args:
# csv_in (_type_): _description_
# csv_out (_type_): _description_
# weights_in (_type_, optional): _description_. Defaults to None.
# score (str, optional): _description_. Defaults to 'accuracy'.
# """
# # =================== PREPROCESS DATA =============================
# df = pd.read_csv(csv_in)
# X = df[['text']]
# y = df[['Positive', 'Negative', 'Neutral']]
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# X_train = X_train.values
# X_test = X_test.values
# y_train_1d = y_train['Positive'] + (y_train['Negative'] * (-1))
# y_test_1d = y_test['Positive'] + (y_test['Negative'] * (-1))
# y_train_1d = y_train_1d.values
# y_test_1d = y_test_1d.values
# X_train = X_train.squeeze()
# X_test = X_test.squeeze()
# # =================== VECTORIZATION =============================
# # 1st step of vectorization: CountVectorizer vectorizes X_train and X_test
# vec = CountVectorizer()
# X_train_trans = vec.fit_transform(X_train)
# X_test_trans = vec.transform(X_test)
# # 2nd step: TF-IDF improves the vectorization of vectors created by
# # CountVectorizer
# tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_trans)
# X_train_tf = tf_transformer.transform(X_train_trans)
# X_test_tf = tf_transformer.transform(X_test_trans)
# # =================== TRAIN + PREDICT ===============================
# nb = NB()
# nb.train(X_train_tf, y_train_1d)
# y_pred = nb.predict(X_test_tf)
# # =================== EXPORT CSV_OUT ===============================
# df["predict_Negative"] = (y_pred == -1).astype(int)
# df["predict_Neutral"] = (y_pred == 0).astype(int)
# df["predict_Positive"] = (y_pred == 1).astype(int)
# df.to_csv(csv_out)
# print(f"\nCsv with predictions created at {csv_out}\n")
# # =================== PRINT SCORE ===============================
# accuracy = sklearn.metrics.accuracy_score(y_test_1d, y_pred)
# print(f"Accuracy: {accuracy}")
# return (accuracy)
# def naive_bayes_main(args):
# if args.task == 'train':
# naive_bayes_train(args.train_csv, args.weights_out, args.weights_in)
# elif args.task == 'test':
# naive_bayes_test(args.test_csv, args.out_csv, args.weights_in)
# elif args.task == 'predict':
# naive_bayes_predict(args.predict_csv, args.csv_out, args.weights_in)
``` |
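A minimal, hedged sketch of the train/predict round trip through the class above. The three example texts and flat labels are made up; in the project, data flows through `ModelManager` and the CSV loaders instead.

```python
# Hedged sketch with made-up data; real usage goes through ModelManager.
import numpy as np
from src.models.sklearn.Naive_Bayes import Naive_Bayes

texts = np.array(["great decision", "terrible policy", "a vote happened today"])
labels = np.array([0, 1, 2])  # flat class ids, e.g. Positive / Negative / Neutral

nb = Naive_Bayes()
X = nb.preprocess(texts)   # fits the CountVectorizer on the first call
nb.train(X, labels)
preds = nb.predict(nb.preprocess(texts))  # one-hot rows of shape (3, 3)
print(preds)
```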
{
"source": "42-AI/pdf_builder",
"score": 2
} |
#### File: pdf_builder/bootcamp/format.py
```python
import re
# ============================================================================#
# ================================ FUNCTIONS =================================#
# ============================================================================#
# INSERT BOOTCAMP TITLE / PDF TITLE
######################################
def get_line_containing(file, content):
"""
Search a line containing a specific content.
Args:
file (undefined): file name
content (undefined): searched content
"""
index = 0
with open(file, 'r') as infile:
for idx, line in enumerate(infile):
if re.match(r'.*{}.*'.format(content), line):
index = idx + 1
return (index)
def insert_line(file, idx, content):
"""
Insert line into a file at a precise line.
Args:
file (undefined): file name
idx (undefined): line number
content (undefined): content to be added.
"""
f = open(file, "r")
contents = f.readlines()
f.close()
contents.insert(idx, "{}".format(content))
f = open(file, "w")
contents = "".join(contents)
f.write(contents)
f.close()
def insert_day_title(day_title):
"""
Insert day title to the template file.
Args:
args (undefined):
"""
idx = get_line_containing("tmp/template.latex", "day_number")
insert_line("tmp/template.latex", idx, day_title.split(' - ')[0])
idx = get_line_containing("tmp/template.latex", "day_title")
insert_line("tmp/template.latex", idx, day_title.split(' - ')[1])
```
#### File: pdf_builder/common/pandoc.py
```python
from pdf_builder.common.utils import sub_run, error
# ============================================================================#
# ================================ FUNCTIONS =================================#
# ============================================================================#
# RUN PANDOC
###############
def run_pandoc(file_name):
"""
Build pdf file for each markdown.
Args:
file_name (undefined):
"""
res = sub_run("pandoc {} --to=pdf --pdf-engine=pdflatex --highlight-style=breezedark\
-t latex -o {} --template=tmp/template.latex"
.format(file_name, file_name + ".pdf"))
if res.stderr:
print(file_name)
error(res.stderr.decode().strip(), file_name)
def run_pandoc_all(outfile, debug):
"""
Build a pdf with all markdown files.
Args:
outfile (undefined): output file name
debug (undefined): debug option
"""
res = sub_run("pandoc tmp/*.md --to=pdf --pdf-engine=pdflatex --highlight-style=breezedark\
-t latex -o {} --template=tmp/template.latex".format(outfile))
if res.stderr:
error(res.stderr.decode().strip())
if not debug:
sub_run("rm -rf tmp")
``` |
{
"source": "42-AI/TCP-Unity-Client",
"score": 3
} |
#### File: AllAgents/bob/utils.py
```python
from bomberman import defines
from bomberman.defines import t_action
from bomberman.states.StatePlayer import StatePlayer
from typing import List, Tuple, Union
Coordinates = Union[Tuple[int, int], Tuple[float, float]]
def get_players(pp: List[StatePlayer]):
if pp[0].enemy:
me, yu = pp[1], pp[0]
else:
me, yu = pp[0], pp[1]
return me, yu
def move_from_to(from_: Coordinates, to_: Coordinates) -> t_action:
diff_horizontal = from_[0] - to_[0]
diff_vertical = from_[1] - to_[1]
if abs(diff_horizontal) > abs(diff_vertical):
if diff_horizontal > 0:
action = defines.Down
else:
action = defines.Up
else:
if diff_vertical > 0:
action = defines.Right
else:
action = defines.Left
return action
```
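A hedged illustration of which action the helper above picks for sample coordinates; the import path is assumed from the file layout, and `defines.Down` comes from the bomberman package used above.

```python
# Hedged illustration; the import path is an assumption.
from AllAgents.bob.utils import move_from_to
from bomberman import defines

# The axis-0 difference (5 - 2 = 3) dominates and is positive, so Down is chosen.
action = move_from_to((5.0, 3.0), (2.0, 3.0))
print(action == defines.Down)  # True
```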
#### File: TCP-Unity-Client/AllAgents/NoSuicide.py
```python
from bomberman.agents.BaseAgent import BaseAgent
from bomberman.states.State import State
from bomberman.defines import t_action
from bomberman import defines
from random import Random
class NoSuicide(BaseAgent):
def __init__(self, player_num: int) -> None:
super().__init__(player_num)
def get_action(self, state: State) -> t_action:
"""Choose an action from a given state
This is where you put something smart to choose an action.
Args:
-------
state (State): State object from the client/Environment
Returns:
-------
int: Agent action
It must be one the action defined in bomberman.defines
For example:
from bomberman import defines
return defines.Bomb
"""
return (Random().choice(defines.move_space))
``` |
{
"source": "42Bastian/arm64-pgtable-tool",
"score": 3
} |
#### File: arm64-pgtable-tool/pgtt/mmap.py
```python
from enum import Enum
from enum import IntEnum
import errno
import re
import sys
from dataclasses import dataclass
# Internal deps
from . import args
from . import log
# External deps
from intervaltree import Interval, IntervalTree
class AP_TYPE(IntEnum):
NS = 16,
UXN = 8,
SXN = 4,
SRW_UNA = 0, # implies User no access
SRW_URW = 1, # implies SRW
SRO_UNA = 2, # implies User no access
SRO_URO = 3 # implies SRO
class MEMORY_TYPE(IntEnum):
DEVICE = 0,
CACHE_WB = 1,
CACHE_WT = 2,
NO_CACHE = 3,
SHARED = 4,
GLOBAL = 8
@dataclass
class Region:
"""
Class representing a single region in the memory map.
"""
lineno: int # line number in source memory map file
comment: str # name/comment e.g. DRAM, GIC, UART, ...
addr: int # base address
virtaddr: int # virtual base addr
length: int # length in bytes
memory_type: MEMORY_TYPE # True for Device-nGnRnE, False for Normal WB RAWA
ap_type: AP_TYPE # Access right
num_contig = 1
def copy( self, **kwargs ):
"""
Create a duplicate of this Region.
Use kwargs to override this region's corresponding properties.
"""
region = Region(self.lineno,self.comment, self.addr, self.virtaddr, self.length, self.memory_type, self.ap_type)
for kw,arg in kwargs.items():
region.__dict__[kw] = arg
return region
def __str__( self ):
"""
Override default __str__ to print addr and length in hex format.
"""
return "Region(lineno={}, comment='{}', addr={}, virtaddr={}, length={}, memory_type={}".format(
self.lineno, self.comment, hex(self.addr), hex(self.virtaddr), hex(self.length), self.memory_type, self.ap_type
)
class MemoryMap():
"""
Class representing the user's entire specified memory map.
This is a wrapper around chaimleib's intervaltree library.
"""
def __init__( self, map_file:str ):
self._ivtree = IntervalTree()
if map_file == "stdin" :
map_file_handle=sys.stdin
else:
try:
map_file_handle= open(map_file, "r")
except OSError as e:
log.error(f"failed to open map file: {e}")
sys.exit(e.errno)
with map_file_handle:
map_file_lines = map_file_handle.readlines()
"""
Loop through each line in the map file.
"""
for lineno,line in enumerate(map_file_lines):
line = line.strip()
log.debug()
log.debug(f"parsing line {lineno}: {line}")
if len(line) == 0:
continue
if line[0] == '#':
continue
if line.startswith('//'):
continue
def abort_bad_region( msg:str, variable ) -> None:
"""
Pretty-print an error message and force-exit the script.
"""
log.error(f"in {map_file_handle} on line {lineno+1}: bad region {msg}: {variable}")
log.error(f" {line}")
log.error(f" {' '*line.find(variable)}{'^'*len(variable)}")
sys.exit(errno.EINVAL)
"""
Ensure correct number of fields have been specified.
"""
split_line = line.split(",")
if len(split_line) < 6:
abort_bad_region("format: incomplete", line)
if len(split_line) > 6:
abort_bad_region("format: unexpected field(s)", line[line.find(split_line[4]):])
(addr, virtaddr, length, memtype, rights, comment) = split_line
addr = addr.strip()
virtaddr = virtaddr.strip()
if virtaddr == "":
virtaddr = addr
length = length.strip()
memtype = memtype.strip()
split_memtype = memtype.split(":")
if len(split_memtype) > 3:
abort_bad_region("To many options", line)
memtype = split_memtype[0]
split_rights = rights.split(":")
if len(split_rights) < 1:
abort_bad_region("Missing rights", line)
# if len(split_rights) > 2:
# abort_bad_region("To many rights", line)
comment = comment.strip()
"""
Parse region base address.
"""
log.debug(f"parsing base address: {addr}")
try:
addr = eval(addr)
except SyntaxError:
abort_bad_region("base address", addr)
log.debug(f"parsing virtual base address: {virtaddr}")
try:
virtaddr = eval(virtaddr)
except SyntaxError:
abort_bad_region("virtual address", virtaddr)
            if addr > (1 << args.tsz):
                abort_bad_region("output address too large", hex(addr))
            if virtaddr > (1 << args.tsz):
                abort_bad_region("VA address too large", hex(virtaddr))
"""
Parse region length.
"""
log.debug(f"parsing length: {length}")
length1 = re.sub(r"(\d+)K","(\\1*1024)", length)
length1 = re.sub(r"(\d+)M","(\\1*1024*1024)", length1)
length1 = re.sub(r"(\d+)G","(\\1*1024*1024*1024)", length1)
length1 = re.sub(r"(\d+)T","(\\1*1024*1024*1024*1024)", length1)
try:
length = eval(length1)
except SyntaxError:
abort_bad_region("length", length1)
"""
Fudge region to be mappable at chosen granule size.
"""
misalignment = addr % args.tg
if misalignment:
addr = addr - misalignment
length = length + args.tg
log.debug("corrected misalignment, new addr={}, length={}".format(hex(addr), hex(length)))
misalignment = virtaddr % args.tg
if misalignment:
virtaddr = virtaddr - misalignment
log.debug("corrected misalignment, new addr={}, length={}".format(hex(addr), hex(length)))
overflow = length % args.tg
if overflow:
length = length + args.tg - overflow
log.debug("corrected overflow, new length={}".format(hex(length)))
"""
Parse region attributes.
"""
memory_type = 0
log.debug(f"parsing memory type: {memtype}")
for memtype in split_memtype:
memtype = memtype.strip()
if not memtype in ["DEVICE", "CACHE_WB", "CACHE_WT", "NO_CACHE", "GLOBAL", "SHARED" ]:
abort_bad_region("memory type", memtype)
if (memory_type & 3) and memtype in ["DEVICE", "CACHE_WB", "CACHE_WT", "NO_CACHE"]:
abort_bad_region("memory type", memtype)
if memtype == "DEVICE":
memory_type |= MEMORY_TYPE.DEVICE
elif memtype == "CACHE_WB":
memory_type |= MEMORY_TYPE.CACHE_WB
elif memtype == "CACHE_WT":
memory_type |= MEMORY_TYPE.CACHE_WT
elif memtype == "NO_CACHE":
memory_type |= MEMORY_TYPE.NO_CACHE
elif memtype == "SHARED":
memory_type |= MEMORY_TYPE.SHARED
else:
memory_type |= MEMORY_TYPE.GLOBAL
log.debug(f"{memory_type=}")
"""
Parse access rights
"""
ap_right = AP_TYPE.SXN|AP_TYPE.UXN
for ap in split_rights:
ap = ap.strip()
if not ap in ["SX", "UX", "SRW_UNA", "SRW_URW", "SRO_UNA", "SRO_URO", "NS", "GLOBAL"]:
abort_bad_region("access rights", ap)
if ap == "SX":
ap_right &= ~AP_TYPE.SXN
elif ap == "UX":
ap_right &= ~AP_TYPE.UXN
elif ap == "NS":
ap_right |= AP_TYPE.NS
elif ap == "SHARED":
ap_right |= AP_TYPE.SHARED
elif ap == "GLOBAL":
memory_type |= MEMORY_TYPE.GLOBAL
elif ap == "SRW_UNA":
if (ap_right & 3):
abort_bad_region("access rights", ap)
ap_right |= AP_TYPE.SRW_UNA
elif ap == "SRW_URW":
if (ap_right & 3):
abort_bad_region("access rights", ap)
ap_right |= AP_TYPE.SRW_URW
elif ap == "SRO_UNA":
if (ap_right & 3):
abort_bad_region("access rights", ap)
ap_right |= AP_TYPE.SRO_UNA
else:
if (ap_right & 3):
abort_bad_region("access rights", ap)
ap_right |= AP_TYPE.SRO_URO
            if memory_type == MEMORY_TYPE.DEVICE and (ap_right >> 2) != 3:
                abort_bad_region("Device region must not be SX or UX", hex(ap_right))
"""
Check for overlap with other regions.
"""
log.debug(f"checking for overlap with existing regions")
overlap = sorted(self._ivtree[virtaddr:virtaddr+length])
if overlap:
log.error(f"in {map_file} on line {lineno+1}: region overlaps other regions")
log.error(f" {line}")
log.error(f"the overlapped regions are:")
[log.error(f" {map_file_lines[iv.data.lineno-1].strip()} (on line {iv.data.lineno})") for iv in overlap]
sys.exit(errno.EINVAL)
"""
Add parsed region to memory map.
"""
r = Region(lineno+1, comment, addr, virtaddr, length, memory_type, ap_right)
self._ivtree.addi(virtaddr, virtaddr+length, r)
log.debug(f"added {r}")
def regions( self ):
"""
Return list of Region objects sorted by ascending base address.
"""
return list(map(lambda r: r[2], sorted(self._ivtree)))
regions = MemoryMap(args.i).regions()
``` |
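Each non-comment line parsed above carries six comma-separated fields: physical address, virtual address (blank to reuse the physical one), length, memory type, access rights, and a comment. A hedged sketch of such a map file, with made-up addresses and names:

```
# addr,      virtaddr,    length, memtype,          rights,      comment
0x09000000,           ,   4K,     DEVICE,           SRW_UNA,     UART
0x40000000, 0x40000000,   512M,   CACHE_WB:GLOBAL,  SRW_URW:SX,  DRAM
```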
{
"source": "42bbichero/MovieMon",
"score": 4
} |
#### File: moviemon/classes/map.py
```python
class Map:
def __init__(self, h_case=10, w_case=10, current_case=1):
if current_case < 1 or (w_case * h_case) < current_case:
raise Exception('Position is not good.')
if h_case < 1 or h_case > 100:
raise Exception('Height is not good.')
if w_case < 1 or w_case > 100:
raise Exception('Width is not good.')
else:
self.h_case = h_case
self.w_case = w_case
self.current_case = current_case
def get_position(self):
return self.current_case
def get_map(self):
return {
'position': self.current_case,
'width': self.w_case,
'heigth': self.h_case,
'up': ((self.current_case - self.w_case) >= 1),
'down': ((self.current_case + self.w_case) <= (self.w_case * self.h_case)),
'left': ((self.current_case - 1) % self.w_case != 0),
'right': (self.current_case % self.w_case != 0),
}
def move_up(self):
if (self.current_case - self.w_case) >= 1:
self.current_case -= self.w_case
return True
else:
return False
def move_down(self):
if (self.current_case + self.w_case) <= (self.w_case * self.h_case):
self.current_case += self.w_case
return True
else:
return False
def move_left(self):
if (self.current_case - 1) % self.w_case != 0:
self.current_case -= 1
return True
else:
return False
def move_right(self):
if self.current_case % self.w_case != 0:
self.current_case += 1
return True
else:
return False
```
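A hedged walkthrough of the grid helper above on a 3x3 map; the import path is assumed from the file location, and cases are numbered 1..width*height, left to right, top to bottom.

```python
# Hedged walkthrough; the import path is an assumption.
from moviemon.classes.map import Map

m = Map(h_case=3, w_case=3, current_case=1)
print(m.get_map()['right'])  # True: case 1 is not on the right edge
m.move_right()               # -> case 2
m.move_down()                # -> case 5
print(m.get_position())      # 5
print(m.move_up(), m.get_position())  # True 2
```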
#### File: moviemon/classes/players.py
```python
class Players:
def __init__(self, strength=0, movieballs=0):
self.strength = strength
self.movieballs = movieballs
def get_strength(self):
return self.strength
def get_movieballs(self):
return self.movieballs
def strength_up(self):
self.strength += 1
def movieballs_up(self):
self.movieballs += 1
def movieballs_down(self):
if self.movieballs > 0:
self.movieballs -= 1
``` |
{
"source": "42B/i2plib",
"score": 3
} |
#### File: docs/examples/wget.py
```python
import sys
import asyncio
from urllib.parse import urlparse
import i2plib
async def http_get(sam_address, loop, session_name, url):
url = urlparse(url)
r, w = await i2plib.stream_connect(session_name, url.netloc,
sam_address=sam_address, loop=loop)
w.write("GET {} HTTP/1.0\nHost: {}\r\n\r\n".format(
url.path, url.netloc).encode())
buflen, resp = 4096, b""
while 1:
data = await r.read(buflen)
if len(data) > 0:
resp += data
else:
break
w.close()
try:
return resp.split(b"\r\n\r\n", 1)[1].decode()
except IndexError:
return resp.decode()
async def wget(sam_address, loop, url):
session_name = "wget"
READY = asyncio.Event(loop=loop)
asyncio.ensure_future(i2plib.create_session(session_name,
sam_address=sam_address, loop=loop, session_ready=READY), loop=loop)
await READY.wait()
res = await http_get(sam_address, loop, session_name, url)
print(res)
if __name__ == "__main__":
sam_address = i2plib.get_sam_address()
if len(sys.argv) == 2:
url = sys.argv[1]
if not url.startswith("http://"):
url = "http://" + url
loop = asyncio.get_event_loop()
loop.run_until_complete(wget(sam_address, loop, url))
loop.stop()
loop.close()
else:
print("""Fetch I2P URL. Usage:
python wget.py http://site.i2p/""")
```
#### File: i2plib/i2plib/utils.py
```python
import socket
import os
import i2plib.sam
def get_free_port():
"""Get a free port on your local host"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 0))
free_port = s.getsockname()[1]
s.close()
return free_port
def is_address_accessible(address):
"""Check if address is accessible or down"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
is_accessible = s.connect_ex(address) == 0
s.close()
return is_accessible
def address_from_string(address_string):
"""Address tuple from host:port string"""
address = address_string.split(":")
return (address[0], int(address[1]))
def get_sam_address():
"""
Get SAM address from environment variable I2P_SAM_ADDRESS, or use a default
value
"""
value = os.getenv("I2P_SAM_ADDRESS")
return address_from_string(value) if value else i2plib.sam.DEFAULT_ADDRESS
def get_new_destination(sam_address=i2plib.sam.DEFAULT_ADDRESS,
sig_type=i2plib.sam.Destination.default_sig_type):
"""Generates new I2P destination of a chosen signature type"""
sam_socket = i2plib.sam.get_socket(sam_address)
sam_socket.send(i2plib.sam.dest_generate(sig_type))
a = i2plib.sam.get_response(sam_socket)
sam_socket.close()
return i2plib.sam.Destination(a['PRIV'], has_private_key=True)
``` |
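A quick usage sketch of the helpers above; the SAM address comes from the `I2P_SAM_ADDRESS` environment variable or the library default, so the exact results depend on your local I2P router:
```python
# Usage sketch; results depend on the local I2P router configuration.
import i2plib.utils

print(i2plib.utils.address_from_string("127.0.0.1:7656"))  # ('127.0.0.1', 7656)
port = i2plib.utils.get_free_port()                        # an unused local TCP port
sam = i2plib.utils.get_sam_address()                       # honours I2P_SAM_ADDRESS if set
print(i2plib.utils.is_address_accessible(sam))             # True only if the SAM bridge is up
```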
{
"source": "42B/krampus",
"score": 2
} |
#### File: krampus/lib/krampus_logging.py
```python
import boto3
import time
import os
from hypchat import HypChat
# given how modules work with python it was easiest to use globals
# I know, I know
messages = []
hc_room = None
# yeah this is a mess and should have been fully static sometimes
# it is easier to just avoid side effects, you know?
class KLog(object):
def __init__(self, bucket_name, key, region="us-east-1"):
self.conn = boto3.resource("s3", region)
self.bucket = self.conn.Bucket(bucket_name)
self.key = key
self.log_file = self.bucket.Object(key)
# add a log msg to the list
# because we are doing unique files per run we store all messages in mem
# then before krampus exits we upload to the specified key
@staticmethod
def log(msg, level="info"):
levels = ["info", "warn", "critical"] # keep it simple
level = level.lower()
if level not in levels:
level = "info" # don't allow random stuff
# print the stdout part
# stdout print prepends
prepends = {
"info": "[i]",
"warn": "[-]",
"critical": "[!]"
}
print "%s %s" % (prepends[level], msg)
# see if it should go to the hipchat room
if level == "critical":
KLog.hipLog(msg)
# due to interesting decisions log message stay in mem until run finish
messages.append({
"level": level,
"msg": msg,
"timestamp": int(time.time())
})
# log something to the hipchat room
@staticmethod
def hipLog(msg):
if not hc_room:
# don't change below to critical, think about it...
KLog.log("tried to log to hipchat without a working connection", "warn")
return False
# otherwise let's set as red
hc_room.notification("KRAMPUS: " + msg, "red")
# write the final product
def writeLogFile(self):
# we will need to go through each of the entries to make them into a
# friendly-ish log format. instead of dumping json objs from the
# array of messages, we'll create newline delimited log messages
# to write to our key
buff = ""
for m in messages:
buff += "[%d] %s: %s\n" % (m['timestamp'], m['level'].upper(), m['msg'])
# now we can worry about putting to s3
resp = self.bucket.Object(self.key).put(Body=buff)
return resp
# just trust me when I say at the time I was out of options and needed global namespace
# should have planned better man
if os.getenv('HIPCHAT_ACCESS_TOKEN') and os.getenv('HIPCHAT_ROOM'):
try:
hc_room = HypChat(os.getenv('HIPCHAT_ACCESS_TOKEN')).get_room(os.getenv('HIPCHAT_ROOM'))
except:
KLog.log("problem starting hipchat, check env vars and connection", "warn")
``` |
{
"source": "42B/quandl-python",
"score": 2
} |
#### File: quandl-python/test/test_database.py
```python
try:
from urllib.parse import urlparse
from urllib.parse import parse_qs
except ImportError:
from urlparse import urlparse
from cgi import parse_qs
import re
import unittest
from test.helpers.httpretty_extension import httpretty
import json
import six
from quandl.errors.quandl_error import (InternalServerError, QuandlError)
from quandl.api_config import ApiConfig
from quandl.model.database import Database
from quandl.connection import Connection
from test.test_retries import ModifyRetrySettingsTestCase
from mock import patch, call, mock_open
from test.factories.database import DatabaseFactory
from test.factories.meta import MetaFactory
class GetDatabaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
httpretty.enable()
database = {'database': DatabaseFactory.build(database_code='NSE')}
httpretty.register_uri(httpretty.GET,
re.compile(
'https://www.quandl.com/api/v3/databases/*'),
body=json.dumps(database))
cls.db_instance = Database(Database.get_code_from_meta(
database['database']), database['database'])
@classmethod
def tearDownClass(cls):
httpretty.disable()
httpretty.reset()
@patch('quandl.connection.Connection.request')
def test_database_calls_connection(self, mock):
database = Database('NSE')
database.data_fields()
expected = call('get', 'databases/NSE', params={})
self.assertEqual(mock.call_args, expected)
def test_database_returns_database_object(self):
database = Database('NSE')
self.assertIsInstance(database, Database)
self.assertEqual(database.database_code, 'NSE')
@patch('quandl.model.dataset.Dataset.all')
def test_database_datasets_calls_datasets_all(self, mock):
self.db_instance.datasets()
expected = call(
params={'query': '', 'database_code': 'NSE', 'page': 1})
self.assertEqual(mock.call_args, expected)
@patch('quandl.model.dataset.Dataset.all')
def test_database_datasets_accepts_query_params(self, mock):
self.db_instance.datasets(params={'query': 'foo', 'page': 2})
expected = call(
params={'query': 'foo', 'database_code': 'NSE', 'page': 2})
self.assertEqual(mock.call_args, expected)
class ListDatabasesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
httpretty.enable()
databases = {'databases': DatabaseFactory.build_batch(10)}
meta = {'meta': MetaFactory.build()}
databases.update(meta)
httpretty.register_uri(httpretty.GET,
re.compile(
'https://www.quandl.com/api/v3/databases*'),
body=json.dumps(databases))
cls.expected_databases = databases
@classmethod
def tearDownClass(cls):
httpretty.disable()
httpretty.reset()
@patch('quandl.connection.Connection.request')
def test_databases_calls_connection(self, mock):
Database.all()
expected = call('get', 'databases', params={})
self.assertEqual(mock.call_args, expected)
def test_databases_returns_database_objects(self):
results = Database.all()
self.assertEqual(len(results), 10)
for result in results:
self.assertIsInstance(result, Database)
def test_databases_has_meta(self):
results = Database.all()
self.assertIsNotNone(results.meta)
def test_databases_returns_expected_ids(self):
results = Database.all()
self.assertEqual(len(results), 10)
six.assertCountEqual(self, [x.id for x in results],
[x['id'] for x in self.expected_databases['databases']])
def test_databases_has_more(self):
results = Database.all()
self.assertTrue(results.has_more_results())
class BulkDownloadDatabaseTest(ModifyRetrySettingsTestCase):
def setUp(self):
httpretty.enable()
httpretty.register_uri(httpretty.GET,
re.compile(
'https://www.quandl.com/api/v3/databases/*'),
adding_headers={
'Location': 'https://www.blah.com/download/db.zip'
},
body='{}', status=302)
httpretty.register_uri(httpretty.GET,
re.compile('https://www.blah.com/'), body='{}')
database = {'database': DatabaseFactory.build(database_code='NSE')}
self.database = Database(database['database']['database_code'], database['database'])
ApiConfig.api_key = 'api_token'
ApiConfig.api_version = '2015-04-09'
def tearDown(self):
httpretty.disable()
httpretty.reset()
    def test_get_bulk_download_url_with_download_type(self):
url = self.database.bulk_download_url(params={'download_type': 'partial'})
parsed_url = urlparse(url)
self.assertEqual(parsed_url.scheme, 'https')
self.assertEqual(parsed_url.netloc, 'www.quandl.com')
self.assertEqual(parsed_url.path, '/api/v3/databases/NSE/data')
self.assertDictEqual(parse_qs(parsed_url.query), {
'download_type': ['partial'],
'api_key': ['api_token'], 'api_version': ['2015-04-09']})
def test_get_bulk_download_url_without_download_type(self):
url = self.database.bulk_download_url()
parsed_url = urlparse(url)
self.assertDictEqual(parse_qs(parsed_url.query), {
'api_key': ['api_token'], 'api_version': ['2015-04-09']})
    def test_bulk_download_to_file_accepts_download_type(self):
m = mock_open()
with patch.object(Connection, 'request') as mock_method:
mock_method.return_value.url = 'https://www.blah.com/download/db.zip'
with patch('quandl.model.database.open', m, create=True):
self.database.bulk_download_to_file(
'.', params={'download_type': 'partial'})
expected = call('get',
'databases/NSE/data',
params={'download_type': 'partial'},
stream=True)
self.assertEqual(mock_method.call_args, expected)
def test_bulk_download_to_file_writes_to_file(self):
m = mock_open()
with patch('quandl.model.database.open', m, create=True):
self.database.bulk_download_to_file('.')
m.assert_called_once_with(six.u('./db.zip'), 'wb')
m().write.assert_called_once_with(six.b('{}'))
def test_bulk_download_raises_exception_when_no_path(self):
self.assertRaises(
QuandlError, lambda: self.database.bulk_download_to_file(None))
def test_bulk_download_raises_exception_when_error_response(self):
ApiConfig.retry_backoff_factor = 0
httpretty.reset()
httpretty.register_uri(httpretty.GET,
re.compile(
'https://www.quandl.com/api/v3/databases/*'),
body=json.dumps(
{'quandl_error':
{'code': 'QEMx01', 'message': 'something went wrong'}}),
status=500)
self.assertRaises(
InternalServerError, lambda: self.database.bulk_download_to_file('.'))
```
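Outside the test harness, the same `Database` calls exercised above would look roughly like this; the API key and the `NSE` database code are placeholders rather than values taken from this repo:
```python
# Sketch based only on the calls exercised by the tests above; key and code are placeholders.
from quandl.api_config import ApiConfig
from quandl.model.database import Database

ApiConfig.api_key = "YOUR_API_KEY"

databases = Database.all()                                    # paginated list of Database objects
nse = Database("NSE")
datasets = nse.datasets(params={"query": "oil", "page": 1})   # datasets inside one database
url = nse.bulk_download_url(params={"download_type": "partial"})
nse.bulk_download_to_file(".")                                # writes the bulk zip into the current directory
```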
#### File: quandl-python/test/test_util.py
```python
import unittest
import datetime
import six
from quandl.util import Util
# test-only helper: cmp() is a builtin in Python 2 but was removed in Python 3
def cmp(a, b):
a = sorted(a)
b = sorted(b)
return (a > b) - (a < b)
class UtilTest(unittest.TestCase):
def test_methodize(self):
self.assertEqual(
Util.methodize(six.u('Hello World...Foo-Bar')), 'hello_worldfoo_bar')
def test_convert_to_dates(self):
d = '2015-04-09'
dt = '2015-07-24T02:39:40.624Z'
dic = {'foo': d, d: {'bar': dt}}
result = Util.convert_to_dates(dic)
self.assertIsInstance(result['foo'], datetime.date)
self.assertIsInstance(result[d]['bar'], datetime.datetime)
def test_merge_options_when_key_exists_in_options(self):
params = {'foo': 'bar', 'foo2': 'bar2'}
options = {'params': {'foo': 'bar3'}}
merged = Util.merge_options('params', params, **options)
self.assertDictEqual(
merged, {'params': {'foo': 'bar3', 'foo2': 'bar2'}})
def test_merge_options_when_key_doesnt_exist_in_options(self):
params = {'foo': 'bar', 'foo2': 'bar2'}
options = {'params': {'foo3': 'bar3'}}
merged = Util.merge_options('params', params, **options)
self.assertDictEqual(
merged, {'params': {'foo': 'bar',
'foo2': 'bar2', 'foo3': 'bar3'}})
def test_constructed_path(self):
path = '/hello/:foo/world/:id'
params = {'foo': 'bar', 'id': 1, 'another': 'a'}
result = Util.constructed_path(path, params)
self.assertEqual(result, '/hello/bar/world/1')
self.assertDictEqual(params, {'another': 'a'})
def test_convert_options(self):
options = {'params': {'ticker': ['AAPL', 'MSFT'],
'per_end_date': {'gte': {'2015-01-01'}},
'qopts': {'columns': ['ticker', 'per_end_date'],
'per_page': 5}}}
expect_result = {'params': {'qopts.per_page': 5,
'per_end_date.gte': set(['2015-01-01']),
'ticker[]': ['AAPL', 'MSFT'],
'qopts.columns[]': ['ticker', 'per_end_date']}}
result = Util.convert_options(**options)
self.assertEqual(cmp(result, expect_result), 0)
options = {'params': {'ticker': 'AAPL', 'per_end_date': {'gte': {'2015-01-01'}},
'qopts': {'columns': ['ticker', 'per_end_date']}}}
expect_result = {'params': {'per_end_date.gte': set(['2015-01-01']),
'ticker': 'AAPL',
'qopts.columns[]': ['ticker', 'per_end_date']}}
result = Util.convert_options(**options)
self.assertEqual(cmp(result, expect_result), 0)
``` |
{
"source": "42cc/dashr-gw",
"score": 2
} |
#### File: apps/core/context_processors.py
```python
from apps.core.utils import get_minimal_transaction_amount
def minimal_amounts(request):
if request.is_ajax():
return
return {
'minimal_deposit_amount': get_minimal_transaction_amount('deposit'),
'minimal_withdrawal_amount': get_minimal_transaction_amount(
'withdrawal',
),
}
```
#### File: apps/core/models.py
```python
from __future__ import unicode_literals
import uuid
from datetime import timedelta
from decimal import Decimal
from encrypted_fields import EncryptedCharField
from solo.models import SingletonModel
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models.signals import post_save
from django.utils import formats
from django.utils.translation import ugettext as _
from apps.core.validators import (
dash_address_validator,
ripple_address_validator,
withdrawal_min_dash_amount_validator,
)
from apps.core.wallet import DashWallet
class GatewaySettings(SingletonModel):
gateway_fee_percent = models.DecimalField(
max_digits=5,
decimal_places=2,
default=0,
verbose_name='Gateway fee (percentage)',
validators=[MinValueValidator(0), MaxValueValidator(100)],
)
max_dash_miner_fee = models.DecimalField(
max_digits=16,
decimal_places=8,
default=Decimal('0.1'),
verbose_name='Dash - maximal miner fee',
help_text=(
'This value is used to calculate amount that is sent in '
'withdrawal transactions. <b>It should be the same as <code>'
'maxtxfee</code> of your Dash node.</b>'
),
validators=[MinValueValidator(0)],
)
dash_required_confirmations = models.PositiveIntegerField(
default=6,
verbose_name='Dash - minimal confirmations',
)
transaction_expiration_minutes = models.PositiveIntegerField(
default=60,
verbose_name='Transaction expiration (minutes)',
)
def __str__(self):
return 'Gateway Settings'
class Meta:
verbose_name = 'Gateway Settings'
class RippleWalletCredentials(SingletonModel):
address = models.CharField(
max_length=35,
validators=[ripple_address_validator],
verbose_name='Address',
)
secret = EncryptedCharField(max_length=29, verbose_name='Secret key')
def __str__(self):
return 'Ripple Wallet Credentials'
class Meta:
verbose_name = 'Ripple Wallet Credentials'
class Page(models.Model):
slug = models.SlugField(max_length=300, db_index=True, unique=True)
title = models.CharField(verbose_name=_("Title"), max_length=200)
description = models.TextField(verbose_name=_("Description"), blank=True)
class Meta:
ordering = ('-title', )
def __str__(self):
return self.title
class TransactionStates(object):
INITIATED = 1
UNCONFIRMED = 2
CONFIRMED = 3
PROCESSED = 4
OVERDUE = 5
FAILED = 6
NO_RIPPLE_TRUST = 7
class BaseTransaction(models.Model, TransactionStates):
timestamp = models.DateTimeField(auto_now_add=True)
dash_address = models.CharField(
max_length=35,
validators=[dash_address_validator],
)
class Meta:
abstract = True
def get_overdue_datetime(self):
return self.timestamp + timedelta(
minutes=GatewaySettings.get_solo().transaction_expiration_minutes,
)
def get_state_history(self):
return [
{
'state': state.current_state,
'timestamp': formats.date_format(
state.datetime,
'DATETIME_FORMAT',
),
} for state in self.state_changes.order_by('datetime').all()
]
def get_normalized_dash_to_transfer(self):
if not isinstance(self.dash_to_transfer, Decimal):
return self.dash_to_transfer
# Based on https://docs.python.org/2.7/library/decimal.html#decimal-faq
if self.dash_to_transfer == self.dash_to_transfer.to_integral():
return self.dash_to_transfer.quantize(Decimal(1))
return self.dash_to_transfer.normalize()
class DepositTransaction(BaseTransaction):
STATE_CHOICES = (
(
TransactionStates.INITIATED,
'Initiated. Send {dash_to_transfer} DASH to {dash_address} before '
'{overdue_datetime}',
),
(
TransactionStates.UNCONFIRMED,
'Received {dash_to_transfer} DASH. Waiting for '
'{confirmations_number} confirmations',
),
(
TransactionStates.CONFIRMED,
'Confirmed receiving {dash_to_transfer} DASH. Initiated an '
'outgoing transaction',
),
(
TransactionStates.PROCESSED,
'Transaction is processed. Hash of a Ripple transaction is '
'{outgoing_ripple_transaction_hash}',
),
(
TransactionStates.OVERDUE,
'Time expired. Transactions to {dash_address} are no longer '
'tracked',
),
(
TransactionStates.FAILED,
'Transaction failed. Please contact our support team',
),
(
TransactionStates.NO_RIPPLE_TRUST,
'The ripple account {ripple_address} does not trust our gateway. '
'Please set a trust line to {gateway_ripple_address}',
),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
dash_to_transfer = models.DecimalField(
max_digits=16,
decimal_places=8,
)
state = models.PositiveSmallIntegerField(
default=TransactionStates.INITIATED,
choices=STATE_CHOICES,
)
ripple_address = models.CharField(
max_length=35,
validators=[ripple_address_validator],
)
outgoing_ripple_transaction_hash = models.CharField(
max_length=64,
blank=True,
)
def __str__(self):
return 'Deposit {}'.format(self.id)
def save(self, *args, **kwargs):
dash_wallet = DashWallet()
if not self.dash_address:
self.dash_address = dash_wallet.get_new_address()
super(DepositTransaction, self).save(*args, **kwargs)
def get_current_state(self):
values = self.__dict__
values['dash_to_transfer'] = self.get_normalized_dash_to_transfer()
values['overdue_datetime'] = formats.date_format(
self.get_overdue_datetime(),
'DATETIME_FORMAT',
)
values['confirmations_number'] = (
GatewaySettings.get_solo().dash_required_confirmations
)
values['gateway_ripple_address'] = (
RippleWalletCredentials.get_solo().address
)
return self.get_state_display().format(**values)
@staticmethod
def post_save_signal_handler(instance, **kwargs):
DepositTransactionStateChange.objects.create(
transaction=instance,
current_state=instance.get_current_state(),
)
class WithdrawalTransaction(BaseTransaction):
STATE_CHOICES = (
(
TransactionStates.INITIATED,
'Initiated. Send {dash_to_transfer} Dash tokens to '
'{ripple_address} with a destination tag {destination_tag} before '
'{overdue_datetime}',
),
(
TransactionStates.CONFIRMED,
'Received {dash_to_transfer} Dash tokens. Initiated an outgoing '
'transaction',
),
(
TransactionStates.PROCESSED,
'Transaction is processed. Hash of a Dash transaction is '
'{outgoing_dash_transaction_hash}',
),
(
TransactionStates.OVERDUE,
'Time expired. Transactions with the destination tag '
'{destination_tag} are no longer tracked',
),
(
TransactionStates.FAILED,
'Transaction failed. Please contact our support team',
),
)
id = models.BigAutoField(
primary_key=True,
serialize=False,
verbose_name='ID',
)
dash_to_transfer = models.DecimalField(
max_digits=16,
decimal_places=8,
validators=[withdrawal_min_dash_amount_validator],
)
state = models.PositiveSmallIntegerField(
default=TransactionStates.INITIATED,
choices=STATE_CHOICES,
)
outgoing_dash_transaction_hash = models.CharField(
max_length=64,
blank=True,
)
def __str__(self):
return 'Withdrawal {}'.format(self.id)
@property
def destination_tag(self):
return self.id
def get_current_state(self):
values = self.__dict__
values['dash_to_transfer'] = self.get_normalized_dash_to_transfer()
values['overdue_datetime'] = formats.date_format(
self.get_overdue_datetime(),
'DATETIME_FORMAT',
)
values['destination_tag'] = self.destination_tag
values['ripple_address'] = RippleWalletCredentials.get_solo().address
return self.get_state_display().format(**values)
@staticmethod
def post_save_signal_handler(instance, **kwargs):
WithdrawalTransactionStateChange.objects.create(
transaction=instance,
current_state=instance.get_current_state(),
)
class BaseTransactionStateChange(models.Model):
datetime = models.DateTimeField(auto_now_add=True)
current_state = models.CharField(max_length=500)
class Meta:
abstract = True
class DepositTransactionStateChange(BaseTransactionStateChange):
transaction = models.ForeignKey(
DepositTransaction,
related_name='state_changes',
)
class WithdrawalTransactionStateChange(BaseTransactionStateChange):
transaction = models.ForeignKey(
WithdrawalTransaction,
related_name='state_changes',
)
post_save.connect(
DepositTransaction.post_save_signal_handler,
sender=DepositTransaction,
)
post_save.connect(
WithdrawalTransaction.post_save_signal_handler,
sender=WithdrawalTransaction,
)
```
#### File: core/tests/test_context_processors.py
```python
from django.test import RequestFactory, TestCase
from apps.core import context_processors
from apps.core.utils import get_minimal_transaction_amount
class ContextProcessorsTest(TestCase):
def test_minimal_amounts(self):
request = RequestFactory().get('')
context = context_processors.minimal_amounts(request)
self.assertIn('minimal_deposit_amount', context)
self.assertEqual(
context['minimal_deposit_amount'],
get_minimal_transaction_amount('deposit'),
)
self.assertIn('minimal_withdrawal_amount', context)
self.assertEqual(
context['minimal_withdrawal_amount'],
get_minimal_transaction_amount('withdrawal'),
)
ajax_request = RequestFactory().get(
'',
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
)
self.assertIsNone(context_processors.minimal_amounts(ajax_request))
```
#### File: core/tests/test_utils.py
```python
from decimal import Decimal
from django.test import TestCase
from apps.core.models import GatewaySettings
from apps.core.utils import (
get_minimal_transaction_amount,
get_received_amount,
)
class UtilsTest(TestCase):
def setUp(self):
GatewaySettings.objects.create(
gateway_fee_percent=Decimal('0.5'),
max_dash_miner_fee=Decimal('0.001'),
)
def test_get_received_amount_deposit(self):
self.assertEqual(
get_received_amount('1', 'deposit'),
Decimal('0.995'),
)
self.assertEqual(
get_received_amount('1.1', 'deposit'),
Decimal('1.0945'),
)
self.assertEqual(
get_received_amount('0', 'deposit'),
Decimal('0'),
)
self.assertEqual(
get_received_amount('1.123456789', 'deposit'),
Decimal('1.11783949'),
)
self.assertEqual(
get_received_amount('1.123456784', 'deposit'),
Decimal('1.11783949'),
)
def test_get_received_amount_withdrawal(self):
self.assertEqual(
get_received_amount('1', 'withdrawal'),
Decimal('0.994'),
)
self.assertEqual(
get_received_amount('1.1', 'withdrawal'),
Decimal('1.0935'),
)
self.assertEqual(
get_received_amount('0', 'withdrawal'),
Decimal('0'),
)
self.assertEqual(
get_received_amount('1.123456789', 'withdrawal'),
Decimal('1.11683949'),
)
self.assertEqual(
get_received_amount('1.123456784', 'withdrawal'),
Decimal('1.11683949'),
)
def test_get_minimal_withdrawal_amount(self):
self.assertEqual(
get_minimal_transaction_amount('deposit'),
Decimal('0.00000002'),
)
self.assertEqual(
get_minimal_transaction_amount('withdrawal'),
Decimal('0.00100504'),
)
```
#### File: apps/core/utils.py
```python
from decimal import Decimal, ROUND_DOWN, ROUND_UP
from django.apps import apps
dash_minimal = Decimal('0.00000001')
def get_minimal_transaction_amount(transaction_type):
gateway_settings = apps.get_model('core', 'GatewaySettings').get_solo()
gateway_fee = gateway_settings.gateway_fee_percent / 100
minimal_amount = dash_minimal
# Add fees.
if transaction_type == 'withdrawal':
minimal_amount += gateway_settings.max_dash_miner_fee
minimal_amount /= (1 - gateway_fee)
# Round minimal amount to 8 decimal places.
minimal_amount = minimal_amount.quantize(
dash_minimal,
rounding=ROUND_UP,
)
return minimal_amount
def get_received_amount(amount, transaction_type):
# Round amount to 8 decimal places.
amount = Decimal(amount).quantize(
dash_minimal,
rounding=ROUND_DOWN,
)
gateway_settings = apps.get_model('core', 'GatewaySettings').get_solo()
gateway_fee = gateway_settings.gateway_fee_percent / 100
# Subtract fees.
received_amount = amount * (1 - gateway_fee)
if transaction_type == 'withdrawal':
received_amount -= gateway_settings.max_dash_miner_fee
# Round received amount to 8 decimal places.
received_amount = received_amount.quantize(
dash_minimal,
rounding=ROUND_DOWN,
)
return max(received_amount, 0)
``` |
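A worked check of the fee arithmetic above, plugging in the same settings the tests use (0.5% gateway fee, 0.001 DASH maximal miner fee); the printed values reproduce the expected results in `test_utils.py`:
```python
from decimal import Decimal, ROUND_UP

dash_minimal = Decimal("0.00000001")
gateway_fee = Decimal("0.5") / 100        # 0.5 % as configured in the tests
max_miner_fee = Decimal("0.001")

# deposit minimum: the smallest transferable unit, grossed up by the gateway fee
print((dash_minimal / (1 - gateway_fee)).quantize(dash_minimal, ROUND_UP))   # 0.00000002

# withdrawal minimum: smallest unit plus the miner fee, grossed up by the gateway fee
minimal_withdrawal = (dash_minimal + max_miner_fee) / (1 - gateway_fee)
print(minimal_withdrawal.quantize(dash_minimal, ROUND_UP))                   # 0.00100504
```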
{
"source": "42cc/ripple_api",
"score": 2
} |
#### File: ripple_api/ripple_api/models.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import ModelTracker
from signals import transaction_status_changed, transaction_failure_send
class Transaction(models.Model):
RECEIVED = 0
PROCESSED = 1
MUST_BE_RETURN = 2
RETURNING = 3
RETURNED = 4
PENDING = 5
SUBMITTED = 6
FAILURE = 7
SUCCESS = 8
CREATED = 9
SUCCESS_PROCESSED = 10
FAIL_FIXED = 100
STATUS_CHOICES = (
(RECEIVED, _(u'Transaction received')),
(PROCESSED, _(u'Transaction was processed')),
(MUST_BE_RETURN, _(u'This transaction must be returned to user')),
(RETURNING, _(u'Created new transaction for returning')),
(RETURNED, _(u'Transaction was returned')),
(PENDING, _(u'Pending to submit')),
(SUBMITTED, _(u'Transaction was submitted')),
(FAILURE, _(u'Transaction was failed')),
(SUCCESS, _(u'Transaction was completed successfully')),
(CREATED, _(u'Transaction was created but not sign')),
(SUCCESS_PROCESSED,
_(u'Transaction was processed after successful submit')),
(FAIL_FIXED, _(u'The failed transaction was fixed by a new retry'))
)
account = models.CharField(max_length=100)
destination = models.CharField(max_length=100)
hash = models.CharField(max_length=100, blank=True)
tx_blob = models.TextField(blank=True)
currency = models.CharField(max_length=3)
issuer = models.CharField(max_length=100)
value = models.CharField(max_length=100)
source_tag = models.IntegerField(null=True, blank=True)
destination_tag = models.IntegerField(null=True, blank=True)
ledger_index = models.IntegerField(null=True, blank=True)
status = models.SmallIntegerField(choices=STATUS_CHOICES, default=RECEIVED)
parent = models.ForeignKey('self', null=True, blank=True,
related_name='returning_transaction')
created = models.DateTimeField(auto_now_add=True)
status_tracker = ModelTracker(fields=['status'])
def __unicode__(self):
return u'[%s] %s. %s %s from %s to %s' % (
self.pk, self.created, self.value,
self.currency, self.account, self.destination
)
def save(self, *args, **kwargs):
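        # Note: self.pk is already set here only when the row exists, so this flag marks updates, not newly created instances.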
created = bool(self.pk)
super(Transaction, self).save(*args, **kwargs)
if created and self.status_tracker.previous('status') is not None:
transaction_status_changed.send(
sender=self.__class__,
instance=self,
old_status=self.status_tracker.previous('status')
)
if self.status == self.FAILURE:
transaction_failure_send.send(sender=self.__class__, instance=self)
```
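A short sketch of how a consumer might listen to the two signals sent from `Transaction.save()`; the `ripple_api.signals` import path and the receiver names are assumptions, only the keyword arguments mirror the `send()` calls above:
```python
# Assumption: the signals live in ripple_api/signals.py as ordinary Django signals.
from ripple_api.models import Transaction
from ripple_api.signals import transaction_status_changed, transaction_failure_send

def on_status_change(sender, instance, old_status, **kwargs):
    # keyword arguments mirror transaction_status_changed.send(...) in Transaction.save()
    print("transaction %s: %s -> %s" % (instance.pk, old_status, instance.status))

def on_failure(sender, instance, **kwargs):
    print("transaction %s failed" % instance.pk)

transaction_status_changed.connect(on_status_change, sender=Transaction)
transaction_failure_send.connect(on_failure, sender=Transaction)
```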
#### File: ripple_api/ripple_api/test_trade.py
```python
import unittest
from decimal import Decimal
from django.conf import settings
from django.test import TestCase
from mock import patch
from .trade import sell_all
part_pay = 0.5
part_get = 0.1
amount_get = 1
amount_pay = 2
sequence = 1
balance_pay = 100
balance_get = 200
AffectedNodes = {
u'DeletedNode': {
'LedgerEntryType': 'Offer',
'PreviousFields': {
u'TakerPays': {
u'currency': 'TUS',
u'value': str(part_pay),
u'issuer': u'<KEY>'
},
u'Account': u'<KEY>',
u'TakerGets': {
u'currency': 'TBT',
u'value': str(part_get),
u'issuer': u'<KEY>'
},
}
},
}
data = {
u'status': u'success',
u'engine_result': u'tesSUCCESS',
u'engine_result_message': u'The transaction was applied.',
u'engine_result_code': 0,
u'tx_blob': u'test',
u'tx_json': {
u'Account': settings.RIPPLE_ACCOUNT,
u'hash': u'test_hash',
u'Sequence': sequence,
u'TakerPays': {
u'currency': 'TBT',
u'value': amount_pay,
u'issuer': '<KEY>'
},
u'TakerGets': {
u'currency': 'TUS',
u'value': amount_get,
u'issuer': '<KEY>',
u'TransactionType': u'OfferCreate'
}
}
}
tx_data = {
u'status': u'success',
u'Account': settings.RIPPLE_ACCOUNT,
u'Sequence': sequence,
u'TakerPays': {
u'currency': 'TBT',
u'value': str(amount_pay),
u'issuer': '<KEY>'
},
u'meta': {
u'TransactionResult': u'tesSUCCESS',
u'AffectedNodes': [{
u'ModifiedNode': {
u'LedgerEntryType': u'Offer',
u'FinalFields': {
u'Account': u'<KEY>',
u'Sequence': sequence,
u'TakerPays': {
u'currency': 'TUS',
u'value': str(amount_pay),
u'issuer': u'<KEY>'
},
u'TakerGets': {
u'currency': 'TBT',
u'value': str(amount_get),
u'issuer': u'<KEY>'
},
u'Flags': 0
},
u'PreviousFields': {
u'TakerPays': {
u'currency': 'TUS',
u'value': str(balance_pay),
u'issuer': u'<KEY>'
},
u'TakerGets': {
u'currency': 'TBT',
u'value': str(balance_get),
u'issuer': u'<KEY>'
}
}
}
}]
},
u'TakerGets': {
u'currency': 'TUS',
u'value': str(amount_get),
u'issuer': '<KEY>',
},
u'validated': True,
u'TransactionType': u'OfferCreate'
}
def generate_ripple_transaction_meta(final_balance, previous_balance):
return {
'AffectedNodes': [
{
"ModifiedNode": {
"FinalFields": {
"Balance": {
"currency": "TUS",
"issuer": "<KEY>",
"value": str(final_balance)
},
"HighLimit": {
"currency": "TUS",
"issuer": '<KEY>',
"value": "10000"
},
},
"LedgerEntryType": "RippleState",
"PreviousFields": {
"Balance": {
"currency": "TUS",
"issuer": "<KEY>",
"value": str(previous_balance)
}
},
}
},
{
"ModifiedNode": {
"FinalFields": {
"Account": settings.RIPPLE_ACCOUNT,
"Balance": "80678117",
},
"LedgerEntryType": "AccountRoot",
"PreviousFields": {
"Balance": "80688117",
"Sequence": 959
},
}
},
{
"ModifiedNode": {
"FinalFields": {
"Balance": {
"currency": "TUS",
"issuer": "<KEY>",
"value": "0.1"
},
"HighLimit": {
"currency": "TUS",
"issuer": "<KEY>",
"value": "0"
},
},
"LedgerEntryType": "RippleState",
"PreviousFields": {
"Balance": {
"currency": "TUS",
"issuer": "<KEY>",
"value": "0.2"
}
},
}
}
],
'TransactionResult': 'tesSUCCESS'
}
# FIXME: disable tests for now as these are not consistent with
# the implementation at all
# https://github.com/42cc/ripple_api/issues/24
@unittest.skip("tests are not consistent with the implementation; see 42cc/ripple_api#24")
class TradeTestCase(TestCase):
def setUp(self):
self.create_data = data
self.tx_data = tx_data
@patch('ripple_api.ripple_api.call_api')
@patch('ripple_api.trade.create_offer')
def test_offer_create_error(self, create_offer_mock, call_api_mock):
"""Test if do not sell all when has error in offer."""
self.create_data['engine_result'] = 'error'
create_offer_mock.return_value = self.create_data
call_api_mock.return_value = self.create_data
exchange_result = sell_all(
sell_needed={
'value': amount_pay, 'currency': 'TUS',
'issuer': '<KEY>'
},
buy_expected={
'value': amount_get, 'currency': 'TBT',
'issuer': '<KEY>'
},
)
# check result error, sold 0, bought 0
self.assertEqual(exchange_result['status'], 'error')
self.assertEqual(exchange_result['status_msg'],
'Offer creation failed: error')
self.assertEqual(exchange_result['sold'], 0)
self.assertEqual(exchange_result['bought'], 0)
@patch('ripple_api.ripple_api.call_api')
@patch('ripple_api.trade.create_offer')
def test_offer_create_nothing_happened(self, create_offer_mock,
call_api_mock):
"""Test if do not sell all when offer without data 'AffectedNodes'."""
self.create_data['engine_result'] = 'tesSUCCESS'
rez = self.tx_data['meta']['AffectedNodes']
del self.tx_data['meta']['AffectedNodes']
create_offer_mock.return_value = self.create_data
call_api_mock.return_value = self.tx_data
exchange_result = sell_all(
sell_needed={
'value': amount_pay, 'currency': 'TUS',
'issuer': '<KEY>'
},
buy_expected={
'value': amount_get, 'currency': 'TBT',
'issuer': '<KEY>'
},
)
self.tx_data['meta']['AffectedNodes'] = rez
# check result success, sold 0, bought 0
self.assertEqual(exchange_result['status'], 'error')
self.assertEqual(exchange_result['status_msg'],
"Offer was not identified.")
self.assertEqual(exchange_result['sold'], 0)
self.assertEqual(exchange_result['bought'], 0)
@patch('ripple_api.ripple_api.call_api')
@patch('ripple_api.trade.create_offer')
def test_offer_create_not_happened(self, create_offer_mock, call_api_mock):
"""Test if do not sell all when offer without data 'ModifiedNode'."""
self.create_data['engine_result'] = 'tesSUCCESS'
rez = self.tx_data['meta']['AffectedNodes'][0]['ModifiedNode']
del self.tx_data['meta']['AffectedNodes'][0]['ModifiedNode']
create_offer_mock.return_value = self.create_data
call_api_mock.return_value = self.tx_data
exchange_result = sell_all(
sell_needed={
'value': amount_pay, 'currency': 'TUS',
'issuer': '<KEY>'
},
buy_expected={
'value': amount_get, 'currency': 'TBT',
'issuer': '<KEY>'
},
)
self.tx_data['meta']['AffectedNodes'][0]['ModifiedNode'] = rez
# check result success, sold 0, bought 0
self.assertEqual(exchange_result['status'], 'success')
self.assertEqual(exchange_result['sold'], 0)
self.assertEqual(exchange_result['bought'], 0)
@patch('ripple_api.ripple_api.call_api')
@patch('ripple_api.trade.create_offer')
def test_offer_create_sold_a_part(self, create_offer_mock, call_api_mock):
"""Test if correct sell a part, when offer has only part for sell."""
self.create_data['engine_result'] = 'tesSUCCESS'
create_offer_mock.return_value = self.create_data
call_api_mock.return_value = self.tx_data
exchange_result = sell_all(
sell_needed={
'value': amount_pay, 'currency': 'TUS',
'issuer': '<KEY>'
},
buy_expected={
'value': amount_get, 'currency': 'TBT',
'issuer': '<KEY>'
},
)
# check result success, sold only a part
self.assertEqual(exchange_result['status'], 'success')
self.assertEqual(exchange_result['bought'],
Decimal(balance_get - amount_get))
self.assertEqual(exchange_result['sold'],
Decimal(balance_pay - amount_pay))
@patch('ripple_api.ripple_api.call_api')
@patch('ripple_api.trade.create_offer')
def test_offer_create_sold_everything(self, create_offer_mock,
call_api_mock):
"""Test if correct we can sell all."""
self.create_data['engine_result'] = 'tesSUCCESS'
del self.tx_data['meta']['AffectedNodes'][0]['ModifiedNode']
self.tx_data['meta']['AffectedNodes'][-1] = AffectedNodes
create_offer_mock.return_value = self.create_data
call_api_mock.return_value = self.tx_data
exchange_result = sell_all(
sell_needed={
'value': amount_pay, 'currency': 'TUS',
'issuer': '<KEY>'
},
buy_expected={
'value': amount_get, 'currency': 'TBT',
'issuer': '<KEY>'
},
)
# check result success, sold a part
self.assertEqual(exchange_result['status'], 'success')
self.assertEqual(exchange_result['sold'], Decimal("%.6f" % part_pay))
self.assertEqual(exchange_result['bought'], Decimal("%.6f" % part_get))
```
#### File: ripple_api/ripple_api/test_trust.py
```python
import json
from django.conf import settings
from django.test import TestCase
from requests import Response
from mock import patch
from .ripple_api import trust_set, RippleApiError
sequence = 12
destination_account = u'<KEY>'
wrong_secret = "somesecret"
data = {
u"engine_result": u"tesSUCCESS",
u"engine_result_code": 0,
u"engine_result_message": (u"The transaction was applied. Only final " +
u"in a validated ledger."),
u"status": u"success",
u"tx_blob": u"-- hexBinary data --",
u"tx_json": {
u"Account": settings.RIPPLE_ACCOUNT,
u"Fee": u"10000",
u"Flags": 262144,
u"LimitAmount": {
u"currency": u"USD",
u"issuer": destination_account,
u"value": u"1"
},
u"Sequence": sequence,
u"SigningPubKey": u"-- hexBinary data of SigningPubKey --",
u"TransactionType": u"TrustSet",
u"TxnSignature": u"-- hexBinary data of TxnSignature --",
u"hash": u"-- hexBinary data of hash --"
}
}
error_data = {
u"error": "badSecret",
u"error_code": 39,
u"error_message": "Secret does not match account.",
u"request": {
u"command": u"submit",
u"secret": wrong_secret,
u"tx_json": {
u"Account": settings.RIPPLE_ACCOUNT,
u"Fee": "10000",
u"Flags": 262144,
u"LimitAmount": {
u"currency": "USD",
u"issuer": destination_account,
u"value": "1"
},
u"TransactionType": "TrustSet"
}
},
u"status": "error"
}
class TrustSetTestCase(TestCase):
def setUp(self):
pass
@patch('requests.post')
def test_trust_set_error(self, post_mock):
"""Test if RippleApiError raised in case when secret is wrong"""
response = Response()
response._content = json.dumps({u"result": error_data})
post_mock.return_value = response
exp_msg = u'39: badSecret. Secret does not match account.'
with self.assertRaisesMessage(RippleApiError, exp_msg):
trust_set(
settings.RIPPLE_ACCOUNT, wrong_secret,
destination_account,
1, u"USD", flags={"AllowRipple": False, "Freeze": True}
)
@patch('requests.post')
def test_trust_set_success(self, post_mock):
response = Response()
response._content = json.dumps({u"result": data})
post_mock.return_value = response
result = trust_set(
settings.RIPPLE_ACCOUNT, settings.RIPPLE_SECRET,
destination_account,
1, u"USD", flags={"AllowRipple": True}
)
self.assertDictEqual(result, data)
``` |
{
"source": "42cursus-youkim/packman",
"score": 3
} |
#### File: 42cursus-youkim/packman/makegen.py
```python
import re
import textwrap
from pathlib import Path
def get_subdirs(path: Path):
yield from (p for p in path.glob("[!.]*") if p.is_dir())
def get_src_files(subdir: Path):
yield from (p for p in subdir.glob("*.c"))
def make_var(name: str, values: list[str]) -> str:
name_len = len(name)
wrapped = textwrap.wrap(" ".join(values), 70 - name_len)
wrapped_len = len(wrapped)
if wrapped_len == 0:
return ""
pad = "\t" * ((name_len + 3) // 4)
res = f"{name} = {wrapped[0]}"
if wrapped_len > 1:
res += "\\"
for w in wrapped[1:-1]:
res += f"\n{pad}{w}\\"
if wrapped_len > 1:
res += f"\n{pad}{wrapped[-1]}"
res += "\n"
return res
def create_values(subdir: Path) -> str:
values = [s.stem for s in get_src_files(subdir)]
return make_var(f"{subdir.name.replace('std__', '')}V", values)
def create_template(path: Path) -> str:
subdirs = [s for s in get_subdirs(path)]
packages = [s.name.replace("std__", "") for s in subdirs]
res = make_var("PKGS", packages) + "\n"
for subdir in subdirs:
res += create_values(subdir)
return res
def regex_index(lst: list[str], pattern: str, flags=0):
for i, line in enumerate(lst):
if re.match(pattern, line, flags):
return i
raise ValueError(f"{pattern} not found in {lst}")
def replace_text(
text: str,
to_replace: str,
begin_text: str = r"#\s*@packages",
end_text: str = "#",
) -> str:
lines = text.splitlines()
begin = 1 + regex_index(lines, begin_text, re.I)
end = begin + regex_index(lines[begin:], end_text, re.I)
# replace lines
lines[begin:end] = [to_replace]
return "\n".join(lines)
def makegen(src_dir: Path = Path('src'), makefile: Path = Path('Makefile')):
template = create_template(src_dir)
text = makefile.read_text()
makefile.write_text(replace_text(text, template))
def main():
makegen()
if __name__ == "__main__":
main()
``` |
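A quick illustration of the two helpers above: `make_var` renders a Makefile variable, and `replace_text` splices it between the `# @packages` marker and the next `#` line; the package names here are invented:
```python
# Illustration only; package names are invented. Run next to makegen.py.
from makegen import make_var, replace_text

print(make_var("PKGS", ["ft_printf", "get_next_line", "libft"]))
# PKGS = ft_printf get_next_line libft

makefile = """NAME = packman.a
# @packages
PKGS = stale_list
#
all: $(NAME)"""

# everything between "# @packages" and the next "#" line is swapped out
print(replace_text(makefile, make_var("PKGS", ["libft"])))
```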
{
"source": "42cursus-youkim/Rank04-CPP-Module01",
"score": 3
} |
#### File: Rank04-CPP-Module01/ex04/test.py
```python
from pathlib import Path
from subprocess import run
from hypothesis import example, given
from hypothesis import strategies as st
from termcolor import cprint
EXIT_FAILURE = 1
@given(st.text(min_size=1), st.text(min_size=1))
@example("para", "PARA")
@example("para", "")
@example(" ", "!!!!")
def test_replace_works(word: str, to: str):
cprint(f"case: <{word}> => <{to=}>", "yellow")
param = ["./prog.out", "test.txt", word, to]
try:
result = run(param)
except ValueError:
cprint(f"skipping due to {word} or {to} a NULL byte", "magenta")
return
if len(param) != 4:
assert result.returncode == EXIT_FAILURE
else:
text = Path("test.replace").read_text()
answer = Path("test.txt").read_text().replace(word, to)
assert text == answer
def test_invalid_file():
param = ["./prog.out", "DOES.NOT.EXIST", "para", "PARA"]
assert run(param).returncode == EXIT_FAILURE
def test_invalid_arg():
param = ["./prog.out"]
for _ in range(6):
assert run(param).returncode == EXIT_FAILURE
param.append("a")
if __name__ == "__main__":
test_replace_works()
test_invalid_file()
test_invalid_arg()
``` |
{
"source": "4-2-cz/Zabbix-Addons",
"score": 2
} |
#### File: Extensions/Screen-Builder/screen_info.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2014 Vermont 24x7"
__version__ = "1.0"
import ConfigParser
import sys, os
sys.path.append('./lib')
from zabbix_api import ZabbixAPI, ZabbixAPIException
import json
class Config:
def __init__(self, conf_file):
self.config = None
self.zabbix_frontend = ''
self.zabbix_user = ''
self.zabbix_password = ''
self.screen_name = ''
self.screen_hsize = 0
self.graph_width = 0
self.graph_height = 0
self.conf_file = conf_file
if not os.path.exists(self.conf_file):
print "Can't open config file %s" % self.conf_file
exit(1)
# Read common config
self.config = ConfigParser.ConfigParser()
self.config.read(self.conf_file)
def parse(self):
# Parse common config
try:
self.zabbix_frontend = self.config.get('common', 'zabbix_frontend')
except:
self.zabbix_frontend = 'localhost'
try:
self.zabbix_user = self.config.get('common', 'zabbix_user')
except:
self.zabbix_user = 'admin'
try:
self.zabbix_password = self.config.get('common', 'zabbix_password')
except:
self.zabbix_password = ''
try:
self.screen_name = self.config.get('screen', 'name')
except:
print "No name given for screen to create"
sys.exit(1)
try:
self.screen_hsize = self.config.get('screen', 'hsize')
except:
self.screen_hsize = 2
try:
self.graph_width = self.config.get('screen', 'graph_width')
except:
self.graph_width = 500
try:
self.graph_height = self.config.get('screen', 'graph_height')
except:
self.graph_height = 100
def cleanup():
pass
def main():
import atexit
atexit.register(cleanup)
#get screens
for screen in zapi.screen.get({ "output": "extend", "selectScreenItems": "extend" }):
print json.dumps(screen, indent=4)
sys.exit(1)
if __name__ == "__main__":
global config
config_file = './screenbuilder.conf'
config = Config(config_file)
config.parse()
zapi = ZabbixAPI(server=config.zabbix_frontend)
try:
print("Connecting to Zabbix API")
zapi.login(config.zabbix_user, config.zabbix_password)
print("Connected to Zabbix API Version: %s" % zapi.api_version())
except ZabbixAPIException, e:
print("Zabbix API connection failed")
print("Additional info: %s" % e)
sys.exit(1)
main()
``` |
{
"source": "42electronics/level_c",
"score": 2
} |
#### File: level_c/lesson_10/tk_6050_all.py
```python
from tkinter import *
from mpu6050 import mpu6050
sensor = mpu6050(0x68)
def update():
ax.delete(0,END)
ay.delete(0,END)
az.delete(0,END)
gx.delete(0,END)
gy.delete(0,END)
gz.delete(0,END)
data = sensor.get_all_data()
ax_data = data[0]['x']
ay_data = data[0]['y']
az_data = data[0]['z']
gx_data = data[1]['x']
gy_data = data[1]['y']
gz_data = data[1]['z']
ax.insert(0, '%.1f'%ax_data)
ay.insert(0, '%.1f'%ay_data)
az.insert(0, '%.1f'%az_data)
gx.insert(0, '%.1f'%gx_data)
gy.insert(0, '%.1f'%gy_data)
gz.insert(0, '%.1f'%gz_data)
root.after(300, update)
root = Tk()
root.title('MPU6050')
Label(root, text = 'Accel X:').grid(row=0, column=0)
Label(root, text = 'Accel Y:').grid(row=1, column=0)
Label(root, text = 'Accel Z:').grid(row=2, column=0)
Label(root, text = 'Gyro X:').grid(row=3, column=0)
Label(root, text = 'Gyro Y:').grid(row=4, column=0)
Label(root, text = 'Gyro Z:').grid(row=5, column=0)
ax = Entry(root)
ay = Entry(root)
az = Entry(root)
gx = Entry(root)
gy = Entry(root)
gz = Entry(root)
ax.grid(row=0, column=1)
ay.grid(row=1, column=1)
az.grid(row=2, column=1)
gx.grid(row=3, column=1)
gy.grid(row=4, column=1)
gz.grid(row=5, column=1)
Button(root, text='Quit', command=root.destroy).grid(row=6, column=0)
update()
root.mainloop()
```
#### File: level_c/lesson_12/camera_capture.py
```python
import time, os
from tkinter import *
root = Tk()
root.title('Photos')
def quit():
root.destroy()
print('Program Exiting...')
raise SystemExit()
def update():
global img
timestamp = (time.strftime('%Y-%m-%d_%H:%M:%S'))
img_file = ('/home/pi/Pictures/%s.png' % timestamp)
os.system('raspistill -o %s -e png -w 640 -h 480' % img_file)
print('Image saved as %s' % img_file)
img = PhotoImage(file='%s' % img_file)
Label(root, image=img).grid(row=0, column=0)
img = PhotoImage(file='/home/pi/Pictures/init.png')
Label(root, image=img).grid(row=0, column=0)
Button(root, text='Capture', command=update).grid(row=1, column=0)
Button(root, text='Quit', command=quit).grid(row=2, column=0)
root.mainloop()
```
#### File: level_c/lesson_15/internet_oled.py
```python
import time
import Adafruit_SSD1306
from PIL import Image, ImageDraw, ImageFont
import requests
disp = Adafruit_SSD1306.SSD1306_128_64(rst=None)
disp.begin()
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf',18)
def update_color():
draw.rectangle((0,0,width,height), outline=0, fill=0)
feed = requests.get('http://api.thingspeak.com/channels/1417/field/1/last.json')
data = feed.json()
current = data['field1']
return(current)
try:
while True:
color = update_color()
draw.text((0, 0), 'Cheerlights', font=font, fill=255)
draw.text((0, 22), 'color is:', font=font, fill=255)
draw.text((0, 44), color, font=font, fill=255)
disp.image(image)
disp.display()
time.sleep(30)
except KeyboardInterrupt:
disp.clear()
disp.display()
    raise SystemExit()
```
#### File: level_c/lesson_3/tk_keyboard.py
```python
from tkinter import Tk
def on_close():
print('Quitting Program...')
root.destroy()
def key_input(event):
if event.char == 'a':
print('a pressed')
elif event.char == 's':
print('s pressed')
elif event.char == 'd':
print('d pressed')
elif event.char == 'f':
print('f pressed')
elif event.char == 'q':
on_close()
else:
print('Invalid input')
root = Tk()
root.bind('<KeyPress>', key_input)
root.protocol("WM_DELETE_WINDOW", on_close)
root.mainloop()
```
#### File: level_c/lesson_3/tx_xy.py
```python
from tkinter import Tk, Canvas
def click(event):
print("Clicked at x=",event.x," y=",event.y)
def on_close():
root.destroy()
root = Tk()
root.protocol("WM_DELETE_WINDOW", on_close)
project = Canvas(root)
project.configure(width=300, height=300)
project.bind("<Button-1>", click)
project.pack()
root.mainloop()
```
#### File: level_c/lesson_9/mpu6050_rgb.py
```python
import RPi.GPIO as GPIO
import time
from mpu6050 import mpu6050
red = 13
green = 19
blue = 26
GPIO.setmode(GPIO.BCM)
GPIO.setup(red, GPIO.OUT)
GPIO.setup(green, GPIO.OUT)
GPIO.setup(blue, GPIO.OUT)
def led_update(red_value,green_value,blue_value):
GPIO.output(red, red_value)
GPIO.output(green, green_value)
GPIO.output(blue, blue_value)
sensor = mpu6050(0x68)
try:
while True:
data = sensor.get_accel_data()
y_accel = data['y']
if y_accel > 4:
led_update(1,0,0)
elif y_accel < -4:
led_update(0,0,1)
else:
led_update(0,1,0)
time.sleep(0.05)
except KeyboardInterrupt:
led_update(0,0,0)
GPIO.cleanup()
``` |
{
"source": "42enrique/osteoblasticCNN",
"score": 3
} |
#### File: osteoblasticCNN/keras-covid-19/train_lib.py
```python
from random import shuffle, choice
from PIL import Image
import os
import numpy as np
import random
import glob
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import time
import pandas as pd
import cv2
import math
from random import randint
def load_rgb_data(IMAGE_DIRECTORY,IMAGE_SIZE, shuffle=True):
print("Loading images...")
data = []
#labels=[]
directories = next(os.walk(IMAGE_DIRECTORY))[1]
print(directories)
for diretcory_name in directories:
print("Loading {0}".format(diretcory_name))
file_names = next(os.walk(os.path.join(IMAGE_DIRECTORY, diretcory_name)))[2]
print("we will load [", len(file_names), "] files from [",diretcory_name,"] class ..." )
for i in range(len(file_names)):
image_name = file_names[i]
image_path = os.path.join(IMAGE_DIRECTORY, diretcory_name, image_name)
if ('.DS_Store' not in image_path):
#print(image_path)
label = diretcory_name
img = Image.open(image_path)
rgbimg = Image.new("RGB", img.size)
rgbimg.paste(img)
img=rgbimg
#print(np.array(img).shape)
img = img.resize((IMAGE_SIZE, IMAGE_SIZE), Image.ANTIALIAS)
#print(np.array(img).shape)
data.append([np.array(img), label])
if (shuffle):
random.shuffle(data)
training_images = np.array([i[0] for i in data]).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 3)
training_labels = np.array([i[1] for i in data])
print("File loading completed.")
return training_images, training_labels
def load_rgb_data_cv(IMAGE_DIRECTORY,IMAGE_SIZE, shuffle=True):
print("Loading images...")
data = []
#labels=[]
directories = next(os.walk(IMAGE_DIRECTORY))[1]
for diretcory_name in directories:
print("Loading {0}".format(diretcory_name))
file_names = next(os.walk(os.path.join(IMAGE_DIRECTORY, diretcory_name)))[2]
print("we will load [", len(file_names), "] files from [",diretcory_name,"] class ..." )
for i in range(len(file_names)):
image_name = file_names[i]
image_path = os.path.join(IMAGE_DIRECTORY, diretcory_name, image_name)
#print(image_path)
label = diretcory_name
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
#print(np.array(img).shape)
data.append([np.array(img), label])
if (shuffle):
random.shuffle(data)
training_images = np.array([i[0] for i in data]).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 3)
training_labels = np.array([i[1] for i in data])
print("File loading completed.")
return training_images, training_labels
def normalize_data(dataset):
print("normalize data")
dataset= dataset/255.0
return dataset
def display_image(trainX, trainY, index=0):
plt.imshow(trainX[index])
print ("Label = " + str(np.squeeze(trainY[index])))
print ("image shape: ",trainX[index].shape)
def display_one_image(one_image, its_label):
plt.imshow(one_image)
print ("Label = " + its_label)
print ("image shape: ",one_image.shape)
def display_dataset_shape(X,Y):
print("Shape of images: ", X.shape)
print("Shape of labels: ", Y.shape)
def plot_sample_from_dataset(images, labels,rows=5, colums=5, width=8,height=8):
plt.figure(figsize=(width,height))
for i in range(rows*colums):
plt.subplot(rows,colums,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i], cmap=plt.cm.binary)
plt.xlabel(labels[i])
plt.show()
def display_dataset_folders(path):
classes=os.listdir(path)
classes.sort()
print(classes)
def get_data_distribution(IMAGE_DIRECTORY, output_file=None,plot_stats=True):
print("Loading images...")
#list structure to collect the statistics
stats=[]
#get all image directories
directories = next(os.walk(IMAGE_DIRECTORY))[1]
for diretcory_name in directories:
print("Loading {0}".format(diretcory_name))
images_file_names = next(os.walk(os.path.join(IMAGE_DIRECTORY, diretcory_name)))[2]
print("we will load [", len(images_file_names), "] files from [",diretcory_name,"] class ..." )
for i in range(len(images_file_names)):
image_name = images_file_names[i]
image_path = os.path.join(IMAGE_DIRECTORY, diretcory_name, image_name)
#print(image_path)
#the class is assumed to be equal to the directorty name
label = diretcory_name
img = Image.open(image_path)
#convert any image to RGB to make sure that it has three channels
rgbimg = Image.new("RGB", img.size)
rgbimg.paste(img)
img=rgbimg
#get the width and the height of the image in pixels
width,height = img.size
#get the size of the image in KB
size_kb=os.stat(image_path).st_size/1000
#add the size to a list of sizes to be
stats.append([label,os.path.basename(image_name),width,height,size_kb])
if (output_file is not None):
#convert the list into a dataframe to make it easy to save into a CSV
stats_dataframe = pd.DataFrame(stats,columns=['Class','Filename','Width','Height','Size_in_KB'])
stats_dataframe.to_csv(output_file,index=False)
print("Stats collected and saved in .",output_file)
else:
print("Stats collected");
return stats
def plot_dataset_distribution (stats, num_cols=5, width=10, height=5, histogram_bins = 10, histogram_range=[0, 1000], figure_padding=4):
#convert the list into a dataframe
stats_frame = pd.DataFrame(stats,columns=['Class','Filename','Width','Height','Size_in_KB'])
#extract the datframe related to sizes only
list_sizes=stats_frame['Size_in_KB']
#get the number of classes in the dataset
number_of_classes=stats_frame['Class'].nunique()
print(number_of_classes, " classes found in the dataset")
#create a list of (list of sizes) for each class of images
#we start by the the sizes of all images in the dataset
list_sizes_per_class=[list_sizes]
class_names=['whole dataset']
print("Images of the whole dataset have an average size of ", list_sizes.mean())
for c in stats_frame['Class'].unique():
print("sizes of class [", c, "] have an average size of ", list_sizes.loc[stats_frame['Class']== c].mean())
#then, we append the sizes of images of a particular class
list_sizes_per_class.append(list_sizes.loc[stats_frame['Class'] == c])
class_names.append(c)
num_rows=math.ceil((number_of_classes+1)/num_cols)
if (number_of_classes<num_cols):
num_cols=number_of_classes+1
fig,axes = plt.subplots(num_rows, num_cols, figsize=(width,height))
fig.tight_layout(pad=figure_padding)
class_count=0
if (num_rows==1 or num_cols==1):
for i in range(num_rows):
for j in range(num_cols):
axes[j+i].hist(list_sizes_per_class[num_cols*i+j], bins = histogram_bins, range=histogram_range)
axes[j+i].set_xlabel('Image size (in KB)', fontweight='bold')
axes[i+j].set_title(class_names[j+i] + ' images ', fontweight='bold')
class_count=class_count+1
if (class_count==number_of_classes+1):
break
else:
for i in range(num_rows):
for j in range(num_cols):
axes[i,j].hist(list_sizes_per_class[num_cols*i+j], bins = histogram_bins, range=histogram_range)
axes[i,j].set_xlabel('Image size (in KB)', fontweight='bold')
axes[i,j].set_title(class_names[i] + ' images ', fontweight='bold')
class_count=class_count+1
if (class_count==number_of_classes+1):
break
def reshape_image_for_neural_network_input(image, IMAGE_SIZE=244):
print ("flatten the image")
image = np.reshape(image,[IMAGE_SIZE* IMAGE_SIZE*3,1])
print ("image.shape", image.shape)
print ("reshape the image to be similar to the input feature vector")
#image = np.reshape(image,[1,IMAGE_SIZE, IMAGE_SIZE,3]).astype('float')
image = image.reshape(1,IMAGE_SIZE,IMAGE_SIZE,3).astype('float')
print ("image.shape", image.shape)
return image
def plot_loss_accuracy(H, EPOCHS, output_file=None):
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on COVID-19 Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
if (output_file is not None):
plt.savefig(output_file)
def draw_accuracy_graph(history):
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
def draw_loss_graph(history):
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#-------------------------------#
def plot_test_image(testX, image_index, predictions_array, true_binary_labels):
"""
testX: this is the test dataset
image_index: index of the image that we will plot from the test dataset
predictions_array: it is the array that contains all the predictions of the test dataset as output of model.predict(testX)
true_binary_labels: these are the true label expressed as INTEGER values. It does not work with hot-encoding and string labels.
"""
single_predictions_array, true_binary_label, test_image = predictions_array, true_binary_labels[image_index], testX[image_index]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(test_image, cmap=plt.cm.binary)
predicted_binary_label = np.argmax(predictions_array)
#print ("predicted_binary_label:", predicted_binary_label)
#print ("true_binary_label:",true_binary_label)
if predicted_binary_label == true_binary_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("predicted: {} {:2.0f}% (true: {})".format(predicted_binary_label,
100*np.max(single_predictions_array),
true_binary_label),
color=color)
def plot_value_array(i, predictions_array, true_label, number_of_classes=3):
predictions_array, true_label = predictions_array, true_label[i]
plt.style.use(['classic'])
plt.grid(False)
plt.xticks(range(number_of_classes))
plt.yticks([])
thisplot = plt.bar(range(number_of_classes), 1, color="#FFFFFF")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
#print(true_label[0])
#print(predicted_label)
thisplot[predicted_label].set_color('red')
thisplot[true_label[0]].set_color('blue')
def plot_sample_predictions(testX, predictions_array, true_binary_labels,number_of_classes=3,num_rows = 10, num_cols = 4, width=None, height=None, is_random=True):
"""
this method plots a sample of predictions from the test dataset and highlights correct and wrong predictions
testX: this is the test dataset
predictions_array: the array that contains all the predictions of the test dataset, as output by model.predict(testX)
true_binary_labels: the true labels array expressed as INTEGER values. It does not work with one-hot encodings or string labels.
num_rows, num_cols: grid size; num_rows*num_cols images are plotted
is_random: if True, images are sampled at random from testX, otherwise they are taken in order
"""
num_images = num_rows*num_cols
if (num_images>testX.shape[0]):
raise Exception("num_rows*num_cols is",(num_rows*num_cols), "must be smaller than number of images in the Test Dataset", testX.shape[0])
if width is None:
width=6*num_cols
if height is None:
height=2*num_rows
plt.figure(figsize=(width, height))
plt.style.use(['seaborn-bright'])
image_index=-1
for i in range(num_images):
if (is_random==True):
image_index=randint(0,testX.shape[0]-1)
else:
image_index=image_index+1
#print(image_index)
#---------------
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_test_image(testX, image_index, predictions_array[image_index], true_binary_labels)
#---------------
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(image_index, predictions_array[image_index], true_binary_labels, number_of_classes)
plt.tight_layout()
plt.show()
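# Example call (illustrative; assumes a trained Keras `model`, a test set `testX` and
# integer labels `testY` defined elsewhere in this script):
#   predictions = model.predict(testX)
#   plot_sample_predictions(testX, predictions, testY, number_of_classes=3,
#                           num_rows=5, num_cols=4, is_random=True)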
``` |
{
"source": "42ip/ShuttleBot",
"score": 3
} |
#### File: ShuttleBot/commands/earth.py
```python
import discord
import requests
import random
from PIL import Image, ImageDraw
import io
async def earth(message,channel):
if len(message.mentions) == 0:
person = message.author
else:
person = message.mentions[0]
response = requests.get(person.avatar_url)
image_bytes = io.BytesIO(response.content)
im2 = Image.open(image_bytes)
response = requests.get(
'https://cdn.mos.cms.futurecdn.net/3upZx2gxxLpW7MBbnKYQLH-1200-80.jpg')
image_bytes = io.BytesIO(response.content)
im1 = Image.open(image_bytes)
im2 = im2.resize((470, 470))
mask_im = Image.new("L", im2.size, 0)
draw = ImageDraw.Draw(mask_im)
draw.ellipse((0, 0, im2.width, im2.height), fill=150 )
im = im1.copy()
im.paste(im2, (35, 40), mask_im)
msgs = ["Zuckerberg told me that you were blue today, well, you are now the Blue Planet! <:deadinside:762920553941303327>",
"You are now a 12,000 km wide ball called Earth. Congratulations <:poggies:886538902184292393>",
"I present to you the face of the planet with 7.8 billion people who contributed nothing to the space <:superAngry:843088789349335050>"]
with io.BytesIO() as image_binary:
im.save(image_binary, 'PNG')
image_binary.seek(0)
picture = discord.File(image_binary, "space.png")
await channel.send(random.choice(msgs))
await channel.send(file=picture)
``` |
{
"source": "42jaylonw/rrc_2021_three_wolves",
"score": 2
} |
#### File: deep_whole_body_controller/utility/reward_utils.py
```python
import numpy as np
def ComputeDist(p0, p1):
return np.linalg.norm(np.subtract(p1, p0))
def FVCap(v_cap, r):
return max(-v_cap, min(r, v_cap))
def ComputeAcc(pos_3, time_step=0.1):
assert pos_3.shape == (3, 3)
vel_0 = ComputeDist(pos_3[0], pos_3[1]) / time_step
vel_1 = ComputeDist(pos_3[1], pos_3[2]) / time_step
acc_3 = (vel_1 - vel_0) / time_step
return acc_3
def ExpSqr(cur, tar=0, wei=-3):
assert wei < 0
return np.sum(np.exp(wei * np.square(np.abs(tar - cur))))
def Delta(seq):
seq = np.array(seq)
assert seq.ndim == 1
_diff = seq - np.mean(seq)
return np.sum(np.abs(_diff))
```
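These helpers are small enough that a quick numeric check clarifies them. The values below are made up for illustration only; the import path follows the one used by `contact_cube_env.py` further down.

```python
import numpy as np
from three_wolves.deep_whole_body_controller.utility import reward_utils

print(reward_utils.ComputeDist([0, 0, 0], [0.3, 0.4, 0.0]))  # 0.5
print(reward_utils.FVCap(1.0, 2.5))                          # 1.0 (capped into [-1, 1])
print(reward_utils.ExpSqr(0.1, 0, wei=-3))                   # exp(-3 * 0.01) ~ 0.970
print(reward_utils.Delta([1.0, 2.0, 3.0]))                   # sum |x - mean| = 2.0

# finite-difference acceleration from three consecutive positions
pos_3 = np.array([[0.0, 0, 0], [0.05, 0, 0], [0.15, 0, 0]])
print(reward_utils.ComputeAcc(pos_3, time_step=0.1))         # (1.0 - 0.5) / 0.1 = 5.0
```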
#### File: three_wolves/envs/contact_cube_env.py
```python
import time
import gym
import numpy as np
import pybullet
from trifinger_simulation import TriFingerPlatform, visual_objects
from trifinger_simulation.tasks import move_cube_on_trajectory as task
from three_wolves.envs.base_cube_env import ActionType, BaseCubeTrajectoryEnv
from three_wolves.envs.utilities.env_utils import HistoryWrapper, resetCamera
from three_wolves.deep_whole_body_controller import position_controller, contact_planner
from three_wolves.deep_whole_body_controller.utility import pinocchio_utils, reward_utils, trajectory
class ContactControlEnv(BaseCubeTrajectoryEnv):
def render(self, mode='human'):
pass
def __init__(self, goal_trajectory, visualization, randomization, evaluation=False, history_num=1, robot_type='sim'):
super(ContactControlEnv, self).__init__(
goal_trajectory=goal_trajectory,
action_type=ActionType.POSITION,
step_size=3)
self.visualization = visualization
self.randomization = randomization
self.evaluation = evaluation
self.observer = HistoryWrapper(history_num)
self.kinematics = pinocchio_utils.Kinematics(robot_type)
self.contact_planner = contact_planner.ContactPlanner()
self.position_controller = position_controller.PositionController(self.kinematics,
self.observer, self.step_size)
self.max_episode = task.EPISODE_LENGTH
self.tip_force_offset = []
# create observation space
spaces = TriFingerPlatform.spaces
self.observation_space = gym.spaces.Box(
low=np.hstack([
spaces.object_position.gym.low, # cube position
[-2 * np.pi] * 3, # cube rpy
spaces.object_position.gym.low, # goal position
[-0.3] * 3, # goal-cube difference
[0] # goal-cube distance
]),
high=np.hstack([
spaces.object_position.gym.high, # cube position
[2 * np.pi] * 3, # cube rpy
spaces.object_position.gym.high, # goal position
[0.3] * 3, # goal-cube difference
[1] # goal-cube distance
])
)
self.action_space = self.contact_planner.action_space
def reset(self):
"""Reset the environment."""
# hard-reset simulation
self.goal_marker = None
del self.platform
# initialize simulation
initial_robot_position = (
TriFingerPlatform.spaces.robot_position.default
)
# initialize cube at the centre
_random_obj_xy_pos = np.random.uniform(
low=[-0.04] * 2,
high=[0.04] * 2,
)
_random_obj_yaw_ori = np.random.uniform(-2 * np.pi, 2 * np.pi)
_random_obj_yaw_ori = pybullet.getQuaternionFromEuler([0, 0, _random_obj_yaw_ori])
random_object_pose = task.move_cube.Pose(
position=[_random_obj_xy_pos[0],
_random_obj_xy_pos[1],
task.INITIAL_CUBE_POSITION[2]],
orientation=_random_obj_yaw_ori
)
self.platform = TriFingerPlatform(
visualization=self.visualization,
initial_robot_position=initial_robot_position,
initial_object_pose=random_object_pose,
)
if self.randomization:
cube_id = self.platform.cube._object_id
random_mass = 0.094*np.random.uniform(0.9, 1.1)
random_lateral_friction = 1*np.random.uniform(0.9, 1)
random_step_size = np.random.randint(1, 6)
pybullet.changeDynamics(cube_id, -1, mass=random_mass, lateralFriction=random_lateral_friction)
self.step_size = random_step_size
# get goal trajectory
if self.goal is None:
trajectory = task.sample_goal()
else:
trajectory = self.goal
# visualize the goal
if self.visualization:
self.goal_marker = visual_objects.CubeMarker(
width=task.move_cube._CUBE_WIDTH,
position=trajectory[0][1],
orientation=(0, 0, 0, 1),
pybullet_client_id=self.platform.simfinger._pybullet_client_id,
)
resetCamera()
self.info = {"time_index": -1, "trajectory": trajectory, "eval_score": 0}
self.step_count = 0
self.drop_times = 0
self.tip_force_offset = []
# initial step
robot_action = self._gym_action_to_robot_action(self._initial_action)
t = self.platform.append_desired_action(robot_action)
self.info["time_index"] = t
self.step_count += 1
obs, _ = self._create_observation(self.info["time_index"])
return obs
def _create_observation(self, t):
robot_observation = self.platform.get_robot_observation(t)
camera_observation = self.platform.get_camera_observation(t)
object_observation = camera_observation.filtered_object_pose
active_goal = np.asarray(
task.get_active_goal(self.info["trajectory"], t)
)
cube_pos = object_observation.position
cube_orn = pybullet.getEulerFromQuaternion(object_observation.orientation)
finger_pos = self.kinematics.forward_kinematics(robot_observation.position)
obs_dict = {
"joint_position": robot_observation.position, # joint position
"joint_velocity": robot_observation.velocity, # joint velocity
"joint_torque": robot_observation.torque, # joint torque
"tip_force": robot_observation.tip_force, # tip force
"object_position": cube_pos, # cube position
"object_rpy": cube_orn, # cube orientation
"goal_position": active_goal, # goal position
"object_goal_distance": active_goal - cube_pos, # cube to goal distance
"tip_0_position": finger_pos[0], # tri-finger position 0
"tip_1_position": finger_pos[1], # tri-finger position 1
"tip_2_position": finger_pos[2], # tri-finger position 2
}
self.observer.update(obs_dict)
rl_obs = np.hstack([
cube_pos, # cube position
cube_orn, # cube rpy
active_goal, # goal position
active_goal - cube_pos, # goal-cube difference
np.linalg.norm(active_goal - cube_pos) # goal-cube distance
])
return rl_obs, obs_dict
def _internal_step(self, action):
self.step_count += 1
# send action to robot
robot_action = self._gym_action_to_robot_action(action)
t = self.platform.append_desired_action(robot_action)
# update goal visualization
if self.visualization:
goal_position = task.get_active_goal(self.info["trajectory"], t)
self.goal_marker.set_state(goal_position, (0, 0, 0, 1))
time.sleep(0.001)
return t
def apply_action(self, action):
tg = trajectory.get_interpolation_planner(init_pos=self.observer.dt['joint_position'],
tar_pos=action,
start_time=0,
reach_time=self.step_size)
for i in range(self.step_size):
if self.step_count >= self.max_episode:
break
_action = tg(i + 1)
t = self._internal_step(_action)
self.info["time_index"] = t
_, obs_dict = self._create_observation(self.info["time_index"])
if self.evaluation:
eval_score = self.compute_reward(
obs_dict["object_position"],
obs_dict["goal_position"],
self.info,
)
self.info['eval_score'] += eval_score
# return score
def update(self, policy_action):
self._last_goal = self.observer.dt['goal_position']
contact_face_ids, contact_points = self.contact_planner.compute_contact_points(policy_action)
self.position_controller.update(contact_points, contact_face_ids)
def step(self, policy_action):
self.update(policy_action)
self.position_controller.tips_reach(self.apply_action, self.tip_force_offset)
reward = 0
while not self.Dropped() and not self.step_count >= self.max_episode:
if (self._last_goal != self.observer.dt['goal_position']).all():
self.update(policy_action)
cur_phase_action = self.position_controller.get_action()
self.apply_action(cur_phase_action)
reward += self.position_controller.get_reward() * 0.001 * self.step_size
self.drop_times += 1
done = self.drop_times >= 3 or self.step_count >= self.max_episode
if self.evaluation:
done = self.step_count >= self.max_episode
return self._create_observation(self.info["time_index"])[0], reward, done, self.info
def Dropped(self):
tip_touch = np.subtract(self.observer.dt['tip_force'], self.tip_force_offset[0]) > 0
cube_pos = np.array(self.observer.dt['object_position'])
tri_distance = [reward_utils.ComputeDist(self.observer.dt['tip_0_position'], cube_pos),
reward_utils.ComputeDist(self.observer.dt['tip_1_position'], cube_pos),
reward_utils.ComputeDist(self.observer.dt['tip_2_position'], cube_pos)]
is_dropped = np.sum(tip_touch) < 2 or any(np.array(tri_distance) > 0.08)
return is_dropped
class RealContactControlEnv(ContactControlEnv):
def __init__(self,
goal_trajectory):
super().__init__(goal_trajectory=goal_trajectory,
visualization=False,
evaluation=False,
randomization=False,
robot_type='real')
self.max_episode = task.EPISODE_LENGTH
def _internal_step(self, action):
self.step_count += 1
# send action to robot
robot_action = self._gym_action_to_robot_action(action)
t = self.platform.append_desired_action(robot_action)
return t
def step(self, policy_action):
if self.platform is None:
raise RuntimeError("Call `reset()` before starting to step.")
self.update(policy_action)
self.position_controller.tips_reach(self.apply_action, self.tip_force_offset)
reward = 0
while not self.Dropped() and not self.step_count >= self.max_episode:
if list(self._last_goal) != list(self.observer.dt['goal_position']):
self.update(policy_action)
cur_phase_action = self.position_controller.get_action()
self.apply_action(cur_phase_action)
# reward += self.position_controller.get_reward() * 0.001 * self.step_size
# self.drop_times += 1
done = self.step_count >= self.max_episode
return self._create_observation(self.info["time_index"])[0], reward, done, self.info
def reset(self):
import robot_fingers
# cannot reset multiple times
if self.platform is not None:
raise RuntimeError(
"Once started, this environment cannot be reset."
)
self.platform = robot_fingers.TriFingerPlatformWithObjectFrontend()
# get goal trajectory
if self.goal is None:
trajectory = task.sample_goal()
else:
trajectory = self.goal
self.info = {"time_index": -1, "trajectory": trajectory}
self.step_count = 0
# initial step
for i in range(int(1./(0.001*self.step_size))):
robot_action = self._gym_action_to_robot_action(self._initial_action)
t = self.platform.append_desired_action(robot_action)
self.info["time_index"] = t
self.step_count += 1
obs, _ = self._create_observation(self.info["time_index"])
return obs
if __name__ == '__main__':
env = ContactControlEnv(goal_trajectory=None,
visualization=True,
randomization=False)
observation = env.reset()
is_done = False
t = 0
while t < env.max_episode:
observation, score, is_done, info = env.step([0.5 + 0.25 / 2, 0.25 / 2, 0.75 + 0.2 / 2,
0.5, 0.5, 0.5])
print("eval_score:", score)
t += 0.001 * env.step_size
if is_done:
env.reset()
```
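The flat observation returned by `_create_observation` stacks five pieces in a fixed order, 13 values in total. A small sketch, assuming that ordering, of how a consumer might split it back up (the slice names are illustrative, not from the repository):

```python
# Hypothetical helper illustrating the 13-d observation layout produced by
# ContactControlEnv._create_observation (order follows the np.hstack call above).
OBS_SLICES = {
    "cube_position": slice(0, 3),
    "cube_rpy": slice(3, 6),
    "goal_position": slice(6, 9),
    "goal_cube_difference": slice(9, 12),
    "goal_cube_distance": slice(12, 13),
}

def split_observation(obs):
    """Split a flat observation vector into the named parts stacked above."""
    return {name: obs[s] for name, s in OBS_SLICES.items()}
```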
#### File: envs/utilities/env_utils.py
```python
from trifinger_simulation.visual_objects import CubeMarker
import pybullet
import numpy as np
U = []
def tag(xyz):
U.append(CubeMarker(
width=0.03,
position=xyz,
orientation=(0, 0, 0, 1),
color=(1, 0, 0, 0.7),
pybullet_client_id=0,
))
def clean():
for o in U:
pybullet.removeBody(o.body_id, physicsClientId=0)
def resetCamera():
pybullet.resetDebugVisualizerCamera(
cameraDistance=0.5,
cameraYaw=0,
cameraPitch=-41,
cameraTargetPosition=[0, 0, 0],
physicsClientId=0
)
class HistoryWrapper:
def __init__(self,
history_num=3):
self._history_obs = {}
self.history_num = history_num
self.dt = {}
def reset(self, init_obs_dict):
for k, v in init_obs_dict.items():
self._history_obs.update({k: [v]*self.history_num})
return self.get_history_obs()
def update(self, obs_dict):
if not self.dt:
self.dt = obs_dict
return self.reset(obs_dict)
self.dt = obs_dict
for k, v in obs_dict.items():
assert len(v) == len(self._history_obs[k][0]), 'wrong shape'
assert k in self._history_obs.keys(), 'wrong key'
self._history_obs[k].pop()
self._history_obs[k].insert(0, v)
assert len(self._history_obs[k]) == self.history_num
return self.get_history_obs()
def get_history_obs(self):
_obs = []
for _, v in self._history_obs.items():
_obs.append(np.hstack(v))
return np.hstack(_obs)
def search(self, k):
return np.array(self._history_obs[k])
if __name__ == '__main__':
his = HistoryWrapper(3)
his.reset({'pos': [1], 'orn': [6]})
q = his.get_history_obs()
```
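A short sketch of how `HistoryWrapper` stacks observations; the numbers are made up and the import path is inferred from the file header above. The first `update` call seeds every history slot with the initial values, after which each call pushes the newest observation to the front.

```python
from three_wolves.envs.utilities.env_utils import HistoryWrapper

his = HistoryWrapper(history_num=3)
his.update({"pos": [1.0], "orn": [0.0]})        # seeds the history with copies of the first observation
obs = his.update({"pos": [2.0], "orn": [0.5]})  # newest values are inserted at index 0
print(obs)                # [2. 1. 1. 0.5 0. 0.]
print(his.search("pos"))  # [[2.] [1.] [1.]]
```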
#### File: envs/utilities/model_utils.py
```python
import os
import matplotlib.pyplot as plt
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.results_plotter import load_results, ts2xy
import numpy as np
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
It must contains the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, check_freq: int, log_dir: str, verbose=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.best_mean_reward = -np.inf
def _on_step(self) -> bool:
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
if self.verbose > 0:
print(f"Num timesteps: {self.num_timesteps}")
print(
f"Best mean reward: {self.best_mean_reward:.2f} - Last mean reward per episode: {mean_reward:.2f}")
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
# Example for saving best model
if self.verbose > 0:
print(f"Saving new best model to {self.save_path}.zip")
self.model.save(self.save_path)
return True
def plot_results(log_folder, title='Learning Curve'):
from scipy.signal import savgol_filter
R = load_results(log_folder)['r']
T = load_results(log_folder)['t']
_w = 7
_window_size = len(R) // _w if (len(R) // _w) % 2 != 0 else len(R) // _w + 1
filtered = savgol_filter(R, _window_size, 1)
plt.title('smoothed returns')
plt.ylabel('Returns')
plt.xlabel('time step')
plt.plot(T, filtered)
plt.grid()
plt.show()
```
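A hedged usage sketch for the callback above. The environment id and hyperparameters are placeholders and the callback's import path is inferred from the file layout; the key requirement is that the training environment is wrapped in `Monitor` writing to the same `log_dir`, because `load_results()` reads the files `Monitor` produces.

```python
import os

import gym
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor

# import path inferred from the file layout above; adjust to your package structure
from three_wolves.envs.utilities.model_utils import SaveOnBestTrainingRewardCallback

log_dir = "./logs/"
os.makedirs(log_dir, exist_ok=True)

# Monitor writes the episode statistics that load_results() reads inside the callback
env = Monitor(gym.make("Pendulum-v1"), log_dir)
callback = SaveOnBestTrainingRewardCallback(check_freq=1000, log_dir=log_dir)
model = PPO("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=10_000, callback=callback)
```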
#### File: trifinger_simulation/gym_wrapper/data_logger.py
```python
import pickle
class EpisodeData:
"""
The structure in which the data from each episode
will be logged.
"""
def __init__(self, joint_goal, tip_goal):
self.joint_goal = joint_goal
self.tip_goal = tip_goal
self.joint_positions = []
self.tip_positions = []
self.timestamps = []
def append(self, joint_pos, tip_pos, timestamp):
self.joint_positions.append(joint_pos)
self.tip_positions.append(tip_pos)
self.timestamps.append(timestamp)
class DataLogger:
"""
Dumps the env episodic data to a pickle file
"""
def __init__(self):
self.episodes = []
self._curr = None
def new_episode(self, joint_goal, tip_goal):
if self._curr:
# convert to dict for saving so loading has no dependencies
self.episodes.append(self._curr.__dict__)
self._curr = EpisodeData(joint_goal, tip_goal)
def append(self, joint_pos, tip_pos, timestamp):
self._curr.append(joint_pos, tip_pos, timestamp)
def store(self, filename):
with open(filename, "wb") as file_handle:
pickle.dump(self.episodes, file_handle)
```
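A minimal usage sketch for the two classes above; the goal and position values are made up and the import path is inferred from the file header. Note that `store()` only writes episodes that have been closed by a subsequent `new_episode()` call; the episode currently in progress is not dumped.

```python
# import path inferred from the file header above
from trifinger_simulation.gym_wrapper.data_logger import DataLogger

logger = DataLogger()
logger.new_episode(joint_goal=[0.0, 0.9, -1.7], tip_goal=[0.05, 0.05, 0.08])
logger.append(joint_pos=[0.0, 0.9, -1.7], tip_pos=[0.05, 0.05, 0.08], timestamp=0.001)

# starting the next episode appends (and therefore stores) the previous one
logger.new_episode(joint_goal=[0.1, 0.8, -1.6], tip_goal=[0.0, 0.1, 0.08])
logger.store("episodes.p")  # only episodes closed by new_episode() end up in the pickle
```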
#### File: rrc_2021_three_wolves/trifinger_simulation/real_finger.py
```python
import os
from ament_index_python.packages import get_package_share_directory
import robot_interfaces
import robot_fingers
from trifinger_simulation import finger_types_data
from trifinger_simulation.sim_finger import SimFinger
class RealFinger:
"""
The RealFinger class provides an interface to the real robot. Any script
that creates an instance of the :class:`.SimFinger` can create an instance
of this class and use it in the same way.
"""
def __init__(
self,
finger_type,
finger_config_suffix,
enable_visualization=False,
):
"""
Constructor, initializes the physical world we will work in.
Args:
finger_type (string): Name of the finger type. In order to get
a list of the valid finger types, call
:meth:`.finger_types_data.get_valid_finger_types`.
finger_config_suffix (int): ID of the finger that is used. Has to
be one of [0, 120, 240]. This is only if a single finger is to
be used on any of the robots, and is ignored otherwise.
enable_visualization (bool, optional): Set to 'True' to run a GUI
for visualization. This uses pyBullet but only for
visualization, i.e. the state of the simulation is constantly
set to match the one of the real robot.
"""
# Simulation is only used for visualization, so only run it when needed
self.simulator = None
if enable_visualization:
self.simulator = SimFinger(
finger_type=finger_type,
time_step=0.001, # todo: not sure if this is correct
enable_visualization=True,
)
number_of_fingers = finger_types_data.get_number_of_fingers(
finger_type
)
if number_of_fingers == 1:
if finger_type == "fingerone":
config_file_path = os.path.join(
get_package_share_directory("robot_fingers"),
"config",
"finger_%s.yml" % finger_config_suffix,
)
elif finger_type == "fingeredu":
config_file_path = os.path.join(
get_package_share_directory("robot_fingers"),
"config",
"fingeredu_%s.yml" % finger_config_suffix,
)
finger_data = robot_interfaces.finger.SingleProcessData()
self.real_finger_backend = (
robot_fingers.create_real_finger_backend(
finger_data, config_file_path
)
)
self.robot = robot_interfaces.finger.Frontend(finger_data)
self.Action = robot_interfaces.finger.Action
elif number_of_fingers == 3:
if finger_type == "trifingerone":
config_file_path = os.path.join(
get_package_share_directory("robot_fingers"),
"config",
"trifinger.yml",
)
elif finger_type == "trifingeredu":
config_file_path = os.path.join(
get_package_share_directory("robot_fingers"),
"config",
"trifingeredu.yml",
)
finger_data = robot_interfaces.trifinger.SingleProcessData()
self.real_finger_backend = robot_fingers.create_trifinger_backend(
finger_data, config_file_path
)
self.robot = robot_interfaces.trifinger.Frontend(finger_data)
self.Action = robot_interfaces.trifinger.Action
self.real_finger_backend.initialize()
def append_desired_action(self, action):
"""
Append an action to the action timeseries, that should be
applied to the robot.
Args:
action (self.Action): Joint positions or torques or both
Returns:
self.action_index (int): The current time-index at which the action
was applied.
"""
return self.robot.append_desired_action(action)
def get_observation(self, time_index):
"""
Get the observation from the robot at a specified time_index.
Args:
time_index (int): the time_index at which the observation is
needed
Returns:
observation (robot.Observation): the corresponding observation
"""
observation = self.robot.get_observation(time_index)
if self.simulator is not None:
self.simulator.reset_finger_positions_and_velocities(
joint_positions=observation.position
)
return observation
def reset_finger(self, joint_positions):
"""
Move the finger(s) to the given joint positions.
The given position is set as target and the robot is stepped
for one second to give it time to reach there.
"""
action = self.Action(position=joint_positions)
for i in range(1000):
t = self.append_desired_action(action)
observation = self.get_observation(t)
return observation
```
#### File: tasks/move_cube_on_trajectory/run_replay.py
```python
import argparse
import json
import pathlib
import pickle
import sys
import numpy as np
from . import replay_action_log
def add_arguments(parser):
parser.add_argument(
"log_directory",
type=pathlib.Path,
help="Directory containing the generated log files.",
)
def main(log_directory: pathlib.Path):
try:
if not log_directory.is_dir():
print(
"'{}' does not exist or is not a directory.".format(
log_directory
)
)
sys.exit(1)
logfile_tmpl = str(log_directory / "action_log_{:02d}.p")
# load samples
sample_file = log_directory / "test_data.p"
with open(sample_file, "rb") as fh:
test_data = pickle.load(fh)
# run "replay_action_log.py" for each sample
rewards = []
for i, sample_json in enumerate(test_data):
print("\n___Replay trajectory {}___".format(i))
sample = json.loads(sample_json)
reward = replay_action_log.replay_action_log(
logfile_tmpl.format(i), sample
)
rewards.append(reward)
# report
print("\n=======================================================\n")
report = {
"mean": np.mean(rewards),
"median": np.median(rewards),
"std": np.std(rewards),
}
print(
"reward median: {:.3f},\tmean: {:.3f},\tstd: {:.3f}\n".format(
report["median"], report["mean"], report["std"]
)
)
# save report to file
report_file = log_directory / "reward.json"
with open(report_file, "w") as f:
json.dump(report, f)
except Exception as e:
print("Error: {}".format(e), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
add_arguments(parser)
args = parser.parse_args()
main(args.log_directory)
```
#### File: tasks/move_cube/run_replay.py
```python
import argparse
import os
import pickle
import sys
import typing
import numpy as np
from . import replay_action_log
class TestSample(typing.NamedTuple):
difficulty: int
iteration: int
init_pose_json: str
goal_pose_json: str
logfile: str
def main(input_directory: str):
try:
if not os.path.isdir(input_directory):
print(
"'{}' does not exist or is not a directory.".format(
input_directory
)
)
sys.exit(1)
levels = (1, 2, 3, 4)
# load samples
sample_file = os.path.join(input_directory, "test_data.p")
with open(sample_file, "rb") as fh:
test_data = pickle.load(fh)
# run "replay_action_log.py" for each sample
level_rewards: dict = {level: [] for level in levels}
for sample in test_data:
print(
"Replay level {} sample {}".format(
sample.difficulty, sample.iteration
)
)
reward = replay_action_log.replay_action_log(
sample.logfile,
sample.difficulty,
sample.init_pose_json,
sample.goal_pose_json,
)
level_rewards[sample.difficulty].append(reward)
# report
print("\n=======================================================\n")
report = ""
total_reward = 0
for level, rewards in level_rewards.items():
rewards = np.asarray(rewards)
mean = rewards.mean()
report += "Level {} mean reward:\t{:.3f},\tstd: {:.3f}\n".format(
level, mean, rewards.std()
)
total_reward += level * mean
report += "-------------------------------------------------------\n"
report += "Total Weighted Reward: {:.3f}\n".format(total_reward)
print(report)
# save report to file
report_file = os.path.join(input_directory, "reward.txt")
with open(report_file, "w") as fh:
fh.write(report)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
def add_arguments(parser):
parser.add_argument(
"input_directory",
type=str,
help="Directory containing the generated log files.",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
add_arguments(parser)
args = parser.parse_args()
main(args.input_directory)
```
#### File: tasks/rearrange_dice/__main__.py
```python
import argparse
import sys
import numpy as np
import cv2
import trifinger_simulation
from . import json_goal_from_config, sample_goal, goal_to_json
from . import visualize_2d, generate_goal_mask
def cmd_sample_goal(args):
try:
goal = sample_goal()
print(goal_to_json(goal))
if args.show:
visualize_2d(goal)
if args.show_masks:
data_dir = trifinger_simulation.get_data_dir()
camera_param_dir = data_dir / "camera_params"
camera_params = trifinger_simulation.camera.load_camera_parameters(
camera_param_dir, "camera{id}_cropped_and_downsampled.yml"
)
masks = generate_goal_mask(camera_params, goal)
masks = np.hstack(masks)
cv2.imshow("Goal Masks", masks)
cv2.waitKey()
except FileExistsError as e:
print(e, file=sys.stderr)
sys.exit(1)
def cmd_goal_from_config(args):
try:
goal = json_goal_from_config(args.goal_config_file)
print(goal)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
def main():
parser = argparse.ArgumentParser("rearrange_dice", description=__doc__)
subparsers = parser.add_subparsers(title="command", dest="command")
subparsers.required = True
sub = subparsers.add_parser(
"sample_goal",
description="""Sample a random goal. The goal is written to stdout as
JSON string.
""",
)
sub.add_argument(
"--show", action="store_true", help="Visualize the goal positions."
)
sub.add_argument(
"--show-masks",
action="store_true",
help="Show the goal masks (using some default camera parameters).",
)
sub.set_defaults(func=cmd_sample_goal)
sub = subparsers.add_parser(
"goal_from_config",
description="""Load or sample a goal based on the given config file.
The goal is written to stdout as JSON string.
""",
)
sub.add_argument(
"goal_config_file", type=str, help="Path to a goal config JSON file."
)
sub.set_defaults(func=cmd_goal_from_config)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
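# Example invocations (assuming the package is installed so the module can be run
# with -m; the module path is inferred from the file layout above):
#   python -m trifinger_simulation.tasks.rearrange_dice sample_goal --show
#   python -m trifinger_simulation.tasks.rearrange_dice goal_from_config goal.json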
``` |
{
"source": "42lacksky/ML_DecisionTree",
"score": 3
} |
#### File: 42lacksky/ML_DecisionTree/decision_tree.py
```python
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
class Rule(object):
def __init__(self, feature_number, feature_value):
self.feature_num = feature_number
self.feature_value = feature_value
self.predicate = lambda X: X[:, self.feature_num] >= self.feature_value
def split(self, X, y=None):
if y is None:
return X[self.predicate(X)], X[~self.predicate(X)]
return X[self.predicate(X)], y[self.predicate(X)], X[~self.predicate(X)], y[~self.predicate(X)]
def match(self, x):
return x[self.feature_num] >= self.feature_value
class Leaf(object):
def __init__(self, labels):
self.labels = labels
self.unique_label_counts = np.asarray((np.unique(self.labels, return_counts=True))).T
self.prediction = self.unique_label_counts[self.unique_label_counts[:, 1].argsort()][-1][0]  # label with the highest count (majority class)
def predict(self, X):
return np.array([self.prediction] * X.shape[0])
class DecisionNode(object):
def __init__(self, rows, labels, predicate, true_branch, false_branch):
self.rows = rows
self.labels = labels
self.predicate = predicate
self.true_branch = true_branch
self.false_branch = false_branch
self.unique_label_counts = np.asarray((np.unique(self.labels, return_counts=True))).T
self.prediction = self.unique_label_counts[self.unique_label_counts[:, 1].argsort()][-1][0]  # label with the highest count (majority class)
def predict(self, X):
return np.array([self.prediction] * X.shape[0])
class DecisionTree(BaseEstimator, TransformerMixin):
def __init__(self, depth=5, n_splits=5):
self.depth = depth
self.n_splits = n_splits
self.selected_values_per_feature = None
self.label_counts = None
self.tree_root = None
def fit(self, X, y=None):
self.selected_values_per_feature = np.zeros((X.shape[1], self.n_splits), dtype=float)
self.tree_root = self._build_tree(X, y, 0)
return self
def predict(self, X):
predictions = []
for x in X:
predictions.append(int(self._classify(x, self.tree_root, current_depth=0)[0]))
return np.array(predictions)
def _classify(self, sample, node, current_depth):
if self.depth == current_depth or isinstance(node, Leaf):
return node.predict(sample)
current_depth += 1
if node.predicate.match(sample):
return self._classify(sample, node.true_branch, current_depth)
else:
return self._classify(sample, node.false_branch, current_depth)
def _build_tree(self, features, labels, current_depth):
gain, predicate = self._find_best_split(features, labels)
if (current_depth == self.depth) or (gain == 0):
return Leaf(labels)
true_rows, true_labels, false_rows, false_labels = predicate.split(features, labels)
current_depth += 1
true_branch = self._build_tree(true_rows, true_labels, current_depth=current_depth)
false_branch = self._build_tree(false_rows, false_labels, current_depth=current_depth)
return DecisionNode(features, labels, predicate, true_branch, false_branch)
def _find_best_split(self, rows, labels):
best_gain = 0
best_predicate = None
current_uncertainty = self._gini(sample_labels=labels)
for feature_number in range(rows.shape[1]):
unique_values = self._get_unique_values(rows[:, feature_number])
for feature_value in unique_values:
predicate = Rule(feature_number=feature_number, feature_value=feature_value)
true_rows, true_labels, false_rows, false_labels = predicate.split(rows, labels)
if true_rows.shape[0] == 0 or false_rows.shape[0] == 0:
continue
gain = self._information_gain(true_labels, false_labels, current_uncertainty)
if gain >= best_gain:
best_gain, best_predicate = gain, predicate
return best_gain, best_predicate
def _get_unique_values(self, X_column):
unique_feature_values = np.sort(np.unique(X_column))
if unique_feature_values.shape[0] < self.n_splits:
return unique_feature_values
selected_values_indexes = np.linspace(0, unique_feature_values.shape[0], num=self.n_splits, endpoint=False,
dtype=int)
return unique_feature_values[selected_values_indexes]
def _gini(self, sample_labels):
def get_label_counts(labels):
return np.asarray((np.unique(labels, return_counts=True))).T
impurity = 1
for label, label_count in get_label_counts(labels=sample_labels):
label_probability = label_count / sample_labels.shape[0]
impurity -= label_probability ** 2
return impurity
def _information_gain(self, labels_left, labels_right, current_uncertainty):
p = float(labels_left.shape[0]) / (labels_left.shape[0] + labels_right.shape[0])
return current_uncertainty - p * self._gini(labels_left) - (1 - p) * self._gini(labels_right)
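# Worked example of the impurity math above (labels chosen for illustration):
#   labels = [0, 0, 1, 1, 1]           -> gini = 1 - (2/5)^2 - (3/5)^2 = 0.48
#   split:  left = [0, 0], right = [1, 1, 1], p = 2/5
#   information gain = 0.48 - 0.4 * gini(left) - 0.6 * gini(right)
#                    = 0.48 - 0.4 * 0.0 - 0.6 * 0.0 = 0.48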
if __name__ == '__main__':
dataset = make_classification(n_samples=300)
X_train, X_test, y_train, y_test = train_test_split(dataset[0], dataset[1], test_size=0.3)
tree = DecisionTree(depth=5, n_splits=5).fit(X_train, y_train)
print(accuracy_score(y_test, tree.predict(X_test)))
``` |
{
"source": "42MachineLearning/pyrb",
"score": 2
} |
#### File: pyrb/tests/test_risk_budgeting.py
```python
import unittest
import numpy as np
from pyrb.allocation import EqualRiskContribution, RiskBudgeting, ConstrainedRiskBudgeting
class AllocationTest(unittest.TestCase):
def setUp(self):
self.cor = np.array([[1, 0.1, 0.4, 0.5, 0.5], [0.1, 1, 0.7, 0.4, 0.4], [
0.4, 0.7, 1, 0.8, 0.05], [0.5, 0.4, 0.8, 1, 0.1], [0.5, 0.4, 0.05, 0.1, 1]])
self.vol = [0.15, 0.20, 0.25, 0.3, 0.1]
self.n = len(self.vol)
self.cov = self.cor * np.outer(self.vol, self.vol)
self.budgets = [0.2, 0.2, 0.3, 0.1, 0.2]
self.bounds = np.array(
[[0.2, 0.3], [0.2, 0.3], [0.05, 0.15], [0.05, 0.15], [0.25, 0.35]])
self.ERC = EqualRiskContribution(self.cov)
self.RB = RiskBudgeting(self.cov, self.budgets)
self.CRB = ConstrainedRiskBudgeting(
self.cov, budgets=None, pi=None, bounds=self.bounds)
class PyrbTest(AllocationTest):
def test_erc(self):
self.ERC.solve()
np.testing.assert_almost_equal(np.sum(self.ERC.x), 1)
np.testing.assert_almost_equal(
np.dot(
np.dot(
self.ERC.x,
self.cov),
self.ERC.x) ** 0.5,
self.ERC.get_risk_contributions(
scale=False).sum(),
decimal=10)
self.assertTrue(
abs(self.ERC.get_risk_contributions().mean() - 1.0 / self.n) < 1e-5)
def test_rb(self):
self.RB.solve()
np.testing.assert_almost_equal(np.sum(self.RB.x), 1, decimal=5)
np.testing.assert_almost_equal(
np.dot(
np.dot(
self.RB.x,
self.cov),
self.RB.x) ** 0.5,
self.RB.get_risk_contributions(
scale=False).sum(),
decimal=10)
self.assertTrue(
abs(self.RB.get_risk_contributions() - self.budgets).sum() < 1e-5)
def test_cerb(self):
self.CRB.solve()
np.testing.assert_almost_equal(np.sum(self.CRB.x), 1)
np.testing.assert_almost_equal(
self.CRB.get_risk_contributions()[1], 0.2455, decimal=5)
np.testing.assert_almost_equal(np.sum(self.CRB.x[1]), 0.2)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "42maru/saas-gitbook",
"score": 3
} |
#### File: saas-gitbook/f2maru_qa/client.py
```python
import json
import logging
from typing import List
import requests
logger = logging.getLogger(__name__)
class Client(object):
def __init__(self, app_code: str, api_key: str, **kwargs):
self._app_code: str = app_code
self._api_key: str = api_key
self._config: dict = kwargs
# todo: domain setting needed.
self.__host = '172.16.58.3'
self.__bulk_path = '/api/application/documents/bulk'
self.__broker_path = '/api/broker'
self.__headers = {'Content-Type': 'application/json; charset=utf-8'}
self.__headers.update({
'X-PLATFORM42-API-KEY': api_key,
'X-PLATFORM42-APP-ID': app_code
})
def bulk_insert(self, data: List[dict]) -> bool:
def validation(obj: dict):
assert "title" in obj, "title is mandatory"
assert "content" in obj, "content is mandatory"
assert obj['content'], "content can not be empty"
def validations(objects: List[dict]):
[validation(obj) for obj in objects]
validations(data)
try:
requests.post(self.__write_url, data=json.dumps(data), headers=self.__headers)
return True
except Exception as e:
logger.error("fail add new object [Error Msg] ({})".format(str(e)))
return False
def insert(self, data: dict) -> bool:
return self.bulk_insert([data])
def inquiry(self, query: str, answer_count: int = 3):
# todo: does not work unless 'debug' is included in the payload.
payload = {'query': query, 'count': answer_count, 'debug': False}
res = requests.get(self.__read_url, params=payload, headers=self.__headers)
return res.json() if res and res.status_code == 200 else None
@property
def __write_url(self):
return "http://{}{}".format(self.__host, self.__bulk_path)
@property
def __read_url(self):
return "http://{}{}".format(self.__host, self.__broker_path)
``` |
{
"source": "42nick/my_plotting_utils",
"score": 2
} |
#### File: my_plotting_utils/tests/conftest.py
```python
import sys
import pytest
# @pytest.fixture
# def capture_stdout(monkeypatch):
# buffer = {"stdout": "", "write_calls": 0, "sprint": []}
# def fake_write(s):
# buffer["stdout"] += s
# buffer["write_calls"] += 1
# buffer["sprint"].append(s)
# monkeypatch.setattr(sys.stdout, 'write', fake_write)
# return buffer
@pytest.fixture
def capture_stdout(monkeypatch):
string_buffer = {"stdout": "", "write_calls": 0, "sprint": []}
def fake_stdout(s):
string_buffer["stdout"] += s
string_buffer["write_calls"] += 1
string_buffer["sprint"].append(s)
monkeypatch.setattr(sys.stdout, "write", fake_stdout)
return string_buffer
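# Example test using the fixture (illustrative):
#   def test_prints_hello(capture_stdout):
#       print("hello")
#       assert capture_stdout["stdout"] == "hello\n"
#       assert capture_stdout["write_calls"] == 2  # print() writes the text and the newline separately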
``` |
{
"source": "42Numeric/noisyopt",
"score": 2
} |
#### File: noisyopt/tests/test_noisyopt.py
```python
import numpy as np
import numpy.testing as npt
import noisyopt
def test_minimize():
deltatol = 1e-3
## basic testing without stochasticity
def quadratic(x):
return (x**2).sum()
res = noisyopt.minimize(quadratic, np.asarray([0.5, 1.0]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([2.5, -3.2, 0.9, 10.0, -0.3]),
deltatol=deltatol)
npt.assert_allclose(res.x, np.zeros(5), atol=deltatol)
npt.assert_equal(res.free, [False, False, False, False, False])
## test bound handling
res = noisyopt.minimize(quadratic, np.asarray([0.5, 0.5]),
bounds=np.asarray([[0, 1], [0, 1]]), deltatol=deltatol)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
res = noisyopt.minimize(quadratic, np.asarray([0.8, 0.8]),
bounds=np.asarray([[0.5, 1], [0.5, 1]]),
deltatol=deltatol)
npt.assert_allclose(res.x, [0.5, 0.5], atol=deltatol)
npt.assert_equal(res.free, [False, False])
## test determination of unconstrained variables
def quadratic_except_last(x):
return (x[:-1]**2).sum()
res = noisyopt.minimize(quadratic_except_last, np.asarray([0.5, 1.0]))
npt.assert_approx_equal(res.x[0], 0.0)
npt.assert_equal(res.free, [False, True])
## test errorcontrol for stochastic function
def stochastic_quadratic(x, seed=None):
prng = np.random if seed is None else np.random.RandomState(seed)
return (x**2).sum() + prng.randn(1) + 0.5*np.random.randn(1)
deltatol = 0.5
# test unpaired
res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
deltainit=2.0, deltatol=deltatol,
errorcontrol=True)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
# test paired
res = noisyopt.minimize(stochastic_quadratic, np.array([4.55, 3.0]),
deltainit=2.0, deltatol=deltatol,
errorcontrol=True, paired=True)
npt.assert_allclose(res.x, [0.0, 0.0], atol=deltatol)
npt.assert_equal(res.free, [False, False])
def test_bisect():
xtol = 1e-6
## simple tests
root = noisyopt.bisect(lambda x: x, -2, 2, xtol=xtol)
npt.assert_allclose(root, 0.0, atol=xtol)
root = noisyopt.bisect(lambda x: x-1, -2, 2, xtol=xtol)
npt.assert_allclose(root, 1.0, atol=xtol)
## extrapolate if 0 outside of interval
root = noisyopt.bisect(lambda x: x, 1, 2, xtol=xtol)
npt.assert_allclose(root, 0.0, atol=xtol)
npt.assert_raises(noisyopt.BisectException,
noisyopt.bisect, lambda x: x, 1, 2,
xtol=xtol, outside='raise')
## extrapolate with nonlinear function
root = noisyopt.bisect(lambda x: x+x**2, 1.0, 2, xtol=xtol)
assert root < 1.0
## test with stochastic function
xtol = 1e-1
func = lambda x: x - 0.25 + np.random.normal(scale=0.01)
root = noisyopt.bisect(noisyopt.AveragedFunction(func), -2, 2, xtol=xtol,
errorcontrol=True)
npt.assert_allclose(root, 0.25, atol=xtol)
def test_AveragedFunction():
## averaging a simple function
func = lambda x: np.asarray(x).sum()
avfunc = noisyopt.AveragedFunction(func, N=30)
av, avse = avfunc([1.0, 1.0])
npt.assert_equal(av, 2.0)
npt.assert_equal(avse, 0.0)
# se of function value difference between two points is zero
# (as function evaluation is not stochastic)
diffse = avfunc.diffse([1.0, 1.0], [2.0, 1.0])
npt.assert_equal(diffse, 0.0)
## changing the number of evaluations
avfunc.N *= 2
npt.assert_equal(avfunc.N, 60)
## averaging a stochastic function
func = lambda x: np.asarray(x).sum() + np.random.randn()
avfunc = noisyopt.AveragedFunction(func, N=30)
# check that reevaluation gives the same thing due to caching
av30_1, avse30_1 = avfunc([1.0, 1.0])
av30_2, avse30_2 = avfunc([1.0, 1.0])
npt.assert_equal(av30_1, av30_2)
npt.assert_equal(avse30_1, avse30_2)
# check that the standard error decreases if the number of evaluations N is increased
avfunc.N *= 2
av60, avse60 = avfunc([1.0, 1.0])
assert av30_1 != av60
assert avse30_1 > avse60
# test with floating point N
noisyopt.AveragedFunction(func, N=30.0, paired=True)
if __name__ == '__main__':
npt.run_module_suite()
``` |
{
"source": "42pde-bakk/DSLR",
"score": 3
} |
#### File: DSLR/srcs/scatter_plot.py
```python
from pkgs.parsing import check_input
from pkgs.feature import Feature
import pandas as pd
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
def create_dict(houses) -> dict:
courses = dict()
for house in houses:
courses[house] = dict()
for name, dtype in houses[house].dtypes.iteritems():
if dtype == np.float64:
column = [float(x) for x in houses[house][name].values if not math.isnan(x)]
courses[house][name] = Feature(name, column)
return courses
def plot(houses):
for house in houses:
plt.scatter(houses[house]["Defense Against the Dark Arts"], houses[house]["Astronomy"], alpha=0.4)
plt.xlabel("Defense Against the Dark Arts")
plt.ylabel("Astronomy")
plt.legend(houses.keys())
plt.show()
def main():
check_input(sys.argv)
data = pd.read_csv(sys.argv[1], index_col=0)
houses = {x: pd.DataFrame(y) for x, y in data.groupby('Hogwarts House', as_index=False)}
courses = create_dict(houses)
plot(houses)
if __name__ == '__main__':
main()
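# Example invocation (the dataset path is illustrative):
#   python scatter_plot.py dataset_train.csv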
``` |
{
"source": "42piratas/shakespeare",
"score": 2
} |
#### File: shakespeare/modules/hentities.py
```python
import nltk
import hg
def hfind_entities():
# ne_chunk expects POS-tagged tokens, so tag the word list before chunking
tagged_words = nltk.pos_tag(hg.content_raw_words)
named_ent = nltk.ne_chunk(tagged_words)
named_ent.draw()
if __name__ == '__main__':
from hextractor import hextract
from hfilter import hfilter
hextract()
hfilter()
hfind_entities()
```
#### File: shakespeare/modules/hfilter.py
```python
import string
import hg
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords, wordnet
from collections import Counter
stop_words = set(stopwords.words("english"))
# syns = wordnet.synset
'''Filtered content means:
- Punctuation removed
- Tokenized by words
- Stopwords removed'''
def hfilter():
# RAW CONTENT ##############################
# Remove punctuation
content_raw_no_punctuation = hg.content_raw.translate(None, string.punctuation)
# Tokenize by words
hg.content_raw_words = word_tokenize(content_raw_no_punctuation)
# Counting words from raw content
hg.content_raw_words_len = len(hg.content_raw_words)
# Counting unique words from raw content
hg.content_raw_words_unique_len = len(set(hg.content_raw_words))
# Defining top words from raw content
content_raw_words_counter = Counter(hg.content_raw_words)
hg.content_raw_words_top = (content_raw_words_counter.most_common(hg.top_words_num))
# # Creating list of synonyms
# for w in hg.content_raw_words:
# syn = syns(w)
# hg.content_raw_words_syns.append(syn)
# FILTERED CONTENT ##############################
# Remove stop words // Filtered by words!
hg.content_filtered_words = [w for w in hg.content_raw_words if w not in stop_words]
# Counting words from filtered content
hg.content_filtered_words_len = len(hg.content_filtered_words)
# Counting unique words from raw content
hg.content_filtered_words_unique_len = len(set(hg.content_filtered_words))
# Defining top words from filtered content
content_filtered_words_counter = Counter(hg.content_filtered_words)
hg.content_filtered_words_tops = (content_filtered_words_counter.most_common(hg.top_words_num))
if __name__ == '__main__':
from hextractor import hextract
hextract()
hfilter()
``` |
{
"source": "42pre/ansible-role-omero-server",
"score": 2
} |
#### File: resources/tests/test_default.py
```python
import os
import pytest
import testinfra.utils.ansible_runner
from time import time
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# Ubuntu sudo doesn't set HOME so it tries to write to /root
ENV = 'OMERO_USERDIR=/home/data-importer/omero-{}'.format(time())
OMERO = '/opt/omero/server/OMERO.server/bin/omero'
OMERO_LOGIN = '-C -s localhost -u root -w omero'
def test_service_running_and_enabled(host):
assert host.service('omero-server').is_running
assert host.service('omero-server').is_enabled
def test_omero_root_login(host):
with host.sudo('data-importer'):
host.check_output('%s %s login %s' % (ENV, OMERO, OMERO_LOGIN))
@pytest.mark.parametrize("key,value", [
('omero.data.dir', '/OMERO'),
('omero.client.ui.tree.type_order',
'["screen", "plate", "project", "dataset"]'),
('omero.policy.binary_access', '-read,-write,-image,-plate'),
])
def test_omero_server_config(host, key, value):
with host.sudo('omero-server'):
cfg = host.check_output('%s %s config get %s' % (ENV, OMERO, key))
assert cfg == value
def test_omero_datadir(host):
d = host.file('/OMERO')
assert d.is_directory
assert d.user == 'omero-server'
assert d.group == 'root'
assert d.mode == 0o755
def test_omero_managedrepo(host):
d = host.file('/OMERO/ManagedRepository')
assert d.is_directory
assert d.user == 'omero-server'
assert d.group == 'importer'
assert d.mode == 0o2775
def test_inplace_import(host):
fake_file = '/data/import/test.fake'
with host.sudo('data-importer'):
outimport = host.check_output(
'%s %s %s import --skip=upgrade --transfer=ln_s %s' %
(ENV, OMERO, OMERO_LOGIN, fake_file))
imageid = int(outimport.split(':', 1)[1])
assert imageid
query = ('SELECT concat(ofile.path, ofile.name) '
'FROM FilesetEntry AS fse '
'JOIN fse.fileset AS fileset '
'JOIN fse.originalFile AS ofile '
'JOIN fileset.images AS image '
'WHERE image.id = %d' % imageid)
with host.sudo('data-importer'):
outhql = host.check_output(
'%s %s %s hql -q --style plain "%s"' %
(ENV, OMERO, OMERO_LOGIN, query))
f = host.file('/OMERO/ManagedRepository/%s' % outhql.split(',', 1)[1])
assert f.is_symlink
assert f.linked_to == fake_file
``` |
{
"source": "42six/PiClock-test",
"score": 2
} |
#### File: PiClock-test/Clock/PyQtPiClock.py
```python
import sys, os, platform, signal
import datetime, time, json
from pprint import pprint
import random
sys.dont_write_bytecode = True
from PyQt4 import QtGui, QtCore, QtNetwork
from PyQt4.QtGui import QPixmap, QMovie
from PyQt4.QtCore import Qt, QByteArray, QUrl, QFile, QIODevice, QString
from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest, QNetworkReply, QNetworkProxy
from subprocess import Popen
from GoogleMercatorProjection import LatLng, Point, getCorners
import re
import ApiKeys
#def main():
def tick():
global hourpixmap, minpixmap, secpixmap
global hourpixmap2, minpixmap2, secpixmap2
global lastmin
global clockrect
global datex, datex2, datey2
now = datetime.datetime.now()
angle = now.second * 6
ts = secpixmap.size()
secpixmap2 = secpixmap.transformed(
QtGui.QMatrix().scale(
float(clockrect.width())/ts.height(),
float(clockrect.height())/ts.height()
).rotate(angle),
Qt.SmoothTransformation
)
sechand.setPixmap(secpixmap2)
ts = secpixmap2.size()
sechand.setGeometry(
clockrect.center().x()-ts.width()/2,
clockrect.center().y()-ts.height()/2,
ts.width(),
ts.height()
)
if now.minute != lastmin:
lastmin = now.minute
angle = now.minute * 6
ts = minpixmap.size()
minpixmap2 = minpixmap.transformed(
QtGui.QMatrix().scale(
float(clockrect.width())/ts.height(),
float(clockrect.height())/ts.height()
).rotate(angle),
Qt.SmoothTransformation
)
minhand.setPixmap(minpixmap2)
ts = minpixmap2.size()
minhand.setGeometry(
clockrect.center().x()-ts.width()/2,
clockrect.center().y()-ts.height()/2,
ts.width(),
ts.height()
)
angle = ((now.hour % 12) + now.minute / 60.0) * 30.0
ts = hourpixmap.size()
hourpixmap2 = hourpixmap.transformed(
QtGui.QMatrix().scale(
float(clockrect.width())/ts.height(),
float(clockrect.height())/ts.height()
).rotate(angle),
Qt.SmoothTransformation
)
hourhand.setPixmap(hourpixmap2)
ts = hourpixmap2.size()
hourhand.setGeometry(
clockrect.center().x()-ts.width()/2,
clockrect.center().y()-ts.height()/2,
ts.width(),
ts.height()
)
# date
sup = 'th'
if (now.day == 1 or now.day == 21 or now.day == 31): sup = 'st'
if (now.day == 2 or now.day == 22): sup = 'nd'
if (now.day == 3 or now.day == 23): sup = 'rd'
ds = "{0:%A %B} {0.day}<sup>".format(now)+sup+"</sup> {0.year}".format(now)
datex.setText(ds)
datex2.setText(ds)
datey2.setText("{0:%I:%M %p}".format(now))
def tempfinished():
global tempreply, temp
if tempreply.error() != QNetworkReply.NoError: return
tempstr = str(tempreply.readAll())
tempdata = json.loads(tempstr)
if Config.metric:
s = 'Inside Temp '+ "%3.1f" % ((float(tempdata['temp'])-32.0)*5.0/9.0)
if tempdata['temps']:
if len(tempdata['temps']) > 1:
s = ''
for tk in tempdata['temps']:
s += ' ' + tk + ':' + "%3.1f" % ((float(tempdata['temps'][tk])-32.0)*5.0/9.0)
else:
s = 'Inside Temp '+tempdata['temp']
if tempdata['temps']:
if len(tempdata['temps']) > 1:
s = ''
for tk in tempdata['temps']:
s += ' ' + tk + ':' + tempdata['temps'][tk]
temp.setText(s)
def gettemp():
global tempreply
host = 'localhost'
if platform.uname()[1] == 'KW81': host = 'piclock.local' #this is here just for testing
r = QUrl('http://'+host+':48213/temp')
r = QNetworkRequest(r)
tempreply = manager.get(r)
tempreply.finished.connect(tempfinished)
def wxfinished():
global wxreply, wxdata
global wxicon, temper, wxdesc, press, humidity, wind, wind2, wdate, bottom, forecast
global wxicon2, temper2, wxdesc
wxstr = str(wxreply.readAll())
wxdata = json.loads(wxstr)
f = wxdata['current_observation']
iconurl = f['icon_url']
icp = ''
if (re.search('/nt_',iconurl)):
icp = 'n_';
wxiconpixmap = QtGui.QPixmap(Config.icons+"/"+icp+f['icon']+".png")
wxicon.setPixmap(wxiconpixmap.scaled(wxicon.width(),wxicon.height(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation))
wxicon2.setPixmap(wxiconpixmap.scaled(wxicon.width(),wxicon.height(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation))
wxdesc.setText(f['weather'])
wxdesc2.setText(f['weather'])
if Config.metric:
temper.setText(str(f['temp_c'])+u'°C')
temper2.setText(str(f['temp_c'])+u'°C')
press.setText("Pressure "+f['pressure_mb']+' '+f['pressure_trend'])
humidity.setText("Humidity "+f['relative_humidity'])
wind.setText('Wind '+f['wind_dir']+' '+str(f['wind_kph'])+' gusting '+str(f['wind_gust_kph']))
wind2.setText("Feels like "+str(f['feelslike_c']) )
wdate.setText("{0:%H:%M}".format(datetime.datetime.fromtimestamp(int(f['local_epoch'])))+
' Precip 1hr:'+f['precip_1hr_metric']+'mm Today:'+f['precip_today_metric']+'mm')
else:
temper.setText(str(f['temp_f'])+u'°F')
temper2.setText(str(f['temp_f'])+u'°F')
press.setText("Pressure "+f['pressure_in']+' '+f['pressure_trend'])
humidity.setText("Humidity "+f['relative_humidity'])
wind.setText('Wind '+f['wind_dir']+' '+str(f['wind_mph'])+' gusting '+str(f['wind_gust_mph']))
wind2.setText("Feels like "+str(f['feelslike_f']) )
wdate.setText("{0:%H:%M}".format(datetime.datetime.fromtimestamp(int(f['local_epoch'])))+
' Precip 1hr:'+f['precip_1hr_in']+'in Today:'+f['precip_today_in']+'in')
bottom.setText('Sun Rise:'+
wxdata['sun_phase']['sunrise']['hour']+':'+wxdata['sun_phase']['sunrise']['minute']+
' Set:'+
wxdata['sun_phase']['sunset']['hour']+':'+wxdata['sun_phase']['sunset']['minute']+
' Moon Phase: '+
wxdata['moon_phase']['phaseofMoon']
)
for i in range(0,3):
f = wxdata['hourly_forecast'][i*3+2]
fl = forecast[i]
iconurl = f['icon_url']
icp = ''
if (re.search('/nt_',iconurl)):
icp = 'n_';
icon = fl.findChild(QtGui.QLabel,"icon")
wxiconpixmap = QtGui.QPixmap(Config.icons+"/"+icp+f['icon']+".png")
icon.setPixmap(wxiconpixmap.scaled(icon.width(),icon.height(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation))
wx = fl.findChild(QtGui.QLabel,"wx")
wx.setText(f['condition'])
day = fl.findChild(QtGui.QLabel,"day")
day.setText(f['FCTTIME']['weekday_name']+' '+f['FCTTIME']['civil'])
wx2 = fl.findChild(QtGui.QLabel,"wx2")
s = '';
if float(f['pop']) > 0.0: s += f['pop'] + '% ';
if Config.metric:
if float(f['snow']['metric']) > 0.0:
s += ' Snow: '+f['snow']['metric']+'mm '
else:
if float(f['qpf']['metric']) > 0.0:
s += ' Rain: '+f['qpf']['metric']+'mm '
s += f['temp']['metric']+u'°C'
else:
if float(f['snow']['english']) > 0.0:
s += ' Snow: '+f['snow']['english']+'in '
else:
if float(f['qpf']['english']) > 0.0:
s += ' Rain: '+f['qpf']['english']+'in '
s += f['temp']['english']+u'°F'
wx2.setText(s)
for i in range(3,9):
f = wxdata['forecast']['simpleforecast']['forecastday'][i-3]
fl = forecast[i]
icon = fl.findChild(QtGui.QLabel,"icon")
wxiconpixmap = QtGui.QPixmap(Config.icons+"/"+f['icon']+".png")
icon.setPixmap(wxiconpixmap.scaled(icon.width(),icon.height(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation))
wx = fl.findChild(QtGui.QLabel,"wx")
wx.setText(f['conditions'])
day = fl.findChild(QtGui.QLabel,"day")
day.setText(f['date']['weekday'])
wx2 = fl.findChild(QtGui.QLabel,"wx2")
s = '';
if float(f['pop']) > 0.0: s += str(f['pop']) + '% ';
if Config.metric:
if float(f['snow_allday']['cm']) > 0.0:
s += ' Snow: '+str(f['snow_allday']['cm'])+'cm '
else:
if float(f['qpf_allday']['mm']) > 0.0:
s += ' Rain: '+str(f['qpf_allday']['mm'])+'mm '
s += str(f['high']['celsius'])+'/'+str(f['low']['celsius'])+u'°C'
else:
if float(f['snow_allday']['in']) > 0.0:
s += ' Snow: '+str(f['snow_allday']['in'])+'in '
else:
if float(f['qpf_allday']['in']) > 0.0:
s += ' Rain: '+str(f['qpf_allday']['in'])+'in '
s += str(f['high']['fahrenheit'])+'/'+str(f['low']['fahrenheit'])+u'°F'
wx2.setText(s)
def getwx():
global wxurl
global wxreply
print "getting current and forecast:"+time.ctime()
wxurl = Config.wuprefix + ApiKeys.wuapi + '/conditions/astronomy/hourly10day/forecast10day/q/'
wxurl += str(Config.wulocation.lat)+','+str(Config.wulocation.lng)+'.json'
wxurl += '?r=' + str(random.random())
r = QUrl(wxurl)
r = QNetworkRequest(r)
wxreply = manager.get(r)
wxreply.finished.connect(wxfinished)
def getallwx():
getwx()
def qtstart():
global ctimer, wxtimer, temptimer
global manager
global objradar1
global objradar2
global objradar3
global objradar4
getallwx()
gettemp()
objradar1.start(Config.radar_refresh*60)
objradar1.wxstart()
objradar2.start(Config.radar_refresh*60)
objradar2.wxstart()
objradar3.start(Config.radar_refresh*60)
objradar4.start(Config.radar_refresh*60)
ctimer = QtCore.QTimer()
ctimer.timeout.connect(tick)
ctimer.start(1000)
wxtimer = QtCore.QTimer()
wxtimer.timeout.connect(getallwx)
wxtimer.start(1000*Config.weather_refresh*60+random.uniform(1000,10000))
temptimer = QtCore.QTimer()
temptimer.timeout.connect(gettemp)
temptimer.start(1000*10*60+random.uniform(1000,10000))
class Radar(QtGui.QLabel):
def __init__(self, parent, radar, rect, myname):
global xscale, yscale
self.myname = myname
self.rect = rect
self.baseurl = self.mapurl(radar, rect, False)
#print "google map base url: "+self.baseurl
self.mkurl = self.mapurl(radar, rect, True)
self.wxurl = self.radarurl(radar, rect)
QtGui.QLabel.__init__(self, parent)
self.interval = Config.radar_refresh*60
self.lastwx = 0
self.setObjectName("radar")
self.setGeometry(rect)
self.setStyleSheet("#radar { background-color: grey; }")
self.setAlignment(Qt.AlignCenter)
self.wwx = QtGui.QLabel(self)
self.wwx.setObjectName("wx")
self.wwx.setStyleSheet("#wx { background-color: transparent; }")
self.wwx.setGeometry(0, 0, rect.width(), rect.height())
self.wmk = QtGui.QLabel(self)
self.wmk.setObjectName("mk")
self.wmk.setStyleSheet("#mk { background-color: transparent; }")
self.wmk.setGeometry(0, 0, rect.width(), rect.height())
self.wxmovie = QMovie()
def mapurl(self, radar,rect,markersonly):
#'https://maps.googleapis.com/maps/api/staticmap?maptype=hybrid¢er='+rcenter.lat+','+rcenter.lng+'&zoom='+rzoom+'&size=300x275'+markersr;
urlp = [];
if len(ApiKeys.googleapi) > 0: urlp.append('key='+ApiKeys.googleapi)
urlp.append('center='+str(radar['center'].lat)+','+str(radar['center'].lng))
zoom = radar['zoom']
rsize = rect.size()
if rsize.width() > 640 or rsize.height() > 640:
rsize = QtCore.QSize(rsize.width()/2,rsize.height()/2)
zoom -= 1
urlp.append('zoom='+str(zoom))
urlp.append('size='+str(rsize.width())+'x'+str(rsize.height()))
if markersonly:
urlp.append('style=visibility:off')
else:
urlp.append('maptype=hybrid')
for marker in radar['markers']:
marks = []
for opts in marker:
if opts != 'location':
marks.append(opts + ':' + marker[opts])
marks.append(str(marker['location'].lat)+','+str(marker['location'].lng))
urlp.append('markers='+'|'.join(marks))
return 'http://maps.googleapis.com/maps/api/staticmap?'+'&'.join(urlp)
def radarurl(self,radar,rect):
#wuprefix = 'http://api.wunderground.com/api/';
#wuprefix+wuapi+'/animatedradar/image.gif?maxlat='+rNE.lat+'&maxlon='+rNE.lng+'&minlat='+rSW.lat+'&minlon='+rSW.lng+wuoptionsr;
#wuoptionsr = '&width=300&height=275&newmaps=0&reproj.automerc=1&num=5&delay=25&timelabel=1&timelabel.y=10&rainsnow=1&smooth=1';
rr = getCorners(radar['center'],radar['zoom'],rect.width(),rect.height())
return (Config.wuprefix+ApiKeys.wuapi+'/animatedradar/image.gif'+
'?maxlat='+str(rr['N'])+
'&maxlon='+str(rr['E'])+
'&minlat='+str(rr['S'])+
'&minlon='+str(rr['W'])+
'&width='+str(rect.width())+
'&height='+str(rect.height())+
'&newmaps=0&reproj.automerc=1&num=5&delay=25&timelabel=1&timelabel.y=10&rainsnow=1&smooth=1&radar_bitmap=1&xnoclutter=1&xnoclutter_mask=1&cors=1'
)
def basefinished(self):
if self.basereply.error() != QNetworkReply.NoError: return
self.basepixmap = QPixmap()
self.basepixmap.loadFromData(self.basereply.readAll())
if self.basepixmap.size() != self.rect.size():
self.basepixmap = self.basepixmap.scaled(self.rect.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.setPixmap(self.basepixmap)
def mkfinished(self):
if self.mkreply.error() != QNetworkReply.NoError: return
self.mkpixmap = QPixmap()
self.mkpixmap.loadFromData(self.mkreply.readAll())
if self.mkpixmap.size() != self.rect.size():
self.mkpixmap = self.mkpixmap.scaled(self.rect.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
self.wmk.setPixmap(self.mkpixmap)
def wxfinished(self):
if self.wxreply.error() != QNetworkReply.NoError:
print "get radar error "+self.myname+":"+str(self.wxreply.error())
self.lastwx = 0
return
print "radar map received:"+self.myname+":"+time.ctime()
self.wxmovie.stop()
self.wxdata = QtCore.QByteArray(self.wxreply.readAll())
self.wxbuff = QtCore.QBuffer(self.wxdata)
self.wxbuff.open(QtCore.QIODevice.ReadOnly)
mov = QMovie(self.wxbuff, 'GIF')
print "radar map frame count:"+self.myname+":"+str(mov.frameCount())
if mov.frameCount() > 2:
self.lastwx = time.time()
else:
            # radar image retrieval failed
self.lastwx = 0
# retry in 5 seconds
QtCore.QTimer.singleShot(5*1000, self.getwx)
return
self.wxmovie = mov
self.wwx.setMovie( self.wxmovie)
if self.parent().isVisible():
self.wxmovie.start()
def getwx(self):
global lastapiget
i = 0.1
# making sure there is at least 2 seconds between radar api calls
lastapiget += 2
if time.time() > lastapiget: lastapiget = time.time()
else: i = lastapiget - time.time()
print "get radar api call spacing oneshot get i="+str(i)
QtCore.QTimer.singleShot(i*1000, self.getwx2)
def getwx2(self):
global manager
try:
if self.wxreply.isRunning(): return
except Exception:
pass
print "getting radar map "+self.myname+":"+time.ctime()
self.wxreq = QNetworkRequest(QUrl(self.wxurl+'&rrrand='+str(time.time())))
self.wxreply = manager.get(self.wxreq)
QtCore.QObject.connect(self.wxreply, QtCore.SIGNAL("finished()"),self.wxfinished)
def getbase(self):
global manager
self.basereq = QNetworkRequest(QUrl(self.baseurl))
self.basereply = manager.get(self.basereq)
QtCore.QObject.connect(self.basereply,QtCore.SIGNAL("finished()"),self.basefinished)
def getmk(self):
global manager
self.mkreq = QNetworkRequest(QUrl(self.mkurl))
self.mkreply = manager.get(self.mkreq)
QtCore.QObject.connect(self.mkreply,QtCore.SIGNAL("finished()"),self.mkfinished)
def start(self, interval=0):
if interval > 0: self.interval = interval
self.getbase()
self.getmk()
self.timer = QtCore.QTimer()
QtCore.QObject.connect(self.timer,QtCore.SIGNAL("timeout()"), self.getwx)
def wxstart(self):
print "wxstart for "+self.myname
if (self.lastwx == 0 or (self.lastwx+self.interval) < time.time()): self.getwx()
# random 1 to 10 seconds added to refresh interval to spread the queries over time
i = (self.interval+random.uniform(1,10))*1000
self.timer.start(i)
self.wxmovie.start()
QtCore.QTimer.singleShot(1000, self.wxmovie.start)
def wxstop(self):
print "wxstop for "+self.myname
self.timer.stop()
self.wxmovie.stop()
def stop(self):
try:
self.timer.stop()
self.timer = None
if self.wxmovie: self.wxmovie.stop()
except Exception:
pass
def realquit():
QtGui.QApplication.exit(0)
def myquit(a=0,b=0):
global objradar1, objradar2,objradar3,objradar4
    global ctimer, wxtimer, temptimer
objradar1.stop()
objradar2.stop()
objradar3.stop()
objradar4.stop()
ctimer.stop()
wxtimer.stop()
temptimer.stop()
QtCore.QTimer.singleShot(30, realquit)
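# Start or stop the radar animations belonging to a frame when it is shown or hidden.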
def fixupframe(frame,onoff):
for child in frame.children():
if isinstance(child,Radar):
if onoff:
#print "calling wxstart on radar on ",frame.objectName()
child.wxstart()
else:
#print "calling wxstop on radar on ",frame.objectName()
child.wxstop()
def nextframe(plusminus):
global frames, framep
frames[framep].setVisible(False)
fixupframe(frames[framep],False)
framep += plusminus
if framep >= len(frames): framep = 0
if framep < 0: framep = len(frames) - 1
frames[framep].setVisible(True)
fixupframe(frames[framep],True)
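# Main window: F2 toggles the NOAA audio stream, F4 quits, space/left/right switch frames.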
class myMain(QtGui.QWidget):
def keyPressEvent(self, event):
global weatherplayer, lastkeytime
if type(event) == QtGui.QKeyEvent:
# print event.key(), format(event.key(), '08x')
if event.key() == Qt.Key_F4: myquit()
if event.key() == Qt.Key_F2:
if time.time() > lastkeytime:
if weatherplayer == None:
weatherplayer = Popen(["mpg123","-q", Config.noaastream ])
else:
weatherplayer.kill()
weatherplayer = None
lastkeytime = time.time() + 2
if event.key() == Qt.Key_Space:
nextframe(1)
if event.key() == Qt.Key_Left:
nextframe(-1)
if event.key() == Qt.Key_Right:
nextframe(1)
configname = 'Config'
if len(sys.argv) > 1:
configname = sys.argv[1]
if not os.path.isfile(configname+".py"):
print "Config file not found %s" % configname+".py"
exit(1)
Config = __import__(configname)
# define default values for new/optional config variables.
try: Config.metric
except AttributeError: Config.metric = 0
try: Config.weather_refresh
except AttributeError: Config.weather_refresh = 30 #minutes
try: Config.radar_refresh
except AttributeError: Config.radar_refresh = 10 #minutes
#
lastmin = -1
weatherplayer = None
lastkeytime = 0;
lastapiget = time.time()
app = QtGui.QApplication(sys.argv)
desktop = app.desktop()
rec = desktop.screenGeometry()
height = rec.height()
width = rec.width()
signal.signal(signal.SIGINT, myquit)
w = myMain()
w.setWindowTitle(os.path.basename(__file__))
w.setStyleSheet("QWidget { background-color: black;}")
#fullbgpixmap = QtGui.QPixmap(Config.background)
#fullbgrect = fullbgpixmap.rect()
#xscale = float(width)/fullbgpixmap.width()
#yscale = float(height)/fullbgpixmap.height()
xscale = float(width)/1440.0
yscale = float(height)/900.0
frames = []
framep = 0
frame1 = QtGui.QFrame(w)
frame1.setObjectName("frame1")
frame1.setGeometry(0,0,width,height)
frame1.setStyleSheet("#frame1 { background-color: black; border-image: url("+Config.background+") 0 0 0 0 stretch stretch;}")
frames.append(frame1)
frame2 = QtGui.QFrame(w)
frame2.setObjectName("frame2")
frame2.setGeometry(0,0,width,height)
frame2.setStyleSheet("#frame2 { background-color: blue; border-image: url("+Config.background+") 0 0 0 0 stretch stretch;}")
frame2.setVisible(False)
frames.append(frame2)
#frame3 = QtGui.QFrame(w)
#frame3.setObjectName("frame3")
#frame3.setGeometry(0,0,width,height)
#frame3.setStyleSheet("#frame3 { background-color: blue; border-image: url("+Config.background+") 0 0 0 0 stretch stretch;}")
#frame3.setVisible(False)
#frames.append(frame3)
squares1 = QtGui.QFrame(frame1)
squares1.setObjectName("squares1")
squares1.setGeometry(0,height-yscale*600,xscale*340,yscale*600)
squares1.setStyleSheet("#squares1 { background-color: transparent; border-image: url("+Config.squares1+") 0 0 0 0 stretch stretch;}")
squares2 = QtGui.QFrame(frame1)
squares2.setObjectName("squares2")
squares2.setGeometry(width-xscale*340,0,xscale*340,yscale*900)
squares2.setStyleSheet("#squares2 { background-color: transparent; border-image: url("+Config.squares2+") 0 0 0 0 stretch stretch;}")
clockface = QtGui.QFrame(frame1)
clockface.setObjectName("clockface")
clockrect = QtCore.QRect(width/2-height*.4, height*.45-height*.4,height * .8, height * .8)
clockface.setGeometry(clockrect)
clockface.setStyleSheet("#clockface { background-color: transparent; border-image: url("+Config.clockface+") 0 0 0 0 stretch stretch;}")
hourhand = QtGui.QLabel(frame1)
hourhand.setObjectName("hourhand")
hourhand.setStyleSheet("#hourhand { background-color: transparent; }")
minhand = QtGui.QLabel(frame1)
minhand.setObjectName("minhand")
minhand.setStyleSheet("#minhand { background-color: transparent; }")
sechand = QtGui.QLabel(frame1)
sechand.setObjectName("sechand")
sechand.setStyleSheet("#sechand { background-color: transparent; }")
hourpixmap = QtGui.QPixmap(Config.hourhand)
hourpixmap2 = QtGui.QPixmap(Config.hourhand)
minpixmap = QtGui.QPixmap(Config.minhand)
minpixmap2 = QtGui.QPixmap(Config.minhand)
secpixmap = QtGui.QPixmap(Config.sechand)
secpixmap2 = QtGui.QPixmap(Config.sechand)
radar1rect = QtCore.QRect(3*xscale, 344*yscale, 300*xscale, 275*yscale)
objradar1 = Radar(frame1, Config.radar1, radar1rect, "radar1")
radar2rect = QtCore.QRect(3*xscale, 622*yscale, 300*xscale, 275*yscale)
objradar2 = Radar(frame1, Config.radar2, radar2rect, "radar2")
radar3rect = QtCore.QRect(13*xscale, 50*yscale, 700*xscale, 700*yscale)
objradar3 = Radar(frame2, Config.radar3, radar3rect, "radar3")
radar4rect = QtCore.QRect(726*xscale, 50*yscale, 700*xscale, 700*yscale)
objradar4 = Radar(frame2, Config.radar4, radar4rect, "radar4")
datex = QtGui.QLabel(frame1)
datex.setObjectName("datex")
datex.setStyleSheet("#datex { font-family:sans-serif; color: "+Config.textcolor+"; background-color: transparent; font-size: "+str(int(50*xscale))+"px }")
datex.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
datex.setGeometry(0,0,width,100)
datex2 = QtGui.QLabel(frame2)
datex2.setObjectName("datex2")
datex2.setStyleSheet("#datex2 { font-family:sans-serif; color: "+Config.textcolor+"; background-color: transparent; font-size: "+str(int(50*xscale))+"px }")
datex2.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
datex2.setGeometry(800*xscale,780*yscale,640*xscale,100)
datey2 = QtGui.QLabel(frame2)
datey2.setObjectName("datey2")
datey2.setStyleSheet("#datey2 { font-family:sans-serif; color: "+Config.textcolor+"; background-color: transparent; font-size: "+str(int(50*xscale))+"px }")
datey2.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
datey2.setGeometry(800*xscale,840*yscale,640*xscale,100)
ypos = -25
wxicon = QtGui.QLabel(frame1)
wxicon.setObjectName("wxicon")
wxicon.setStyleSheet("#wxicon { background-color: transparent; }")
wxicon.setGeometry(75*xscale,ypos*yscale,150*xscale,150*yscale)
wxicon2 = QtGui.QLabel(frame2)
wxicon2.setObjectName("wxicon2")
wxicon2.setStyleSheet("#wxicon2 { background-color: transparent; }")
wxicon2.setGeometry(0*xscale,750*yscale,150*xscale,150*yscale)
ypos += 130
wxdesc = QtGui.QLabel(frame1)
wxdesc.setObjectName("wxdesc")
wxdesc.setStyleSheet("#wxdesc { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(30*xscale))+"px }")
wxdesc.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
wxdesc.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
wxdesc2 = QtGui.QLabel(frame2)
wxdesc2.setObjectName("wxdesc2")
wxdesc2.setStyleSheet("#wxdesc2 { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(50*xscale))+"px }")
wxdesc2.setAlignment(Qt.AlignLeft | Qt.AlignTop);
wxdesc2.setGeometry(400*xscale,800*yscale,400*xscale,100)
ypos += 25
temper = QtGui.QLabel(frame1)
temper.setObjectName("temper")
temper.setStyleSheet("#temper { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(70*xscale))+"px }")
temper.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
temper.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
temper2 = QtGui.QLabel(frame2)
temper2.setObjectName("temper2")
temper2.setStyleSheet("#temper2 { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(70*xscale))+"px }")
temper2.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
temper2.setGeometry(125*xscale,780*yscale,300*xscale,100)
ypos += 80
press = QtGui.QLabel(frame1)
press.setObjectName("press")
press.setStyleSheet("#press { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(25*xscale))+"px }")
press.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
press.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
ypos += 30
humidity = QtGui.QLabel(frame1)
humidity.setObjectName("humidity")
humidity.setStyleSheet("#humidity { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(25*xscale))+"px }")
humidity.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
humidity.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
ypos += 30
wind = QtGui.QLabel(frame1)
wind.setObjectName("wind")
wind.setStyleSheet("#wind { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(20*xscale))+"px }")
wind.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
wind.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
ypos += 20
wind2 = QtGui.QLabel(frame1)
wind2.setObjectName("wind2")
wind2.setStyleSheet("#wind2 { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(20*xscale))+"px }")
wind2.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
wind2.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
ypos += 20
wdate = QtGui.QLabel(frame1)
wdate.setObjectName("wdate")
wdate.setStyleSheet("#wdate { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(15*xscale))+"px }")
wdate.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
wdate.setGeometry(3*xscale,ypos*yscale,300*xscale,100)
bottom = QtGui.QLabel(frame1)
bottom.setObjectName("bottom")
bottom.setStyleSheet("#bottom { font-family:sans-serif; color: "+Config.textcolor+"; background-color: transparent; font-size: "+str(int(30*xscale))+"px }")
bottom.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
bottom.setGeometry(0,height-50,width,50)
temp = QtGui.QLabel(frame1)
temp.setObjectName("temp")
temp.setStyleSheet("#temp { font-family:sans-serif; color: "+Config.textcolor+"; background-color: transparent; font-size: "+str(int(30*xscale))+"px }")
temp.setAlignment(Qt.AlignHCenter | Qt.AlignTop);
temp.setGeometry(0,height-100,width,50)
forecast = []
for i in range(0,9):
lab = QtGui.QLabel(frame1)
lab.setObjectName("forecast"+str(i))
lab.setStyleSheet("QWidget { background-color: transparent; color: "+Config.textcolor+"; font-size: "+str(int(20*xscale))+"px }")
lab.setGeometry(1137*xscale,i*100*yscale,300*xscale,100*yscale)
icon = QtGui.QLabel(lab)
icon.setStyleSheet("#icon { background-color: transparent; }")
icon.setGeometry(0,0,100*xscale,100*yscale)
icon.setObjectName("icon")
wx = QtGui.QLabel(lab)
wx.setStyleSheet("#wx { background-color: transparent; }")
wx.setGeometry(100*xscale,10*yscale,200*xscale,20*yscale)
wx.setObjectName("wx")
wx2 = QtGui.QLabel(lab)
wx2.setStyleSheet("#wx2 { background-color: transparent; }")
wx2.setGeometry(100*xscale,30*yscale,200*xscale,100*yscale)
wx2.setAlignment(Qt.AlignLeft | Qt.AlignTop);
wx2.setWordWrap(True)
wx2.setObjectName("wx2")
day = QtGui.QLabel(lab)
day.setStyleSheet("#day { background-color: transparent; }")
day.setGeometry(100*xscale,80*yscale,200*xscale,20*yscale)
day.setAlignment(Qt.AlignRight | Qt.AlignBottom);
day.setObjectName("day")
forecast.append(lab)
manager = QtNetwork.QNetworkAccessManager()
#proxy = QNetworkProxy()
#proxy.setType(QNetworkProxy.HttpProxy)
#proxy.setHostName("localhost")
#proxy.setPort(8888)
#QNetworkProxy.setApplicationProxy(proxy)
stimer = QtCore.QTimer()
stimer.singleShot(10, qtstart)
#print radarurl(Config.radar1,radar1rect)
w.show()
w.showFullScreen()
sys.exit(app.exec_())
``` |
{
"source": "42sol-eu/mdpre",
"score": 2
} |
#### File: mdpre/tests/test_writer.py
```python
import sys
import mdpre # TODO: fix this later
def test_stdout_g_output():
g_output_stdout = mdpre.create_output_file()
g_output_stdout.writeMarkdown('| sys.stdout works\n')
mdpre.exit_script(g_output_stdout, 0)
def test_file_g_output():
g_output_file = mdpre.create_output_file('test.md')
g_output_file.writeMarkdown('| file output works\n')
mdpre.exit_script(g_output_file, 0)
if __name__ == "__main__":
test_stdout_g_output()
test_file_g_output()
``` |
{
"source": "42sol/termbase_de_shared",
"score": 2
} |
#### File: scripts/Python/core_setup.py
```python
from pathlib import Path #!md|[docs](https://docs.python.org/3/library/pathlib.html#Path)
from toml import * #!md|[docs](https://github.com/uiri/toml)
from logging import INFO,DEBUG #!md|[docs](https://docs.python.org/3/library/logging.html?highlight=logging#module-logging)
from logging import basicConfig as config_logging
from logging import getLogger as Logger
import chevron
#OLD #!md|[docs](https://github.com/defunkt/pystache)
config_logging(level=INFO, filename='tembase_split.log',
format='%(asctime)s;%(name)s;%(levelname)s;%(message)s;[file;line]',
datefmt='%Y-%m-%d;CW%W;%H:%M:%S')
app = Logger('app.process')
test = Logger('app.test')
debug = False
test.info(f'"\nSTART\n"')
app.info('started')
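# Parameters: loads project settings from a TOML config file and exposes selected
# entries (language, project_path, termbase_path) as attributes.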
class Parameters:
    config_file_path = 'project_example.toml'
def __init__(self):
app.info('Parameters created')
self.load(x_local=True)
def load(self, x_config_file_path='', x_local=False):
"""
load parameters from config
"""
if x_config_file_path == '':
x_config_file_path = Parameters.config_file_path
script_path = Path(__file__).parent
test.debug(script_path)
config_file_path = Path(x_config_file_path)
if x_local:
config_file_path = script_path / x_config_file_path
self.data = {}
if config_file_path.exists():
with open(config_file_path, 'r') as toml_file:
toml_string = toml_file.read()
toml_data = loads(toml_string)
if debug: print(toml_data)
self.data = toml_data
if self.data != {}:
            app.info('Parameters loaded successfully')
active_section = 'setup'
if active_section in self.data.keys():
if 'language' in self.data[active_section].keys():
setattr(self, 'language', self.data[active_section]['language'])
if 'project_path' in self.data[active_section].keys():
setattr(self, 'project_path', self.data[active_section]['project_path'])
active_section= 'files'
if active_section in self.data.keys():
if 'termbase' in self.data[active_section].keys():
setattr(self, 'termbase_path', chevron.render(self.data[active_section]['termbase'], {'project_path': self.project_path,'language':self.language}))
else:
app.error('NO Parameters found!')
``` |
{
"source": "42tak/Flood-warning-grp-85",
"score": 3
} |
#### File: Flood-warning-grp-85/floodsystem/geo.py
```python
from .utils import sorted_by_key # noqa
from haversine import haversine, Unit
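# Return the stations whose coordinates lie within r kilometres (haversine's default unit) of centre.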
def stations_within_radius(stations, centre, r):
    station_radius_list = []
    for station in stations:
        if haversine(centre, station.coord) <= r:
            station_radius_list.append(station)
    return station_radius_list
``` |
{
"source": "42tm/pylnagb",
"score": 3
} |
#### File: pylnagb/assets/coordsys.py
```python
import undefinedterms
class CoordSys:
"""
Coordinate system prototype.
"""
def __init__(self):
self.ORIGIN = undefinedterms.Point("O", 0, 0)
class Cartesian(CoordSys):
    def __repr__(self):
return "2-dimensional Cartesian coordinates system"
class Cartesian_3(CoordSys):
    def __repr__(self):
return "3-dimensional Cartesian coordinates system"
class Polar(CoordSys):
    def __repr__(self):
return "Polar coordinates system"
class PhySpherical(CoordSys):
    def __repr__(self):
return "Spherical coordinates system (in Physics)"
class MathSpherical(CoordSys):
    def __repr__(self):
return "Spherical coordinates system (in Mathematics)"
```
#### File: pylnagb/sys/linagb_error.py
```python
class DimensionError(Exception):
def __init__(self, message="Dimension mismatch"):
super(DimensionError, self).__init__(message)
``` |
{
"source": "42triangles/plovary",
"score": 3
} |
#### File: examples/ezkana/ezkana.py
```python
from typing import *
from plovary import *
# "English Single Chord DICTionary"
ESCDict = Dictionary[Chord[EnglishSystem], str]
shared = system.parse("-LGDZ")
# `only` isn't needed here, but this way `mypy` checks that this
# is really just a single correct key:
add_y = system.chord("-F")
submit = system.chord("-R")
add_tu = system.chord("-P")
add_n = system.chord("-B")
# Everything is done in combinations of two
# * Space for a consonant means no consonant
# * "l" as a consonant means small y+vowel if it exists,
# otherwise it's a small vowel
# * "xu" is a small "tu"
# * "nn" is an n without a vowel
# * "--" is the katakana elongation thingy
small_tu = Dictionary({add_tu: "xu"})
n = Dictionary({add_n: "nn"})
# This is only available in Katakana mode, so the asterisk is
# there, even in IME mode (where Katakana mode only consists of
# this, because I have yet to figure out how to give `ibus`
# katakana explicitly)
katakana_elongate = system.parsed_single_dict({"AOEU": "--"})
normal_consonants = Dictionary(
(fingertyping_lowercase_no_asterisk.keys_for(k)[0], k)
for k in [
"k", "g",
"s", "z",
"t", "d",
"n",
"h", "b", "p",
"m",
"y",
"r",
"w",
"l"
]
)
no_consonant = Dictionary({system.empty_chord: " "})
y_consonants = normal_consonants.map(keys=add(add_y), values=suffix("il"))
special_consonants = system.parsed_single_dict({
"SH": "si", # sh
"KH": "ti", # ch
"TP": "hu", # f
"SKWR": "zi", # j
})
vowels = Dictionary(
(fingertyping_lowercase_no_asterisk.keys_for(k)[0], k)
for k in ["a", "i", "u", "e", "o"]
) + system.parsed_single_dict({
"AE": "a a",
"AOE": "i i",
"AOU": "u u",
"AEU": "e i",
"OU": "o u",
})
simple_consonants = normal_consonants + no_consonant + y_consonants
simple_combinations = simple_consonants * vowels
special_combinations = special_consonants.combinations(
vowels,
values=lambda cv, v: (
cv + v[1:]
if cv.endswith(v[0])
else cv + "l" + v
),
)
combinations = simple_combinations + special_combinations
extended_combinations = (
(small_tu.with_empty_chord() * combinations + small_tu) *
n.with_empty_chord() +
n
)
katakana_combinations = extended_combinations + katakana_elongate
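# Rewrite every two-character romaji symbol in the dictionary's outputs through symbol_mapping.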
def translate(
dictionary: ESCDict,
symbol_mapping: Callable[[str], str]
) -> ESCDict:
def value_mapping(output: str) -> str:
assert len(output) % 2 == 0
return "".join(
symbol_mapping(output[i:i+2])
for i in range(0, len(output), 2)
)
return dictionary.map(values=value_mapping)
def handle_ime() -> ESCDict:
def translate_symbol(symbol: str) -> str:
symbol = (
"ltu"
if symbol == "xu"
else (
"-"
if symbol == "--"
else (
"ly" + symbol[1]
if symbol in ["la", "lu", "lo"]
else symbol.replace(" ", "")
)
)
)
if len(symbol) == 1:
return symbol
else:
return (
symbol[0] +
"{^}{#" + " ".join(symbol[1:]) + "}{^}"
)
# We don't put anything on the asterisk except the
# elongating thingy for katakana
return translate(
extended_combinations + katakana_elongate.map(keys=add("*")),
translate_symbol,
) * system.toggle(submit, "a{^}{#Backspace}{^ ^}")
def handle_proper() -> ESCDict:
vowel_order = {
"a": 0,
"i": 1,
"u": 2,
"e": 3,
"o": 4,
}
hiragana = {
" ": "あいうえお",
"k": "かきくけこ",
"g": "がぎぐげご",
"s": "さしすせそ",
"z": "ざじずぜぞ",
"t": "たちつてと",
"d": "だぢづでど",
"n": "なにぬねの",
"h": "はひふへほ",
"b": "ばびぶべぼ",
"p": "ぱぴぷぺぽ",
"m": "まみむめも",
"y": "や ゆ よ",
"r": "らりるれろ",
"w": "わゐ ゑを",
"l": "ゃぃゅぇょ",
"nn": "ん",
"xu": "っ",
}
katakana = {
" ": "アイウエオ",
"k": "カキクケコ",
"g": "ガギグゲゴ",
"s": "サシスセソ",
"z": "ザジズゼゾ",
"t": "タチツテト",
"d": "ダヂヅデド",
"n": "ナニヌネノ",
"h": "ハヒフヘホ",
"b": "バビブベボ",
"p": "パピプペポ",
"m": "マミムメモ",
"y": "ヤ ユ ヨ",
"r": "ラリルレロ",
"w": "ワヰ ヱヲ",
"l": "ャィュェョ",
"nn": "ン",
"xu": "ッ",
}
def translation_for(
table: Dict[str, str]
) -> Callable[[str], str]:
def translate_symbol(symbol: str) -> str:
if symbol == "--":
return "ー"
elif symbol in table:
return table[symbol]
else:
return table[symbol[0]][vowel_order[symbol[1]]]
return translate_symbol
return (
translate(extended_combinations, translation_for(hiragana)) +
translate(
extended_combinations + katakana_elongate,
translation_for(katakana),
).map(keys=add("*"))
) * system.toggle(submit, "") # Make submit do nothing here
def finalize(dictionary: ESCDict) -> ESCDict:
return dictionary.map(
keys=add(shared),
values=lambda x: "{^}" + x + "{^}",
)
final_ime = finalize(handle_ime())
final_proper = finalize(handle_proper())
``` |